max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
manila/share/drivers/inspur/instorage/cli_helper.py | gouthampacha/manila | 159 | 12746364 |
# Copyright 2019 Inspur Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
CLI helpers for Inspur InStorage
"""
import paramiko
import re
import six
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_log import log
from oslo_utils import excutils
from manila import exception
from manila.i18n import _
from manila import utils as manila_utils
LOG = log.getLogger(__name__)
class SSHRunner(object):
"""SSH runner is used to run ssh command on inspur instorage system."""
def __init__(self, host, port, login, password, privatekey=None):
self.host = host
self.port = port
self.login = login
self.password = password
self.privatekey = privatekey
self.ssh_conn_timeout = 60
self.ssh_min_pool_size = 1
self.ssh_max_pool_size = 10
self.sshpool = None
def __call__(self, cmd_list, check_exit_code=True, attempts=1):
"""SSH tool"""
manila_utils.check_ssh_injection(cmd_list)
command = ' '.join(cmd_list)
if not self.sshpool:
try:
self.sshpool = manila_utils.SSHPool(
self.host,
self.port,
self.ssh_conn_timeout,
self.login,
password=self.password,
privatekey=self.privatekey,
min_size=self.ssh_min_pool_size,
max_size=self.ssh_max_pool_size
)
except paramiko.SSHException:
LOG.error("Unable to create SSHPool")
raise
try:
return self._ssh_execute(self.sshpool, command,
check_exit_code, attempts)
except Exception:
LOG.error("Error running SSH command: %s", command)
raise
def _ssh_execute(self, sshpool, command,
check_exit_code=True, attempts=1):
try:
with sshpool.item() as ssh:
last_exception = None
while attempts > 0:
attempts -= 1
try:
return processutils.ssh_execute(
ssh,
command,
check_exit_code=check_exit_code)
except Exception as e:
LOG.exception('Error has occurred')
last_exception = e
greenthread.sleep(1)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error running SSH command: %s", command)
class CLIParser(object):
"""Parse MCS CLI output and generate iterable."""
def __init__(self, raw, ssh_cmd=None, delim='!', with_header=True):
super(CLIParser, self).__init__()
if ssh_cmd:
self.ssh_cmd = ' '.join(ssh_cmd)
else:
self.ssh_cmd = 'None'
self.raw = raw
self.delim = delim
self.with_header = with_header
self.result = self._parse()
def __getitem__(self, key):
try:
return self.result[key]
except KeyError:
msg = (_('Did not find the expected key %(key)s in %(fun)s: '
'%(raw)s.') % {'key': key, 'fun': self.ssh_cmd,
'raw': self.raw})
raise exception.ShareBackendException(msg=msg)
def __iter__(self):
for a in self.result:
yield a
def __len__(self):
return len(self.result)
def _parse(self):
def get_reader(content, delim):
for line in content.lstrip().splitlines():
line = line.strip()
if line:
yield line.split(delim)
else:
yield []
if isinstance(self.raw, six.string_types):
stdout, stderr = self.raw, ''
else:
stdout, stderr = self.raw
reader = get_reader(stdout, self.delim)
result = []
if self.with_header:
hds = tuple()
for row in reader:
hds = row
break
for row in reader:
cur = dict()
if len(hds) != len(row):
msg = (_('Unexpected CLI response: header/row mismatch. '
'header: %(header)s, row: %(row)s.')
% {'header': hds,
'row': row})
raise exception.ShareBackendException(msg=msg)
for k, v in zip(hds, row):
CLIParser.append_dict(cur, k, v)
result.append(cur)
else:
cur = dict()
for row in reader:
if row:
CLIParser.append_dict(cur, row[0], ' '.join(row[1:]))
elif cur: # start new section
result.append(cur)
cur = dict()
if cur:
result.append(cur)
return result
@staticmethod
def append_dict(dict_, key, value):
key, value = key.strip(), value.strip()
obj = dict_.get(key, None)
if obj is None:
dict_[key] = value
elif isinstance(obj, list):
obj.append(value)
dict_[key] = obj
else:
dict_[key] = [obj, value]
return dict_
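# Illustrative sketch of what CLIParser produces (the values below are made up,
# not real MCS output). With the default delimiter '!' and with_header=True,
# a response such as:
#
#     id!name!status
#     1!node1!online
#     2!node2!online
#
# is parsed into a list of dicts, one per data row:
#
#     parsed = CLIParser(raw_output, ssh_cmd=['mcsinq', 'lsnode', '-delim', '!'])
#     parsed[0]['name']    # -> 'node1'
#     parsed[1]['status']  # -> 'online'
#
# With with_header=False, blank lines separate sections and each section becomes
# one dict keyed by the first field of every row.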
class InStorageSSH(object):
"""SSH interface to Inspur InStorage systems."""
def __init__(self, ssh_runner):
self._ssh = ssh_runner
def _run_ssh(self, ssh_cmd):
try:
return self._ssh(ssh_cmd)
except processutils.ProcessExecutionError as e:
msg = (_('CLI Exception output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s.') %
{'cmd': ssh_cmd,
'out': e.stdout,
'err': e.stderr})
LOG.error(msg)
raise exception.ShareBackendException(msg=msg)
def run_ssh_inq(self, ssh_cmd, delim='!', with_header=False):
"""Run an SSH command and return parsed output."""
raw = self._run_ssh(ssh_cmd)
LOG.debug('Response for cmd %s is %s', ssh_cmd, raw)
return CLIParser(raw, ssh_cmd=ssh_cmd, delim=delim,
with_header=with_header)
def run_ssh_assert_no_output(self, ssh_cmd):
"""Run an SSH command and assert no output returned."""
out, err = self._run_ssh(ssh_cmd)
if len(out.strip()) != 0:
msg = (_('Expected no output from CLI command %(cmd)s, '
'got %(out)s.') % {'cmd': ' '.join(ssh_cmd), 'out': out})
LOG.error(msg)
raise exception.ShareBackendException(msg=msg)
def run_ssh_check_created(self, ssh_cmd):
"""Run an SSH command and return the ID of the created object."""
out, err = self._run_ssh(ssh_cmd)
try:
match_obj = re.search(r'\[([0-9]+)\],? successfully created', out)
return match_obj.group(1)
except (AttributeError, IndexError):
msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s.') %
{'cmd': ssh_cmd,
'out': out,
'err': err})
LOG.error(msg)
raise exception.ShareBackendException(msg=msg)
def lsnode(self, node_id=None):
with_header = True
ssh_cmd = ['mcsinq', 'lsnode', '-delim', '!']
if node_id:
with_header = False
ssh_cmd.append(node_id)
return self.run_ssh_inq(ssh_cmd, with_header=with_header)
def lsnaspool(self, pool_id=None):
ssh_cmd = ['mcsinq', 'lsnaspool', '-delim', '!']
if pool_id:
ssh_cmd.append(pool_id)
return self.run_ssh_inq(ssh_cmd, with_header=True)
def lsfs(self, node_name=None, fsname=None):
if fsname and not node_name:
msg = _('Node name should be set when file system name is set.')
LOG.error(msg)
raise exception.InvalidParameterValue(msg)
ssh_cmd = ['mcsinq', 'lsfs', '-delim', '!']
to_append = []
if node_name:
to_append += ['-node', '"%s"' % node_name]
if fsname:
to_append += ['-name', '"%s"' % fsname]
if not to_append:
to_append += ['-all']
ssh_cmd += to_append
return self.run_ssh_inq(ssh_cmd, with_header=True)
def addfs(self, fsname, pool_name, size, node_name):
"""Create a file system on the storage.
:param fsname: file system name
:param pool_name: pool in which to create the file system
:param size: file system size in GB
:param node_name: the primary node name
:return:
"""
ssh_cmd = ['mcsop', 'addfs', '-name', '"%s"' % fsname, '-pool',
'"%s"' % pool_name, '-size', '%dg' % size,
'-node', '"%s"' % node_name]
self.run_ssh_assert_no_output(ssh_cmd)
def rmfs(self, fsname):
"""Remove the specific file system.
:param fsname: file system name to be removed
:return:
"""
ssh_cmd = ['mcsop', 'rmfs', '-name', '"%s"' % fsname]
self.run_ssh_assert_no_output(ssh_cmd)
def expandfs(self, fsname, size):
"""Expand the space of the specific file system.
:param fsname: file system name
        :param size: the size (GB) to expand by; origin + size = result
:return:
"""
ssh_cmd = ['mcsop', 'expandfs', '-name', '"%s"' % fsname,
'-size', '%dg' % size]
self.run_ssh_assert_no_output(ssh_cmd)
# NAS directory operation
def lsnasdir(self, dirpath):
"""List the child directory under dirpath.
:param dirpath: the parent directory to list with
:return:
"""
ssh_cmd = ['mcsinq', 'lsnasdir', '-delim', '!', '"%s"' % dirpath]
return self.run_ssh_inq(ssh_cmd, with_header=True)
def addnasdir(self, dirpath):
"""Create a new NAS directory indicated by dirpath."""
ssh_cmd = ['mcsop', 'addnasdir', '"%s"' % dirpath]
self.run_ssh_assert_no_output(ssh_cmd)
def chnasdir(self, old_path, new_path):
"""Rename the NAS directory name."""
ssh_cmd = ['mcsop', 'chnasdir', '-oldpath', '"%s"' % old_path,
'-newpath', '"%s"' % new_path]
self.run_ssh_assert_no_output(ssh_cmd)
def rmnasdir(self, dirpath):
"""Remove the specific dirpath."""
ssh_cmd = ['mcsop', 'rmnasdir', '"%s"' % dirpath]
self.run_ssh_assert_no_output(ssh_cmd)
# NFS operation
def rmnfs(self, share_path):
"""Remove the NFS indicated by path."""
ssh_cmd = ['mcsop', 'rmnfs', '"%s"' % share_path]
self.run_ssh_assert_no_output(ssh_cmd)
def lsnfslist(self, prefix=None):
"""List NFS shares on a system."""
ssh_cmd = ['mcsinq', 'lsnfslist', '-delim', '!']
if prefix:
ssh_cmd.append('"%s"' % prefix)
return self.run_ssh_inq(ssh_cmd, with_header=True)
def lsnfsinfo(self, share_path):
"""List a specific NFS share's information."""
ssh_cmd = ['mcsinq', 'lsnfsinfo', '-delim', '!', '"%s"' % share_path]
return self.run_ssh_inq(ssh_cmd, with_header=True)
def addnfsclient(self, share_path, client_spec):
"""Add a client access rule to NFS share.
:param share_path: the NFS share path.
:param client_spec: IP/MASK:RIGHTS:ALL_SQUASH:ROOT_SQUASH.
:return:
"""
ssh_cmd = ['mcsop', 'addnfsclient', '-path', '"%s"' % share_path,
'-client', client_spec]
self.run_ssh_assert_no_output(ssh_cmd)
def chnfsclient(self, share_path, client_spec):
"""Change a NFS share's client info."""
ssh_cmd = ['mcsop', 'chnfsclient', '-path', '"%s"' % share_path,
'-client', client_spec]
self.run_ssh_assert_no_output(ssh_cmd)
def rmnfsclient(self, share_path, client_spec):
"""Remove a client info from the NFS share."""
        # The client_spec parameter for rmnfsclient is only IP/MASK,
        # so we need to strip the rights and squash options.
client_spec = client_spec.split(':')[0]
ssh_cmd = ['mcsop', 'rmnfsclient', '-path', '"%s"' % share_path,
'-client', client_spec]
self.run_ssh_assert_no_output(ssh_cmd)
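# Example of the client_spec handling above (the addresses are hypothetical):
# a spec passed to addnfsclient such as
#
#     '192.168.1.0/24:rw:all_squash:root_squash'
#
# carries IP/MASK plus access options, while rmnfsclient only accepts the
# IP/MASK portion, so client_spec.split(':')[0] reduces it to '192.168.1.0/24'.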
# CIFS operation
def lscifslist(self, filter=None):
"""List CIFS shares on the system."""
ssh_cmd = ['mcsinq', 'lscifslist', '-delim', '!']
if filter:
ssh_cmd.append('"%s"' % filter)
return self.run_ssh_inq(ssh_cmd, with_header=True)
def lscifsinfo(self, share_name):
"""List a specific CIFS share's information."""
ssh_cmd = ['mcsinq', 'lscifsinfo', '-delim', '!', '"%s"' % share_name]
return self.run_ssh_inq(ssh_cmd, with_header=True)
def addcifs(self, share_name, dirpath, oplocks='off'):
"""Create a CIFS share with given path."""
ssh_cmd = ['mcsop', 'addcifs', '-name', share_name, '-path', dirpath,
'-oplocks', oplocks]
self.run_ssh_assert_no_output(ssh_cmd)
def rmcifs(self, share_name):
"""Remove a CIFS share."""
ssh_cmd = ['mcsop', 'rmcifs', share_name]
self.run_ssh_assert_no_output(ssh_cmd)
def chcifs(self, share_name, oplocks='off'):
"""Change a CIFS share's attribute.
:param share_name: share's name
:param oplocks: 'off' or 'on'
:return:
"""
ssh_cmd = ['mcsop', 'chcifs', '-name', share_name, '-oplocks', oplocks]
self.run_ssh_assert_no_output(ssh_cmd)
def addcifsuser(self, share_name, rights):
"""Add a user access rule to CIFS share.
:param share_name: share's name
:param rights: [LU|LG]:xxx:[rw|ro]
:return:
"""
ssh_cmd = ['mcsop', 'addcifsuser', '-name', share_name,
'-rights', rights]
self.run_ssh_assert_no_output(ssh_cmd)
def chcifsuser(self, share_name, rights):
"""Change a user access rule."""
ssh_cmd = ['mcsop', 'chcifsuser', '-name', share_name,
'-rights', rights]
self.run_ssh_assert_no_output(ssh_cmd)
def rmcifsuser(self, share_name, rights):
"""Remove CIFS user from a CIFS share."""
# the rights parameter for rmcifsuser is LU:NAME
rights = ':'.join(rights.split(':')[0:-1])
ssh_cmd = ['mcsop', 'rmcifsuser', '-name', share_name,
'-rights', rights]
self.run_ssh_assert_no_output(ssh_cmd)
# NAS port ip
def lsnasportip(self):
"""List NAS service port ip address."""
ssh_cmd = ['mcsinq', 'lsnasportip', '-delim', '!']
return self.run_ssh_inq(ssh_cmd, with_header=True)
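# Minimal usage sketch for this module (host, credentials, and the column names
# read from the parsed output are hypothetical; this is not executed by the driver):
#
#     ssh_runner = SSHRunner('192.168.0.10', 22, 'superuser', 'passw0rd')
#     ssh = InStorageSSH(ssh_runner)
#     for node in ssh.lsnode():       # CLIParser result, one dict per node
#         print(node['name'])
#     ssh.addfs('fs01', 'Pool0', 10, node_name='node1')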
|
tests/interpretability/test_saliency_maps_gat.py | LarsNeR/stellargraph | 2,428 | 12746365 |
# -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from stellargraph.interpretability.saliency_maps import IntegratedGradientsGAT
import numpy as np
from stellargraph.layer import GAT
from stellargraph.mapper import FullBatchNodeGenerator
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import categorical_crossentropy
import networkx as nx
from tensorflow.keras import backend as K
from ..test_utils.graphs import example_graph_1_saliency_maps as example_graph_1
def create_GAT_model(graph):
generator = FullBatchNodeGenerator(graph, sparse=False, method=None)
train_gen = generator.flow([0, 1], np.array([[1, 0], [0, 1]]))
gat = GAT(
layer_sizes=[2, 2],
generator=generator,
bias=False,
in_dropout=0,
attn_dropout=0,
activations=["elu", "softmax"],
normalize=None,
saliency_map_support=True,
)
for layer in gat._layers:
layer._initializer = "ones"
x_inp, x_out = gat.in_out_tensors()
keras_model = Model(inputs=x_inp, outputs=x_out)
return gat, keras_model, generator, train_gen
def get_ego_node_num(graph, target_idx):
G_ego = nx.ego_graph(graph, target_idx, radius=2)
return G_ego.number_of_nodes()
def test_ig_saliency_map():
graph = example_graph_1(feature_size=4)
base_model, keras_model_gat, generator, train_gen = create_GAT_model(graph)
keras_model_gat.compile(
optimizer=Adam(lr=0.1), loss=categorical_crossentropy, weighted_metrics=["acc"]
)
weights = [
np.array(
[
[0.47567585, 0.7989239],
[0.33588523, 0.19814175],
[0.15685713, 0.43643117],
[0.7725941, 0.68441933],
]
),
np.array([[0.71832293], [0.8542117]]),
np.array([[0.46560588], [0.8165422]]),
np.array(1.0),
np.array(0.0),
np.array([[0.4391179, 0.595691], [0.06000895, 0.2613866]]),
np.array([[0.43496376], [0.02840129]]),
np.array([[0.33972418], [0.22352563]]),
np.array(1.0),
np.array(0.0),
]
keras_model_gat.set_weights(weights)
# sanity check to make sure that the values of delta and non_exist_edges are not trainable
# the expected value should be delta = 1.0 and non_exist_edges = 0.0
for var in keras_model_gat.non_trainable_weights:
if "ig_delta" in var.name:
assert K.get_value(var) == 1.0
if "ig_non_exist_edge" in var.name:
assert K.get_value(var) == 0.0
ig_saliency = IntegratedGradientsGAT(
keras_model_gat, train_gen, generator.node_list
)
target_id = 0
class_of_interest = 0
ig_link_importance = ig_saliency.get_link_importance(
target_id, class_of_interest, steps=200
)
print(ig_link_importance)
ig_link_importance_ref = np.array(
[
[4.759e-11, 4.759e-11, 4.759e-11, 0, 0],
[-1.442e-10, -1.442e-10, 0, 0, 0],
[1.183e-10, 0, 1.183e-10, 1.183e-10, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
]
)
# Check the number of non-zero elements in the node importance matrix. We expect to see the number be same with the number of nodes in the ego network.
    assert np.sum(
        np.ma.masked_array(ig_link_importance, mask=train_gen.A_dense)
    ) == pytest.approx(0, abs=1e-9)  # tolerance chosen to match the 1e-11 per-entry threshold below
# TODO: write a better comparison test with larger floating point values
# commented out test because of floating point errors
# assert ig_link_importance == pytest.approx(ig_link_importance_ref, abs=1e-11)
non_zero_edge_importance = np.sum(np.abs(ig_link_importance) > 1e-11)
assert 8 == non_zero_edge_importance
ig_node_importance = ig_saliency.get_node_importance(
target_id, class_of_interest, steps=200
)
print(ig_node_importance)
    assert ig_node_importance == pytest.approx(
        np.array([-13.06, -9.32, -7.46, -3.73, 0]), abs=1e-2
    )  # tolerance matches the 2-decimal reference values
non_zero_node_importance = np.sum(np.abs(ig_node_importance) > 1e-5)
assert 4 == non_zero_node_importance
|
examples/tensorflow/pruning/resnet_v2/benchmark.py | mdfaijul/neural-compressor | 172 | 12746378 | import tensorflow
from tensorflow.keras.datasets import cifar10
from tensorflow import keras
import numpy as np
num_classes = 10
class EvalDataset(object):
def __init__(self, batch_size=100):
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
        # Subtract the per-pixel mean of the training set from both splits
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
# Convert class vectors to binary class matrices.
y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes)
y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)
self.test_images = x_test
self.test_labels = y_test
def __len__(self):
return len(self.test_images)
def __getitem__(self, idx):
return self.test_images[idx], self.test_labels[idx]
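# Quick sanity sketch of the dataset wrapper above (shapes follow CIFAR-10;
# nothing here is executed by the benchmark itself):
#
#     ds = EvalDataset()
#     image, label = ds[0]   # image: (32, 32, 3) float32, label: (10,) one-hot
#     len(ds)                # -> 10000 test images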
from neural_compressor.experimental import Benchmark, common
evaluator = Benchmark('benchmark.yaml')
evaluator.model = common.Model('./baseline_model')
evaluator.b_dataloader = common.DataLoader(EvalDataset())
evaluator('performance')
|
lib/spack/external/archspec/cpu/schema.py | kkauder/spack | 2,360 | 12746379 | # Copyright 2019-2020 Lawrence Livermore National Security, LLC and other
# Archspec Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Global objects with the content of the microarchitecture
JSON file and its schema
"""
import json
import os.path
try:
from collections.abc import MutableMapping # novm
except ImportError:
from collections import MutableMapping
class LazyDictionary(MutableMapping):
"""Lazy dictionary that gets constructed on first access to any object key
Args:
factory (callable): factory function to construct the dictionary
"""
def __init__(self, factory, *args, **kwargs):
self.factory = factory
self.args = args
self.kwargs = kwargs
self._data = None
@property
def data(self):
"""Returns the lazily constructed dictionary"""
if self._data is None:
self._data = self.factory(*self.args, **self.kwargs)
return self._data
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def _load_json_file(json_file):
json_dir = os.path.join(os.path.dirname(__file__), "..", "json", "cpu")
json_dir = os.path.abspath(json_dir)
def _factory():
filename = os.path.join(json_dir, json_file)
with open(filename, "r") as file:
return json.load(file)
return _factory
#: In memory representation of the data in microarchitectures.json,
#: loaded on first access
TARGETS_JSON = LazyDictionary(_load_json_file("microarchitectures.json"))
#: JSON schema for microarchitectures.json, loaded on first access
SCHEMA = LazyDictionary(_load_json_file("microarchitectures_schema.json"))
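# Illustrative access pattern (the top-level key name is an assumption of this
# sketch, based on the archspec JSON layout): nothing is read from disk until
# the first lookup, and later lookups reuse the cached dictionary.
#
#     targets = TARGETS_JSON["microarchitectures"]   # triggers the JSON load
#     targets is TARGETS_JSON["microarchitectures"]  # -> True, data is cached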
|
tests/framework/AnalyticModels/optimizing/diagonal_valley_stochastic.py | rinelson456/raven | 159 | 12746399 |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
#static seed
np.random.seed(42)
def run(raven, Inputs):
  coeffs = [1, 1, 0]  # ax + by + c = 0 -> x + y = 0; the valley lies along y = -x
raven.ans = main(coeffs, raven.x, raven.y) #+ random(scale=raven.stoch)/10
def main(coeffs, x, y, thresh=0.01):
distance = dist_to_line(coeffs, x, y)
z = 3*(x+0.5)**2 + 3*(y-0.5)**2
z += distance * 10
return z
def dist_to_line(coeffs, x0, y0):
cx, cy = closest_point(coeffs, x0, y0)
dist = np.sqrt((x0 - cx)**2 + (y0 - cy)**2)
return dist
def closest_point(coeffs, x0, y0):
a, b, c = coeffs
denom = a*a + b*b
x = b * (b * x0 - a * y0) - a * c
x /= denom
y = a * (-b * x0 + a * y0) - b * c
y /= denom
return x, y
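# Worked check of the projection formula above: for the line x + y = 0
# (a=1, b=1, c=0) and the point (1, 0), denom = 2, so
#   x = (1*(1*1 - 1*0) - 1*0) / 2 = 0.5
#   y = (1*(-1*1 + 1*0) - 1*0) / 2 = -0.5
# giving the foot of the perpendicular (0.5, -0.5) and a distance of
# sqrt(0.5**2 + 0.5**2) ~= 0.707, which is what dist_to_line returns.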
def random(scale=0.5,loc=-1.0):
return scale*(2.*np.random.rand()+loc)
|
language_modeling/language_utils.py | coasxu/FedMA | 254 | 12746420 |
# Modified from: https://github.com/litian96/FedProx/blob/master/flearn/utils/language_utils.py
# credit goes to: <NAME> (litian96 @ GitHub)
"""Utils for language models."""
import re
import numpy as np
import torch
# ------------------------
# utils for shakespeare dataset
ALL_LETTERS = "\n !\"&'(),-.0123456789:;>?ABCDEFGHIJKLMNOPQRSTUVWXYZ[]abcdefghijklmnopqrstuvwxyz}"
NUM_LETTERS = len(ALL_LETTERS)
def _one_hot(index, size):
'''returns one-hot vector with given size and value 1 at given index
'''
vec = [0 for _ in range(size)]
vec[int(index)] = 1
return vec
def letter_to_vec(letter):
'''returns one-hot representation of given letter
'''
index = ALL_LETTERS.find(letter)
return _one_hot(index, NUM_LETTERS)
def word_to_indices(word):
'''returns a list of character indices
Args:
word: string
Return:
indices: int list with length len(word)
'''
indices = []
for c in word:
indices.append(ALL_LETTERS.find(c))
return indices
# ------------------------
# utils for sent140 dataset
def split_line(line):
'''split given line/phrase into list of words
Args:
line: string representing phrase to be split
Return:
list of strings, with each string representing a word
'''
return re.findall(r"[\w']+|[.,!?;]", line)
def _word_to_index(word, indd):
'''returns index of given word based on given lookup dictionary
returns the length of the lookup dictionary if word not found
Args:
word: string
indd: dictionary with string words as keys and int indices as values
'''
if word in indd:
return indd[word]
else:
return len(indd)
def line_to_indices(line, word2id, max_words=25):
'''converts given phrase into list of word indices
if the phrase has more than max_words words, returns a list containing
indices of the first max_words words
if the phrase has less than max_words words, repeatedly appends integer
representing unknown index to returned list until the list's length is
max_words
Args:
line: string representing phrase/sequence of words
word2id: dictionary with string words as keys and int indices as values
max_words: maximum number of word indices in returned list
Return:
indl: list of word indices, one index for each word in phrase
'''
unk_id = len(word2id)
line_list = split_line(line) # split phrase in words
indl = [word2id[w] if w in word2id else unk_id for w in line_list[:max_words]]
indl += [unk_id]*(max_words-len(indl))
return indl
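# Small illustration of line_to_indices (the vocabulary here is made up):
#
#     word2id = {'hello': 0, 'world': 1}           # unk_id == len(word2id) == 2
#     line_to_indices('hello there world', word2id, max_words=5)
#     # -> [0, 2, 1, 2, 2]  ('there' is unknown, then the list is padded with unk_id)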
def bag_of_words(line, vocab):
'''returns bag of words representation of given phrase using given vocab
Args:
line: string representing phrase to be parsed
vocab: dictionary with words as keys and indices as values
Return:
integer list
'''
bag = [0]*len(vocab)
words = split_line(line)
for w in words:
if w in vocab:
bag[vocab[w]] += 1
return bag
def repackage_hidden(h):
"""Wraps hidden states in new Tensors, to detach them from their history."""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
def process_x(raw_x_batch):
x_batch = [word_to_indices(word) for word in raw_x_batch]
x_batch = np.array(x_batch).T
return x_batch
def process_y(raw_y_batch):
y_batch = [letter_to_vec(c) for c in raw_y_batch]
return np.array(y_batch)
def patch_h_weights(weights, L_next, assignments):
# e.g. (1024, 256) comes from (256,256)|(256,256)|(256,256)|(256,256)
def __permutate(weight, assignments, L_next):
new_w_j = np.zeros((L_next, L_next), dtype=np.float32)
new_w_j[np.ix_(assignments, assignments)] = weight # TODO(hwang): make sure if this is correct
return new_w_j
split_range = np.split(np.arange(weights.shape[0]), 4)
h_weights = []
for indices in split_range:
#logger.info("assignments: {}".format(assignments))
tempt_h_w = __permutate(weights[indices, :], assignments, L_next)
h_weights.append(tempt_h_w)
#logger.info("equal: {}".format(np.array_equal(tempt_h_w, weights[indices, :])))
return np.vstack(h_weights)
def patch_biases(biases, L_next, assignments):
# e.g. (1024, 256) comes from (256,256)|(256,256)|(256,256)|(256,256)
def __permutate(bias, assignments, L_next):
new_w_j = np.zeros(L_next)
new_w_j[assignments] = bias
return new_w_j
splitted_bias = np.split(biases, 4)
h_bias = [__permutate(sb, assignments, L_next) for sb in splitted_bias]
return np.hstack(h_bias)
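# Tiny sketch of the scatter performed by __permutate in patch_biases (numbers
# are made up): with L_next=3 and assignments=[2, 0, 1], a per-gate bias slice
# [10, 20, 30] is placed so that element i lands at index assignments[i],
# giving [20, 30, 10]. patch_h_weights applies the same scatter to both rows
# and columns of each gate's hidden weight block via np.ix_.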
def perm_i_weights(w_j, L_next, assignment_j_c):
split_range = np.split(np.arange(w_j.shape[0]), 4)
res = []
for i in range(4):
cand_w_j = w_j[split_range[i], :]
temp_new_w_j = np.zeros((L_next, w_j.shape[1]))
temp_new_w_j[assignment_j_c, :] = cand_w_j
res.append(temp_new_w_j)
return np.vstack(res)
def patch_i_weights(weights, L_next, assignments):
# e.g. (1024, 256) comes from (256,256)|(256,256)|(256,256)|(256,256)
def __permutate(weight, assignments, L_next):
new_w_j = np.zeros((L_next, L_next), dtype=np.float32)
new_w_j[np.ix_(assignments, assignments)] = weight # TODO(hwang): make sure if this is correct
return new_w_j
split_range = np.split(np.arange(weights.shape[0]), 4)
h_weights = [__permutate(weights[indices, :], assignments, L_next) for indices in split_range]
return np.hstack(h_weights).T
def patch_i_biases(biases, L_next, assignments):
# e.g. (1024, 256) comes from (256,256)|(256,256)|(256,256)|(256,256)
def __permutate(bias, assignments, L_next):
new_w_j = np.zeros(L_next, dtype=np.float32)
new_w_j[assignments] = bias
return new_w_j
splitted_bias = np.split(biases, 4)
h_bias = [__permutate(sb, assignments, L_next) for sb in splitted_bias]
return np.hstack(h_bias)
def perm_i_weights(w_j, L_next, assignment_j_c):
split_range = np.split(np.arange(w_j.shape[0]), 4)
res = []
for i in range(4):
cand_w_j = w_j[split_range[i], :]
temp_new_w_j = np.zeros((L_next, w_j.shape[1]))
temp_new_w_j[assignment_j_c, :] = cand_w_j
res.append(temp_new_w_j)
return np.vstack(res) |
homeassistant/components/alarmdecoder/const.py | MrDelik/core | 30,023 | 12746436 |
"""Constants for the AlarmDecoder component."""
CONF_ALT_NIGHT_MODE = "alt_night_mode"
CONF_AUTO_BYPASS = "auto_bypass"
CONF_CODE_ARM_REQUIRED = "code_arm_required"
CONF_DEVICE_BAUD = "device_baudrate"
CONF_DEVICE_PATH = "device_path"
CONF_RELAY_ADDR = "zone_relayaddr"
CONF_RELAY_CHAN = "zone_relaychan"
CONF_ZONE_LOOP = "zone_loop"
CONF_ZONE_NAME = "zone_name"
CONF_ZONE_NUMBER = "zone_number"
CONF_ZONE_RFID = "zone_rfid"
CONF_ZONE_TYPE = "zone_type"
DATA_AD = "alarmdecoder"
DATA_REMOVE_STOP_LISTENER = "rm_stop_listener"
DATA_REMOVE_UPDATE_LISTENER = "rm_update_listener"
DATA_RESTART = "restart"
DEFAULT_ALT_NIGHT_MODE = False
DEFAULT_AUTO_BYPASS = False
DEFAULT_CODE_ARM_REQUIRED = True
DEFAULT_DEVICE_BAUD = 115200
DEFAULT_DEVICE_HOST = "alarmdecoder"
DEFAULT_DEVICE_PATH = "/dev/ttyUSB0"
DEFAULT_DEVICE_PORT = 10000
DEFAULT_ZONE_TYPE = "window"
DEFAULT_ARM_OPTIONS = {
CONF_ALT_NIGHT_MODE: DEFAULT_ALT_NIGHT_MODE,
CONF_AUTO_BYPASS: DEFAULT_AUTO_BYPASS,
CONF_CODE_ARM_REQUIRED: DEFAULT_CODE_ARM_REQUIRED,
}
DEFAULT_ZONE_OPTIONS: dict = {}
DOMAIN = "alarmdecoder"
OPTIONS_ARM = "arm_options"
OPTIONS_ZONES = "zone_options"
PROTOCOL_SERIAL = "serial"
PROTOCOL_SOCKET = "socket"
SIGNAL_PANEL_MESSAGE = "alarmdecoder.panel_message"
SIGNAL_REL_MESSAGE = "alarmdecoder.rel_message"
SIGNAL_RFX_MESSAGE = "alarmdecoder.rfx_message"
SIGNAL_ZONE_FAULT = "alarmdecoder.zone_fault"
SIGNAL_ZONE_RESTORE = "alarmdecoder.zone_restore"
|
ocs_ci/ocs/bucket_utils.py | annagitel/ocs-ci | 130 | 12746437 | """
Helper functions file for working with object buckets
"""
import logging
import os
import shlex
from uuid import uuid4
import boto3
from botocore.handlers import disable_signing
from ocs_ci.framework import config
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import TimeoutExpiredError, UnexpectedBehaviour
from ocs_ci.utility import templating
from ocs_ci.utility.ssl_certs import get_root_ca_cert
from ocs_ci.utility.utils import TimeoutSampler, run_cmd
from ocs_ci.helpers.helpers import create_resource
logger = logging.getLogger(__name__)
def craft_s3_command(cmd, mcg_obj=None, api=False, signed_request_creds=None):
"""
    Crafts the AWS CLI S3 command, including the
    login credentials and the command to be run
    Args:
        cmd: The AWSCLI command to run
        mcg_obj: An MCG class instance
api: True if the call is for s3api, false if s3
signed_request_creds: a dictionary containing AWS S3 creds for a signed request
Returns:
str: The crafted command, ready to be executed on the pod
"""
api = "api" if api else ""
if mcg_obj:
if mcg_obj.region:
region = f"AWS_DEFAULT_REGION={mcg_obj.region} "
else:
region = ""
base_command = (
f'sh -c "AWS_CA_BUNDLE={constants.SERVICE_CA_CRT_AWSCLI_PATH} '
f"AWS_ACCESS_KEY_ID={mcg_obj.access_key_id} "
f"AWS_SECRET_ACCESS_KEY={mcg_obj.access_key} "
f"{region}"
f"aws s3{api} "
f"--endpoint={mcg_obj.s3_internal_endpoint} "
)
string_wrapper = '"'
elif signed_request_creds:
if signed_request_creds.get("region"):
region = f'AWS_DEFAULT_REGION={signed_request_creds.get("region")} '
else:
region = ""
base_command = (
f'sh -c "AWS_ACCESS_KEY_ID={signed_request_creds.get("access_key_id")} '
f'AWS_SECRET_ACCESS_KEY={signed_request_creds.get("access_key")} '
f"{region}"
f"aws s3{api} "
f'--endpoint={signed_request_creds.get("endpoint")} '
)
string_wrapper = '"'
else:
base_command = f"aws s3{api} --no-sign-request "
string_wrapper = ""
return f"{base_command}{cmd}{string_wrapper}"
def verify_s3_object_integrity(original_object_path, result_object_path, awscli_pod):
"""
Verifies checksum between original object and result object on an awscli pod
Args:
original_object_path (str): The Object that is uploaded to the s3 bucket
result_object_path (str): The Object that is downloaded from the s3 bucket
awscli_pod (pod): A pod running the AWSCLI tools
Returns:
bool: True if checksum matches, False otherwise
"""
md5sum = shlex.split(
awscli_pod.exec_cmd_on_pod(
command=f"md5sum {original_object_path} {result_object_path}"
)
)
if md5sum[0] == md5sum[2]:
logger.info(
f"Passed: MD5 comparison for {original_object_path} and {result_object_path}"
)
return True
else:
logger.error(
f"Failed: MD5 comparison of {original_object_path} and {result_object_path} - "
f"{md5sum[0]} ≠ {md5sum[2]}"
)
return False
def retrieve_test_objects_to_pod(podobj, target_dir):
"""
Downloads all the test objects to a given directory in a given pod.
Args:
podobj (OCS): The pod object to download the objects to
target_dir: The fully qualified path of the download target folder
Returns:
list: A list of the downloaded objects' names
"""
sync_object_directory(podobj, f"s3://{constants.TEST_FILES_BUCKET}", target_dir)
downloaded_objects = podobj.exec_cmd_on_pod(f"ls -A1 {target_dir}").split(" ")
logger.info(f"Downloaded objects: {downloaded_objects}")
return downloaded_objects
def retrieve_anon_s3_resource():
"""
Returns an anonymous boto3 S3 resource by creating one and disabling signing
Disabling signing isn't documented anywhere, and this solution is based on
a comment by an AWS developer:
https://github.com/boto/boto3/issues/134#issuecomment-116766812
Returns:
boto3.resource(): An anonymous S3 resource
"""
anon_s3_resource = boto3.resource("s3")
anon_s3_resource.meta.client.meta.events.register(
"choose-signer.s3.*", disable_signing
)
return anon_s3_resource
def sync_object_directory(podobj, src, target, s3_obj=None, signed_request_creds=None):
"""
    Syncs objects between source and target directories
Args:
podobj (OCS): The pod on which to execute the commands and download the objects to
src (str): Fully qualified object source path
target (str): Fully qualified object target path
s3_obj (MCG, optional): The MCG object to use in case the target or source
are in an MCG
signed_request_creds (dictionary, optional): the access_key, secret_key,
endpoint and region to use when willing to send signed aws s3 requests
"""
logger.info(f"Syncing all objects and directories from {src} to {target}")
retrieve_cmd = f"sync {src} {target}"
if s3_obj:
secrets = [s3_obj.access_key_id, s3_obj.access_key, s3_obj.s3_internal_endpoint]
elif signed_request_creds:
secrets = [
signed_request_creds.get("access_key_id"),
signed_request_creds.get("access_key"),
signed_request_creds.get("endpoint"),
]
else:
secrets = None
podobj.exec_cmd_on_pod(
command=craft_s3_command(
retrieve_cmd, s3_obj, signed_request_creds=signed_request_creds
),
out_yaml_format=False,
secrets=secrets,
), "Failed to sync objects"
# Todo: check that all objects were synced successfully
def rm_object_recursive(podobj, target, mcg_obj, option=""):
"""
Remove bucket objects with --recursive option
Args:
podobj (OCS): The pod on which to execute the commands and download
the objects to
target (str): Fully qualified bucket target path
mcg_obj (MCG, optional): The MCG object to use in case the target or
source are in an MCG
option (str): Extra s3 remove command option
"""
rm_command = f"rm s3://{target} --recursive {option}"
podobj.exec_cmd_on_pod(
command=craft_s3_command(rm_command, mcg_obj),
out_yaml_format=False,
secrets=[
mcg_obj.access_key_id,
mcg_obj.access_key,
mcg_obj.s3_internal_endpoint,
],
)
def get_rgw_restart_counts():
"""
Gets the restart count of the RGW pods
Returns:
list: restart counts of RGW pods
"""
# Internal import in order to avoid circular import
from ocs_ci.ocs.resources.pod import get_rgw_pods
rgw_pods = get_rgw_pods()
return [rgw_pod.restart_count for rgw_pod in rgw_pods]
def write_individual_s3_objects(
mcg_obj, awscli_pod, bucket_factory, downloaded_files, target_dir, bucket_name=None
):
"""
Writes objects one by one to an s3 bucket
Args:
mcg_obj (obj): An MCG object containing the MCG S3 connection credentials
awscli_pod (pod): A pod running the AWSCLI tools
bucket_factory: Calling this fixture creates a new bucket(s)
downloaded_files (list): List of downloaded object keys
target_dir (str): The fully qualified path of the download target folder
bucket_name (str): Name of the bucket
(default: none)
"""
bucketname = bucket_name or bucket_factory(1)[0].name
logger.info("Writing objects to bucket")
for obj_name in downloaded_files:
full_object_path = f"s3://{bucketname}/{obj_name}"
copycommand = f"cp {target_dir}{obj_name} {full_object_path}"
assert "Completed" in awscli_pod.exec_cmd_on_pod(
command=craft_s3_command(copycommand, mcg_obj),
out_yaml_format=False,
secrets=[
mcg_obj.access_key_id,
mcg_obj.access_key,
mcg_obj.s3_internal_endpoint,
],
)
def upload_parts(
mcg_obj, awscli_pod, bucketname, object_key, body_path, upload_id, uploaded_parts
):
"""
Uploads individual parts to a bucket
Args:
mcg_obj (obj): An MCG object containing the MCG S3 connection credentials
awscli_pod (pod): A pod running the AWSCLI tools
bucketname (str): Name of the bucket to upload parts on
object_key (list): Unique object Identifier
body_path (str): Path of the directory on the aws pod which contains the parts to be uploaded
upload_id (str): Multipart Upload-ID
uploaded_parts (list): list containing the name of the parts to be uploaded
Returns:
list: List containing the ETag of the parts
"""
parts = []
secrets = [mcg_obj.access_key_id, mcg_obj.access_key, mcg_obj.s3_internal_endpoint]
for count, part in enumerate(uploaded_parts, 1):
upload_cmd = (
f"upload-part --bucket {bucketname} --key {object_key}"
f" --part-number {count} --body {body_path}/{part}"
f" --upload-id {upload_id}"
)
# upload_cmd will return ETag, upload_id etc which is then split to get just the ETag
part = (
awscli_pod.exec_cmd_on_pod(
command=craft_s3_command(upload_cmd, mcg_obj, api=True),
out_yaml_format=False,
secrets=secrets,
)
.split('"')[-3]
.split("\\")[0]
)
parts.append({"PartNumber": count, "ETag": f'"{part}"'})
return parts
def oc_create_aws_backingstore(cld_mgr, backingstore_name, uls_name, region):
"""
Create a new backingstore with aws underlying storage using oc create command
Args:
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): which region to create backingstore (should be the same as uls)
"""
bs_data = templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)
bs_data["metadata"]["name"] = backingstore_name
bs_data["metadata"]["namespace"] = config.ENV_DATA["cluster_namespace"]
bs_data["spec"] = {
"type": "aws-s3",
"awsS3": {
"targetBucket": uls_name,
"region": region,
"secret": {"name": cld_mgr.aws_client.secret.name},
},
}
create_resource(**bs_data)
def cli_create_aws_backingstore(mcg_obj, cld_mgr, backingstore_name, uls_name, region):
"""
Create a new backingstore with aws underlying storage using noobaa cli command
Args:
mcg_obj (MCG): Used for execution for the NooBaa CLI command
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): which region to create backingstore (should be the same as uls)
"""
mcg_obj.exec_mcg_cmd(
f"backingstore create aws-s3 {backingstore_name} "
f"--access-key {cld_mgr.aws_client.access_key} "
f"--secret-key {cld_mgr.aws_client.secret_key} "
f"--target-bucket {uls_name} --region {region}"
)
def oc_create_google_backingstore(cld_mgr, backingstore_name, uls_name, region):
"""
Create a new backingstore with GCP underlying storage using oc create command
Args:
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): which region to create backingstore (should be the same as uls)
"""
bs_data = templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)
bs_data["metadata"]["name"] = backingstore_name
bs_data["spec"] = {
"type": constants.BACKINGSTORE_TYPE_GOOGLE,
"googleCloudStorage": {
"targetBucket": uls_name,
"secret": {"name": cld_mgr.gcp_client.secret.name},
},
}
create_resource(**bs_data)
def cli_create_google_backingstore(
mcg_obj, cld_mgr, backingstore_name, uls_name, region
):
"""
Create a new backingstore with GCP underlying storage using a NooBaa CLI command
Args:
mcg_obj (MCG): Used for execution for the NooBaa CLI command
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): which region to create backingstore (should be the same as uls)
"""
mcg_obj.exec_mcg_cmd(
f"backingstore create google-cloud-storage {backingstore_name} "
f"--private-key-json-file {constants.GOOGLE_CREDS_JSON_PATH} "
f"--target-bucket {uls_name}"
)
def oc_create_azure_backingstore(cld_mgr, backingstore_name, uls_name, region):
"""
Create a new backingstore with Azure underlying storage using oc create command
Args:
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): which region to create backingstore (should be the same as uls)
"""
bs_data = templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)
bs_data["metadata"]["name"] = backingstore_name
bs_data["spec"] = {
"type": constants.BACKINGSTORE_TYPE_AZURE,
"azureBlob": {
"targetBlobContainer": uls_name,
"secret": {"name": cld_mgr.azure_client.secret.name},
},
}
create_resource(**bs_data)
def cli_create_azure_backingstore(
mcg_obj, cld_mgr, backingstore_name, uls_name, region
):
"""
Create a new backingstore with aws underlying storage using noobaa cli command
Args:
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): which region to create backingstore (should be the same as uls)
"""
mcg_obj.exec_mcg_cmd(
f"backingstore create azure-blob {backingstore_name} "
f"--account-key {cld_mgr.azure_client.credential} "
f"--account-name {cld_mgr.azure_client.account_name} "
f"--target-blob-container {uls_name}"
)
def oc_create_ibmcos_backingstore(cld_mgr, backingstore_name, uls_name, region):
"""
Create a new backingstore with IBM COS underlying storage using oc create command
Args:
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): which region to create backingstore (should be the same as uls)
"""
bs_data = templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)
bs_data["metadata"]["name"] = backingstore_name
bs_data["metadata"]["namespace"] = config.ENV_DATA["cluster_namespace"]
bs_data["spec"] = {
"type": "ibm-cos",
"ibmCos": {
"targetBucket": uls_name,
"signatureVersion": "v2",
"endpoint": constants.IBM_COS_GEO_ENDPOINT_TEMPLATE.format(
cld_mgr.ibmcos_client.region.lower()
),
"secret": {"name": cld_mgr.ibmcos_client.secret.name},
},
}
create_resource(**bs_data)
def cli_create_ibmcos_backingstore(
mcg_obj, cld_mgr, backingstore_name, uls_name, region
):
"""
Create a new backingstore with IBM COS underlying storage using a NooBaa CLI command
Args:
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): which region to create backingstore (should be the same as uls)
"""
mcg_obj.exec_mcg_cmd(
f"backingstore create ibm-cos {backingstore_name} "
f"--access-key {cld_mgr.ibmcos_client.access_key} "
f"--secret-key {cld_mgr.ibmcos_client.secret_key} "
f"""--endpoint {
constants.IBM_COS_GEO_ENDPOINT_TEMPLATE.format(
cld_mgr.ibmcos_client.region.lower()
)
} """
f"--target-bucket {uls_name}"
)
def oc_create_s3comp_backingstore(cld_mgr, backingstore_name, uls_name, region):
pass
def cli_create_s3comp_backingstore(cld_mgr, backingstore_name, uls_name, region):
pass
def oc_create_pv_backingstore(backingstore_name, vol_num, size, storage_class):
"""
Create a new backingstore with pv underlying storage using oc create command
Args:
backingstore_name (str): backingstore name
vol_num (int): number of pv volumes
size (int): each volume size in GB
storage_class (str): which storage class to use
"""
bs_data = templating.load_yaml(constants.PV_BACKINGSTORE_YAML)
bs_data["metadata"]["name"] = backingstore_name
bs_data["metadata"]["namespace"] = config.ENV_DATA["cluster_namespace"]
bs_data["spec"]["pvPool"]["resources"]["requests"]["storage"] = str(size) + "Gi"
bs_data["spec"]["pvPool"]["numVolumes"] = vol_num
bs_data["spec"]["pvPool"]["storageClass"] = storage_class
create_resource(**bs_data)
wait_for_pv_backingstore(backingstore_name, config.ENV_DATA["cluster_namespace"])
def cli_create_pv_backingstore(
mcg_obj, backingstore_name, vol_num, size, storage_class
):
"""
Create a new backingstore with pv underlying storage using noobaa cli command
Args:
backingstore_name (str): backingstore name
vol_num (int): number of pv volumes
size (int): each volume size in GB
storage_class (str): which storage class to use
"""
mcg_obj.exec_mcg_cmd(
f"backingstore create pv-pool {backingstore_name} --num-volumes "
f"{vol_num} --pv-size-gb {size} --storage-class {storage_class}"
)
wait_for_pv_backingstore(backingstore_name, config.ENV_DATA["cluster_namespace"])
def wait_for_pv_backingstore(backingstore_name, namespace=None):
"""
wait for existing pv backing store to reach OPTIMAL state
Args:
backingstore_name (str): backingstore name
namespace (str): backing store's namespace
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
sample = TimeoutSampler(
timeout=240,
sleep=15,
func=check_pv_backingstore_status,
backingstore_name=backingstore_name,
namespace=namespace,
)
if not sample.wait_for_func_status(result=True):
logger.error(f"Backing Store {backingstore_name} never reached OPTIMAL state")
raise TimeoutExpiredError
else:
logger.info(f"Backing Store {backingstore_name} created successfully")
def check_pv_backingstore_status(
backingstore_name, namespace=None, desired_status=constants.HEALTHY_PV_BS
):
"""
check if existing pv backing store is in OPTIMAL state
Args:
backingstore_name (str): backingstore name
namespace (str): backing store's namespace
desired_status (str): desired state for the backing store, if None is given then desired
is the Healthy status
Returns:
bool: True if backing store is in the desired state
"""
kubeconfig = os.getenv("KUBECONFIG")
kubeconfig = f"--kubeconfig {kubeconfig}" if kubeconfig else ""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
cmd = (
f"oc get backingstore -n {namespace} {kubeconfig} {backingstore_name} "
"-o=jsonpath=`{.status.mode.modeCode}`"
)
res = run_cmd(cmd=cmd)
return True if res in desired_status else False
def create_multipart_upload(s3_obj, bucketname, object_key):
"""
Initiates Multipart Upload
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket on which multipart upload to be initiated on
object_key (str): Unique object Identifier
Returns:
str : Multipart Upload-ID
"""
mpu = s3_obj.s3_client.create_multipart_upload(Bucket=bucketname, Key=object_key)
upload_id = mpu["UploadId"]
return upload_id
def list_multipart_upload(s3_obj, bucketname):
"""
Lists the multipart upload details on a bucket
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
Returns:
dict : Dictionary containing the multipart upload details
"""
return s3_obj.s3_client.list_multipart_uploads(Bucket=bucketname)
def list_uploaded_parts(s3_obj, bucketname, object_key, upload_id):
"""
Lists uploaded parts and their ETags
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier
upload_id (str): Multipart Upload-ID
Returns:
dict : Dictionary containing the multipart upload details
"""
return s3_obj.s3_client.list_parts(
Bucket=bucketname, Key=object_key, UploadId=upload_id
)
def complete_multipart_upload(s3_obj, bucketname, object_key, upload_id, parts):
"""
Completes the Multipart Upload
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier
upload_id (str): Multipart Upload-ID
parts (list): List containing the uploaded parts which includes ETag and part number
Returns:
dict : Dictionary containing the completed multipart upload details
"""
result = s3_obj.s3_client.complete_multipart_upload(
Bucket=bucketname,
Key=object_key,
UploadId=upload_id,
MultipartUpload={"Parts": parts},
)
return result
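# How the multipart helpers in this module fit together (bucket, key, and part
# names are hypothetical; every part except the last must normally be at least
# 5 MiB for S3 to accept the completion):
#
#     upload_id = create_multipart_upload(mcg_obj, 'my-bucket', 'big-object')
#     parts = upload_parts(mcg_obj, awscli_pod, 'my-bucket', 'big-object',
#                          '/aws/parts', upload_id, ['part-0', 'part-1'])
#     complete_multipart_upload(mcg_obj, 'my-bucket', 'big-object', upload_id, parts)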
def abort_all_multipart_upload(s3_obj, bucketname, object_key):
"""
Abort all Multipart Uploads for this Bucket
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier
Returns:
list : List of aborted upload ids
"""
multipart_list = s3_obj.s3_client.list_multipart_uploads(Bucket=bucketname)
logger.info(f"Aborting{len(multipart_list)} uploads")
if "Uploads" in multipart_list:
return [
s3_obj.s3_client.abort_multipart_upload(
Bucket=bucketname, Key=object_key, UploadId=upload["UploadId"]
)
for upload in multipart_list["Uploads"]
]
else:
return None
def abort_multipart(s3_obj, bucketname, object_key, upload_id):
"""
Aborts a Multipart Upload for this Bucket
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier
upload_id (str): Multipart Upload-ID
Returns:
str : aborted upload id
"""
return s3_obj.s3_client.abort_multipart_upload(
Bucket=bucketname, Key=object_key, UploadId=upload_id
)
def put_bucket_policy(s3_obj, bucketname, policy):
"""
Adds bucket policy to a bucket
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
policy (str): Bucket policy in Json format
Returns:
dict : Bucket policy response
"""
return s3_obj.s3_client.put_bucket_policy(Bucket=bucketname, Policy=policy)
def get_bucket_policy(s3_obj, bucketname):
"""
Gets bucket policy from a bucket
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
Returns:
dict : Get Bucket policy response
"""
return s3_obj.s3_client.get_bucket_policy(Bucket=bucketname)
def delete_bucket_policy(s3_obj, bucketname):
"""
Deletes bucket policy
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
Returns:
dict : Delete Bucket policy response
"""
return s3_obj.s3_client.delete_bucket_policy(Bucket=bucketname)
def s3_put_object(s3_obj, bucketname, object_key, data, content_type=""):
"""
Simple Boto3 client based Put object
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier
data (str): string content to write to a new S3 object
content_type (str): Type of object data. eg: html, txt etc,
Returns:
dict : Put object response
"""
return s3_obj.s3_client.put_object(
Bucket=bucketname, Key=object_key, Body=data, ContentType=content_type
)
def s3_get_object(s3_obj, bucketname, object_key, versionid=""):
"""
Simple Boto3 client based Get object
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier
versionid (str): Unique version number of an object
Returns:
dict : Get object response
"""
return s3_obj.s3_client.get_object(
Bucket=bucketname, Key=object_key, VersionId=versionid
)
def s3_delete_object(s3_obj, bucketname, object_key, versionid=None):
"""
Simple Boto3 client based Delete object
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier
versionid (str): Unique version number of an object
Returns:
dict : Delete object response
"""
if versionid:
return s3_obj.s3_client.delete_object(
Bucket=bucketname, Key=object_key, VersionId=versionid
)
else:
return s3_obj.s3_client.delete_object(Bucket=bucketname, Key=object_key)
def s3_put_bucket_website(s3_obj, bucketname, website_config):
"""
Boto3 client based Put bucket website function
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
website_config (dict): Website configuration info
Returns:
dict : PutBucketWebsite response
"""
return s3_obj.s3_client.put_bucket_website(
Bucket=bucketname, WebsiteConfiguration=website_config
)
def s3_get_bucket_website(s3_obj, bucketname):
"""
Boto3 client based Get bucket website function
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
Returns:
dict : GetBucketWebsite response
"""
return s3_obj.s3_client.get_bucket_website(Bucket=bucketname)
def s3_delete_bucket_website(s3_obj, bucketname):
"""
Boto3 client based Delete bucket website function
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
Returns:
dict : DeleteBucketWebsite response
"""
return s3_obj.s3_client.delete_bucket_website(Bucket=bucketname)
def s3_put_bucket_versioning(s3_obj, bucketname, status="Enabled", s3_client=None):
"""
Boto3 client based Put Bucket Versioning function
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
status (str): 'Enabled' or 'Suspended'. Default 'Enabled'
s3_client : Any s3 client resource
Returns:
dict : PutBucketVersioning response
"""
if s3_client:
return s3_client.put_bucket_versioning(
Bucket=bucketname, VersioningConfiguration={"Status": status}
)
else:
return s3_obj.s3_client.put_bucket_versioning(
Bucket=bucketname, VersioningConfiguration={"Status": status}
)
def s3_get_bucket_versioning(s3_obj, bucketname, s3_client=None):
"""
Boto3 client based Get Bucket Versioning function
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
s3_client: Any s3 client resource
Returns:
dict : GetBucketVersioning response
"""
if s3_client:
return s3_client.get_bucket_versioning(Bucket=bucketname)
else:
return s3_obj.s3_client.get_bucket_versioning(Bucket=bucketname)
def s3_list_object_versions(s3_obj, bucketname, prefix=""):
"""
    Boto3 client based list object versions function
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
prefix (str): Object key prefix
Returns:
dict : List object version response
"""
return s3_obj.s3_client.list_object_versions(Bucket=bucketname, Prefix=prefix)
def s3_io_create_delete(mcg_obj, awscli_pod, bucket_factory):
"""
Running IOs on s3 bucket
Args:
mcg_obj (obj): An MCG object containing the MCG S3 connection credentials
awscli_pod (pod): A pod running the AWSCLI tools
bucket_factory: Calling this fixture creates a new bucket(s)
"""
target_dir = "/aws/" + uuid4().hex + "_original/"
downloaded_files = retrieve_test_objects_to_pod(awscli_pod, target_dir)
bucketname = bucket_factory(1)[0].name
uploaded_objects_paths = get_full_path_object(downloaded_files, bucketname)
write_individual_s3_objects(
mcg_obj,
awscli_pod,
bucket_factory,
downloaded_files,
target_dir,
bucket_name=bucketname,
)
del_objects(uploaded_objects_paths, awscli_pod, mcg_obj)
awscli_pod.exec_cmd_on_pod(command=f"rm -rf {target_dir}")
def del_objects(uploaded_objects_paths, awscli_pod, mcg_obj):
"""
Deleting objects from bucket
Args:
uploaded_objects_paths (list): List of object paths
awscli_pod (pod): A pod running the AWSCLI tools
mcg_obj (obj): An MCG object containing the MCG S3 connection credentials
"""
for uploaded_filename in uploaded_objects_paths:
logger.info(f"Deleting object {uploaded_filename}")
awscli_pod.exec_cmd_on_pod(
            command=craft_s3_command("rm " + uploaded_filename, mcg_obj),
secrets=[
mcg_obj.access_key_id,
mcg_obj.access_key,
mcg_obj.s3_internal_endpoint,
],
)
def get_full_path_object(downloaded_files, bucket_name):
"""
    Getting the full paths of objects in the bucket
Args:
downloaded_files (list): List of downloaded files
bucket_name (str): Name of the bucket
Returns:
uploaded_objects_paths (list) : List of full paths of objects
"""
uploaded_objects_paths = []
for uploaded_filename in downloaded_files:
uploaded_objects_paths.append(f"s3://{bucket_name}/{uploaded_filename}")
return uploaded_objects_paths
def obc_io_create_delete(mcg_obj, awscli_pod, bucket_factory):
"""
Running IOs on OBC interface
Args:
mcg_obj (obj): An MCG object containing the MCG S3 connection credentials
awscli_pod (pod): A pod running the AWSCLI tools
bucket_factory: Calling this fixture creates a new bucket(s)
"""
dir = "/aws/" + uuid4().hex + "_original/"
downloaded_files = retrieve_test_objects_to_pod(awscli_pod, dir)
bucket_name = bucket_factory(amount=1, interface="OC")[0].name
mcg_bucket_path = f"s3://{bucket_name}/"
uploaded_objects_paths = get_full_path_object(downloaded_files, bucket_name)
sync_object_directory(awscli_pod, dir, mcg_bucket_path, mcg_obj)
del_objects(uploaded_objects_paths, awscli_pod, mcg_obj)
awscli_pod.exec_cmd_on_pod(command=f"rm -rf {dir}")
def retrieve_verification_mode():
if config.ENV_DATA["platform"].lower() == "ibm_cloud":
verify = True
elif config.DEPLOYMENT.get("use_custom_ingress_ssl_cert"):
verify = get_root_ca_cert()
else:
verify = constants.DEFAULT_INGRESS_CRT_LOCAL_PATH
logger.debug(f"verification: '{verify}'")
return verify
def namespace_bucket_update(mcg_obj, bucket_name, read_resource, write_resource):
"""
Edits MCG namespace bucket resources
Args:
mcg_obj (obj): An MCG object containing the MCG S3 connection credentials
bucket_name (str): Name of the bucket
read_resource (list): Resource names to provide read access
write_resource (str): Resource name to provide write access
"""
mcg_obj.send_rpc_query(
"bucket_api",
"update_bucket",
{
"name": bucket_name,
"namespace": {
"read_resources": read_resource,
"write_resource": write_resource,
},
},
)
def write_random_objects_in_pod(io_pod, file_dir, amount, pattern="ObjKey"):
"""
Uses /dev/urandom to create and write random files in a given
directory in a pod
Args:
io_pod (ocs_ci.ocs.ocp.OCP): The pod object in which the files should be
generated and written
file_dir (str): A string describing the path in which
to write the files to
amount (int): The amount of files to generate
pattern (str): The file name pattern to use
Returns:
list: A list with the names of all written objects
"""
obj_lst = []
for i in range(amount):
object_key = pattern + "-{}".format(i)
obj_lst.append(object_key)
io_pod.exec_cmd_on_pod(
f"dd if=/dev/urandom of={file_dir}/{object_key} bs=1M count=1 status=none"
)
return obj_lst
def setup_base_objects(awscli_pod, original_dir, result_dir, amount=2):
"""
    Prepares two directories and populates one of them with objects
Args:
awscli_pod (Pod): A pod running the AWS CLI tools
original_dir (str): original directory name
result_dir (str): result directory name
amount (Int): Number of test objects to create
"""
awscli_pod.exec_cmd_on_pod(command=f"mkdir {original_dir} {result_dir}")
write_random_objects_in_pod(awscli_pod, original_dir, amount)
def check_cached_objects_by_name(mcg_obj, bucket_name, expected_objects_names=None):
"""
Check if the names of cached objects in a cache bucket are as expected using rpc call
Args:
mcg_obj (MCG): An MCG object containing the MCG S3 connection credentials
bucket_name (str): Name of the cache bucket
expected_objects_names (list): Expected objects to be cached
Returns:
bool: True if all the objects exist in the cache as expected, False otherwise
"""
res = mcg_obj.send_rpc_query(
"object_api",
"list_objects",
{
"bucket": bucket_name,
},
).json()
list_objects_res = [name["key"] for name in res.get("reply").get("objects")]
if not expected_objects_names:
expected_objects_names = []
if set(expected_objects_names) == set(list_objects_res):
logger.info("Files cached as expected")
return True
logger.warning(
"Objects did not cache properly, \n"
f"Expected: [{expected_objects_names}]\n"
f"Cached: [{list_objects_res}]"
)
return False
def wait_for_cache(mcg_obj, bucket_name, expected_objects_names=None):
"""
    Wait for an existing cache bucket to cache all required objects
Args:
mcg_obj (MCG): An MCG object containing the MCG S3 connection credentials
bucket_name (str): Name of the cache bucket
expected_objects_names (list): Expected objects to be cached
"""
sample = TimeoutSampler(
timeout=60,
sleep=10,
func=check_cached_objects_by_name,
mcg_obj=mcg_obj,
bucket_name=bucket_name,
expected_objects_names=expected_objects_names,
)
if not sample.wait_for_func_status(result=True):
logger.error("Objects were not able to cache properly")
raise UnexpectedBehaviour
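# Illustrative usage sketch added for clarity (not one of the original helpers).
# The pod, bucket and directory names are assumptions made for the example; a
# cache-bucket test typically writes objects and then blocks on wait_for_cache()
# until the RPC listing above reports them.
def _example_cache_bucket_flow(mcg_obj, awscli_pod, cache_bucket_name):
    written_objects = write_random_test_objects_to_bucket(
        awscli_pod,
        cache_bucket_name,
        file_dir="/aws/cache_example",
        amount=3,
        mcg_obj=mcg_obj,
    )
    wait_for_cache(mcg_obj, cache_bucket_name, expected_objects_names=written_objects)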
def compare_directory(awscli_pod, original_dir, result_dir, amount=2):
"""
Compares object checksums on original and result directories
Args:
awscli_pod (pod): A pod running the AWS CLI tools
original_dir (str): original directory name
result_dir (str): result directory name
amount (int): Number of test objects to create
"""
for i in range(amount):
file_name = f"ObjKey-{i}"
assert verify_s3_object_integrity(
original_object_path=f"{original_dir}/{file_name}",
result_object_path=f"{result_dir}/{file_name}",
awscli_pod=awscli_pod,
        ), "Checksum comparison between original and result object failed"
def s3_copy_object(s3_obj, bucketname, source, object_key):
"""
Boto3 client based copy object
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
        source (str): Source object key, e.g. '<bucket>/<key>'
object_key (str): Unique object Identifier for copied object
Returns:
dict : Copy object response
"""
return s3_obj.s3_client.copy_object(
Bucket=bucketname, CopySource=source, Key=object_key
)
def s3_upload_part_copy(
s3_obj, bucketname, copy_source, object_key, part_number, upload_id
):
"""
Boto3 client based upload_part_copy operation
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
copy_source (str): Name of the source bucket and key name. {bucket}/{key}
part_number (int): Part number
upload_id (str): Upload Id
object_key (str): Unique object Identifier for copied object
Returns:
dict : upload_part_copy response
"""
return s3_obj.s3_client.upload_part_copy(
Bucket=bucketname,
CopySource=copy_source,
Key=object_key,
PartNumber=part_number,
UploadId=upload_id,
)
def s3_get_object_acl(s3_obj, bucketname, object_key):
"""
Boto3 client based get_object_acl operation
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier for copied object
Returns:
dict : get object acl response
"""
return s3_obj.s3_client.get_object_acl(Bucket=bucketname, Key=object_key)
def s3_head_object(s3_obj, bucketname, object_key, if_match=None):
"""
Boto3 client based head_object operation to retrieve only metadata
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier for copied object
if_match (str): Return the object only if its entity tag (ETag)
            is the same as the one specified.
Returns:
dict : head object response
"""
if if_match:
return s3_obj.s3_client.head_object(
Bucket=bucketname, Key=object_key, IfMatch=if_match
)
else:
return s3_obj.s3_client.head_object(Bucket=bucketname, Key=object_key)
def s3_list_objects_v1(
s3_obj, bucketname, prefix="", delimiter="", max_keys=1000, marker=""
):
"""
Boto3 client based list object version1
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
prefix (str): Limits the response to keys that begin with the specified prefix.
delimiter (str): Character used to group keys.
max_keys (int): Maximum number of keys returned in the response. Default 1,000 keys.
marker (str): key to start with when listing objects in a bucket.
Returns:
dict : list object v1 response
"""
return s3_obj.s3_client.list_objects(
Bucket=bucketname,
Prefix=prefix,
Delimiter=delimiter,
MaxKeys=max_keys,
Marker=marker,
)
def s3_list_objects_v2(
s3_obj,
bucketname,
prefix="",
delimiter="",
max_keys=1000,
con_token="",
fetch_owner=False,
):
"""
Boto3 client based list object version2
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
prefix (str): Limits the response to keys that begin with the specified prefix.
delimiter (str): Character used to group keys.
max_keys (int): Maximum number of keys returned in the response. Default 1,000 keys.
con_token (str): Token used to continue the list
        fetch_owner (bool): Whether to include object owner information in the response
Returns:
dict : list object v2 response
"""
return s3_obj.s3_client.list_objects_v2(
Bucket=bucketname,
Prefix=prefix,
Delimiter=delimiter,
MaxKeys=max_keys,
ContinuationToken=con_token,
FetchOwner=fetch_owner,
)
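# Illustrative pagination sketch added for clarity (not one of the original
# helpers). It shows how the continuation token returned by list_objects_v2 is
# meant to be fed back into subsequent calls; 'Contents', 'Key', 'IsTruncated' and
# 'NextContinuationToken' are standard boto3 response fields.
def _example_list_all_objects_v2(s3_obj, bucketname):
    keys = []
    con_token = ""
    while True:
        resp = s3_list_objects_v2(s3_obj, bucketname, con_token=con_token)
        keys.extend(content["Key"] for content in resp.get("Contents", []))
        if not resp.get("IsTruncated"):
            return keys
        con_token = resp["NextContinuationToken"]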
def s3_delete_objects(s3_obj, bucketname, object_keys):
"""
Boto3 client based delete objects
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_keys (list): The objects to delete. Format: {'Key': 'object_key', 'VersionId': ''}
Returns:
dict : delete objects response
"""
return s3_obj.s3_client.delete_objects(
Bucket=bucketname, Delete={"Objects": object_keys}
)
def bucket_read_api(mcg_obj, bucket_name):
"""
    Fetches bucket metadata such as size, tiers, etc.
Args:
mcg_obj (obj): MCG object
bucket_name (str): Name of the bucket
Returns:
        dict : Bucket read (metadata) response
"""
resp = mcg_obj.send_rpc_query(
"bucket_api", "read_bucket", params={"name": bucket_name}
)
bucket_read_resp = resp.json().get("reply")
return bucket_read_resp
def get_bucket_available_size(mcg_obj, bucket_name):
"""
Function to get the bucket available size
Args:
mcg_obj (obj): MCG object
bucket_name (str): Name of the bucket
Returns:
int : Available size in the bucket
"""
resp = bucket_read_api(mcg_obj, bucket_name)
bucket_size = resp["storage"]["values"]["free"]
return bucket_size
def compare_bucket_object_list(mcg_obj, first_bucket_name, second_bucket_name):
"""
Compares the object lists of two given buckets
Args:
mcg_obj (MCG): An initialized MCG object
first_bucket_name (str): The name of the first bucket to compare
second_bucket_name (str): The name of the second bucket to compare
Returns:
bool: True if both buckets contain the same object names in all objects,
False otherwise
"""
def _comparison_logic():
first_bucket_object_set = {
obj.key for obj in mcg_obj.s3_list_all_objects_in_bucket(first_bucket_name)
}
second_bucket_object_set = {
obj.key for obj in mcg_obj.s3_list_all_objects_in_bucket(second_bucket_name)
}
if first_bucket_object_set == second_bucket_object_set:
logger.info("Objects in both buckets are identical")
return True
else:
logger.warning(
f"""Buckets {first_bucket_name} and {second_bucket_name} do not contain the same objects.
{first_bucket_name} objects:
{first_bucket_object_set}
{second_bucket_name} objects:
{second_bucket_object_set}
"""
)
return False
try:
for comparison_result in TimeoutSampler(600, 30, _comparison_logic):
if comparison_result:
return True
except TimeoutExpiredError:
logger.error(
"The compared buckets did not contain the same set of objects after ten minutes"
)
return False
def write_random_test_objects_to_bucket(
io_pod,
bucket_to_write,
file_dir,
amount=1,
mcg_obj=None,
s3_creds=None,
):
"""
Write files generated by /dev/urandom to a bucket
Args:
io_pod (ocs_ci.ocs.ocp.OCP): The pod which should handle all needed IO operations
bucket_to_write (str): The bucket name to write the random files to
file_dir (str): The path to the folder where all random files will be
generated and copied from
amount (int, optional): The amount of random objects to write. Defaults to 1.
mcg_obj (MCG, optional): An MCG class instance
s3_creds (dict, optional): A dictionary containing S3-compatible credentials
for writing objects directly to buckets outside of the MCG. Defaults to None.
Returns:
list: A list containing the names of the random files that were written
"""
full_object_path = f"s3://{bucket_to_write}"
obj_lst = write_random_objects_in_pod(io_pod, file_dir, amount)
sync_object_directory(
io_pod,
file_dir,
full_object_path,
s3_obj=mcg_obj,
signed_request_creds=s3_creds,
)
return obj_lst
|
paper_experiments/utils/tracker.py | noskill/JRMOT_ROS | 112 | 12746447 | # vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
import pdb
from . import kf_2d, kf_3d, double_measurement_kf, imm
from . import linear_assignment
from . import iou_matching
from .track import Track
from . import JPDA_matching
from . import tracking_utils
import math
from nn_matching import NearestNeighborDistanceMetric
import cv2
class Tracker:
"""
This is the multi-target tracker.
Parameters
----------
metric : nn_matching.NearestNeighborDistanceMetric
A distance metric for measurement-to-track association.
max_age : int
        Maximum number of consecutive misses before a track is deleted.
n_init : int
Number of consecutive detections before the track is confirmed. The
track state is set to `Deleted` if a miss occurs within the first
`n_init` frames.
Attributes
----------
metric : nn_matching.NearestNeighborDistanceMetric
The distance metric used for measurement to track association.
max_age : int
        Maximum number of consecutive misses before a track is deleted.
n_init : int
Number of frames that a track remains in initialization phase.
kf : EKF.KalmanFilter
A Kalman filter to filter target trajectories in image space.
tracks : List[Track]
The list of active tracks at the current time step.
"""
def __init__(self, max_age=5, n_init=3,
JPDA=False, m_best_sol=1, assn_thresh=0.0,
matching_strategy=None,
kf_appearance_feature=None,
gate_full_state=False, lstm = None, cuda = False, appearance_model = None,
calib = None, kf_vel_params=(1./20, 1./160, 1, 1, 2), dummy_node_cost_iou=0.4, dummy_node_cost_app=0.2, nn_budget = None, use_imm=False, kf_walk_params=(1./20, 1./160, 1, 1, 2),
markov=(0.9, 0.7), uncertainty_limit=1.8, optical_flow=False, gate_limit=400):
self.max_age = max_age
self.n_init = n_init
self.metric = NearestNeighborDistanceMetric("euclidean", nn_budget)
if not use_imm:
self.kf = kf_2d.KalmanFilter2D(*kf_vel_params, gate_limit)
self.use_imm = False
else:
self.kf = imm.IMMFilter2D(kf_vel_params, kf_walk_params, markov=markov)
self.use_imm = True
self.tracks = []
self._next_id = 1
self.JPDA = JPDA
self.m_best_sol = m_best_sol
self.assn_thresh = assn_thresh
self.matching_strategy = matching_strategy
self.kf_appearance_feature = kf_appearance_feature
self.gate_only_position = not gate_full_state
self.lstm = lstm
self.cuda = cuda
self.dummy_node_cost_app = dummy_node_cost_app
self.dummy_node_cost_iou = dummy_node_cost_iou
self.appearance_model = appearance_model
self.prev_frame = None
self.uncertainty_limit = uncertainty_limit
self.optical_flow = optical_flow
# @profile
def gated_metric(self, tracks, dets, track_indices, detection_indices, compare_2d = False):
targets = np.array([tracks[i].track_id for i in track_indices])
if not compare_2d and self.metric.check_samples(targets):
compare_2d = True
if compare_2d:
features = np.array([dets[i].appearance_feature for i in detection_indices])
else:
features = np.array([dets[i].feature for i in detection_indices])
#cost_matrix = self.metric.distance(features, targets, compare_2d)
cost_matrix_appearance = self.metric.distance_torch(features, targets, compare_2d)
cost_matrix_iou = iou_matching.iou_cost(tracks, dets, track_indices, detection_indices)
gate_mask = linear_assignment.gate_cost_matrix(
self.kf, tracks, dets, track_indices,
detection_indices, only_position=self.gate_only_position)
cost_matrix = np.dstack((cost_matrix_appearance, cost_matrix_iou))
return cost_matrix, gate_mask
def predict(self):
"""Propagate track state distributions one time step forward.
This function should be called once every time step, before `update`.
"""
for track in self.tracks:
track.predict(self.kf)
# @profile
def update(self, cur_frame, detections, compare_2d = False):
"""Perform measurement update and track management.
Parameters
----------
detections : List[deep_sort.detection.Detection]
A list of detections at the current time step.
"""
self.cur_frame = cv2.cvtColor((255*cur_frame).permute(1,2,0).cpu().numpy(), cv2.COLOR_BGR2GRAY)
matches, unmatched_tracks, unmatched_detections = \
self._match(detections, compare_2d)
# update filter for each assigned track
# Only do this for non-JPDA because in JPDA the kf states are updated
# during the matching process
if not self.JPDA:
# Map matched tracks to detections
track_detection_map = {t:d for (t,d) in matches}
# Map unmatched tracks to -1 for no detection
for t in unmatched_tracks:
track_detection_map[t] = -1
for track_idx, detection_idx in matches:
self.tracks[track_idx].update(self.kf, detections,
detection_idx=detection_idx, JPDA=self.JPDA,
cur_frame = self.cur_frame, appearance_model = self.appearance_model,
lstm = self.lstm)
# update track state for unmatched tracks
for track_idx in unmatched_tracks:
self.tracks[track_idx].mark_missed()
# create new tracks
self.prune_tracks()
flow = None
if unmatched_detections:
if self.optical_flow and self.prev_frame is not None:
flow = cv2.calcOpticalFlowFarneback(self.prev_frame, self.cur_frame, None, 0.5, 3, 15, 3, 5, 1.2, 0)
for detection_idx in unmatched_detections:
self._initiate_track(detections[detection_idx], flow)
# Update distance metric.
active_targets = [t.track_id for t in self.tracks]
features, features_2d, targets, targets_2d = [], [], [], []
for track in self.tracks:
features += track.features
features_2d += track.features_2d
targets += [track.track_id for _ in track.features]
targets_2d += [track.track_id for _ in track.features_2d]
track.features = []
track.features_2d = []
self.metric.partial_fit(
np.asarray(features), np.asarray(features_2d), np.asarray(targets), np.asarray(targets_2d), active_targets)
self.prev_frame = self.cur_frame
# @profile
def _match(self, detections, compare_2d):
# Associate all tracks using combined cost matrices.
if self.JPDA:
# Run JPDA on all tracks
marginalizations = \
linear_assignment.JPDA(self.gated_metric, self.dummy_node_cost_app, self.dummy_node_cost_iou, self.tracks, \
detections, m=self.m_best_sol, compare_2d = compare_2d)
# for track in self.tracks: #TODO: REMOVE
# print(track.track_id)
# print(marginalizations)
jpda_matcher = JPDA_matching.Matcher(
detections, marginalizations, range(len(self.tracks)),
self.matching_strategy, assignment_threshold=self.assn_thresh)
matches_a, unmatched_tracks_a, unmatched_detections = jpda_matcher.match()
            # Map matched tracks to detections
track_detection_map = {t:d for (t,d) in matches_a}
# Map unmatched tracks to -1 for no detection
for t in unmatched_tracks_a:
track_detection_map[t] = -1
# update Kalman state
if marginalizations.shape[0] > 0:
for i in range(len(self.tracks)):
self.tracks[i].update(self.kf, detections,
marginalization=marginalizations[i,:], detection_idx=track_detection_map[i],
JPDA=self.JPDA, cur_frame = self.cur_frame, appearance_model = self.appearance_model, lstm = self.lstm)
else:
confirmed_tracks = [i for i, t in enumerate(self.tracks) if t.is_confirmed()]
matches_a, unmatched_tracks_a, unmatched_detections = \
linear_assignment.matching_cascade(
self.gated_metric, self.dummy_node_cost_iou, self.max_age,
self.tracks, detections, confirmed_tracks, compare_2d = compare_2d)
return matches_a, unmatched_tracks_a, unmatched_detections
def _initiate_track(self, detection, flow=None):
if self.use_imm:
mean, covariance, model_probabilities = self.kf.initiate(detection.to_xywh(), flow)
else:
mean, covariance = self.kf.initiate(detection.to_xywh(), flow)
model_probabilities = None
self.tracks.append(Track(
mean, covariance, model_probabilities, self._next_id, self.n_init, self.max_age,
kf_appearance_feature = self.kf_appearance_feature,
feature=detection.feature, appearance_feature = detection.appearance_feature,
cuda = self.cuda, lstm = self.lstm, last_det = detection))
self._next_id += 1
def prune_tracks(self):
h, w = self.cur_frame.shape
for track in self.tracks:
# Check if track is leaving
if self.use_imm:
predicted_mean, predicted_cov = self.kf.combine_states(track.mean, track.covariance, track.model_probabilities) #TODO: This doesn't predict. Mean should def predict
else:
predicted_mean = self.kf.predict_mean(track.mean)
predicted_cov = track.covariance
predicted_pos = predicted_mean[:2]
predicted_vel = predicted_mean[4:6]
predicted_pos[0] -= w/2
predicted_pos[1] -= h/2
cos_theta = np.dot(predicted_pos, predicted_vel)/(np.linalg.norm(predicted_pos)*
np.linalg.norm(predicted_vel) + 1e-6)
predicted_pos[0] += w/2
predicted_pos[1] += h/2
# Thresholds for deciding whether track is outside image
BORDER_VALUE = 0
if (cos_theta > 0 and
(predicted_pos[0] - track.mean[2]/2<= BORDER_VALUE or
predicted_pos[0] + track.mean[2]/2 >= w - BORDER_VALUE)):
if track.is_exiting() and not track.matched:
track.delete_track()
else:
track.mark_exiting()
# Check if track is too uncertain
# cov_axis,_ = np.linalg.eigh(predicted_cov)
# if np.abs(np.sqrt(cov_axis[-1]))*6 > self.uncertainty_limit*np.linalg.norm(predicted_mean[2:4]):
# track.delete_track()
self.tracks = [t for t in self.tracks if not t.is_deleted()]
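# Illustrative usage sketch (not part of the original tracker): frames are assumed
# to be CHW torch tensors and detections_per_frame lists of Detection objects, both
# produced by the surrounding detection pipeline; tracker_kwargs carries whatever
# JPDA/matching/appearance settings the caller's config provides. The per-frame
# contract is predict() first, then update() with the new detections.
def _example_tracking_loop(frames, detections_per_frame, tracker_kwargs=None):
    tracker = Tracker(**(tracker_kwargs or {}))
    for frame, detections in zip(frames, detections_per_frame):
        tracker.predict()
        tracker.update(frame, detections)
        yield [(t.track_id, t.mean) for t in tracker.tracks if t.is_confirmed()]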
|
zarr/tests/test_info.py | parthxtripathi/zarr-python | 203 | 12746462 | import numcodecs
import pytest
import zarr
from zarr.util import InfoReporter
@pytest.mark.parametrize('array_size', [10, 15000])
def test_info(array_size):
# setup
g = zarr.group(store=dict(), chunk_store=dict(),
synchronizer=zarr.ThreadSynchronizer())
g.create_group('foo')
z = g.zeros('bar', shape=array_size, filters=[numcodecs.Adler32()])
# test group info
items = g.info_items()
keys = sorted([k for k, _ in items])
expected_keys = sorted([
'Type', 'Read-only', 'Synchronizer type', 'Store type', 'Chunk store type',
'No. members', 'No. arrays', 'No. groups', 'Arrays', 'Groups', 'Name'
])
assert expected_keys == keys
# can also get a string representation of info via the info attribute
assert isinstance(g.info, InfoReporter)
assert "Type" in repr(g.info)
# test array info
items = z.info_items()
keys = sorted([k for k, _ in items])
expected_keys = sorted([
'Type', 'Data type', 'Shape', 'Chunk shape', 'Order', 'Read-only', 'Filter [0]',
'Compressor', 'Synchronizer type', 'Store type', 'Chunk store type', 'No. bytes',
'No. bytes stored', 'Storage ratio', 'Chunks initialized', 'Name'
])
assert expected_keys == keys
# can also get a string representation of info via the info attribute
assert isinstance(z.info, InfoReporter)
assert "Type" in repr(z.info)
|
test/functional/tests/io_class/io_class_common.py | Ostrokrzew/open-cas-linux | 139 | 12746476 | <filename>test/functional/tests/io_class/io_class_common.py
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from api.cas import casadm
from api.cas import ioclass_config
from api.cas.cache_config import (
CacheLineSize,
CacheMode,
CleaningPolicy,
SeqCutOffPolicy,
)
from core.test_run import TestRun
from test_tools.dd import Dd
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine
from test_utils.os_utils import Udev, sync
from test_utils.os_utils import drop_caches, DropCachesMode
from test_utils.size import Size, Unit
ioclass_config_path = "/tmp/opencas_ioclass.conf"
mountpoint = "/tmp/cas1-1"
def prepare(
cache_size=Size(10, Unit.GibiByte),
core_size=Size(40, Unit.GibiByte),
cache_mode=CacheMode.WB,
cache_line_size=CacheLineSize.LINE_4KiB,
default_allocation="0.00"
):
ioclass_config.remove_ioclass_config()
cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"]
cache_device.create_partitions([cache_size])
core_device.create_partitions([core_size])
cache_device = cache_device.partitions[0]
core_device = core_device.partitions[0]
TestRun.LOGGER.info(f"Starting cache")
cache = casadm.start_cache(
cache_device, cache_mode=cache_mode, cache_line_size=cache_line_size, force=True
)
Udev.disable()
TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
casadm.set_param_cleaning(cache_id=cache.cache_id, policy=CleaningPolicy.nop)
TestRun.LOGGER.info(f"Adding core device")
core = casadm.add_core(cache, core_dev=core_device)
TestRun.LOGGER.info(f"Setting seq cutoff policy to never")
core.set_seq_cutoff_policy(SeqCutOffPolicy.never)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
    # To make the test more precise, all workloads except the tested ioclass should
    # be put in pass-through mode
ioclass_config.add_ioclass(
ioclass_id=ioclass_config.DEFAULT_IO_CLASS_ID,
eviction_priority=ioclass_config.DEFAULT_IO_CLASS_PRIORITY,
allocation=default_allocation,
rule=ioclass_config.DEFAULT_IO_CLASS_RULE,
ioclass_config_path=ioclass_config_path,
)
output = TestRun.executor.run(f"mkdir -p {mountpoint}")
if output.exit_code != 0:
raise Exception(f"Failed to create mountpoint")
return cache, core
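# Illustrative follow-up sketch (not part of the original helpers): a test built on
# prepare() usually appends its own IO class on top of the pass-through default and
# then drives IO with run_io_dir(). The id, priority and directory rule below are
# example values only; loading the updated config into the cache is done with the
# casadm wrapper appropriate for the framework version.
def example_add_tested_ioclass(ioclass_id=1, eviction_priority=22):
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=eviction_priority,
        allocation="1.00",
        rule=f"directory:{mountpoint}",
        ioclass_config_path=ioclass_config_path,
    )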
def get_io_class_occupancy(cache, io_class_id, percent=False):
return get_io_class_usage(cache, io_class_id, percent).occupancy
def get_io_class_dirty(cache, io_class_id):
return get_io_class_usage(cache, io_class_id).dirty
def get_io_class_usage(cache, io_class_id, percent=False):
return cache.get_io_class_statistics(
io_class_id=io_class_id, percentage_val=percent
).usage_stats
def run_io_dir(path, size_4k, offset=0):
dd = (
Dd()
.input("/dev/zero")
.output(f"{path}")
.count(size_4k)
.block_size(Size(1, Unit.Blocks4096))
.seek(offset)
)
TestRun.LOGGER.info(f"{dd}")
output = dd.run()
if output.exit_code != 0:
TestRun.fail(f"Failed to execute dd.\n {output.stdout}\n{output.stderr}")
sync()
drop_caches(DropCachesMode.ALL)
def run_io_dir_read(path):
dd = Dd().output("/dev/null").input(f"{path}")
output = dd.run()
if output.exit_code != 0:
TestRun.fail(f"Failed to execute dd.\n {output.stdout}\n{output.stderr}")
sync()
drop_caches(DropCachesMode.ALL)
def run_fio_count(core, blocksize, num_ios):
(
Fio()
.create_command()
.target(core)
.io_engine(IoEngine.libaio)
.read_write(ReadWrite.randread)
.block_size(blocksize)
.direct()
.file_size(Size(10, Unit.GibiByte))
.num_ios(num_ios)
.run()
)
|
salt/modules/rebootmgr.py | tomdoherty/salt | 9,425 | 12746483 | """
:maintainer: <NAME> <<EMAIL>>
:maturity: new
:depends: None
:platform: Linux
.. versionadded:: 3004
"""
import logging
import re
import salt.exceptions
log = logging.getLogger(__name__)
def __virtual__():
"""rebootmgrctl command is required."""
if __utils__["path.which"]("rebootmgrctl") is not None:
return True
else:
        return (False, "Module rebootmgr requires the command rebootmgrctl")
def _cmd(cmd, retcode=False):
"""Utility function to run commands."""
result = __salt__["cmd.run_all"](cmd)
if retcode:
return result["retcode"]
if result["retcode"]:
raise salt.exceptions.CommandExecutionError(result["stderr"])
return result["stdout"]
def version():
"""Return the version of rebootmgrd
CLI Example:
.. code-block:: bash
salt microos rebootmgr version
"""
cmd = ["rebootmgrctl", "--version"]
return _cmd(cmd).split()[-1]
def is_active():
"""Check if the rebootmgrd is running and active or not.
CLI Example:
.. code-block:: bash
salt microos rebootmgr is_active
"""
cmd = ["rebootmgrctl", "is_active", "--quiet"]
return _cmd(cmd, retcode=True) == 0
def reboot(order=None):
"""Tells rebootmgr to schedule a reboot.
With the [now] option, a forced reboot is done, no lock from etcd
is requested and a set maintenance window is ignored. With the
[fast] option, a lock from etcd is requested if needed, but a
defined maintenance window is ignored.
order
If specified, can be "now" or "fast"
CLI Example:
.. code-block:: bash
salt microos rebootmgr reboot
        salt microos rebootmgr reboot order=now
"""
if order and order not in ("now", "fast"):
raise salt.exceptions.CommandExecutionError(
"Order parameter, if specified, must be 'now' or 'fast'"
)
cmd = ["rebootmgrctl", "reboot"]
if order:
cmd.append(order)
return _cmd(cmd)
def cancel():
"""Cancels an already running reboot.
CLI Example:
.. code-block:: bash
salt microos rebootmgr cancel
"""
cmd = ["rebootmgrctl", "cancel"]
return _cmd(cmd)
def status():
"""Returns the current status of rebootmgrd.
Valid returned values are:
0 - No reboot requested
1 - Reboot requested
2 - Reboot requested, waiting for maintenance window
3 - Reboot requested, waiting for etcd lock.
CLI Example:
.. code-block:: bash
salt microos rebootmgr status
"""
cmd = ["rebootmgrctl", "status", "--quiet"]
return _cmd(cmd, retcode=True)
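# Illustrative sketch (not part of the upstream module): combine the wrappers above
# to request a reboot only when rebootmgrd is active and no reboot is already
# pending (status code 0 as documented in status()).
def example_safe_reboot():
    if is_active() and status() == 0:
        return reboot()
    return None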
def set_strategy(strategy=None):
"""A new strategy to reboot the machine is set and written into
/etc/rebootmgr.conf.
strategy
If specified, must be one of those options:
best-effort - This is the default strategy. If etcd is
running, etcd-lock is used. If no etcd is running, but a
maintenance window is specified, the strategy will be
maint-window. If no maintenance window is specified, the
machine is immediately rebooted (instantly).
etcd-lock - A lock at etcd for the specified lock-group will
be acquired before reboot. If a maintenance window is
specified, the lock is only acquired during this window.
maint-window - Reboot does happen only during a specified
maintenance window. If no window is specified, the
instantly strategy is followed.
instantly - Other services will be informed that a reboot will
happen. Reboot will be done without getting any locks or
waiting for a maintenance window.
        off - Reboot requests are temporarily
ignored. /etc/rebootmgr.conf is not modified.
CLI Example:
.. code-block:: bash
        salt microos rebootmgr set_strategy strategy=off
"""
if strategy and strategy not in (
"best-effort",
"etcd-lock",
"maint-window",
"instantly",
"off",
):
raise salt.exceptions.CommandExecutionError("Strategy parameter not valid")
cmd = ["rebootmgrctl", "set-strategy"]
if strategy:
cmd.append(strategy)
return _cmd(cmd)
def get_strategy():
"""The currently used reboot strategy of rebootmgrd will be printed.
CLI Example:
.. code-block:: bash
salt microos rebootmgr get_strategy
"""
cmd = ["rebootmgrctl", "get-strategy"]
return _cmd(cmd).split(":")[-1].strip()
def set_window(time, duration):
    """Sets the maintenance window.
time
The format of time is the same as described in
systemd.time(7).
duration
The format of duration is "[XXh][YYm]".
CLI Example:
.. code-block:: bash
salt microos rebootmgr set_window time="Thu,Fri 2020-*-1,5 11:12:13" duration=1h
"""
cmd = ["rebootmgrctl", "set-window", time, duration]
return _cmd(cmd)
def get_window():
"""The currently set maintenance window will be printed.
CLI Example:
.. code-block:: bash
salt microos rebootmgr get_window
"""
cmd = ["rebootmgrctl", "get-window"]
window = _cmd(cmd)
return dict(
zip(
("time", "duration"),
re.search(
r"Maintenance window is set to (.*), lasting (.*).", window
).groups(),
)
)
def set_group(group):
"""Set the group, to which this machine belongs to get a reboot lock
from etcd.
group
Group name
CLI Example:
.. code-block:: bash
salt microos rebootmgr set_group group=group_1
"""
cmd = ["rebootmgrctl", "set-group", group]
return _cmd(cmd)
def get_group():
"""The currently set lock group for etcd.
CLI Example:
.. code-block:: bash
salt microos rebootmgr get_group
"""
cmd = ["rebootmgrctl", "get-group"]
group = _cmd(cmd)
return re.search(r"Etcd lock group is set to (.*)", group).groups()[0]
def set_max(max_locks, group=None):
"""Set the maximal number of hosts in a group, which are allowed to
reboot at the same time.
    max_locks
Maximal number of hosts in a group
group
Group name
CLI Example:
.. code-block:: bash
salt microos rebootmgr set_max 4
"""
cmd = ["rebootmgrctl", "set-max"]
if group:
cmd.extend(["--group", group])
cmd.append(max_locks)
return _cmd(cmd)
def lock(machine_id=None, group=None):
"""Lock a machine. If no group is specified, the local default group
will be used. If no machine-id is specified, the local machine
will be locked.
machine_id
The machine-id is a network wide, unique ID. Per default the
ID from /etc/machine-id is used.
group
Group name
CLI Example:
.. code-block:: bash
salt microos rebootmgr lock group=group1
"""
cmd = ["rebootmgrctl", "lock"]
if group:
cmd.extend(["--group", group])
if machine_id:
cmd.append(machine_id)
return _cmd(cmd)
def unlock(machine_id=None, group=None):
"""Unlock a machine. If no group is specified, the local default group
will be used. If no machine-id is specified, the local machine
    will be unlocked.
machine_id
The machine-id is a network wide, unique ID. Per default the
ID from /etc/machine-id is used.
group
Group name
CLI Example:
.. code-block:: bash
salt microos rebootmgr unlock group=group1
"""
cmd = ["rebootmgrctl", "unlock"]
if group:
cmd.extend(["--group", group])
if machine_id:
cmd.append(machine_id)
return _cmd(cmd)
|
tests/test_perspective_queue.py | sguzman/castero | 483 | 12746494 | <reponame>sguzman/castero<filename>tests/test_perspective_queue.py
import os
from unittest import mock
from castero.config import Config
from castero.episode import Episode
from castero.feed import Feed
from castero.player import Player
from castero.queue import Queue
my_dir = os.path.dirname(os.path.realpath(__file__))
feed = Feed(file=my_dir + "/feeds/valid_basic.xml")
episode = Episode(
feed,
title="episode title",
description="episode description",
link="episode link",
pubdate="episode pubdate",
copyright="episode copyright",
enclosure="episode enclosure",
)
player1 = Player("MLK Dream", my_dir + "/media/MLK_Dream_10s.mp3", episode)
player2 = Player("MLK Dream", my_dir + "/media/MLK_Dream_10s.mp3", episode)
player3 = Player("MLK Dream", my_dir + "/media/MLK_Dream_10s.mp3", episode)
queue = Queue(mock.MagicMock())
queue.add(player1)
queue.add(player2)
def get_queue_perspective(display):
"""Retrieve the Queue perspective.
:param display the display containing the loaded perspective
:returns Queue: the loaded Queue perspective
"""
display._active_perspective = 2
return display.perspectives[2]
def test_perspective_queue_borders(display):
perspective = get_queue_perspective(display)
display.display()
assert perspective._queue_window.hline.call_count == 1
assert perspective._queue_window.vline.call_count == 1
assert perspective._metadata_window.hline.call_count == 1
display._stdscr.reset_mock()
def test_perspective_queue_display_episode_metadata(display):
perspective = get_queue_perspective(display)
display._queue = queue
perspective._draw_metadata = mock.MagicMock()
display.display()
perspective._draw_metadata.assert_called_with(perspective._metadata_window)
display._stdscr.reset_mock()
def test_perspective_queue_input_keys(display):
perspective = get_queue_perspective(display)
display._queue = queue
display._footer_window.getch = mock.MagicMock(return_value=10)
ret_val = perspective.handle_input(ord("h"))
assert ret_val
display._stdscr.reset_mock()
movement_keys = [
display.KEY_MAPPING[Config["key_up"]],
display.KEY_MAPPING[Config["key_right"]],
display.KEY_MAPPING[Config["key_down"]],
display.KEY_MAPPING[Config["key_left"]],
display.KEY_MAPPING[Config["key_scroll_up"]],
display.KEY_MAPPING[Config["key_scroll_down"]],
]
for key in movement_keys:
perspective._metadata_updated = True
ret_val = perspective.handle_input(key)
assert ret_val
assert not perspective._metadata_updated
operation_keys = [
display.KEY_MAPPING[Config["key_delete"]],
display.KEY_MAPPING[Config["key_remove"]],
display.KEY_MAPPING[Config["key_reload"]],
display.KEY_MAPPING[Config["key_reload_selected"]],
display.KEY_MAPPING[Config["key_play_selected"]],
display.KEY_MAPPING[Config["key_add_selected"]],
display.KEY_MAPPING[Config["key_clear"]],
display.KEY_MAPPING[Config["key_next"]],
display.KEY_MAPPING[Config["key_pause_play"]],
display.KEY_MAPPING[Config["key_pause_play_alt"]],
display.KEY_MAPPING[Config["key_seek_forward"]],
display.KEY_MAPPING[Config["key_seek_forward_alt"]],
display.KEY_MAPPING[Config["key_seek_backward"]],
display.KEY_MAPPING[Config["key_seek_backward_alt"]],
display.KEY_MAPPING[Config["key_execute"]],
]
for key in operation_keys:
ret_val = perspective.handle_input(key)
assert ret_val
ret_val = perspective.handle_input(ord("q"))
assert not ret_val
display._stdscr.reset_mock()
def test_perspective_queue_draw_metadata(display):
perspective = get_queue_perspective(display)
display.database.replace_feed(feed)
display.database.replace_episodes(feed, [episode])
display.menus_valid = False
perspective._draw_metadata(perspective._metadata_window)
perspective._draw_metadata(perspective._metadata_window)
def test_perspective_queue_get_active_menu(display):
perspective = get_queue_perspective(display)
perspective._active_window = 0
assert perspective._get_active_menu() == perspective._queue_menu
def test_perspective_queue_remove_selected_first(display):
perspective = get_queue_perspective(display)
perspective._queue_menu = mock.MagicMock()
perspective._queue_menu.item = player1
queue1 = Queue(display)
queue1.add(player1)
queue1.add(player2)
queue1.add(player3)
display._queue = queue1
perspective._remove_selected_from_queue()
assert queue1.first == player2
assert queue1.length == 2
def test_perspective_queue_remove_selected_middle(display):
perspective = get_queue_perspective(display)
perspective._queue_menu = mock.MagicMock()
perspective._queue_menu.item = player2
queue1 = Queue(display)
queue1.add(player1)
queue1.add(player2)
queue1.add(player3)
display._queue = queue1
perspective._remove_selected_from_queue()
assert queue1.first == player1
assert queue1.length == 2
|
media/mca/structgen.py | rio-31/android_frameworks_base-1 | 164 | 12746510 | <gh_stars>100-1000
#!/usr/bin/env python
#
# Copyright (C) 2011 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
hFileTemplate = """/**
* This file is auto-generated by platform/system/media/mca/structgen.py! Do NOT modify!
**/
#ifndef %s
#define %s
%s
#endif // %s
"""
jniFileTemplate = """/**
* This file is auto-generated by platform/system/media/mca/structgen.py! Do NOT modify!
**/
#include <stdint.h>
#include "native/%s.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "jni.h"
// Helper functions ////////////////////////////////////////////////////////////////////////////////
%s* Get%sAtIndex(JNIEnv* env, jobject buffer, int index) {
jclass base_class = (*env)->FindClass(env, "android/filterfw/core/NativeBuffer");
jfieldID ptr_field = (*env)->GetFieldID(env, base_class, "mDataPointer", "J");
uintptr_t data_ptr = (*env)->GetLongField(env, buffer, ptr_field);
%s* array = (%s*)data_ptr;
(*env)->DeleteLocalRef(env, base_class);
return &array[index];
}
// Declarations ////////////////////////////////////////////////////////////////////////////////////
JNIEXPORT jint JNICALL
Java_%s_getElementSize(JNIEnv* env, jobject thiz);
%s
#ifdef __cplusplus
}
#endif
// Implementation //////////////////////////////////////////////////////////////////////////////////
jint Java_%s_getElementSize(JNIEnv* env, jobject thiz) {
return sizeof(%s);
}
%s
"""
javaFileTemplate = """/**
* This file is auto-generated by platform/system/media/mca/structgen.py! Do NOT modify!
**/
package %s;
import android.filterfw.core.NativeBuffer;
%s
"""
def ToJavaName(cname, start_upper_at = 1):
lower = cname.split("_")
upper = [c.title() for c in lower]
return "".join(lower[:start_upper_at] + upper[start_upper_at:])
def ToJNIPackage(package, jclassname):
return "%s_%s" % (package.replace(".", "_"), jclassname)
def ToMacroDefName(cname, pname):
return "%s_%s" % (pname.replace(".", "_").upper(), cname.upper())
class ParseError:
def __init__(self, lineno, message):
self.lineno = lineno
self.message = message
def __str__(self):
return "On line %d: %s" % (self.lineno, self.message)
class FieldType_BasePOD:
def __init__(self, name, structname, jclassname, package, javatype, ctype, jtype, defval):
self.name = name
self.structname = structname
self.jclassname = jclassname
self.package = package
self.javatype = javatype
self.ctype = ctype
self.jtype = jtype
self.defval = defval
def cString(self):
return " %s %s;" % (self.ctype, self.name)
def javaGetter(self):
return " public %s get%s(int index) {\n"\
" assertReadable();\n"\
" return nativeGet%s(index);\n"\
" }" % (self.javatype, ToJavaName(self.name, 0), ToJavaName(self.name, 0))
def javaSetter(self):
return " public void set%s(int index, %s value) {\n"\
" assertWritable();\n"\
" nativeSet%s(index, value);\n"\
" }" % (ToJavaName(self.name, 0), self.javatype, ToJavaName(self.name, 0))
def javaNativeGetter(self):
return " private native %s nativeGet%s(int index);"\
% (self.javatype, ToJavaName(self.name, 0))
def javaNativeSetter(self):
return " private native boolean nativeSet%s(int index, %s value);"\
% (ToJavaName(self.name, 0), self.javatype)
def jniGetterDefString(self):
return "JNIEXPORT %s JNICALL\n" \
"Java_%s_nativeGet%s(JNIEnv* env, jobject thiz, jint index);" \
% (self.jtype, ToJNIPackage(self.package, self.jclassname), ToJavaName(self.name, 0))
def jniGetterImplString(self):
return \
"%s Java_%s_nativeGet%s(JNIEnv* env, jobject thiz, jint index) {\n"\
" %s* instance = Get%sAtIndex(env, thiz, index);\n"\
" return instance ? instance->%s : %s;\n"\
"}\n" % (self.jtype, ToJNIPackage(self.package, self.jclassname), ToJavaName(self.name, 0),\
self.structname, self.structname, self.name, self.defval)
def jniSetterDefString(self):
return "JNIEXPORT jboolean JNICALL\n" \
"Java_%s_nativeSet%s(JNIEnv* env, jobject thiz, jint index, %s value);" \
% (ToJNIPackage(self.package, self.jclassname), ToJavaName(self.name, 0), self.jtype)
def jniSetterImplString(self):
return \
"jboolean Java_%s_nativeSet%s(JNIEnv* env, jobject thiz, jint index, %s value) {\n"\
" %s* instance = Get%sAtIndex(env, thiz, index);\n"\
" if (instance) {\n"\
" instance->%s = value;\n"\
" return JNI_TRUE;\n"\
" }\n"\
" return JNI_FALSE;\n"\
"}\n" % (ToJNIPackage(self.package, self.jclassname), ToJavaName(self.name, 0),\
self.jtype, self.structname, self.structname, self.name)
class FieldType_Float(FieldType_BasePOD):
def __init__(self, name, structname, jclassname, package):
FieldType_BasePOD.__init__(self, name, structname, jclassname, package, "float", "float", "jfloat", "0.0")
class FieldType_Int(FieldType_BasePOD):
def __init__(self, name, structname, jclassname, package):
FieldType_BasePOD.__init__(self, name, structname, jclassname, package, "int", "int", "jint", "0")
class FieldType_Long(FieldType_BasePOD):
def __init__(self, name, structname, jclassname, package):
FieldType_BasePOD.__init__(self, name, structname, jclassname, package, "long", "long long", "jlong", "0")
class StructSpec:
def parseTextFile(self, filepath):
# Init
self.name = None
self.package = None
self.fields = []
self.structname = None
self.jclassname = None
self.libname = None
# Open the file
txtfile = open(filepath)
# Parse it line by line
lineno = 0
for line in txtfile:
# Split line into components
linecomps = line.split()
if len(linecomps) == 0:
continue
# Execute command
cmd = linecomps[0]
if cmd == "@name":
self.commandArgAssert(linecomps, 1, lineno)
self.name = linecomps[1]
if not self.structname:
self.structname = self.name
if not self.jclassname:
self.jclassname = self.name
elif cmd == "@package":
self.commandArgAssert(linecomps, 1, lineno)
self.package = linecomps[1]
elif cmd == "@libname":
self.commandArgAssert(linecomps, 1, lineno)
self.libname = linecomps[1]
elif cmd == "@structname":
self.commandArgAssert(linecomps, 1, lineno)
self.structname = linecomps[1]
elif cmd == "@javaclassname":
self.commandArgAssert(linecomps, 1, lineno)
self.jclassname = linecomps[1]
elif cmd == "@field":
self.commandArgAssert(linecomps, 2, lineno)
typestr = linecomps[1]
if typestr == "int":
fieldtype = FieldType_Int(linecomps[2], self.structname, self.jclassname, self.package)
elif typestr == "long":
fieldtype = FieldType_Long(linecomps[2], self.structname, self.jclassname, self.package)
elif typestr == "float":
fieldtype = FieldType_Float(linecomps[2], self.structname, self.jclassname, self.package)
else:
raise ParseError(lineno, "Unknown field type '%s'!" % typestr)
self.fields.append(fieldtype)
else:
raise ParseError(lineno, "Unknown command: '%s'!" % cmd)
lineno = lineno + 1
# Make sure we have all required info
if not self.name:
raise ParseError(lineno, "Required field '@name' missing!")
elif not self.package:
raise ParseError(lineno, "Required field '@package' missing!")
elif not self.libname:
raise ParseError(lineno, "Required field '@libname' missing!")
# Normalize values
if self.libname[:3] == "lib":
self.libname = self.libname[3:]
def commandArgAssert(self, linecomps, expectedcount, lineno):
foundcount = len(linecomps) - 1
if foundcount < expectedcount:
      raise ParseError(lineno, "Not enough arguments specified for command '%s'! Expected %d, " \
"but got only %d!" % (linecomps[0], expectedcount, foundcount))
elif foundcount > expectedcount + 1:
      raise ParseError(lineno, "Too many arguments specified for command '%s'! Expected %d, " \
"but got %d!" % (linecomps[0], expectedcount, foundcount))
def cStructString(self):
cfields = [f.cString() for f in self.fields]
return "typedef struct Struct%s {\n%s\n} %s;\n" % (self.structname,\
"\n".join(cfields),\
self.structname)
def javaClassString(self):
jgetters = [f.javaGetter() for f in self.fields]
jsetters = [f.javaSetter() for f in self.fields]
jnativesetters = [f.javaNativeSetter() for f in self.fields]
jnativegetters = [f.javaNativeGetter() for f in self.fields]
return "public class %s extends NativeBuffer {\n\n"\
" public %s() {\n"\
" super();\n"\
" }\n"\
"\n"\
" public %s(int count) {\n"\
" super(count);\n"\
" }\n"\
"\n"\
" public native int getElementSize();\n"\
"\n"\
"%s\n\n"\
"%s\n\n"\
"%s\n\n"\
"%s\n\n"\
" static {\n"\
" System.loadLibrary(\"%s\");\n"\
" }\n"\
"\n"\
"};\n" % (self.jclassname,\
self.jclassname,\
self.jclassname,\
"\n\n".join(jgetters),\
"\n\n".join(jsetters),\
"\n\n".join(jnativegetters),\
"\n\n".join(jnativesetters),\
self.libname)
def jniDeclString(self):
jnigetters = [f.jniGetterDefString() for f in self.fields]
jnisetters = [f.jniSetterDefString() for f in self.fields]
return "\n\n".join(jnigetters + jnisetters)
def jniImplString(self):
jnigetters = [f.jniGetterImplString() for f in self.fields]
jnisetters = [f.jniSetterImplString() for f in self.fields]
return "\n\n".join(jnigetters + jnisetters)
def hFileString(self):
defname = ToMacroDefName(self.structname, self.package)
return hFileTemplate % (defname, defname, self.cStructString(), defname)
def javaFileString(self):
return javaFileTemplate % (self.package, self.javaClassString())
def jniFileString(self):
return jniFileTemplate % (self.structname.lower(),\
self.structname,\
self.structname,\
self.structname,\
self.structname,\
ToJNIPackage(self.package, self.jclassname),\
self.jniDeclString(),\
ToJNIPackage(self.package, self.jclassname),\
self.structname,
self.jniImplString())
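# Illustrative input example added for clarity (not shipped with this tool): a
# hypothetical .struct file using the commands handled by StructSpec.parseTextFile
# above. The name, package, library and fields are made up for the example.
EXAMPLE_STRUCT_SPEC = """\
@name Point
@package android.filterfw.geometry
@libname libfilterfw
@field float x
@field float y
"""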
def main(argv):
if len(argv) != 2:
print("Usage: %s <file.struct>" % argv[0])
return -1
filepath = argv[1]
structspec = StructSpec()
structspec.parseTextFile(filepath)
hfilename = "%s.h" % structspec.structname.lower()
javafilename = "%s.java" % structspec.jclassname
jnifilename = "jni_%s.c" % structspec.structname.lower()
javapackagepath = structspec.package.replace('.','/')
rootdir = os.path.dirname(filepath)
hfilepath = "%s/../native/%s" % (rootdir, hfilename)
javafilepath = "%s/../java/%s/%s" % (rootdir, javapackagepath, javafilename)
jnifilepath = "%s/../jni/%s" % (rootdir, jnifilename)
hfile = open(hfilepath, 'w')
hfile.write(structspec.hFileString())
hfile.close()
javafile = open(javafilepath, 'w')
javafile.write(structspec.javaFileString())
javafile.close()
jnifile = open(jnifilepath, 'w')
jnifile.write(structspec.jniFileString())
jnifile.close()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
src/appengine/libs/query/base.py | ABHIsHEk122811/clusterfuzz | 5,023 | 12746546 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Represent the interface for Query. This is important because our access
control logic needs a unified way to specify conditions for both BigQuery
query and Datastore query.
This must be compatible with libs.filters and libs.crash_access."""
class Query(object):
"""Represent the interface for Query."""
def filter(self, field, value, operator='='):
"""Filter by a single value."""
raise NotImplementedError
def filter_in(self, field, values):
"""Filter by multiple values."""
raise NotImplementedError
def union(self, *queries):
"""Union all queries with OR conditions."""
raise NotImplementedError
def new_subquery(self):
"""Instantiate a query that is compatible with the current query."""
raise NotImplementedError
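# Illustrative sketch (not part of ClusterFuzz): a minimal in-memory implementation
# of the interface above that just records conditions as strings. Real subclasses
# target Datastore or BigQuery; this toy version only shows the shape of the
# contract.
class ExampleListQuery(Query):
  """Toy Query implementation that accumulates condition strings."""
  def __init__(self):
    self.conditions = []
  def filter(self, field, value, operator='='):
    self.conditions.append('%s %s %r' % (field, operator, value))
  def filter_in(self, field, values):
    self.conditions.append('%s IN %r' % (field, list(values)))
  def union(self, *queries):
    self.conditions.append(
        ' OR '.join('(%s)' % ' AND '.join(q.conditions) for q in queries))
  def new_subquery(self):
    return ExampleListQuery()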
|
tensorflow_forward_ad/setup_cbfs.py | renmengye/tensorflow-forward-ad | 147 | 12746567 | <reponame>renmengye/tensorflow-forward-ad
#!/usr/bin/env python
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
setup(
cmdclass={'build_ext': build_ext},
ext_modules=[
Extension(
"cbfs", sources=["cbfs.pyx"], include_dirs=[numpy.get_include()])
],)
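# Usage note added for clarity: build the extension in place with
#   python setup_cbfs.py build_ext --inplace
# so that the resulting cbfs module can be imported from this directory.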
|
tests/providers/google/cloud/transfers/test_gdrive_to_gcs.py | npodewitz/airflow | 8,092 | 12746584 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from unittest import mock
from airflow.providers.google.cloud.transfers.gdrive_to_gcs import GoogleDriveToGCSOperator
FOLDER_ID = os.environ.get("GCP_GDRIVE_FOLDER_ID", "abcd1234")
DRIVE_ID = os.environ.get("GCP_GDRIVE_DRIVE_ID", "abcd1234")
FILE_NAME = os.environ.get("GCP_GDRIVE_TO_GCS_FILE_NAME", "gdrive_to_gcs_file.txt")
BUCKET = os.environ.get("GCP_GDRIVE_TO_GCS_BUCKET", "gdrive-to-gcs-bucket")
OBJECT = "prefix/test.txt"
GCP_CONN_ID = "google_cloud_default"
IMPERSONATION_CHAIN = ["ACCOUNT_1", "ACCOUNT_2", "ACCOUNT_3"]
class TestGoogleDriveToGCSOperator:
@mock.patch("airflow.providers.google.cloud.transfers.gdrive_to_gcs.GCSHook")
@mock.patch("airflow.providers.google.cloud.transfers.gdrive_to_gcs.GoogleDriveHook")
def test_execute(self, mock_gdrive_hook, mock_gcs_hook):
context = {}
op = GoogleDriveToGCSOperator(
task_id="test_task",
folder_id=FOLDER_ID,
file_name=FILE_NAME,
drive_id=DRIVE_ID,
bucket_name=BUCKET,
object_name=OBJECT,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
meta = {"id": "123xyz"}
mock_gdrive_hook.return_value.get_file_id.return_value = meta
op.execute(context)
mock_gdrive_hook.return_value.get_file_id.assert_called_once_with(
folder_id=FOLDER_ID, file_name=FILE_NAME, drive_id=DRIVE_ID
)
mock_gdrive_hook.return_value.download_file.assert_called_once_with(
file_id=meta["id"], file_handle=mock.ANY
)
mock_gcs_hook.return_value.provide_file_and_upload.assert_called_once_with(
bucket_name=BUCKET, object_name=OBJECT
)
assert op.dry_run() is None
|
RecoLocalCalo/EcalRecProducers/test/testMultipleEcalRecoLocal_cfg.py | ckamtsikis/cmssw | 852 | 12746608 | <filename>RecoLocalCalo/EcalRecProducers/test/testMultipleEcalRecoLocal_cfg.py
import FWCore.ParameterSet.Config as cms
process = cms.Process("RECO2")
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.Geometry.GeometrySimDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.GlobalTag.globaltag = 'GR_R_74_V10A'
process.GlobalTag.toGet = cms.VPSet(
cms.PSet(record = cms.string("GeometryFileRcd"),
tag = cms.string("XMLFILE_Geometry_2015_72YV2_Extended2015ZeroMaterial_mc"),
connect = cms.untracked.string("frontier://FrontierProd/CMS_COND_GEOMETRY_000"),
# label = cms.untracked.string("Extended2015ZeroMaterial")
),
cms.PSet(record = cms.string("EcalTBWeightsRcd"),
tag = cms.string("EcalTBWeights_3p5_time_mc"),
connect = cms.untracked.string("frontier://FrontierPrep/CMS_COND_ECAL")
)
)
#### CONFIGURE IT HERE
isMC = True
#####################
process.MessageLogger.cerr.FwkReport.reportEvery = 1
# start from RAW format for more flexibility
process.raw2digi_step = cms.Sequence(process.RawToDigi)
# get uncalibrechits with global method / time from ratio
import RecoLocalCalo.EcalRecProducers.ecalGlobalUncalibRecHit_cfi
process.ecalGlobalUncalibRecHit = RecoLocalCalo.EcalRecProducers.ecalGlobalUncalibRecHit_cfi.ecalGlobalUncalibRecHit.clone()
# get uncalib rechits from multifit method / time from ratio
import RecoLocalCalo.EcalRecProducers.ecalMultiFitUncalibRecHit_cfi
process.ecalMultiFitUncalibRecHit = RecoLocalCalo.EcalRecProducers.ecalMultiFitUncalibRecHit_cfi.ecalMultiFitUncalibRecHit.clone()
process.ecalMultiFitUncalibRecHit.algoPSet.activeBXs = cms.vint32(-5,-4,-3,-2,-1,0,1,2,3,4)
process.ecalMultiFitUncalibRecHit.algoPSet.useLumiInfoRunHeader = cms.bool( False )
# get uncalib rechits from multifit method / time from weights
process.ecalMultiFit2UncalibRecHit = RecoLocalCalo.EcalRecProducers.ecalMultiFitUncalibRecHit_cfi.ecalMultiFitUncalibRecHit.clone()
process.ecalMultiFit2UncalibRecHit.algoPSet.timealgo = cms.string("WeightsMethod")
process.ecalMultiFit2UncalibRecHit.algoPSet.activeBXs = cms.vint32(-5,-4,-3,-2,-1,0,1,2,3,4)
process.ecalMultiFit2UncalibRecHit.algoPSet.useLumiInfoRunHeader = cms.bool ( False )
# get the recovered digis
if isMC:
process.ecalDetIdToBeRecovered.ebSrFlagCollection = 'simEcalDigis:ebSrFlags'
process.ecalDetIdToBeRecovered.eeSrFlagCollection = 'simEcalDigis:eeSrFlags'
process.ecalRecHit.recoverEBFE = False
process.ecalRecHit.recoverEEFE = False
process.ecalRecHit.killDeadChannels = False
process.ecalRecHit.ebDetIdToBeRecovered = ''
process.ecalRecHit.eeDetIdToBeRecovered = ''
process.ecalRecHit.ebFEToBeRecovered = ''
process.ecalRecHit.eeFEToBeRecovered = ''
process.ecalRecHitGlobal = process.ecalRecHit.clone()
process.ecalRecHitGlobal.EBuncalibRecHitCollection = 'ecalGlobalUncalibRecHit:EcalUncalibRecHitsEB'
process.ecalRecHitGlobal.EEuncalibRecHitCollection = 'ecalGlobalUncalibRecHit:EcalUncalibRecHitsEE'
process.ecalRecHitGlobal.EBrechitCollection = 'EcalRecHitsGlobalEB'
process.ecalRecHitGlobal.EErechitCollection = 'EcalRecHitsGlobalEE'
process.ecalRecHitMultiFit = process.ecalRecHit.clone()
process.ecalRecHitMultiFit.EBuncalibRecHitCollection = 'ecalMultiFitUncalibRecHit:EcalUncalibRecHitsEB'
process.ecalRecHitMultiFit.EEuncalibRecHitCollection = 'ecalMultiFitUncalibRecHit:EcalUncalibRecHitsEE'
process.ecalRecHitMultiFit.EBrechitCollection = 'EcalRecHitsMultiFitEB'
process.ecalRecHitMultiFit.EErechitCollection = 'EcalRecHitsMultiFitEE'
process.ecalRecHitMultiFit2 = process.ecalRecHit.clone()
process.ecalRecHitMultiFit2.EBuncalibRecHitCollection = 'ecalMultiFit2UncalibRecHit:EcalUncalibRecHitsEB'
process.ecalRecHitMultiFit2.EEuncalibRecHitCollection = 'ecalMultiFit2UncalibRecHit:EcalUncalibRecHitsEE'
process.ecalRecHitMultiFit2.EBrechitCollection = 'EcalRecHitsMultiFit2EB'
process.ecalRecHitMultiFit2.EErechitCollection = 'EcalRecHitsMultiFit2EE'
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1000) )
path = '/store/data/Run2012D/DoubleElectron/RAW-RECO/ZElectron-22Jan2013-v1/10000/'
process.source = cms.Source("PoolSource",
duplicateCheckMode = cms.untracked.string("noDuplicateCheck"),
fileNames = cms.untracked.vstring(path+'0008202C-E78F-E211-AADB-0026189437FD.root'
))
process.out = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring('drop *',
'keep *_ecalUncalib*_*_RECO2',
'keep *_ecalRecHit*_*_RECO2',
'keep *_offlineBeamSpot_*_*',
'keep *_addPileupInfo_*_*'
),
fileName = cms.untracked.string('reco2_pu40.root')
)
process.ecalAmplitudeReco = cms.Sequence( process.ecalGlobalUncalibRecHit *
process.ecalMultiFitUncalibRecHit *
process.ecalMultiFit2UncalibRecHit
)
process.ecalRecHitsReco = cms.Sequence( process.ecalRecHitGlobal *
process.ecalRecHitMultiFit *
process.ecalRecHitMultiFit2 )
process.ecalTestRecoLocal = cms.Sequence( process.raw2digi_step *
process.ecalAmplitudeReco *
process.ecalRecHitsReco )
from PhysicsTools.PatAlgos.tools.helpers import *
process.p = cms.Path(process.ecalTestRecoLocal)
process.outpath = cms.EndPath(process.out)
#########################
# Time Profiling #
#########################
#https://twiki.cern.ch/twiki/bin/viewauth/CMS/FastTimerService
process.MessageLogger.cerr.FastReport = cms.untracked.PSet( limit = cms.untracked.int32( 10000000 ) )
# remove any instance of the FastTimerService
if 'FastTimerService' in process.__dict__:
del process.FastTimerService
# instrument the menu with the FastTimerService
process.load( "HLTrigger.Timer.FastTimerService_cfi" )
# print a text summary at the end of the job
process.FastTimerService.printJobSummary = True
# enable per-event DQM plots
process.FastTimerService.enableDQM = True
# enable per-module DQM plots
process.FastTimerService.enableDQMbyModule = True
# enable per-event DQM plots by lumisection
process.FastTimerService.enableDQMbyLumiSection = True
process.FastTimerService.dqmLumiSectionsRange = 2500 # lumisections (23.31 s)
# set the time resolution of the DQM plots
process.FastTimerService.dqmTimeRange = 1000. # ms
process.FastTimerService.dqmTimeResolution = 5. # ms
process.FastTimerService.dqmPathTimeRange = 100. # ms
process.FastTimerService.dqmPathTimeResolution = 0.5 # ms
process.FastTimerService.dqmModuleTimeRange = 1000. # ms
process.FastTimerService.dqmModuleTimeResolution = 0.5 # ms
# set the base DQM folder for the plots
process.FastTimerService.dqmPath = "HLT/TimerService"
process.FastTimerService.enableDQMbyProcesses = True
# save the DQM plots in the DQMIO format
process.dqmOutput = cms.OutputModule("DQMRootOutputModule",
fileName = cms.untracked.string("DQM_pu40.root")
)
process.FastTimerOutput = cms.EndPath( process.dqmOutput )
|
featuretools/tests/primitive_tests/primitives_to_install/custom_mean.py | Featuretools/featuretools | 4,299 | 12746614 | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import AggregationPrimitive
class CustomMean(AggregationPrimitive):
name = "custom_mean"
input_types = [ColumnSchema(semantic_tags={"numeric"})]
return_type = ColumnSchema(semantic_tags={"numeric"})
|
parallel_wavegan/utils/utils.py | A-Quarter-Mile/ParallelWaveGAN | 1,023 | 12746627 | <gh_stars>1000+
# -*- coding: utf-8 -*-
# Copyright 2019 <NAME>
# MIT License (https://opensource.org/licenses/MIT)
"""Utility functions."""
import fnmatch
import logging
import os
import sys
import tarfile
from distutils.version import LooseVersion
from filelock import FileLock
import h5py
import numpy as np
import torch
import yaml
PRETRAINED_MODEL_LIST = {
"ljspeech_parallel_wavegan.v1": "1PdZv37JhAQH6AwNh31QlqruqrvjTBq7U",
"ljspeech_parallel_wavegan.v1.long": "1A9TsrD9fHxFviJVFjCk5W6lkzWXwhftv",
"ljspeech_parallel_wavegan.v1.no_limit": "1CdWKSiKoFNPZyF1lo7Dsj6cPKmfLJe72",
"ljspeech_parallel_wavegan.v3": "1-oZpwpWZMMolDYsCqeL12dFkXSBD9VBq",
"ljspeech_melgan.v1": "1i7-FPf9LPsYLHM6yNPoJdw5Q9d28C-ip",
"ljspeech_melgan.v1.long": "1x1b_R7d2561nqweK3FPb2muTdcFIYTu6",
"ljspeech_melgan.v3": "1J5gJ_FUZhOAKiRFWiAK6FcO5Z6oYJbmQ",
"ljspeech_melgan.v3.long": "124JnaLcRe7TsuAGh3XIClS3C7Wom9AU2",
"ljspeech_full_band_melgan.v2": "1Kb7q5zBeQ30Wsnma0X23G08zvgDG5oen",
"ljspeech_multi_band_melgan.v2": "1b70pJefKI8DhGYz4SxbEHpxm92tj1_qC",
"ljspeech_hifigan.v1": "1i6-hR_ksEssCYNlNII86v3AoeA1JcuWD",
"ljspeech_style_melgan.v1": "10aJSZfmCAobQJgRGio6cNyw6Xlgmme9-",
"jsut_parallel_wavegan.v1": "1qok91A6wuubuz4be-P9R2zKhNmQXG0VQ",
"jsut_multi_band_melgan.v2": "1chTt-76q2p69WPpZ1t1tt8szcM96IKad",
"jsut_hifigan.v1": "1vdgqTu9YKyGMCn-G7H2fI6UBC_4_55XB",
"jsut_style_melgan.v1": "1VIkjSxYxAGUVEvJxNLaOaJ7Twe48SH-s",
"csmsc_parallel_wavegan.v1": "1QTOAokhD5dtRnqlMPTXTW91-CG7jf74e",
"csmsc_multi_band_melgan.v2": "1G6trTmt0Szq-jWv2QDhqglMdWqQxiXQT",
"csmsc_hifigan.v1": "1fVKGEUrdhGjIilc21Sf0jODulAq6D1qY",
"csmsc_style_melgan.v1": "1kGUC_b9oVSv24vZRi66AAbSNUKJmbSCX",
"arctic_slt_parallel_wavegan.v1": "1_MXePg40-7DTjD0CDVzyduwQuW_O9aA1",
"jnas_parallel_wavegan.v1": "1D2TgvO206ixdLI90IqG787V6ySoXLsV_",
"vctk_parallel_wavegan.v1": "1bqEFLgAroDcgUy5ZFP4g2O2MwcwWLEca",
"vctk_parallel_wavegan.v1.long": "1tO4-mFrZ3aVYotgg7M519oobYkD4O_0-",
"vctk_multi_band_melgan.v2": "10PRQpHMFPE7RjF-MHYqvupK9S0xwBlJ_",
"vctk_hifigan.v1": "1oVOC4Vf0DYLdDp4r7GChfgj7Xh5xd0ex",
"vctk_style_melgan.v1": "14ThSEgjvl_iuFMdEGuNp7d3DulJHS9Mk",
"libritts_parallel_wavegan.v1": "1zHQl8kUYEuZ_i1qEFU6g2MEu99k3sHmR",
"libritts_parallel_wavegan.v1.long": "1b9zyBYGCCaJu0TIus5GXoMF8M3YEbqOw",
"libritts_multi_band_melgan.v2": "1kIDSBjrQvAsRewHPiFwBZ3FDelTWMp64",
"libritts_hifigan.v1": "1_TVFIvVtMn-Z4NiQrtrS20uSJOvBsnu1",
"libritts_style_melgan.v1": "1yuQakiMP0ECdB55IoxEGCbXDnNkWCoBg",
"kss_parallel_wavegan.v1": "1mLtQAzZHLiGSWguKCGG0EZa4C_xUO5gX",
"hui_acg_hokuspokus_parallel_wavegan.v1": "1irKf3okMLau56WNeOnhr2ZfSVESyQCGS",
"ruslan_parallel_wavegan.v1": "1M3UM6HN6wrfSe5jdgXwBnAIl_lJzLzuI",
}
def find_files(root_dir, query="*.wav", include_root_dir=True):
"""Find files recursively.
Args:
root_dir (str): Root directory to search.
query (str): Filename pattern to match.
include_root_dir (bool): If False, root_dir name is not included.
Returns:
list: List of found filenames.
"""
files = []
for root, dirnames, filenames in os.walk(root_dir, followlinks=True):
for filename in fnmatch.filter(filenames, query):
files.append(os.path.join(root, filename))
if not include_root_dir:
files = [file_.replace(root_dir + "/", "") for file_ in files]
return files
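# Usage sketch (not part of the upstream module): collect feature files under a
# hypothetical dump directory; "dump/train" is a placeholder path.
# feature_files = find_files("dump/train", query="*.h5", include_root_dir=False)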
def read_hdf5(hdf5_name, hdf5_path):
"""Read hdf5 dataset.
Args:
hdf5_name (str): Filename of hdf5 file.
hdf5_path (str): Dataset name in hdf5 file.
Return:
any: Dataset values.
"""
if not os.path.exists(hdf5_name):
logging.error(f"There is no such a hdf5 file ({hdf5_name}).")
sys.exit(1)
hdf5_file = h5py.File(hdf5_name, "r")
if hdf5_path not in hdf5_file:
logging.error(f"There is no such a data in hdf5 file. ({hdf5_path})")
sys.exit(1)
hdf5_data = hdf5_file[hdf5_path][()]
hdf5_file.close()
return hdf5_data
def write_hdf5(hdf5_name, hdf5_path, write_data, is_overwrite=True):
"""Write dataset to hdf5.
Args:
hdf5_name (str): Hdf5 dataset filename.
hdf5_path (str): Dataset path in hdf5.
write_data (ndarray): Data to write.
is_overwrite (bool): Whether to overwrite dataset.
"""
# convert to numpy array
write_data = np.array(write_data)
# check folder existence
folder_name, _ = os.path.split(hdf5_name)
if not os.path.exists(folder_name) and len(folder_name) != 0:
os.makedirs(folder_name)
# check hdf5 existence
if os.path.exists(hdf5_name):
# if already exists, open with r+ mode
hdf5_file = h5py.File(hdf5_name, "r+")
# check dataset existence
if hdf5_path in hdf5_file:
if is_overwrite:
logging.warning(
"Dataset in hdf5 file already exists. " "recreate dataset in hdf5."
)
hdf5_file.__delitem__(hdf5_path)
else:
logging.error(
"Dataset in hdf5 file already exists. "
"if you want to overwrite, please set is_overwrite = True."
)
hdf5_file.close()
sys.exit(1)
else:
# if not exists, open with w mode
hdf5_file = h5py.File(hdf5_name, "w")
# write data to hdf5
hdf5_file.create_dataset(hdf5_path, data=write_data)
hdf5_file.flush()
hdf5_file.close()
class HDF5ScpLoader(object):
"""Loader class for a fests.scp file of hdf5 file.
Examples:
key1 /some/path/a.h5:feats
key2 /some/path/b.h5:feats
key3 /some/path/c.h5:feats
key4 /some/path/d.h5:feats
...
>>> loader = HDF5ScpLoader("hdf5.scp")
>>> array = loader["key1"]
key1 /some/path/a.h5
key2 /some/path/b.h5
key3 /some/path/c.h5
key4 /some/path/d.h5
...
>>> loader = HDF5ScpLoader("hdf5.scp", "feats")
>>> array = loader["key1"]
key1 /some/path/a.h5:feats_1,feats_2
key2 /some/path/b.h5:feats_1,feats_2
key3 /some/path/c.h5:feats_1,feats_2
key4 /some/path/d.h5:feats_1,feats_2
...
>>> loader = HDF5ScpLoader("hdf5.scp")
# feats_1 and feats_2 will be concatenated
>>> array = loader["key1"]
"""
def __init__(self, feats_scp, default_hdf5_path="feats"):
"""Initialize HDF5 scp loader.
Args:
feats_scp (str): Kaldi-style feats.scp file with hdf5 format.
default_hdf5_path (str): Path in hdf5 file. Not used if the scp already contains the path.
"""
self.default_hdf5_path = default_hdf5_path
with open(feats_scp) as f:
lines = [line.replace("\n", "") for line in f.readlines()]
self.data = {}
for line in lines:
key, value = line.split()
self.data[key] = value
def get_path(self, key):
"""Get hdf5 file path for a given key."""
return self.data[key]
def __getitem__(self, key):
"""Get ndarray for a given key."""
p = self.data[key]
if ":" in p:
if len(p.split(",")) == 1:
return read_hdf5(*p.split(":"))
else:
p1, p2 = p.split(":")
feats = [read_hdf5(p1, p) for p in p2.split(",")]
return np.concatenate(
[f if len(f.shape) != 1 else f.reshape(-1, 1) for f in feats], 1
)
else:
return read_hdf5(p, self.default_hdf5_path)
def __len__(self):
"""Return the length of the scp file."""
return len(self.data)
def __iter__(self):
"""Return the iterator of the scp file."""
return iter(self.data)
def keys(self):
"""Return the keys of the scp file."""
return self.data.keys()
def values(self):
"""Return the values of the scp file."""
for key in self.keys():
yield self[key]
class NpyScpLoader(object):
"""Loader class for a fests.scp file of npy file.
Examples:
key1 /some/path/a.npy
key2 /some/path/b.npy
key3 /some/path/c.npy
key4 /some/path/d.npy
...
>>> loader = NpyScpLoader("feats.scp")
>>> array = loader["key1"]
"""
def __init__(self, feats_scp):
"""Initialize npy scp loader.
Args:
feats_scp (str): Kaldi-style feats.scp file with npy format.
"""
with open(feats_scp) as f:
lines = [line.replace("\n", "") for line in f.readlines()]
self.data = {}
for line in lines:
key, value = line.split()
self.data[key] = value
def get_path(self, key):
"""Get npy file path for a given key."""
return self.data[key]
def __getitem__(self, key):
"""Get ndarray for a given key."""
return np.load(self.data[key])
def __len__(self):
"""Return the length of the scp file."""
return len(self.data)
def __iter__(self):
"""Return the iterator of the scp file."""
return iter(self.data)
def keys(self):
"""Return the keys of the scp file."""
return self.data.keys()
def values(self):
"""Return the values of the scp file."""
for key in self.keys():
yield self[key]
def load_model(checkpoint, config=None, stats=None):
"""Load trained model.
Args:
checkpoint (str): Checkpoint path.
config (dict): Configuration dict.
stats (str): Statistics file path.
Return:
torch.nn.Module: Model instance.
"""
# load config if not provided
if config is None:
dirname = os.path.dirname(checkpoint)
config = os.path.join(dirname, "config.yml")
with open(config) as f:
config = yaml.load(f, Loader=yaml.Loader)
# lazy load to avoid a circular import error
import parallel_wavegan.models
# get model and load parameters
model_class = getattr(
parallel_wavegan.models,
config.get("generator_type", "ParallelWaveGANGenerator"),
)
# workaround for typo #295
generator_params = {
k.replace("upsample_kernal_sizes", "upsample_kernel_sizes"): v
for k, v in config["generator_params"].items()
}
model = model_class(**generator_params)
model.load_state_dict(
torch.load(checkpoint, map_location="cpu")["model"]["generator"]
)
# check stats existence
if stats is None:
dirname = os.path.dirname(checkpoint)
if config["format"] == "hdf5":
ext = "h5"
else:
ext = "npy"
if os.path.exists(os.path.join(dirname, f"stats.{ext}")):
stats = os.path.join(dirname, f"stats.{ext}")
# load stats
if stats is not None:
model.register_stats(stats)
# add pqmf if needed
if config["generator_params"]["out_channels"] > 1:
# lazy load to avoid a circular import error
from parallel_wavegan.layers import PQMF
pqmf_params = {}
if LooseVersion(config.get("version", "0.1.0")) <= LooseVersion("0.4.2"):
# For compatibility, here we set default values in version <= 0.4.2
pqmf_params.update(taps=62, cutoff_ratio=0.15, beta=9.0)
model.pqmf = PQMF(
subbands=config["generator_params"]["out_channels"],
**config.get("pqmf_params", pqmf_params),
)
return model
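# Usage sketch (not part of the upstream module): load a trained generator from a
# hypothetical checkpoint path and count its parameters. The path below is a
# placeholder; only the standard torch.nn.Module API is relied on here.
# model = load_model("exp/train_ljspeech/checkpoint-400000steps.pkl")
# num_params = sum(p.numel() for p in model.parameters())
# logging.info(f"Loaded generator with {num_params} parameters.")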
def download_pretrained_model(tag, download_dir=None):
"""Download pretrained model form google drive.
Args:
tag (str): Pretrained model tag.
download_dir (str): Directory to save downloaded files.
Returns:
str: Path of downloaded model checkpoint.
"""
assert tag in PRETRAINED_MODEL_LIST, f"{tag} does not exist."
id_ = PRETRAINED_MODEL_LIST[tag]
if download_dir is None:
download_dir = os.path.expanduser("~/.cache/parallel_wavegan")
output_path = f"{download_dir}/{tag}.tar.gz"
os.makedirs(f"{download_dir}", exist_ok=True)
with FileLock(output_path + ".lock"):
if not os.path.exists(output_path):
# lazy load for compatibility
import gdown
gdown.download(
f"https://drive.google.com/uc?id={id_}", output_path, quiet=False
)
with tarfile.open(output_path, "r:*") as tar:
for member in tar.getmembers():
if member.isreg():
member.name = os.path.basename(member.name)
tar.extract(member, f"{download_dir}/{tag}")
checkpoint_path = find_files(f"{download_dir}/{tag}", "checkpoint*.pkl")
return checkpoint_path[0]
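# Usage sketch (not part of the upstream module): fetch a published model by tag and
# load it; "ljspeech_parallel_wavegan.v1" is one of the tags in PRETRAINED_MODEL_LIST above.
# checkpoint = download_pretrained_model("ljspeech_parallel_wavegan.v1")
# model = load_model(checkpoint)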
|
Codeforces/9A.py | Shaswat-2203/HacktoberfestForBeginners | 115 | 12746631 | s = input()
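# Added note: the code below treats a tie as a win for the third roll.
# With p = 7 - max(y, w) favourable die faces out of 6, the if-chain prints
# the reduced fraction p/6.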
y = s[0]
w = s[2]
if int(y) > int(w):
p = 7 - int(y)
else:
p = 7 - int(w)
if p == 1:
print('1/6')
if p == 2:
print('1/3')
if p == 3:
print('1/2')
if p == 4:
print('2/3')
if p == 5:
print('5/6')
if p == 6:
print('1/1')
|
forms-flow-api/migrations/versions/166054bd81b5_allow_modified_date_as_nullable.py | andrepestana-aot/forms-flow-ai | 132 | 12746637 | <filename>forms-flow-api/migrations/versions/166054bd81b5_allow_modified_date_as_nullable.py
"""allow modified date as nullable
Revision ID: 166054bd81b5
Revises: <PASSWORD>
Create Date: 2021-10-11 03:47:20.983464
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "80b8d<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"application", "modified", existing_type=postgresql.TIMESTAMP(), nullable=True
)
op.alter_column(
"form_process_mapper",
"modified",
existing_type=postgresql.TIMESTAMP(),
nullable=True,
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"form_process_mapper",
"modified",
existing_type=postgresql.TIMESTAMP(),
nullable=False,
)
op.alter_column(
"application", "modified", existing_type=postgresql.TIMESTAMP(), nullable=False
)
# ### end Alembic commands ###
|
pyjokes/jokes_it.py | r0d0dendr0n/pyjokes | 293 | 12746646 | # -*- coding: utf-8 -*-
"""
Jokes from stackoverflow - provided under CC BY-SA 3.0
http://stackoverflow.com/questions/234075/what-is-your-best-programmer-joke?page=4&tab=votes#tab-top
"""
neutral = [
"Trionfalmente, Beth ha rimosso Python 2.7 dal server nel 2020.'Finalmente!' ha detto con gioia, solo per vedere l'annuncio di Python 4.4.",
"Una query SQL entra in un bar, cammina verso a due table e chiede: 'Posso unirvi?'.",
"Quando il tuo martello e` C ++, tutto inizia a sembrare un pollice.",
"Se metti un milione di scimmie su un milione di tastiere, uno di loro alla fine scrivera` un programma Java, il resto scrivera` Perl.",
"Per comprendere la ricorsione devi prima capire la ricorsione.",
"Gli amici non permettono agli amici di usare Python 2.7.",
"Ho suggerito di tenere un 'Python Object Oriented Programming Seminar', ma l'acronimo era impopolare.",
"'toc, toc'. 'Chi e` la`?' ... pausa molto lunga ... Java.",
"Quanti programmatori ci vogliono per cambiare una lampadina? Nessuno, e` un problema hardware.",
"Qual e` il modo orientato agli oggetti per diventare ricchi? Ereditarieta`.",
"Quanti programmatori ci vogliono per cambiare una lampadina? Nessuno, possono rendere l'oscurita` uno standard.",
"I vecchi programmatori C non muoiono, sono solo gettati nel void.",
"Gli sviluppatori di software amano risolvere i problemi: se non ci sono problemi facilmente disponibili li creeranno.",
".NET e` stato chiamato .NET in modo che non si visualizzasse in un elenco di directory Unix.",
"Hardware: la parte di un computer che puoi calciare.",
"Un programmatore e` stato trovato morto nella doccia, accanto al corpo c'era uno shampoo con le istruzioni:Insapona, risciacqua ripeti.",
"Ottimista: il bicchiere e` mezzo pieno Pessimista: il bicchiere e` mezzo vuoto Programmatore: il bicchiere e` il doppio del necessario.",
"In C abbiamo dovuto codificare i nostri bug. In C ++ possiamo ereditarli.",
"Come mai non c'e` una gara Perl offuscato? Perche` tutti vincerebbero.",
"Se riproduci un CD di Windows all'indietro, ascolterai il canto satanico ... peggio ancora, se lo riproduci in avanti, installa Windows.",
"Quanti programmatori ci vogliono per uccidere uno scarafaggio? Due: uno tiene, l'altro installa Windows su di esso.",
"Come si chiama un programmatore finlandese? Nerdic.",
"Cosa ha detto il codice Java al codice C? : Non hai classe.",
"Perche` Microsoft ha chiamato il proprio motore di ricerca BING? Because It's Not Google.",
"I venditori di software e i venditori di auto usate si differenziano perche` questi ultimi sanno quando mentono.",
"Bambino: 'papa', perche` il sole sorge ad est e tramonta ad ovest?' Papa': 'figlio, sta funzionando, non toccarlo'.",
"Quanti programmatori Prolog ci vogliono per cambiare una lampadina? Falso.",
"I veri programmatori possono scrivere codice assembly in qualsiasi lingua.",
"Cameriere: 'le piacerebbe un caffe` o un te`?' Programmatore: 'Si'.",
"Un programmatore entra in un foo ...",
"Qual e` il secondo nome di <NAME>? <NAME>.",
"Perche` sorridi sempre? Questa e` solo la mia ... espressione regolare.",
"Domanda stupida ASCII, ottiene uno stupido ANSI.",
"Un programmatore aveva un problema: penso` tra se stesso: 'lo risolvo con i threads!', ora ha due problemi.",
"Java: scrivi una volta e scappa.",
"Ti direi una battuta su UDP, ma non lo capiresti mai.",
"Un ingegnere di QA entra in un bar, si imbatte in un bar, striscia in un bar, balla in un bar, punta i piedi in un bar...",
"Ho avuto un problema quindi ho pensato di utilizzare Java. Ora ho una ProblemFactory.",
"L'ingegnere del QA entra in un bar, ordina una birra, ordina 0 birre, 99999 birre, una lucertola, -1 birre, uno sfdeljknesv.",
"Un responsabile di prodotto entra in un bar, chiede un drink, il barista dice NO, ma prendera` in considerazione l'aggiunta successiva.",
"Come si genera una stringa casuale? Metti uno studente di Informatica del primo anno davanti a Vim e gli chiedi di salvare ed uscire.",
"Uso Vim da molto tempo ormai, principalmente perche` non riesco a capire come uscire.",
"Come fai a sapere se una persona e` un utente Vim? Non ti preoccupare, te lo diranno.",
"un cameriere urla: 'sta soffocando! Qualcuno e` un dottore?' Programmatore: 'sono un utente Vim'.",
"3 Database Admins sono entrati in un bar NoSQL e poco dopo sono usciti perche` non sono riusciti a trovare un table.",
"Come spiegare il film Inception a un programmatore? Quando esegui una VM dentro una VM dentro un' altra VM tutto procede molto lentamente.",
"Come si chiama un pappagallo che dice 'Squawk! Pezzi di nove! Pezzi di nove!' Un errore a pappagallo.",
"Ci sono solo due problemi difficili in Informatica: invalidazione della cache, denominazione delle cose e errori di off-by-one.",
"Ci sono 10 tipi di persone: quelli che comprendono il binario e quelli che non lo sanno.",
"Ci sono 2 tipi di persone: quelli che possono estrapolare dati da insiemi incompleti ...",
"Esistono II tipi di persone: quelli che comprendono i numeri romani e quelli che non li conoscono.",
"Ci sono 10 tipi di persone: quelli che comprendono l'esadecimale e altri 15.",
"Ci sono 10 tipi di persone: quelli che capiscono il trinario, quelli che non lo fanno e quelli che non ne hanno mai sentito parlare.",
"Come chiami otto hobbit? Un hob byte.",
"La cosa migliore di un booleano e` che anche se ti sbagli, sei solo fuori di un bit.",
"Un buon programmatore e` qualcuno che guarda sempre in entrambe le direzioni prima di attraversare una strada a senso unico.",
"Esistono due modi per scrivere programmi privi di errori: solo il terzo funziona.",
"I controlli di qualita` consistono nel 55% di acqua, 30% di sangue e 15% di ticket in Jira.",
"Quanti QA servono per cambiare una lampadina? Hanno notato che la stanza era buia,: non risolvono i problemi, li trovano.",
"Un programmatore si schianta contro un'auto , l'uomo chiede 'cosa e` successo', l'altro risponde'Non so. Facciamo il backup e riprova'.",
"Scrivere PHP e` come fare pipi` in piscina, tutti lo hanno fatto, ma non hanno bisogno di renderlo pubblico.",
"Numero di giorni da quando ho riscontrato un errore di indice di array: -1.",
"gli appuntamenti veloci sono inutili, 5 minuti non sono sufficienti per spiegare correttamente i benefici della filosofia Unix.",
"Microsoft ha ogni quindici giorni una 'settimana produttiva' dove usa Google invece di Bing.",
"Trovare un buon sviluppatore PHP e` come cercare un ago in un pagliaio o e` un hackstack in un ago?.",
"Unix e` user friendly, e` solo molto particolare nella scelta di chi siano i suoi amici.",
"Un programmatore COBOL guadagna milioni con la riparazione Y2K e decide di congelarsi criogenicamente. L'anno e` 9999.",
"Il linguaggio C combina tutta la potenza del linguaggio assembly con tutta la facilita` d'uso del linguaggio assembly.",
"Un esperto SEO entra in un bar, pub, pub irlandese, taverna, barista, birra, liquore, vino, alcolici, liquori ...",
"Che cosa significa Emacs? Utilizzato esclusivamente dagli scienziati informatici di mezza eta`.",
"Che cosa hanno in comune le battute di PyJokes con Adobe Flash? Si aggiornano sempre, ma non migliorano.",
"Quanti demosceners sono necessari per cambiare una lampadina? Meta`. Con uno intero non ci sono sfide.",
]
"""
Jokes from The Internet Chuck Norris DB (ICNDB) (http://www.icndb.com/) - provided under CC BY-SA 3.0
http://api.icndb.com/jokes/
"""
chuck = [
"Tutti gli array che Chuck Norris dichiara sono di dimensioni infinite, perche` Chuck Norris non conosce limiti.",
"Chuck Norris non ha la latenza del disco perche` il disco rigido sa sbrigarsi, altrimenti sono guai.",
"Chuck Norris scrive codice che si ottimizza da solo.",
"Chuck Norris non puo` testare l'uguaglianza perche` non ha eguali.",
"Chuck Norris non ha bisogno di garbage collection perche` non chiama .Dispose (), chiama .DropKick ().",
"Il primo programma di Chuck Norris e` stato kill -9.",
"Chuck Norris ha scoppiato la bolla delle dot com.",
"Tutti i browser supportano le definizioni esadecimali #chuck e #norris per i colori nero e blu.",
"MySpace non e` proprio il tuo spazio, e` di Chuck (te lo lascia solo usare).",
"Chuck Norris puo` scrivere funzioni infinitamente ricorsive e farle tornare.",
"Chuck Norris puo` risolvere le Torri di Hanoi in una mossa.",
"L'unico modello di design che Chuck Norris conosce e` il God Object Pattern.",
"Chuck Norris ha terminato World of Warcraft.",
"I project manager non chiedono mai a Chuck Norris le stime.",
"Chuck Norris non usa gli standard web in quanto il web si conformera` a lui.",
"'Funziona sulla mia macchina' e` sempre vero per Chuck Norris.",
"Chuck Norris non fa i grafici di Burn Down, fa i grafici di Smack Down.",
"Chuck Norris puo` cancellare il cestino.",
"La barba di Chuck Norris puo` scrivere 140 parole al minuto.",
"Chuck Norris puo` testare tutte le applicazioni con un'unica affermazione, 'funziona'.",
"La tastiera di Chuck Norris non ha un tasto Ctrl perche` niente controlla Chuck Norris.",
"Chuck Norris puo` far traboccare il tuo stack solo guardandolo.",
"Per Chuck Norris, tutto contiene una vulnerabilita`.",
"Chuck Norris non usa sudo, la shell sa solo che e` lui e fa quello che gli viene detto.",
"Chuck Norris non ha bisogno di un debugger, si limita a fissare il codice finche` non confessa.",
"Chuck Norris puo` accedere a metodi privati.",
"Chuck Norris puo` istanziare una classe astratta.",
"L'oggetto classe eredita da Chuck Norris.",
"Chuck Norris conosce l'ultima cifra del Pi greco.",
"La connessione di Chuck Norris e` piu' veloce in up che in down perche` i dati sono incentivati a correre via da lui.",
"Nessuna affermazione puo` prendere la ChuckNorrisException.",
"Chuck Norris puo` scrivere applicazioni multi-thread con un singolo thread.",
"Chuck Norris non ha bisogno di usare AJAX perche` le pagine hanno troppa paura di postback comunque.",
"Chuck Norris non usa la riflessione, la riflessione chiede educatamente il suo aiuto.",
"Non c'e` alcun tasto Esc sulla tastiera di Chuck Norris, perche` nessuno sfugge a Chuck Norris.",
"Chuck Norris puo` eseguire la ricerca binaria di dati non ordinati.",
"Chuck Norris non ha bisogno di tentativi di cattura, le eccezioni sono troppo spaventate da sollevarsi.",
"Chuck Norris e` uscito da un ciclo infinito.",
"Se Chuck Norris scrive codice con bug, gli errori si risolvono da soli.",
"L'hosting di Chuck Norris e` garantito al 101% di uptime.",
"La tastiera di Chuck Norris ha il tasto Any.",
"Chuck Norris puo` accedere al database dall'interfaccia utente.",
"I programmi di Chuck Norris non escono mai, sono terminati.",
"I programmi di Chuck Norris occupano il 150% della CPU, anche quando non sono in esecuzione.",
"Chuck Norris puo` generare thread che si completano prima di essere avviati.",
"I programmi di Chuck Norris non accettano input.",
"Chuck Norris puo` installare iTunes senza installare Quicktime.",
"Chuck Norris non ha bisogno di un sistema operativo.",
"Il modello di rete OSI di Chuck Norris ha un solo livello: fisico.",
"Chuck Norris puo` compilare errori di sintassi.",
"Chuck Norris non ha bisogno di digitare cast. Il Chuck-Norris Compiler (CNC) vede attraverso le cose, fino in fondo sempre.",
"Chuck Norris comprime i suoi file con un calcio rotante sul disco rigido.",
"Con Chuck Norris P = NP. Non c'e` alcun nondeterminismo con le decisioni di Chuck Norris.",
"Chuck Norris puo` recuperare qualsiasi cosa da / dev / null.",
"Nessuno ha mai programmato in coppia con Chuck Norris ed e`vissuto per raccontare la storia.",
"Nessuno ha mai parlato durante la revisione del codice di Chuck Norris ed e` vissuto per raccontare la storia.",
"Chuck Norris non usa una GUI, preferisce la linea di comando.",
"Chuck Norris non usa Oracle, lui e` l'Oracle.",
"Chuck Norris puo` dereferenziare NULL.",
"Una differenza tra il tuo codice e quello di Chuck Norris e` infinita.",
"Il plugin Chuck Norris Eclipse e` diventato un contatto alieno.",
"Chuck Norris e` l'ultimo mutex, tutti i thread lo temono.",
"Non preoccuparti dei test, i test case di Chuck Norris coprono anche il tuo codice.",
"Le dichiarazioni del registro di Chuck Norris sono sempre al livello FATAL.",
"Chuck Norris ha completato World of Warcraft.",
"Quando Chuck Norris rompe la build, non e` possibile risolverla, perche` non c'e` una sola riga di codice.",
"Chuck Norris scrive con un dito, lo punta alla tastiera e la tastiera fa il resto.",
"I programmi di Chuck Norris possono superare il test di Turing fissando l'interrogatore.",
"Se provi kill -9 con i programmi di Chuck Norris, si ritorce contro.",
"Chuck Norris esegue loop infiniti in meno di 4 secondi.",
"Chuck Norris puo` sovrascrivere una variabile bloccata.",
"Chuck Norris conosce il valore di NULL.",
"Chuck Norris puo` installare un sistema operativo a 64 bit su macchine a 32 bit.",
"Chuck Norris puo` scrivere su un flusso di output.",
"Chuck Norris puo` leggere da un flusso di input.",
"Chuck Norris non ha mai scritto il suo programma in codice macchina. Le macchine hanno imparato a interpretare il codice di Chuck Norris.",
"I test unitari di Chuck Norris non girano, muoiono.",
"Chuck Norris causa la schermata blu della morte.",
"Chuck Norris puo` fare una classe che e` sia astratta che finale.",
"Chuck Norris potrebbe usare qualsiasi cosa in java.util.* per ucciderti, inclusi i javadoc.",
"Il codice gira piu` velocemente quando <NAME> lo guarda.",
"<NAME> non usa REST, lui aspetta.",
"Su Facebook a tutti piace <NAME>, che lo scelgano o no.",
"Non puoi seguire <NAME> su Twitter, perche` lui ti segue.",
"La calcolatrice di <NAME> ha solo 3 tasti: 0, 1 e NAND.",
"<NAME> utilizza solo variabili globali. Non ha nulla da nascondere.",
"<NAME> scrive direttamente in binario. Quindi scrive il codice sorgente come documentazione per altri programmatori.",
]
jokes_it = {
'neutral': neutral,
'chuck': chuck,
'all': neutral + chuck,
}
|
submission/migrations/0002_auto_20170509_1203.py | lizehongss/oj_backend | 5,237 | 12746652 | <reponame>lizehongss/oj_backend<gh_stars>1000+
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2017-05-09 12:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('submission', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='submission',
name='code',
field=models.TextField(),
),
migrations.RenameField(
model_name='submission',
old_name='accepted_info',
new_name='statistic_info',
),
migrations.RemoveField(
model_name='submission',
name='accepted_time',
),
migrations.RenameField(
model_name='submission',
old_name='created_time',
new_name='create_time',
),
migrations.AlterModelOptions(
name='submission',
options={'ordering': ('-create_time',)},
)
]
|
butterfree/transform/utils/date_range.py | fossabot/butterfree | 208 | 12746657 | """Utils for date range generation."""
from datetime import datetime
from typing import Union
from pyspark.sql import DataFrame, functions
from butterfree.clients import SparkClient
from butterfree.constants import DataType
from butterfree.constants.columns import TIMESTAMP_COLUMN
def get_date_range(
client: SparkClient,
start_date: Union[str, datetime],
end_date: Union[str, datetime],
step: int = None,
) -> DataFrame:
"""Create a date range dataframe.
The dataframe returned by this method will contain a single column,
TIMESTAMP_COLUMN, of timestamp type, with dates between start and end.
Args:
client: a spark client.
start_date: range beginning value (inclusive).
end_date: range last value (inclusive; the upper bound is end_date plus one day).
step: optional step, in seconds.
Returns:
A single column date range spark dataframe.
"""
day_in_seconds = 60 * 60 * 24
step = step or day_in_seconds
start_date = (
start_date if isinstance(start_date, str) else start_date.strftime("%Y-%m-%d")
)
end_date = end_date if isinstance(end_date, str) else end_date.strftime("%Y-%m-%d")
date_df = client.conn.createDataFrame(
[(start_date, end_date)], ("start_date", "end_date")
).select(
[
functions.col(c).cast(DataType.TIMESTAMP.spark).cast(DataType.BIGINT.spark)
for c in ("start_date", "end_date")
]
)
start_date, end_date = date_df.first()
return client.conn.range(
start_date, end_date + day_in_seconds, step # type: ignore
).select(functions.col("id").cast(DataType.TIMESTAMP.spark).alias(TIMESTAMP_COLUMN))
|
src/http_server.py | Tomasz-Kluczkowski/celery-exporter | 132 | 12746659 | <gh_stars>100-1000
from threading import Thread
import kombu.exceptions
from flask import Blueprint, Flask, current_app, request
from loguru import logger
from prometheus_client.exposition import choose_encoder
from waitress import serve
blueprint = Blueprint("celery_exporter", __name__)
@blueprint.route("/")
def index():
return """
<!doctype html>
<html lang="en">
<head>
<!-- Required meta tags -->
<meta charset="utf-8">
<title>celery-exporter</title>
</head>
<body>
<h1>Celery Exporter</h1>
<p><a href="/metrics">Metrics</a></p>
</body>
</html>
"""
@blueprint.route("/metrics")
def metrics():
encoder, content_type = choose_encoder(request.headers.get("accept"))
output = encoder(current_app.config["registry"])
return output, 200, {"Content-Type": content_type}
@blueprint.route("/health")
def health():
conn = current_app.config["celery_connection"]
uri = conn.as_uri()
try:
conn.ensure_connection(max_retries=3)
except kombu.exceptions.OperationalError:
logger.error("Failed to connect to broker='{}'", uri)
return (f"Failed to connect to broker: '{uri}'", 500)
except Exception: # pylint: disable=broad-except
logger.exception("Unrecognized error")
return ("Unknown exception", 500)
return f"Connected to the broker {conn.as_uri()}"
def start_http_server(registry, celery_connection, port):
app = Flask(__name__)
app.config["registry"] = registry
app.config["celery_connection"] = celery_connection
app.register_blueprint(blueprint)
Thread(
target=serve,
args=(app,),
kwargs=dict(host="0.0.0.0", port=port, _quiet=True),
daemon=True,
).start()
logger.info("Started celery-exporter at port='{}'", port)
|
Alignment/CommonAlignmentProducer/python/ALCARECOTkAlCosmics_cff.py | ckamtsikis/cmssw | 852 | 12746678 | # Author : <NAME>
# Date : July 19th, 2007
# last update: $Date: 2010/03/17 18:17:34 $ by $Author: mussgill $
import FWCore.ParameterSet.Config as cms
# DCS partitions
# "EBp","EBm","EEp","EEm","HBHEa","HBHEb","HBHEc","HF","HO","RPC"
# "DT0","DTp","DTm","CSCp","CSCm","CASTOR","TIBTID","TOB","TECp","TECm"
# "BPIX","FPIX","ESp","ESm"
import DPGAnalysis.Skims.skim_detstatus_cfi
ALCARECOTkAlCosmicsDCSFilter = DPGAnalysis.Skims.skim_detstatus_cfi.dcsstatus.clone(
DetectorType = cms.vstring('TIBTID','TOB','TECp','TECm','BPIX','FPIX'),
ApplyFilter = cms.bool(True),
AndOr = cms.bool(True),
DebugOn = cms.untracked.bool(False)
)
#________________________________Track selection____________________________________
# AlCaReco for track based alignment using Cosmic muons reconstructed by Combinatorial Track Finder
import Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi
ALCARECOTkAlCosmicsCTF = Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi.AlignmentTrackSelector.clone(
src = 'ctfWithMaterialTracksP5',
filter = True,
applyBasicCuts = True,
ptMin = 0., ##10
ptMax = 99999.,
pMin = 4., ##10
pMax = 99999.,
etaMin = -99., ##-2.4 keep also what is going through...
etaMax = 99., ## 2.4 ...both TEC with flat slope
nHitMin = 7,
nHitMin2D = 2,
chi2nMax = 999999.,
applyMultiplicityFilter = False,
applyNHighestPt = True, ## select only highest pT track
nHighestPt = 1
)
# AlCaReco for track based alignment using Cosmic muons reconstructed by Cosmic Track Finder
# (same cuts)
ALCARECOTkAlCosmicsCosmicTF = ALCARECOTkAlCosmicsCTF.clone(
src = 'cosmictrackfinderP5' ## different for CTF
)
# AlCaReco for track based alignment using Cosmic muons reconstructed by Regional Cosmic Tracking
# (same cuts)
ALCARECOTkAlCosmicsRegional = ALCARECOTkAlCosmicsCTF.clone(
src = 'regionalCosmicTracks'
)
#________________________________Sequences____________________________________
seqALCARECOTkAlCosmicsCTF = cms.Sequence(ALCARECOTkAlCosmicsCTF)
seqALCARECOTkAlCosmicsCosmicTF = cms.Sequence(ALCARECOTkAlCosmicsCosmicTF)
seqALCARECOTkAlCosmicsRegional = cms.Sequence(ALCARECOTkAlCosmicsRegional)
|
configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py | evgps/mmdetection_trashcan | 426 | 12746687 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
# fp16 settings
fp16 = dict(loss_scale=512.)
|
elliot/recommender/latent_factor_models/MF/__init__.py | gategill/elliot | 175 | 12746723 | <filename>elliot/recommender/latent_factor_models/MF/__init__.py
from .matrix_factorization import MF
|
examples/low_level/create_a_view_low_level.py | pingod/python-jenkins_api | 556 | 12746744 | """
A low level example:
This is how JenkinsAPI creates views
"""
from __future__ import print_function
import json
import requests
url = 'http://localhost:8080/createView'
str_view_name = "blahblah123"
params = {} # {'name': str_view_name}
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
data = {
"name": str_view_name,
"mode": "hudson.model.ListView",
"Submit": "OK",
"json": json.dumps(
{"name": str_view_name, "mode": "hudson.model.ListView"}
)
}
# Try 1
result = requests.post(url, params=params, data=data, headers=headers)
print(result.text.encode('UTF-8'))
|
lib/tool_shed/test/functional/test_1460_data_managers.py | quacksawbones/galaxy-1 | 1,085 | 12746769 | import logging
from ..base.twilltestcase import common, ShedTwillTestCase
log = logging.getLogger(__name__)
category_name = 'Test 1460 Data Manager'
category_description = 'Test script 1460 for testing Data Managers'
data_manager_repository_name = 'data_manager_1460'
data_manager_repository_description = 'Repository that contains a Data Manager'
data_manager_repository_long_description = f'{data_manager_repository_name}: {data_manager_repository_description}'
data_manager_name = 'testing_data_manager'
data_manager_tar_file = '1460_files/data_manager_files/test_data_manager.tar'
'''
1. Add a Data Manager to toolshed
2. Install the Data Manager into Galaxy
3. Check that the Data Manager tool appears in Galaxy
'''
# TODO: Allow testing actual Execution of installed Data Manager Tool.
class TestDataManagers(ShedTwillTestCase):
'''Test installing a repository containing a Data Manager.'''
def test_0000_initiate_users_and_category(self):
"""Create necessary user accounts and login as an admin user."""
self.login(email=common.admin_email, username=common.admin_username)
admin_user = self.test_db_util.get_user(common.admin_email)
assert admin_user is not None, f'Problem retrieving user with email {common.admin_email} from the database'
self.test_db_util.get_private_role(admin_user)
self.create_category(name=category_name, description=category_description)
self.login(email=common.test_user_2_email, username=common.test_user_2_name)
test_user_2 = self.test_db_util.get_user(common.test_user_2_email)
assert test_user_2 is not None, f'Problem retrieving user with email {common.test_user_2_email} from the database'
self.test_db_util.get_private_role(test_user_2)
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
test_user_1 = self.test_db_util.get_user(common.test_user_1_email)
assert test_user_1 is not None, f'Problem retrieving user with email {common.test_user_1_email} from the database'
self.test_db_util.get_private_role(test_user_1)
def test_0010_create_data_manager_repository(self):
'''Create and populate data_manager_1460.
This is step 1 - Create repository data_manager_1460.
Create and populate a repository that contains a Data manager.
'''
category = self.test_db_util.get_category_by_name(category_name)
repository = self.get_or_create_repository(name=data_manager_repository_name,
description=data_manager_repository_description,
long_description=data_manager_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id(category.id),
strings_displayed=[])
# Upload the data manager files to the repository.
self.upload_file(repository,
filename=data_manager_tar_file,
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message=f'Populate {data_manager_repository_name} with a data manager configuration.',
strings_displayed=[],
strings_not_displayed=[])
def test_0020_install_data_manager_repository(self):
'''Install the data_manager_1460 repository to galaxy.
This is step 3 - Attempt to install the repository into a galaxy instance, verify that it is installed.
'''
self.galaxy_login(email=common.admin_email, username=common.admin_username)
post_submit_strings_displayed = [data_manager_repository_name]
self.install_repository(data_manager_repository_name,
common.test_user_1_name,
category_name,
install_tool_dependencies=True,
post_submit_strings_displayed=post_submit_strings_displayed)
def test_0030_verify_data_manager_tool(self):
'''Verify that the data_manager_1460 repository is installed and Data Manager tool appears in list in Galaxy.'''
repository = self.test_db_util.get_installed_repository_by_name_owner(data_manager_repository_name, common.test_user_1_name)
strings_displayed = ['status', 'jobs', data_manager_name]
self.display_installed_jobs_list_page(repository, data_manager_names=data_manager_name, strings_displayed=strings_displayed)
def test_0040_verify_data_manager_data_table(self):
'''Verify that the installed repository populated shed_tool_data_table.xml and the sample files.'''
self.verify_installed_repository_data_table_entries(required_data_table_entries=['data_manager_test_data_table'])
|
evosax/utils/es_logger.py | mahi97/evosax | 102 | 12746776 | <reponame>mahi97/evosax
import pickle
import jax
import jax.numpy as jnp
import chex
from functools import partial
class ESLog(object):
def __init__(
self, num_dims: int, num_generations: int, top_k: int, maximize: bool
):
"""Simple jittable logging tool for ES rollouts."""
self.num_dims = num_dims
self.num_generations = num_generations
self.top_k = top_k
self.maximize = maximize
@partial(jax.jit, static_argnums=(0,))
def initialize(self) -> chex.ArrayTree:
"""Initialize the logger storage."""
log = {
"top_fitness": jnp.zeros(self.top_k)
- 1e10 * self.maximize
+ 1e10 * (1 - self.maximize),
"top_params": jnp.zeros((self.top_k, self.num_dims))
- 1e10 * self.maximize
+ 1e10 * (1 - self.maximize),
"log_top_1": jnp.zeros(self.num_generations)
- 1e10 * self.maximize
+ 1e10 * (1 - self.maximize),
"log_top_mean": jnp.zeros(self.num_generations)
- 1e10 * self.maximize
+ 1e10 * (1 - self.maximize),
"log_top_std": jnp.zeros(self.num_generations)
- 1e10 * self.maximize
+ 1e10 * (1 - self.maximize),
"log_gen_1": jnp.zeros(self.num_generations)
- 1e10 * self.maximize
+ 1e10 * (1 - self.maximize),
"log_gen_mean": jnp.zeros(self.num_generations)
- 1e10 * self.maximize
+ 1e10 * (1 - self.maximize),
"log_gen_std": jnp.zeros(self.num_generations)
- 1e10 * self.maximize
+ 1e10 * (1 - self.maximize),
"gen_counter": 0,
}
return log
# @partial(jax.jit, static_argnums=(0,))
def update(
self, log: chex.ArrayTree, x: chex.Array, fitness: chex.Array
) -> chex.ArrayTree:
"""Update the logging storage with newest data."""
# Check if there are solutions better than current archive
vals = jnp.hstack([log["top_fitness"], fitness])
params = jnp.vstack([log["top_params"], x])
top_idx = (
self.maximize * ((-1) * vals).argsort()
+ ((1 - self.maximize) * vals).argsort()
)
log["top_fitness"] = vals[top_idx[: self.top_k]]
log["top_params"] = params[top_idx[: self.top_k]]
log["log_top_1"] = (
log["log_top_1"].at[log["gen_counter"]].set(log["top_fitness"][0])
)
log["log_top_mean"] = (
log["log_top_mean"]
.at[log["gen_counter"]]
.set(jnp.mean(log["top_fitness"]))
)
log["log_top_std"] = (
log["log_top_std"]
.at[log["gen_counter"]]
.set(jnp.std(log["top_fitness"]))
)
log["log_gen_1"] = (
log["log_gen_1"]
.at[log["gen_counter"]]
.set(
self.maximize * jnp.max(fitness)
+ (1 - self.maximize) * jnp.min(fitness)
)
)
log["log_gen_mean"] = (
log["log_gen_mean"].at[log["gen_counter"]].set(jnp.mean(fitness))
)
log["log_gen_std"] = (
log["log_gen_std"].at[log["gen_counter"]].set(jnp.std(fitness))
)
log["gen_counter"] += 1
return log
def save(self, log: chex.ArrayTree, filename: str):
"""Save different parts of logger in .pkl file."""
with open(filename, "wb") as handle:
pickle.dump(log, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load(self, filename: str):
"""Reload the pickle logger and return dictionary."""
with open(filename, "rb") as handle:
es_logger = pickle.load(handle)
return es_logger
def plot(
self,
log,
title,
ylims=None,
fig=None,
ax=None,
no_legend=False,
):
"""Plot fitness trajectory from evo logger over generations."""
import matplotlib.pyplot as plt
if fig is None or ax is None:
fig, ax = plt.subplots(1, 1, figsize=(6, 3))
int_range = jnp.arange(1, log["gen_counter"] + 1)
ax.plot(
int_range, log["log_top_1"][: log["gen_counter"]], label="Top 1"
)
ax.plot(
int_range,
log["log_top_mean"][: log["gen_counter"]],
label=f"Top-{self.top_k} Mean",
)
ax.plot(
int_range, log["log_gen_1"][: log["gen_counter"]], label="Gen. 1"
)
ax.plot(
int_range,
log["log_gen_mean"][: log["gen_counter"]],
label="Gen. Mean",
)
if ylims is not None:
ax.set_ylim(ylims)
if not no_legend:
ax.legend()
if title is not None:
ax.set_title(title)
ax.set_xlabel("Number of Generations")
ax.set_ylabel("Fitness Score")
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
return fig, ax
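# Usage sketch (illustrative; the toy objective and shapes are made up):
# import jax
# rng = jax.random.PRNGKey(0)
# es_log = ESLog(num_dims=2, num_generations=10, top_k=3, maximize=True)
# log = es_log.initialize()
# x = jax.random.normal(rng, (8, 2))  # 8 candidate solutions
# fitness = -jnp.sum(x ** 2, axis=1)  # toy objective to maximize
# log = es_log.update(log, x, fitness)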
|
test/programytest/clients/restful/flask/viber/test_client.py | cdoebler1/AIML2 | 345 | 12746803 | <filename>test/programytest/clients/restful/flask/viber/test_client.py
import logging
import unittest.mock
from viberbot import Api
from viberbot.api.user_profile import UserProfile
from viberbot.api.viber_requests import ViberConversationStartedRequest
from viberbot.api.viber_requests import ViberFailedRequest
from viberbot.api.viber_requests import ViberMessageRequest
from viberbot.api.viber_requests import ViberSubscribedRequest
from viberbot.api.viber_requests import ViberUnsubscribedRequest
from programy.clients.restful.flask.viber.client import ViberBotClient
from programy.clients.restful.flask.viber.config import ViberConfiguration
from programy.clients.render.text import TextRenderer
from programytest.clients.arguments import MockArgumentParser
class MockViberApi(Api):
def __init__(self, configuration, request=None, verified=True):
self._logger = logging.getLogger()
self._messages = []
self._request = request
self._verified = verified
def set_webhook(self, url, webhook_events=None, is_inline=False):
pass
def send_messages(self, to, messages, chat_id=None):
self._messages = messages
def verify_signature(self, request_data, signature):
return self._verified
def parse_request(self, request_data):
if self._request is None:
super(MockViberApi, self).parse_request(request_data)
return self._request
class MockViberBotClient(ViberBotClient):
def __init__(self, argument_parser=None, viber_client=None):
self.test_viber_client = viber_client
self.test_question = None
ViberBotClient.__init__(self, argument_parser)
def parse_configuration(self):
self.configuration.client_configuration._name = "ViberBot"
self.configuration.client_configuration._avatar = "viber.svg"
self.configuration.client_configuration._webhook = "http://localhost/webhook"
def set_question(self, question):
self.test_question = question
def get_license_keys(self):
self._viber_token = "VIBER_TOKEN"
def ask_question(self, sessionid, question):
if self.test_question is not None:
return self.test_question
return super(MockViberBotClient, self).ask_question(sessionid, question)
def create_viber_api(self, configuration):
return MockViberApi(configuration)
def create_viber_bot(self, viber_token):
if self.test_viber_client is not None:
return self.test_viber_client
return super(MockViberBotClient,self).create_viber_bot(viber_token)
class ViberBotClientTests(unittest.TestCase):
def test_viber_client_init(self):
arguments = MockArgumentParser()
client = MockViberBotClient(arguments)
self.assertIsNotNone(client)
self.assertEqual("VIBER_TOKEN", client._viber_token)
self.assertIsNotNone(client._viber_bot)
self.assertIsInstance(client.get_client_configuration(), ViberConfiguration)
self.assertIsInstance(client._viber_bot, Api)
self.assertFalse(client._render_callback())
self.assertIsInstance(client.renderer, TextRenderer)
def test_create_viber_bot_no_token(self):
arguments = MockArgumentParser()
client = MockViberBotClient(arguments)
self.assertIsNotNone(client)
bot = client.create_viber_bot(None)
self.assertIsNone(bot)
def test_create_viber_bot_no_name(self):
arguments = MockArgumentParser()
client = MockViberBotClient(arguments)
self.assertIsNotNone(client)
client.configuration.client_configuration._name = None
bot = client.create_viber_bot("TOKEN")
self.assertIsNone(bot)
def test_create_viber_bot_no_avatar(self):
arguments = MockArgumentParser()
client = MockViberBotClient(arguments)
self.assertIsNotNone(client)
client.configuration.client_configuration._avatar = None
bot = client.create_viber_bot("TOKEN")
self.assertIsNone(bot)
def test_create_viber_bot_no_webhook(self):
arguments = MockArgumentParser()
client = MockViberBotClient(arguments)
self.assertIsNotNone(client)
client.configuration.client_configuration._webhook = None
bot = client.create_viber_bot("TOKEN")
self.assertIsNone(bot)
def test_handle_message_request(self):
arguments = MockArgumentParser()
client = MockViberBotClient(arguments, viber_client=MockViberApi(None))
self.assertIsNotNone(client)
request = ViberMessageRequest()
request._message = "Hello"
request._sender = UserProfile(user_id="User123")
client.test_question = "Hi there"
client.handle_message_request(request)
self.assertIsNotNone(client.test_viber_client)
self.assertIsNotNone(client.test_viber_client._messages)
self.assertEqual(1, len(client.test_viber_client._messages))
self.assertEqual("Hi there", client.test_viber_client._messages[0].text)
def test_handle_subscribed_request(self):
arguments = MockArgumentParser()
client = MockViberBotClient(arguments, viber_client=MockViberApi(None))
self.assertIsNotNone(client)
request = ViberSubscribedRequest ()
request._user = UserProfile(user_id="User123")
client.handle_subscribed_request(request)
self.assertIsNotNone(client.test_viber_client)
self.assertIsNotNone(client.test_viber_client._messages)
self.assertEqual(1, len(client.test_viber_client._messages))
self.assertEqual("Thanks for subscribing!", client.test_viber_client._messages[0].text)
def test_handle_unsubscribed_request(self):
arguments = MockArgumentParser()
client = MockViberBotClient(arguments, viber_client=MockViberApi(None))
self.assertIsNotNone(client)
request = ViberUnsubscribedRequest()
request._user_id = "User123"
client.handle_unsubscribed_request(request)
def test_handle_conversation_started_request(self):
arguments = MockArgumentParser()
client = MockViberBotClient(arguments, viber_client=MockViberApi(None))
self.assertIsNotNone(client)
request = ViberConversationStartedRequest()
request._user = UserProfile(user_id="User123")
client.handle_conversation_started_request(request)
def test_handle_failed_request(self):
arguments = MockArgumentParser()
client = MockViberBotClient(arguments, viber_client=MockViberApi(None))
self.assertIsNotNone(client)
request = ViberFailedRequest()
request._user_id = "User123"
request._desc = "Whoops, I know nothing!"
client.handle_failed_request(request)
def test_handle_unknown_request(self):
arguments = MockArgumentParser()
client = MockViberBotClient(arguments, viber_client=MockViberApi(None))
self.assertIsNotNone(client)
request = unittest.mock.Mock()
client.handle_unknown_request(request)
def test_receive_message(self):
arguments = MockArgumentParser()
viber_api = MockViberApi(None)
client = MockViberBotClient(arguments, viber_client=viber_api)
self.assertIsNotNone(client)
request = unittest.mock.Mock()
request.get_data.return_value = "{}"
request.headers = {"X-Viber-Content-Signature": "SIGNATURE"}
return_request = ViberMessageRequest()
return_request._message = "Hello"
return_request._sender = UserProfile(user_id="User123")
viber_api._request = return_request
client.receive_message(request) |
scenic/projects/baselines/bert/bert_base_model.py | techthiyanes/scenic | 688 | 12746804 | <filename>scenic/projects/baselines/bert/bert_base_model.py
"""Base class for models working with bert."""
from typing import Callable, Dict, Optional, Tuple, Union
from flax.training import common_utils
import jax
import jax.numpy as jnp
import numpy as np
from scenic.model_lib.base_models import base_model
from scenic.model_lib.base_models import model_utils
# Aliases for custom types:
Batch = Dict[str, jnp.ndarray]
MetricFn = Callable[[Dict[str, jnp.ndarray], Batch], Dict[str, Tuple[float,
int]]]
LossFn = Callable[[Dict[str, jnp.ndarray], Batch, Optional[jnp.ndarray]], float]
def num_examples(
logits: jnp.ndarray,
weights: Optional[jnp.ndarray] = None) -> Union[jnp.ndarray, int]:
if weights is None:
return logits.shape[0]
return weights.sum()
def sparse_weighted_unnormalized_softmax_cross_entropy(
logits: jnp.ndarray,
labels: jnp.ndarray,
mlm_weights: jnp.ndarray,
batch_mask_weights: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Computes sparse weighted softmax cross entropy give logits and targets.
Args:
logits: Logits of shape [batch_size, length, vocab_size].
labels: Labels from {0 ... vocab_size - 1} of shape [batch_size, length].
mlm_weights: Weights of shape [batch_size, length], indicating masked tokens
in masked language modeling task.
batch_mask_weights: None or array of shape [batch,] indicating masked
examples.
Returns:
Per example Loss value.
"""
batch_size, length, vocab_size = logits.shape
logits = jax.nn.log_softmax(logits)
logits, mlm_weights = logits.ravel(), mlm_weights.ravel()
offsets = (np.arange(batch_size * length) * vocab_size).reshape((-1, length))
labels = (labels + offsets).ravel()
loss = -jnp.take(logits, labels, axis=0)
loss = loss * mlm_weights
loss = loss.sum(axis=-1) / (mlm_weights.sum(axis=-1) + 1e-8)
if batch_mask_weights is not None:
loss = model_utils.apply_weights(loss, batch_mask_weights)
return loss
def sparse_weighted_softmax_cross_entropy(
logits: jnp.ndarray,
labels: jnp.ndarray,
mlm_weights: jnp.ndarray,
batch_mask_weights: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Same as weighted_unnormalized, but additionally takes a mean.
Args:
logits: Logits of shape [batch_size, length, vocab_size].
labels: Labels from {0 ... vocab_size - 1} of shape [batch_size, length].
mlm_weights: Weights of shape [batch_size, length], indicating masked tokens
in masked language modeling task.
batch_mask_weights: None or array of shape [batch,] indicating masked
examples.
Returns:
The mean cross entropy of the examples in the given batch as a scalar.
"""
if batch_mask_weights is not None:
normalization = batch_mask_weights.sum()
else:
normalization = mlm_weights.shape[0] # Batch size.
sparse_unnormalized_softmax_ce = sparse_weighted_unnormalized_softmax_cross_entropy(
logits, labels, mlm_weights, batch_mask_weights)
return jnp.sum(sparse_unnormalized_softmax_ce) / (normalization + 1e-8)
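# Shape sketch (illustrative only): with all-zero (uniform) logits the per-token
# cross entropy is log(vocab_size), so for vocab_size=11 the mean loss is ~2.398.
# batch, length, vocab = 2, 5, 11
# logits = jnp.zeros((batch, length, vocab))
# labels = jnp.zeros((batch, length), dtype=jnp.int32)
# mlm_weights = jnp.ones((batch, length))
# loss = sparse_weighted_softmax_cross_entropy(logits, labels, mlm_weights)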
def sparse_weighted_per_example_accuracy(
logits: jnp.ndarray,
labels: jnp.ndarray,
mlm_weights: jnp.ndarray,
batch_mask_weights: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Computes weighted number of correctly classified over the given batch.
This computes the weighted number of correctly classified masked tokens in a
single, potentially padded minibatch. If the minibatch/inputs is padded (i.e.,
it contains null examples/pad pixels) it is assumed that batch_mask_weights
is a binary mask where 0 indicates that the example/pixel is null/padded.
We assume the trainer will aggregate and divide by number of samples.
Args:
logits: Logits of shape [batch_size, length, vocab_size].
labels: Labels from {0 ... vocab_size - 1} of shape [batch_size, length].
mlm_weights: Weights of shape [batch_size, length], indicating masked tokens
in masked language modeling task.
batch_mask_weights: None or array of shape [batch,] indicating masked
examples.
Returns:
Per example accuracy of predicted masked tokens.
"""
preds = jnp.argmax(logits, axis=-1)
correct = jnp.equal(preds, labels)
correct = correct * mlm_weights
# Shape of per example acccuracy will be (batch_size,).
per_ex_accuracy = correct.sum(axis=-1) / (mlm_weights.sum(axis=-1) + 1e-8)
if batch_mask_weights is not None:
per_ex_accuracy = model_utils.apply_weights(per_ex_accuracy,
batch_mask_weights)
return per_ex_accuracy
def bert_metrics_function(outputs: Dict[str, jnp.ndarray],
batch: Batch) -> Dict[str, Tuple[float, int]]:
"""Calcualte metrics for the BERT task.
Args:
outputs: Output of model that has masked LM logits of shape [batch, length,
vocab_size], and next sentence prediction logits of shape [batch, 2].
batch: Batch of data that has 'masked_lm_ids', 'masked_lm_weights' and
'next_sentence_labels'.
Returns:
A dict of metrics, in which keys are metrics name and values are tuples of
(metric, normalizer).
"""
mlm_logits = outputs['mlm_logits']
nsp_logits = outputs['nsp_logits']
next_sentence_labels = common_utils.onehot(batch['next_sentence_labels'], 2)
batch_weights = batch.get('batch_mask') # batch_mask might not be defined
per_ex_nsp_loss = model_utils.weighted_unnormalized_softmax_cross_entropy(
nsp_logits, next_sentence_labels, batch_weights)
per_ex_nsp_accuracy = model_utils.weighted_correctly_classified(
nsp_logits, next_sentence_labels, batch_weights)
per_ex_mlm_loss = sparse_weighted_unnormalized_softmax_cross_entropy(
mlm_logits, batch['masked_lm_ids'], batch['masked_lm_weights'],
batch_weights)
per_ex_mlm_accuracy = sparse_weighted_per_example_accuracy(
mlm_logits, batch['masked_lm_ids'], batch['masked_lm_weights'],
batch_weights)
# This psum is required to correctly evaluate with multihost. Only host 0
# will report the metrics, so we must aggregate across all hosts. The psum
# will map an array of shape [n_global_devices, batch_size] -> [batch_size]
# by summing across the devices dimension. The outer sum then sums across the
# batch dim. The result is then we have summed across all samples in the
# sharded batch.
evaluated_metrics = {}
normalizer = num_examples(mlm_logits, batch_weights)
for name, value in zip(
['nsp_loss', 'nsp_accuracy', 'mlm_loss', 'mlm_accuracy', 'loss'], [
per_ex_nsp_loss, per_ex_nsp_accuracy, per_ex_mlm_loss,
per_ex_mlm_accuracy, per_ex_nsp_loss + per_ex_mlm_loss
]):
evaluated_metrics[name] = model_utils.psum_metric_normalizer(
(value, normalizer))
return evaluated_metrics
def compute_bert_loss(mlm_logits: jnp.ndarray, nsp_logits: jnp.ndarray,
batch: Batch) -> float:
"""Computes BERT loss.
Args:
mlm_logits: Masked LM logits of shape [batch, length, vocab_size].
nsp_logits: Next sentence prediction logits of shape [batch, 2].
batch: Batch of data that has 'masked_lm_ids', 'masked_lm_weights' and
'next_sentence_labels'.
Returns:
Loss value.
"""
next_sentence_labels = common_utils.onehot(batch['next_sentence_labels'], 2)
batch_weights = batch.get('batch_mask') # batch_mask might not be defined
nsp_loss = model_utils.weighted_softmax_cross_entropy(nsp_logits,
next_sentence_labels,
batch_weights)
mlm_loss = sparse_weighted_softmax_cross_entropy(mlm_logits,
batch['masked_lm_ids'],
batch['masked_lm_weights'],
batch_weights)
return nsp_loss + mlm_loss
class BERTBaseModel(base_model.BaseModel):
"""Defines BERT base models.
A model is class with three members: get_metrics_fn, loss_fn, and a
flax_model.
get_metrics_fn returns a callable function, metric_fn, that calculates the
metrics and returns a dictionary. The metric function computes f(x_i, y_i) on
a minibatch, it has API:
```metric_fn(logits, label, weights).```
The trainer will then aggregate and compute the mean across all samples
evaluated.
loss_fn is a function with the API
loss = loss_fn(logits, batch, model_params=None).
This model class defines a softmax_cross_entropy_loss with weight decay,
where the weight decay factor is determined by config.l2_decay_factor.
flax_model is returned from the build_flax_model function. A typical
usage pattern will be:
```
model_cls = bert_model.BERTModel
model = model_cls(config, dataset.meta_data)
flax_model = model.build_flax_model
dummy_input = {name: jnp.zeros(input_shape, model_input_dtype), ...}
model_state, params = flax_model.init(
rng, dummy_input, train=False).pop('params')
```
And this is how to call the model:
```
variables = {'params': params, **model_state}
output, new_model_state = flax_model.apply(variables, inputs, ...)
```
"""
def get_metrics_fn(self, split: Optional[str] = None) -> MetricFn:
"""Returns a callable metric function for the model.
Args:
split: The split for which we calculate the metrics. It should be one of
the ['train', 'validation', 'test'].
Returns: A metric function with the following API: ```metrics_fn(outputs,
batch)```
"""
del split # For all splits, we return the same metric functions.
return bert_metrics_function
def loss_function(self,
outputs: Dict[str, jnp.ndarray],
batch: Batch,
model_params: Optional[jnp.ndarray] = None) -> float:
"""Returns softmax cross entropy loss with an L2 penalty on the weights.
Args:
outputs: a dictionary containing either 'logits' key of shape [batch,
length, num_classes] or 'nsp_logits' of shape [batch, 2] and
'mlm_logits' of shape [batch, length, vocab_size] (for 'BERT' task).
batch: Batch of data that has 'label' and optionally 'batch_mask'.
model_params: Parameters of the model, for optionally applying
regularization.
Returns:
Total loss.
"""
total_loss = compute_bert_loss(outputs['mlm_logits'], outputs['nsp_logits'],
batch)
if self.config.get('l2_decay_factor'):
l2_loss = model_utils.l2_regularization(model_params)
total_loss += 0.5 * self.config.l2_decay_factor * l2_loss
return total_loss
def build_flax_model(self):
raise NotImplementedError('Subclasses must implement build_flax_model().')
def default_flax_model_config(self):
"""Default config for the flax model that is built in `build_flax_model`.
    This function in particular serves the testing functions and is supposed to
    provide the config that is passed to the flax_model when it is built in the
    `build_flax_model` function, e.g., `model_dtype_str`.
"""
raise NotImplementedError(
'Subclasses must implement default_flax_model_config().')
|
aliyun-python-sdk-cloudmarketing/aliyunsdkcloudmarketing/request/v20180910/DescribeFileRequest.py | yndu13/aliyun-openapi-python-sdk | 1,001 | 12746811 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeFileRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'cloudmarketing', '2018-09-10', 'DescribeFile')
self.set_method('POST')
def get_FileName(self):
return self.get_query_params().get('FileName')
def set_FileName(self,FileName):
self.add_query_param('FileName',FileName)
def get_DataSchemaStatusLists(self):
return self.get_query_params().get('DataSchemaStatusLists')
def set_DataSchemaStatusLists(self, DataSchemaStatusLists):
for depth1 in range(len(DataSchemaStatusLists)):
if DataSchemaStatusLists[depth1] is not None:
self.add_query_param('DataSchemaStatusList.' + str(depth1 + 1) , DataSchemaStatusLists[depth1])
def get_PageNo(self):
return self.get_query_params().get('PageNo')
def set_PageNo(self,PageNo):
self.add_query_param('PageNo',PageNo)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_FileId(self):
return self.get_query_params().get('FileId')
def set_FileId(self,FileId):
self.add_query_param('FileId',FileId) |
cpmpy/cpmpy_hakank.py | hakank/hakank | 279 | 12746828 | """
This package includes my constraints/utilities/etc for cpmpy.
This cpmpy model was written by <NAME> (<EMAIL>)
See also my cpmpy page: http://hakank.org/cpmpy/
"""
import sys, math, re
import itertools
import numpy as np
from functools import reduce
from cpmpy import *
from cpmpy.expressions.globalconstraints import GlobalConstraint
from cpmpy.solvers import *
from ortools.sat.python import cp_model as ort
from cpmpy.transformations.flatten_model import flatten_constraint, flatten_model
from cpmpy.transformations.get_variables import print_variables
def AllDifferent_except_0(args):
"""
Ensure that all arguments that are != 0 must have distinct values.
"""
    # Note: The parentheses around (var1 != 0) are needed!
return [ ((var1!= 0) & (var2 != 0)).implies(var1 != var2) for var1, var2 in all_pairs(args)]
def all_different_except_0(args):
"""
Alias for AllDifferent_except_0(args).
"""
return AllDifferent_except_0(args)
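# Illustrative sketch (added for clarity; not part of the original file): a
# minimal model using AllDifferent_except_0. Zeros may repeat, but all
# non-zero values must be pairwise distinct.
def _all_different_except_0_example():
    x = intvar(0, 3, shape=4)
    model = Model(AllDifferent_except_0(x), x[0] == 0, x[1] == 0)
    if model.solve():
        print(x.value())  # e.g. [0 0 1 2]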
def to_num(a,n,base):
"""
to_num(a, n, base)
    Ensure that the digits in array `a` correspond to the number `n` in base `base`.
"""
tlen = len(a)
return n == sum([(base ** (tlen - i - 1)) * a[i] for i in range(tlen)])
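# Illustrative sketch (added; not part of the original file): recovering the
# base-10 digits of 314 with to_num, using the usual cpmpy Model()/solve() flow.
def _to_num_example():
    a = intvar(0, 9, shape=3)
    n = intvar(0, 999)
    model = Model(to_num(a, n, 10), n == 314)
    if model.solve():
        print(a.value(), n.value())  # digits [3 1 4] and n = 314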
def increasing(args):
"""
Ensure that the values in args are increasing.
"""
return [args[i-1] <= args[i] for i in range(1,len(args))]
def increasing_strict(args):
"""
    Ensure that the values in args are strictly increasing.
"""
return [args[i-1] < args[i] for i in range(1,len(args))]
def decreasing(args):
"""
Ensure that the values in args are decreasing.
"""
return [args[i-1] >= args[i] for i in range(1,len(args))]
def decreasing_strict(args):
"""
    Ensure that the values in args are strictly decreasing.
    """
    return [args[i-1] > args[i] for i in range(1,len(args))]
def all_pairs(args):
"""
    Generate all pairs from the list args.
    (stolen from cpmpy/globalconstraints.py)
"""
return list(itertools.combinations(args, 2))
def get_different_solution(m,x):
"""
Add the current solution (x) in the model to generate
other solutions.
Usage:
# ...
ss = CPM_ortools(model)
if ss.solve():
print(x.value())
get_different_solution(ss, x)
Note: The array in x must be a flattened array. If there are
many decision variables, use flatten_lists(a) to
flatten out the array. E.g.
# ...
ss = CPM_ortools(model)
while ss.solve():
print(x.value()) # an array
print(y.value()) # a variable
print(z.value()) # another variable
        get_different_solution(ss, flatten_lists([x,[y,z]]))
Note that this might be slow for larger models or models with
many solutions. If so, try to use
- ortools_wrapper()
or the simple solution printers such as
- ORT_simple_printer
- ORT_arrays_printer
- ORT_simple_printer_matrix
- ORT_simple_function_printer
    or define a similar solution printer.
"""
# n = len(x)
# m += [any([x[i].value() != x[i] for i in range(n)])]
m += [any([t.value() != t for t in x])]
def flatten_lists(a):
"""
Flatten a list of lists.
Note: a must be an array of arrays (list of lists).
See get_different_solution for examples.
"""
return [item for sublist in a for item in sublist]
class ORT_simple_printer(ort.CpSolverSolutionCallback):
"""
A simple printer callback for single array printing.
"""
def __init__(self, varmap, a, num_solutions=0):
super().__init__()
self.solcount = 0
self.varmap = varmap
self.vars = (a)
self.num_solutions=num_solutions
def on_solution_callback(self):
self.solcount += 1 # I always start at 1. :-)
# populate values before printing
# For array of arrays (Tias' original)
# for wm in self.vars:
# for cpm_var in wm:
# cpm_var._value = self.Value(self.varmap[cpm_var])
# For single arrays:
for cpm_var in self.vars:
cpm_var._value = self.Value(self.varmap[cpm_var])
(a) = self.vars
print(f"#{self.solcount}: {a.value()}")
if self.num_solutions > 0 and self.solcount >= self.num_solutions:
self.StopSearch()
class ORT_arrays_printer(ort.CpSolverSolutionCallback):
"""
A simple printer callback for array of arrays.
"""
def __init__(self, varmap, a, num_solutions=0):
super().__init__()
self.solcount = 0
self.varmap = varmap
self.vars = (a)
self.num_solutions=num_solutions
def on_solution_callback(self):
self.solcount += 1 # I always start at 1. :-)
# populate values before printing
# For array of arrays (Tias' original)
for wm in self.vars:
for cpm_var in wm:
cpm_var._value = self.Value(self.varmap[cpm_var])
# For single arrays:
for cpm_var in self.vars:
cpm_var._value = self.Value(self.varmap[cpm_var])
(a) = self.vars
print(f"#{self.solcount}: {a.value()}")
if self.num_solutions > 0 and self.solcount >= self.num_solutions:
self.StopSearch()
class ORT_simple_printer_matrix(ort.CpSolverSolutionCallback):
"""
A simple printer callback for printing a matrix.
"""
def __init__(self, varmap, a, rows,cols, num_solutions=0):
super().__init__()
self.solcount = 0
self.varmap = varmap
self.vars = (a)
self.rows = rows
self.cols = cols
self.num_solutions=num_solutions
def on_solution_callback(self):
self.solcount += 1
for cpm_var in self.vars:
cpm_var._value = self.Value(self.varmap[cpm_var])
(a) = self.vars
print(f"#{self.solcount}:")
for i in range(self.rows):
for j in range(self.cols):
print("%3d" % a[i*self.cols+j].value(), end=" ")
print()
print()
if self.num_solutions > 0 and self.solcount >= self.num_solutions:
self.StopSearch()
class ORT_simple_function_printer(ort.CpSolverSolutionCallback):
"""
A printer callback with a callback (cb_fun) for printing
the array a, which should be structured by the user and
including .value() for the variables.
    Note that the data array a must be a flattened array
to be used with this printer callback.
Example of a printer function:
def f(a):
print(a[0].value(),"+",a[1].value(),"=",a[2].value())
which will print a solution such as
2 + 3 = 5
"""
def __init__(self, varmap, a, cb_fun,num_solutions=0):
super().__init__()
self.solcount = 0
self.varmap = varmap
self.vars = (a)
self.cb_fun = cb_fun
self.num_solutions=num_solutions
def on_solution_callback(self):
self.solcount += 1
# For single arrays:
for cpm_var in self.vars:
cpm_var._value = self.Value(self.varmap[cpm_var])
(a) = self.vars
print(f"\n#{self.solcount}:")
self.cb_fun(a)
if self.num_solutions > 0 and self.solcount >= self.num_solutions:
self.StopSearch()
class ORT_simple_solution_counter(ort.CpSolverSolutionCallback):
"""
    This is a solution 'printer' that just counts the solutions.
"""
def __init__(self, varmap, a):
super().__init__()
self.solcount = 0
self.varmap = varmap
self.vars = (a)
def on_solution_callback(self):
self.solcount += 1
for wm in self.vars:
for cpm_var in wm:
cpm_var._value = self.Value(self.varmap[cpm_var])
(a) = self.vars
class ORT_function_printer_arrays(ort.CpSolverSolutionCallback):
"""
A printer callback with a callback (cb_fun) for printing
the array of arrays a, which should be structured by the user and
including .value() for the variables.
    This version prints the solution number.
Example of a printer function:
def print_solution(a):
print('x:', a[0].value())
print('y:', a[1].value())
"""
def __init__(self, varmap, a, cb_fun,num_solutions=0):
super().__init__()
self.solcount = 0
self.varmap = varmap
self.vars = (a)
self.cb_fun = cb_fun
self.num_solutions=num_solutions
def on_solution_callback(self):
self.solcount += 1
for wm in self.vars:
for cpm_var in wm:
cpm_var._value = self.Value(self.varmap[cpm_var])
(a) = self.vars
print(f"sol #{self.solcount}")
self.cb_fun(a)
print()
if self.num_solutions > 0 and self.solcount >= self.num_solutions:
self.StopSearch()
class ORT_function_printer_arrays2(ort.CpSolverSolutionCallback):
"""
A printer callback with a callback (cb_fun) for printing
the array of arrays a, which should be structured by the user and
including .value() for the variables.
    This version doesn't print the solution number.
Example of a printer function:
def print_solution(a):
print('x:', a[0].value())
print('y:', a[1].value())
"""
def __init__(self, varmap, a, cb_fun,num_solutions=0):
super().__init__()
self.solcount = 0
self.varmap = varmap
self.vars = (a)
self.cb_fun = cb_fun
self.num_solutions=num_solutions
def on_solution_callback(self):
self.solcount += 1
for wm in self.vars:
for cpm_var in wm:
cpm_var._value = self.Value(self.varmap[cpm_var])
(a) = self.vars
self.cb_fun(a)
if self.num_solutions > 0 and self.solcount >= self.num_solutions:
self.StopSearch()
def print_solution(a):
"""
print_solution(a)
Default callback method for printing the solution in a printer callback.
Note: a must be an array of arrays to be used with ortools_wrapper
(defined below).
"""
for x in a:
print(x.value())
def ortools_wrapper(model,var_array,print_solution=print_solution,num_sols=0):
"""
    ortools_wrapper(model,var_array,print_solution=print_solution,num_sols=0)
This is a simple wrapper for printing the solutions of a model and tends
to be (significantly) faster than using
ss = CPM_ortools(model)
while ss.solve():
# ...
get_different_solution(ss,flatten_lists(var_array))
Parameters:
- model : the model
- var_array: the array of arrays of the decision variables to be printed
with print_solution(var_array)
- print_solution: the method used to do the actual printing of the solution.
Default is print_solution(a) defined above. The function
can be overwritten / defined in the current constraint model.
- num_sols : number of solutions. Default 0, all solutions.
Note: For optimality problems, use ortools_wrapper_opt(.) instead.
"""
ss = CPM_ortools(model)
cb = ORT_function_printer_arrays(ss.varmap,var_array,print_solution,num_sols)
# Flags to experiment with
# ss.ort_solver.parameters.num_search_workers = 8 # Don't work together with SearchForAllSolutions
# ss.ort_solver.parameters.search_branching = ort.PORTFOLIO_SEARCH
# ss.ort_solver.parameters.cp_model_presolve = False
ss.ort_solver.parameters.linearization_level = 0
ss.ort_solver.parameters.cp_model_probing_level = 0
ort_status = ss.ort_solver.SearchForAllSolutions(ss.ort_model, cb)
ss._after_solve(ort_status)
print(ss.status())
print("Nr solutions:", cb.solcount)
print("Num conflicts:", ss.ort_solver.NumConflicts())
print("NumBranches:", ss.ort_solver.NumBranches())
print("WallTime:", ss.ort_solver.WallTime())
print()
def ortools_wrapper2(model,var_array,print_solution=print_solution,num_sols=0):
"""
    ortools_wrapper2(model,var_array,print_solution=print_solution,num_sols=0)
This is a simple wrapper for printing the solutions of a model and tends
to be (significantly) faster than using
ss = CPM_ortools(model)
while ss.solve():
# ...
get_different_solution(ss,flatten_lists(var_array))
    This version doesn't print the solution number.
Parameters:
- model : the model
- var_array: the array of arrays of the decision variables to be printed
with print_solution(var_array)
- print_solution: the method used to do the actual printing of the solution.
Default is print_solution(a) defined above. The function
can be overwritten / defined in the current constraint model.
- num_sols : number of solutions. Default 0, all solutions.
Note: For optimality problems, use ortools_wrapper_opt(.) instead.
"""
ss = CPM_ortools(model)
cb = ORT_function_printer_arrays2(ss.varmap,var_array,print_solution,num_sols)
# Flags to experiment with
# ss.ort_solver.parameters.num_search_workers = 8 # Don't work together with SearchForAllSolutions
# ss.ort_solver.parameters.search_branching = ort.PORTFOLIO_SEARCH
# ss.ort_solver.parameters.cp_model_presolve = False
ss.ort_solver.parameters.linearization_level = 0
ss.ort_solver.parameters.cp_model_probing_level = 0
ort_status = ss.ort_solver.SearchForAllSolutions(ss.ort_model, cb)
print()
ss._after_solve(ort_status) # post-process after solve() call...
print(ss.status())
print("Nr solutions:", cb.solcount)
print("Num conflicts:", ss.ort_solver.NumConflicts())
print("NumBranches:", ss.ort_solver.NumBranches())
print("WallTime:", ss.ort_solver.WallTime())
print()
def ortools_wrapper_opt(model,var_array,print_solution=print_solution,num_sols=1,num_procs=1):
"""
    ortools_wrapper_opt(model,var_array,print_solution=print_solution,num_sols=1,num_procs=1)
This is a simple wrapper for printing the _optimal_ solution of a model.
This tends to be (significantly) faster than using
if model.solve():
# ...
Parameters:
- model : the model
- var_array: the array of arrays of the decision variables to be printed
with print_solution(var_array)
- print_solution: the method used to do the actual printing of the solution.
Default is print_solution(a) defined above. The function
can be overwritten / defined in the current constraint model.
    - num_sols : number of solutions. Default 1 (the optimal solution).
"""
ss = CPM_ortools(model)
cb = ORT_function_printer_arrays(ss.varmap,var_array,print_solution,1)
# Flags to experiment with
if num_procs > 1:
ss.ort_solver.parameters.num_search_workers = num_procs
# ss.ort_solver.parameters.search_branching = ort.PORTFOLIO_SEARCH
# ss.ort_solver.parameters.cp_model_presolve = False
ss.ort_solver.parameters.linearization_level = 0
ss.ort_solver.parameters.cp_model_probing_level = 0
# Note: This is the real difference between this method and ortool_wrapper.
# For optimal problems one cannot use SearchForAllSolutions. Instead
    # one must use ss.ort_solver.Solve(model, callback).
# ort_status = ss.ort_solver.SearchForAllSolutions(ss.ort_model, cb)
ort_status = ss.ort_solver.Solve(ss.ort_model, cb)
ss._after_solve(ort_status) # post-process after solve() call...
print(ss.status())
print("Nr solutions:", cb.solcount)
print("Num conflicts:", ss.ort_solver.NumConflicts())
print("NumBranches:", ss.ort_solver.NumBranches())
print("WallTime:", ss.ort_solver.WallTime())
print()
def ortools_wrapper_count_solutions(model,var_array):
"""
    ortools_wrapper_count_solutions(model,var_array)
This is a simple wrapper for just counting the solutions of a model.
Parameters:
- model : the model
- var_array: the array of arrays of the decision variables to be printed
with print_solution(var_array)
"""
ss = CPM_ortools(model)
cb = ORT_simple_solution_counter(ss.varmap,var_array)
# Flags to experiment with
# ss.ort_solver.parameters.num_search_workers = 8 # Don't work together with SearchForAllSolutions
# ss.ort_solver.parameters.search_branching = ort.PORTFOLIO_SEARCH
# ss.ort_solver.parameters.cp_model_presolve = False
ss.ort_solver.parameters.linearization_level = 0
ss.ort_solver.parameters.cp_model_probing_level = 0
ort_status = ss.ort_solver.SearchForAllSolutions(ss.ort_model, cb)
ss._after_solve(ort_status)
return cb.solcount
def base_array(n):
"""
Returns an array of length `n` with base coefficients.
Example: `base_array(4)` returns the array [1000,100,10,1]
"""
return np.array([10**i for i in range(n-1,-1,-1)])
def scalar_product(a,b):
"""
`scalar_product(a,b)`
Returns the scalar product of the arrays `a` and `b`.
Assumption: `len(a) == len(b)`
"""
    assert len(a) == len(b), "len(a) must equal len(b)"
# return np.dot(a,b)
return sum(a*b)
def scalar_product1(a):
"""
`scalar_product1(a)`
Returns the scalar product of the array `a` and a base_array of appropriate length.
    The base array is built from len(a), so the lengths always match.
    """
# return np.dot(a,base_array(len(a)))
return sum(a*base_array(len(a)))
def my_circuit(x):
"""
    my_circuit(x)
    Ensures that x is a circuit.
    Note: This assumes that x has the domain 0..len(x)-1,
i.e. 0-based.
"""
assert x[0].lb == 0, f"circuit: lb is {x[0].lb}, but must be 0"
n = len(x)
z = intvar(0, n-1,shape=n,name='z')
constraints = [
AllDifferent(x),
AllDifferent(z),
        # put the orbit of x[0] in z[1..n]
z[0] == x[0],
[ z[i] == x[z[i-1]] for i in range(1, n-1)],
# may not be 0 for i < n-1
[ z[i] != 0 for i in range(1, n-1)],
# when i = n-1 it must be 0
z[n-1] == 0
]
return constraints
def my_circuit_path(x,z):
"""
    my_circuit_path(x,z)
    Ensures that x is a circuit and z is the path.
    Note: This assumes that x has the domain 0..len(x)-1,
i.e. 0-based.
"""
assert x[0].lb == 0, f"circuit: x[0].lb is {x[0].lb}, but must be 0"
n = len(x)
constraints = [
AllDifferent(x),
AllDifferent(z),
        # put the orbit of x[0] in z[1..n]
z[0] == x[0],
[ z[i] == x[z[i-1]] for i in range(1, n-1)],
# may not be 0 for i < n-1
[ z[i] != 0 for i in range(1, n-1)],
# when i = n-1 it must be 0
z[n-1] == 0
]
return constraints
def count(a,val,c):
"""
count(a,val,c)
c is the number of occurrences of val in array a.
"""
return [c == sum([a[i] == val for i in range(len(a))])
]
def atmost(a,val,c):
"""
atmost(a,val,c)
    Ensure that the number of occurrences of val in a is at most c.
"""
return [sum([a[i] == val for i in range(len(a))]) <= c]
def atleast(a,val,c):
"""
atleast(a,val,c)
    Ensure that the number of occurrences of val in a is at least c.
"""
return [sum([a[i] == val for i in range(len(a))]) >= c]
def exactly(a,val,c):
"""
exactly(a,val,c)
Ensure that the number of occurrences of val in a is exactly c.
"""
return [sum([a[i] == val for i in range(len(a))]) == c]
def global_cardinality_count(a,gcc):
"""
global_cardinality_count(a,gcc)
Global cardinality count: Collect the number of occurrences of each value 0..a.ub
    in gcc. The array gcc must have length ub+1 (one counter for each value 0..ub).
"""
n = len(a)
ub = max([a[i].ub for i in range(n)])
constraints = []
for i in range(ub+1):
constraints += [count(a,i,gcc[i])]
return constraints
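# Illustrative sketch (added; not part of the original file): gcc[v] ends up
# holding the number of occurrences of value v in x (v in 0..2 here).
def _global_cardinality_count_example():
    x = intvar(0, 2, shape=5)
    gcc = intvar(0, 5, shape=3)  # one counter per value 0..2
    model = Model(global_cardinality_count(x, gcc), gcc[2] == 3)
    if model.solve():
        print(x.value(), gcc.value())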
def inverse(x,y):
"""
inverse(x,y)
Ensures that:
    x[i] == j <=> y[j] == i
Note: inverse(x,y) is sometimes called assignment(x,y).
There is an alternative version: inverse(x) which can
be simulated by inverse(x,x)
"""
n = len(x)
assert n == len(y), "x and y must be of equal length"
constraints = []
for i in range(n):
for j in range(n):
constraints += [(x[i] == j) == (y[j] == i)]
return constraints
def my_cumulative(s, d, r, b):
"""
    Decomposition of cumulative.
Inspired by the MiniZinc implementation.
The MiniZinc decomposition is discussed in the paper:
<NAME>, <NAME>, <NAME>, and <NAME>.
'Why cumulative decomposition is not as bad as it sounds.'
Parameters:
s: start_times assumption: array of varint
d: durations assumption: array of int
r: resources assumption: array of int
b: resource limit assumption: varint or int
"""
constraints = []
max_d = max(d)
tasks = [i for i in range(len(s)) if r[i] > 0 and d[i] > 0]
times_min = min([s[i].lb for i in tasks])
times_max = max([s[i].ub + max_d for i in tasks])
for t in range(times_min, times_max + 1):
constraints += [ b >= sum([((s[i] <= t) & (t < s[i] + d[i])) * r[i] for i in tasks])]
# Somewhat experimental:
    # This constraint is needed to constrain the upper limit of b.
if not isinstance(b, int):
constraints += [b <= sum(r)]
return constraints
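# Illustrative sketch (added; not part of the original file): a tiny scheduling
# model with my_cumulative; durations, demands and the capacity 2 are made-up
# numbers.
def _my_cumulative_example():
    s = intvar(0, 10, shape=3)  # start times
    d = [3, 2, 4]               # durations
    r = [1, 1, 2]               # resource demands
    model = Model(my_cumulative(s, d, r, 2))
    if model.solve():
        print(s.value())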
def member_of(x, val):
"""
member_of(x, val)
Ensures that the value `val` is in the array `x`.
"""
n = len(x)
# cc = intvar(0,n)
# constraints = [count(x, val, cc), cc > 0]
constraints = [sum([x[i] == val for i in range(n)]) > 0]
return constraints
def regular(x, Q, S, d, q0, F):
"""
Global constraint regular
This is a translation of MiniZinc's regular constraint (defined in
    lib/zinc/globals.mzn), via the Comet code referred to above.
All comments are from the MiniZinc code.
'''
The sequence of values in array 'x' (which must all be in the range 1..S)
is accepted by the DFA of 'Q' states with input 1..S and transition
function 'd' (which maps (1..Q, 1..S) -> 0..Q)) and initial state 'q0'
(which must be in 1..Q) and accepting states 'F' (which all must be in
1..Q). We reserve state 0 to be an always failing state.
'''
x : IntVar array
Q : number of states
S : input_max
d : transition matrix
q0: initial state
F : accepting states
    Note: As mentioned above, the states must start at 1 since 0
    represents a failing state.
    Note: Compare with regular_table which uses the Table constraint
    instead of the Element constraint in the main loop.
"""
assert Q > 0, 'regular: "Q" must be greater than zero'
assert S > 0, 'regular: "S" must be greater than zero'
# d2 is the same as d, except we add one extra transition for
# each possible input; each extra transition is from state zero
# to state zero. This allows us to continue even if we hit a
# non-accepted input.
d2 = []
for i in range(Q + 1):
row = []
for j in range(S):
if i == 0:
row.append(0)
else:
row.append(d[i - 1][j])
d2.append(row)
d2_flatten = [d2[i][j] for i in range(Q + 1) for j in range(S)]
# If x has index set m..n, then a[m-1] holds the initial state
# (q0), and a[i+1] holds the state we're in after processing
# x[i]. If a[n] is in F, then we succeed (ie. accept the
# string).
x_range = list(range(0, len(x)))
m = 0
n = len(x)
a = [intvar(0, Q + 1) for i in range(m, n + 1)]
constraints = []
# Check that the final state is in F
constraints += [member_of(F,a[-1])]
# First state is q0
constraints += [a[m] == q0]
for i in x_range:
constraints += [x[i] >= 1]
constraints += [x[i] <= S]
# Determine a[i+1]: a[i+1] == d2[a[i], x[i]]
constraints += [
a[i + 1] == Element(d2_flatten,(a[i]) * S + (x[i] - 1))
]
return constraints
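# Illustrative sketch (added; not part of the original file): a DFA accepting
# sequences over {1,2} of the form 1*2* (all 1s before any 2), encoded with the
# regular constraint above.
def _regular_example():
    x = intvar(1, 2, shape=4)
    d = [[1, 2],   # state 1: input 1 -> state 1, input 2 -> state 2
         [0, 2]]   # state 2: input 1 -> fail (0), input 2 -> state 2
    model = Model(regular(x, 2, 2, d, 1, [1, 2]))
    if model.solve():
        print(x.value())  # e.g. [1 1 2 2]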
def regular_table(x, Q, S, d, q0, F):
"""
Global constraint regular_table
This is a translation of MiniZinc's regular constraint (defined in
    lib/zinc/globals.mzn), via the Comet code referred to above.
All comments are from the MiniZinc code.
'''
The sequence of values in array 'x' (which must all be in the range 1..S)
is accepted by the DFA of 'Q' states with input 1..S and transition
function 'd' (which maps (1..Q, 1..S) -> 0..Q)) and initial state 'q0'
(which must be in 1..Q) and accepting states 'F' (which all must be in
1..Q). We reserve state 0 to be an always failing state.
'''
x : IntVar array
Q : number of states
S : input_max
d : transition matrix
q0: initial state
F : accepting states
    Note: As mentioned above, the states must start at 1 since 0
    represents a failing state.
    The difference between this version (regular_table) and
    regular is that this version uses the Table constraint instead
    of the Element constraint.
"""
assert Q > 0, 'regular: "Q" must be greater than zero'
assert S > 0, 'regular: "S" must be greater than zero'
# d2 is the same as d, except we add one extra transition for
# each possible input; each extra transition is from state zero
# to state zero. This allows us to continue even if we hit a
# non-accepted input.
d2 = []
for i in range(Q + 1):
row = []
for j in range(S):
if i == 0:
# This is different from regular(.)
row.append((0,j,0))
else:
# This is different from regular(.)
row.append((i,j, d[i - 1][j]))
d2.append(row)
d2_flatten = [d2[i][j] for i in range(Q + 1) for j in range(S)]
# If x has index set m..n, then a[m-1] holds the initial state
# (q0), and a[i+1] holds the state we're in after processing
# x[i]. If a[n] is in F, then we succeed (ie. accept the
# string).
x_range = list(range(0, len(x)))
m = 0
n = len(x)
a = [intvar(0, Q + 1) for i in range(m, n + 1)]
constraints = []
# Check that the final state is in F
constraints += [member_of(F,a[-1])]
# First state is q0
constraints += [a[m] == q0]
x_lb, x_ub = get_min_max_domain(x)
for i in x_range:
constraints += [x[i] >= 1]
constraints += [x[i] <= S]
# Determine a[i+1]: a[i+1] == d2[a[i], x[i]]
xi1 = intvar(0,x_ub)
constraints += [
# These two constraints are different
# from regular(.)
xi1 == x[i]-1,
Table((a[i], xi1, a[i + 1]), d2_flatten)
]
return constraints
def lex_less(x,y):
"""
lex_less(x,y)
Ensures that the array 'x' is strictly lexicographically less than array 'y'.
Compares them from first to last element, regardless of indices
This is a port of MiniZinc's definition lex_less_int
https://github.com/MiniZinc/libminizinc/blob/master/share/minizinc/std/fzn_lex_less_int.mzn
Note that we simplify the calculation of lx and ly since cpmpy has start index 0 (in MiniZinc
the start index can be user defined).
"""
xlen = len(x)
ylen = len(y)
ux = xlen
uy = ylen
size = min([ux,uy])
# Do not name variables in global constraints
# since then the variables are not unique.
# b = boolvar(shape=size+1,name="b")
b = boolvar(shape=size+1)
constraints = []
constraints += [b[0] == 1 ]
for i in range(size):
constraints += [b[i] == ((x[i] <= y[i]) &
((x[i] < y[i]) | (b[i+1] == 1)) )]
constraints += [b[size] == (ux < uy)]
return constraints
def lex_greater(x,y):
"""
lex_greater(x,y)
Ensures that the array 'x' is strictly lexicographically greater than array 'y'.
Compares them from first to last element, regardless of indices.
    This constraint is defined by lex_less(y,x), defined above.
"""
return lex_less(y,x)
def lex2(x):
"""
lex2(x)
Ensures that the rows and columns in the matrix `x` are increasing,
using lex_less.
"""
x_t = x.transpose()
return [[lex_less(x[i],x[i+1]) for i in range(len(x)-1)],
[lex_less(x_t[i],x_t[i+1]) for i in range(len(x_t)-1)]]
#
# Somewhat general definition of knapsack.
#
def knapsack(values, weights, n):
"""
knapsack(values, weights, n)
Creates a model for the knapsack problem with the values, weights and limit n.
See knapsack.py for usage of this.
"""
z = intvar(0, 10000,name="z")
x = intvar(0,1,shape=len(values),name="x")
model = Model(
[
z >= 0,
z == sum(x*values),
sum(x*weights) <= n,
],
maximize=z
)
return [model, x, z]
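# Illustrative sketch (added; not part of the original file): solving a tiny
# instance with the knapsack helper above; the values/weights/limit are
# made-up numbers.
def _knapsack_example():
    values = [15, 10, 9, 5]
    weights = [1, 5, 3, 4]
    model, x, z = knapsack(values, weights, 8)
    if model.solve():
        print("picked:", x.value(), "total value:", z.value())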
def my_abs(x,y,d):
"""
A decomposition of abs() for experimentation.
"""
constraints = []
b = boolvar()
constraints += [b == (x >= y)]
constraints += [(b).implies(d == x - y)]
constraints += [(~b).implies(d == y - x)]
return constraints
def my_abs2(x,y):
"""
A decomposition of abs() for experimentation.
"""
constraints = []
b = boolvar()
d = intvar(0,1000000)
constraints += [b == (x >= y)]
constraints += [(b).implies(d == x - y)]
constraints += [(~b).implies(d == y - x)]
return d
def prod(x,res):
"""
prod(x,res)
res is the product of the values in x.
"""
return [reduce(lambda a, b: a * b, x) == res]
def prod1(x):
"""
prod1(x)
return the product of the values in x.
"""
return reduce(lambda a, b: a * b, x)
def among(m,x,v):
"""
among(m,x,v)
Requires exactly m variables in x to take one of the values in v.
"""
return [m == sum([x[i] == j for i in range(len(x)) for j in v])]
#
# Symmetry breaking
#
# From
# http://en.wikipedia.org/wiki/Fr#C3#A9nicle_standard_form
# """
# A magic square is in Frénicle standard form, named for
# <NAME>, if the following two conditions apply:
# - the element at position [1,1] (top left corner) is the smallest
# of the four corner elements; and
# - the element at position [1,2] (top edge, second from left) is
# smaller than the element in [2,1].
# """
#
def frenicle(x,n):
constraints = [x[(0,0)] == min([x[0,0], x[0,n-1], x[n-1,0], x[n-1,n-1]])]
constraints += [x[0,1] < x[1,0]]
return constraints
def distribute(card, value, base):
"""
distribute(card, value, base)
    Requires that 'card[i]' is the number of occurrences of 'value[i]' in 'base'.
Note: card, value, and base are assumed to be intvar arrays.
"""
card_len = len(card)
value_len = len(value)
assert card_len == value_len, "`card` and `value` must have the same length"
base_len = len(base)
constraints = []
constraints += [AllDifferent(value)]
for i in range(card_len):
constraints += [
card[i] == sum([value[i] == base[j] for j in range(base_len)])
]
return constraints
def fill_array(x,x_val):
"""
fill_array(x,x_val)
If x_val[i] != None then x[i] == x_val[i].
"""
constraints = []
for i in range(len(x)):
        if x_val[i] is not None:
constraints += [x[i] == x_val[i]]
return constraints
def all_different_pairs(a, s):
"""
all_different_pairs(a, s)
all pairs must be different
"""
return [AllDifferent([p for p in pairs(a,s)])]
def increasing_pairs(a, s):
"""
increasing_pairs(a, s)
Ensure that the pairs are in increasing order.
"""
return [increasing(pairs(a,s))]
def decreasing_pairs(a, s):
"""
decreasing_pairs(a, s)
Ensure that the pairs are in decreasing order.
"""
return [decreasing(pairs(a,s))]
def pairs(a, s):
"""
    return the pairs of a in the 'integer representation': a[k,0]*(s-1) + a[k,1]
    s is the maximum value, used as the base of the integer representation
"""
n = len(a)
return [ a[(k,0)]*(s-1) + a[(k,1)] for k in range(n)]
def all_min_dist(min_dist, x, n):
"""
all_min_dist(min_dist, x, n)
Ensures that the differences of all pairs (i !=j) are >= min_dist.
"""
constraints = []
for i in range(n):
for j in range(i):
constraints += [abs(x[i]-x[j]) >= min_dist] # Nope!
return constraints
def all_different_on_intersection(x, y):
"""
all_different_on_intersection(x, y)
Ensure that the values that are common in x and y are distinct (in each array).
"""
return [count_a_in_b(x,y), count_a_in_b(y,x)]
def count_a_in_b(ass,bss):
"""
count_a_in_b(ass,bss)
helper for all_different_on_intersection
"""
constraints = []
for a in ass:
constraints += [sum([a == b for b in bss]) <= 1]
return constraints
def all_different_modulo(x, m):
"""
all_different_modulo(x, m)
Ensure that all elements in x (modulo m) are distinct
"""
print("x2:",x)
n = len(x)
constraints = []
mods = intvar(0,m-1,shape=n)
for i in range(n):
constraints += [mods[i] == x[i] % m]
constraints += [AllDifferent(mods)]
return constraints
def all_different_cst(xs, cst):
"""
all_different_cst(xs, cst)
Ensure that all elements in xs + cst are distinct
"""
return [AllDifferent([(x + c) for (x,c) in zip(xs,cst)])]
def arith(x, relop, val):
"""
arith(x, relop, val)
Ensure that all elements in x are <relop> val.
"""
constraints = []
for i in range(len(x)):
constraints += [arith_relop(x[i],relop, val)]
return constraints
def arith_relop(a, t, b):
"""
arith_relop(a, t, b)
This is (arguably) a hack.
Represents each function as an integer 0..5.
"""
return [(t == 0).implies(a < b),
(t == 1).implies(a <= b),
(t == 2).implies(a == b),
(t == 3).implies(a >= b),
(t == 4).implies(a > b),
(t == 5).implies(a != b)
]
#
# diffn ported from MiniZinc's fzn_diffn:
#
def diffn(x,y,dx,dy):
"""
diffn(x,y,dx,dy)
Constrains rectangles i, given by their origins x[i], y[i])
and sizes (dx[i], dy[i]), to be non-overlapping. Zero-width
    rectangles still must not overlap with any other rectangle.
"""
n = len(x)
constraints = []
for i in range(n):
for j in range(i+1,n):
constraints += [(x[i] + dx[i] <= x[j]) |
(y[i] + dy[i] <= y[j]) |
(x[j] + dx[j] <= x[i]) |
(y[j] + dy[j] <= y[i])
]
return constraints
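# Illustrative sketch (added; not part of the original file): placing three
# rectangles inside a 4x4 area without overlap using diffn.
def _diffn_example():
    n = 3
    x = intvar(0, 4, shape=n)
    y = intvar(0, 4, shape=n)
    dx = [2, 1, 2]
    dy = [1, 2, 2]
    model = Model(diffn(x, y, dx, dy),
                  [x[i] + dx[i] <= 4 for i in range(n)],
                  [y[i] + dy[i] <= 4 for i in range(n)])
    if model.solve():
        print(x.value(), y.value())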
def nvalue(m, x):
"""
nvalue(m, x)
    Requires that there are exactly m distinct values in x
(min_val and max_val are the minimum and maximum value
in x, respectively)
"""
n = len(x)
min_val = min([x[i].lb for i in range(n)])
max_val = max([x[i].ub for i in range(n)])
return (m == sum([ sum([ x[j] == i for j in range(n)]) > 0 for i in range(min_val, max_val+1)]))
#
# nvalues(x,op,n)
#
# Requires that the number of distinct values in the array x is
# op n
# where
# op is one of the relation codes used by arith_relop:
# 0: <, 1: <=, 2: ==, 3: >=, 4: >, 5: !=
#
def nvalues(x, op, n):
xlen = len(x)
m = intvar(1,xlen)
return [nvalue(m,x),
arith_relop(m,op,n)
]
def clique(g, clique, card):
"""
clique(g, clique, card)
Ensure that the boolean array 'clique' (of Integer Array type)
represents a clique in the graph g with the cardinality card.
    Note: the formulation works in reverse: if there is no edge between
    nodes i and j (i != j) in g, then nodes i and j cannot both be
    in the clique (i.e. at least one of c1, c2 is 0).
"""
n = len(g)
constraints = []
constraints += [card == sum([clique[i] for i in range(n)])]
for (c1,i) in zip(clique, range(n)):
for (c2,j) in zip(clique, range(n)):
if i != j and g[i][j] == 0:
constraints += [(c1 == 0) | (c2 == 0)]
return constraints
def assignment_model(cost, tasks=None,people=None,print_solution=None,opt="min"):
"""
    assignment_model(cost, tasks=None, people=None, print_solution=None, opt='min')
Fairly general implementation of the assignment problem:
Minimize total cost of assign all task to one person given
the cost of assigning a person to the tasks.
    For problems where 'tasks' and 'people' do not apply, a user-defined
method 'print_solution' can be used.
For maximization problems, use opt='max'.
"""
rows = len(cost)
cols = len(cost[0])
max_cost = np.sum(np.array(cost))
total_cost = intvar(0,max_cost,name='cost')
x = boolvar(shape=(rows,cols),name="x")
model = Model(
total_cost >= 0,
total_cost == np.sum([ x_row*cost_row for (x_row, cost_row) in zip(x, cost)]),
        # exactly one assignment per row, all rows (tasks) must be assigned.
[sum(row) == 1 for row in x],
# zero or one assignments per column (people)
[sum(col) <= 1 for col in x.transpose()],
)
if opt == "max":
model.maximize(total_cost)
else:
model.minimize(total_cost)
ss = CPM_ortools(model)
if ss.solve():
print("total_cost: ", total_cost.value())
print("x:")
print(x.value())
print()
if tasks == None and people == None:
for i in range(rows):
print("Task", i, end="")
for j in range(cols):
if x[i][j].value() == 1:
print(" is done by ", j)
print()
else:
if print_solution != None:
print_solution(x.value(),tasks,people)
else:
for i in range(rows):
print("Task", tasks[i], end="")
for j in range(cols):
if x[i][j].value() == 1:
print(" is done by", people[j])
print()
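# Illustrative sketch (added; not part of the original file): a 3x3 cost matrix
# solved with assignment_model; the costs are made-up numbers and the default
# printing inside assignment_model is used.
def _assignment_model_example():
    cost = [[4, 1, 3],
            [2, 0, 5],
            [3, 2, 2]]
    assignment_model(cost)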
def latin_square(x):
"""
latin_square(x)
The matrix x is a Latin square.
"""
return [[AllDifferent(row) for row in x],
[AllDifferent(col) for col in x.transpose()]]
#
# reverses an array from -> to
#
def reverse(xfrom, xto):
"""
reverse(xfrom, xto)
xto is reverse of xfrom.
"""
n = len(xfrom)
return [xto[i] == xfrom[n-i-1] for i in range(n)]
def print_model_and_variables(model):
"""
print_model_and_variables(model)
Prints the following:
- the unflattened model (via print(model))
- the flattened model
- the variables and the domains in the flattened model
(From <NAME> when he debugged one of my models. Thanks, Tias!)
"""
print("Model:")
print(model)
print("\nFlattened model and variables:")
mf = flatten_model(model)
print_variables(mf)
print(mf)
print()
def argmax(x,p):
"""
argmax(x,p)
Ensure that p is the argmax, i.e. the position of the maximum value
in x.
Note: If there are many maximum values then argmax(x,p) will find
all these values.
"""
n = len(x)
constraints = []
for i in range(n):
constraints += [(p != i).implies(x[p] > x[i]) ]
return constraints
def argmin(x,p):
"""
argmin(x,p)
Ensure that p is the argmin, i.e. the position of the minimum value
in x.
Note: If there are many minimum values then argmin(x,p) will find
all these values.
"""
n = len(x)
constraints = []
for i in range(n):
constraints += [(p != i).implies(x[p] < x[i]) ]
return constraints
def argmin_except_c(x,p,c):
"""
argmin_except_c(x,p,c)
Ensure that p is the argmin, i.e. the position of the minimum value
in x, but ignores any value of c.
Note:
- If there are many minimum values then argmin_except_c(x,p,c) will find
all these values.
- We assume that there are at least one value != c.
"""
n = len(x)
constraints = [x[p] != c]
for i in range(n):
constraints += [(p != i).implies((x[i] == c) | (x[p] < x[i])) ]
return constraints
def argmin_except_0(x,p):
"""
argmin_except_0(x,p)
Ensure that p is the argmin, i.e. the position of the minimum value
in x, but ignores any value of 0.
Note:
- If there are many minimum values then argmin_except_0(x,p) will find
all these values.
- We assume that there are at least one value > 0.
"""
return argmin_except_c(x,p,0)
def argmax_except_c(x,p,c):
"""
argmax_except_c(x,p,c)
    Ensure that p is the argmax, i.e. the position of the maximum value
in x, but ignores any value of c.
Note:
- If there are many maximum values then argmax_except_c(x,p,c) will find
all these values.
- We assume that there are at least one value != c.
"""
n = len(x)
constraints = [x[p] != c]
for i in range(n):
constraints += [(p != i).implies((x[i] == c) | (x[p] > x[i])) ]
return constraints
def permutation3(x,p,y):
"""
permutation(x,p,y)
Ensure that the array y is a permutation of array x with the permutation
operations in array p.
Example:
x = [2,0,1,3]
p = [2,1,3,0]
What is y?
y[0] = x[p[0]] = x[2] = 1
y[1] = x[p[1]] = x[1] = 0
y[2] = x[p[2]] = x[3] = 3
y[3] = x[p[3]] = x[0] = 2
Thus:
y = [1,0,3,2]
Assumptions:
- We assume that x, p, and y has distinct values, i.e. constrained by
AllDifferent.
We check that:
- p has the domain of 0..len(p)-1
"""
n = len(x)
assert n == len(p) and n == len(y), f"Length of x, p, and y must be the same"
p_lb, p_ub = get_min_max_domain(p)
assert p_lb == 0 and p_ub == n-1, "Domain value of p must be 0..n-1"
constraints = []
for i in range(n):
constraints += [y[i] == x[p[i]] ]
return constraints
def permutation(x,y):
"""
permutation(x,y)
Ensure that the array y is a permutation of array x,
connected with some unknown permutation.
permutation3(x,p,y) is used (which see).
"""
n = len(x)
p = intvar(0,n-1,shape=n)
return permutation3(x,p,y)
def get_min_max_domain(x):
"""
get_min_max_domain(x)
Return the minimum and maximum domain of an array x.
"""
n = len(x)
x_lb = min([x[i].lb for i in range(n)])
x_ub = max([x[i].ub for i in range(n)])
return [x_lb,x_ub]
def chain(op,x):
"""
chain(op,x)
Ensure that all elements pairwise satisfies the binary operator op.
Note: In order for this to work the operator must be from the
operator library, e.g. operator.lt, operator.ne, e.g:
chain(operator.lt,x)
Note: Many of the binary operator.* has a definition already, e.g.
(from cpmpy_hakank.py):
increasing, increasing_strict, decreasing, descreasing_strict
and
AllDifferent, AllEqual
"""
n = len(x)
constraints = []
for i in range(1,n):
constraints += [ op(x[i-1], x[i]) ]
return constraints
def minimum_except_c(x,min_val,c,allow_all_c=False):
"""
minimum_except_c(x,min_val,c,allow_all_c)
Ensures that min_val is the minimum value in array x, ignoring the value of c.
The flag allow_all_c:
- If True: allow an array with only c values: min_val is thus c.
- If False: assume that there is at least one non c value. min_val must be != c.
"""
n = len(x)
ix = intvar(0,n-1)
# Ensure that min_val is in x
constraints = [min_val == x[ix]]
for j in range(n):
        constraints += [(min_val <= x[j]) | (x[j] == c)]
if allow_all_c:
max_val = max(x) # To be able to handle the case when there is only 0s
constraints += [(max_val == c)==(min_val == c)]
else:
constraints += [min_val != c]
return constraints
def minimum_except_0(x,min_val,allow_all_0s=False):
"""
minimum_except_0(x,min_val,allow_all_0s)
Ensures that min_val is the minimum value in array x, ignoring 0s.
The flag allow_all_0s:
- If True: allow an array with only 0 values: min_val is thus 0.
- If False: assume that there is at least one non 0 value. min_val must be != 0.
"""
    return minimum_except_c(x,min_val,0,allow_all_0s)
def value_precede(s,t,x):
"""
value_precede(s,t, x)
Ensures that the (first occurrence) of the value s precedes
the (first occurrence) of the value t in array x if both
s and t are in x.
This means that for t to occur in x then s has to precede t.
This definition is inspired by MiniZinc's definition
value_precede.mzn
"""
n = len(x)
bs = boolvar(shape=n+1)
constraints = []
for i in range(n):
xis = boolvar()
constraints += [(xis ==1)==(x[i] == s),
(xis ==1).implies(bs[i+1]==1),
(xis == 0).implies(bs[i]==bs[i+1]),
(bs[i] == 0).implies(x[i] != t)
]
constraints += [bs[0] == 0]
return constraints
def value_precede_chain(c,x):
"""
value_precede_chain(c, x)
    Ensures that the value c[i-1] precedes the value c[i] in the array x
if both c[i-1] and c[i] are in x.
See value_precede().
"""
n=len(c)
constraints = []
for i in range(1,n):
constraints += [value_precede(c[i-1],c[i],x)]
return constraints
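# Illustrative sketch (added; not part of the original file): value_precede_chain
# as a common symmetry-breaking device, forcing value 0 to appear before 1, and
# 1 before 2, in x.
def _value_precede_chain_example():
    x = intvar(0, 2, shape=5)
    model = Model(value_precede_chain([0, 1, 2], x))
    if model.solve():
        print(x.value())  # e.g. [0 0 1 2 1]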
def sliding_sum(low, up, seq, x):
"""
sliding_sum(low, up, seq, x)
Ensure that all sequences of length seq in x sums to between low and up.
"""
vlen = len(x)
constraints = []
for i in range(vlen-seq+1):
s = intvar(low,up)
constraints += [s == sum([x[j] for j in range(i,i+seq)])]
return constraints
def no_overlap(s1, d1, s2, d2):
"""
no_overlap(s1, d1, s2, d2)
Ensures that task 1 (start time s1 with duration d1) does not overlap with
task2 (start time s2 with duration d2)
"""
return [(s1 + d1 <= s2) | (s2 + d2 <= s1)]
def is_prime(n):
"""
is_prime(n)
Returns True if the number n is a prime number, otherwise return False.
"""
if n < 2: return False
if n == 2: return True
if not n & 1:
return False
for i in range(3, 1+int(math.sqrt(n)), 2):
if n % i == 0:
return False
return True
def primes(limit):
"""
primes(limit)
Returns the prime numbers below limit.
"""
primes = [2]
i = 3
for i in range(3, limit, 2):
if is_prime(i):
primes.append(i)
return primes
def all_different_reif(x,b):
"""
all_different_reif(x,b)
b == 1 if all values in x are different, else 0.
"""
n = len(x)
m = intvar(1,n)
return [nvalue(m,x),
(m==n)==(b==1)
]
def all_different_reif_m(model,x):
"""
all_different_reif(x,b)
b == 1 if all values in x are different, else 0.
This version returns b.
Note that the model is a parameter so it must be
created first:
x = intvar(...)
b = boolvar()
model = Model(...)
model += [b == all_different_reif_m(model,x)]
"""
n = len(x)
m = intvar(1,n)
b = boolvar()
model += [nvalue(m,x),
(m==n)==(b==1)]
return b
def lex_chain_less(x):
"""
lex_chain_less(x)
Require that all the rows are lexicographically sorted
(but not the columns as in lex2).
See: http://www.emn.fr/z-info/sdemasse/gccat/Clex_chain_less.html
"""
n = len(x)
m = len(x[0])
constraints = []
for i in range(1,n):
constraints += [lex_less([x[i-1,j] for j in range(m)], [x[i,j] for j in range(m)])]
return constraints
def soft_alldifferent(x,p):
"""
soft_alldifferent(x,p)
p is the number of pairs that have the same value.
See http://www.emn.fr/z-info/sdemasse/gccat/Csoft_alldifferent_ctr.html
"""
n = len(x)
return [p == sum([x[i] == x[j] for i in range(n) for j in range(i+1,n)])]
def among_seq(low,high,seqlen,x,v):
"""
among_seq(low, high, seqlen, x, v)
    Ensures that every sequence of length seqlen in the list x
    contains at least low and at most high occurrences of the values in v.
"""
n = len(x)
size = n-seqlen+1
constraints = []
for i in range(size):
seq = [x[j] for j in range(i,i+seqlen)]
constraints += [among_range(low, high, seq, v)]
return constraints
def among_range(low, high,x,v):
"""
among_range(low, high, x, v)
    Ensures that the list x contains at least low and at most high
occurrences of v.
Used by among_seq.
"""
xs = intvar(0,len(x))
vlen = len(v)
return [
xs == sum([sum([el == v[i] for i in range(vlen)])>0 for el in x]),
xs >= low,
xs <= high]
def sequence(x,seq_length, lbound,ubound):
"""
    sequence(x, seq_length, lbound, ubound)
    Ensures that the sum of every subsequence of length seq_length
    in array x is between lbound and ubound.
"""
n = len(x)
xs = intvar(lbound.lb,ubound.ub)
constraints = []
for i in range(n-seq_length+1):
constraints += [xs == sum([x[j] for j in range(i,i+seq_length)]),
xs >= lbound,
xs <= ubound
]
return constraints
|
VulnerableScan/migrations/0005_vulnerablescantasks_notice.py | b0bac/ApolloScanner | 289 | 12746834 | # Generated by Django 4.0.1 on 2022-03-14 10:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('VulnerableScan', '0004_remove_exploitregister_file_object_and_more'),
]
operations = [
migrations.AddField(
model_name='vulnerablescantasks',
name='notice',
field=models.BooleanField(db_column='notice', default=False, verbose_name='是否钉钉通知'),
),
]
|
Codes/Python32/Lib/test/subprocessdata/sigchild_ignore.py | eyantra/FireBird_Swiss_Knife | 12,496 | 12746836 | import signal, subprocess, sys
# On Linux this causes os.waitpid to fail with OSError as the OS has already
# reaped our child process. The wait() passing the OSError on to the caller
# and causing us to exit with an error is what we are testing against.
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
subprocess.Popen([sys.executable, '-c', 'print("albatross")']).wait()
|
dvc/command/remove.py | lucasalavapena/dvc | 9,136 | 12746862 | <reponame>lucasalavapena/dvc<filename>dvc/command/remove.py
import argparse
import logging
from dvc.command import completion
from dvc.command.base import CmdBase, append_doc_link
from dvc.exceptions import DvcException
logger = logging.getLogger(__name__)
class CmdRemove(CmdBase):
def run(self):
for target in self.args.targets:
try:
self.repo.remove(target, outs=self.args.outs)
except DvcException:
logger.exception("")
return 1
return 0
def add_parser(subparsers, parent_parser):
REMOVE_HELP = (
"Remove stages from dvc.yaml and/or"
" stop tracking files or directories."
)
remove_parser = subparsers.add_parser(
"remove",
parents=[parent_parser],
description=append_doc_link(REMOVE_HELP, "remove"),
help=REMOVE_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
remove_parser.add_argument(
"--outs",
action="store_true",
default=False,
help="Remove outputs as well.",
)
remove_parser.add_argument(
"targets",
nargs="+",
help=".dvc files or stages from dvc.yaml to remove.",
).complete = completion.DVC_FILE
remove_parser.set_defaults(func=CmdRemove)
|
models/tacotron.py | dieserRobin/ForwardTacotron | 556 | 12746866 | from pathlib import Path
from typing import Union, Dict, Any, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.common_layers import CBHG
from utils.text.symbols import phonemes
class Encoder(nn.Module):
def __init__(self, embed_dims, num_chars, cbhg_channels, K, num_highways, dropout):
super().__init__()
self.embedding = nn.Embedding(num_chars, embed_dims)
self.pre_net = PreNet(embed_dims)
self.cbhg = CBHG(K=K, in_channels=cbhg_channels, channels=cbhg_channels,
proj_channels=[cbhg_channels, cbhg_channels],
num_highways=num_highways)
def forward(self, x):
x = self.embedding(x)
x = self.pre_net(x)
x.transpose_(1, 2)
x = self.cbhg(x)
return x
class PreNet(nn.Module):
def __init__(self, in_dims, fc1_dims=256, fc2_dims=128, dropout=0.5):
super().__init__()
self.fc1 = nn.Linear(in_dims, fc1_dims)
self.fc2 = nn.Linear(fc1_dims, fc2_dims)
self.p = dropout
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = F.dropout(x, self.p, training=self.training)
x = self.fc2(x)
x = F.relu(x)
x = F.dropout(x, self.p, training=self.training)
return x
class Attention(nn.Module):
def __init__(self, attn_dims):
super().__init__()
self.W = nn.Linear(attn_dims, attn_dims, bias=False)
self.v = nn.Linear(attn_dims, 1, bias=False)
def forward(self, encoder_seq_proj, query, t):
# print(encoder_seq_proj.shape)
# Transform the query vector
query_proj = self.W(query).unsqueeze(1)
# Compute the scores
u = self.v(torch.tanh(encoder_seq_proj + query_proj))
scores = F.softmax(u, dim=1)
return scores.transpose(1, 2)
class LSA(nn.Module):
def __init__(self, attn_dim, kernel_size=31, filters=32):
super().__init__()
self.conv = nn.Conv1d(2, filters, padding=(kernel_size - 1) // 2, kernel_size=kernel_size, bias=False)
self.L = nn.Linear(filters, attn_dim, bias=True)
self.W = nn.Linear(attn_dim, attn_dim, bias=True)
self.v = nn.Linear(attn_dim, 1, bias=False)
self.cumulative = None
self.attention = None
def init_attention(self, encoder_seq_proj):
device = next(self.parameters()).device # use same device as parameters
b, t, c = encoder_seq_proj.size()
self.cumulative = torch.zeros(b, t, device=device)
self.attention = torch.zeros(b, t, device=device)
def forward(self, encoder_seq_proj, query, t):
if t == 0: self.init_attention(encoder_seq_proj)
processed_query = self.W(query).unsqueeze(1)
location = torch.cat([self.cumulative.unsqueeze(1), self.attention.unsqueeze(1)], dim=1)
processed_loc = self.L(self.conv(location).transpose(1, 2))
u = self.v(torch.tanh(processed_query + encoder_seq_proj + processed_loc))
u = u.squeeze(-1)
# Smooth Attention
#scores = torch.sigmoid(u) / torch.sigmoid(u).sum(dim=1, keepdim=True)
scores = F.softmax(u, dim=1)
self.attention = scores
self.cumulative += self.attention
return scores.unsqueeze(-1).transpose(1, 2)
class Decoder(nn.Module):
# Class variable because its value doesn't change between classes
    # yet ought to be scoped by class because it's a property of a Decoder
max_r = 20
def __init__(self, n_mels, decoder_dims, lstm_dims):
super().__init__()
self.register_buffer('r', torch.tensor(1, dtype=torch.int))
self.n_mels = n_mels
self.prenet = PreNet(n_mels)
self.attn_net = LSA(decoder_dims)
self.attn_rnn = nn.GRUCell(decoder_dims + decoder_dims // 2, decoder_dims)
self.rnn_input = nn.Linear(2 * decoder_dims, lstm_dims)
self.res_rnn1 = nn.LSTMCell(lstm_dims, lstm_dims)
self.res_rnn2 = nn.LSTMCell(lstm_dims, lstm_dims)
self.mel_proj = nn.Linear(lstm_dims, n_mels * self.max_r, bias=False)
def zoneout(self, prev, current, p=0.1):
device = next(self.parameters()).device # Use same device as parameters
mask = torch.zeros(prev.size(), device=device).bernoulli_(p)
return prev * mask + current * (1 - mask)
def forward(self, encoder_seq, encoder_seq_proj, prenet_in,
hidden_states, cell_states, context_vec, t):
# Need this for reshaping mels
batch_size = encoder_seq.size(0)
# Unpack the hidden and cell states
attn_hidden, rnn1_hidden, rnn2_hidden = hidden_states
rnn1_cell, rnn2_cell = cell_states
# PreNet for the Attention RNN
prenet_out = self.prenet(prenet_in)
# Compute the Attention RNN hidden state
attn_rnn_in = torch.cat([context_vec, prenet_out], dim=-1)
attn_hidden = self.attn_rnn(attn_rnn_in.squeeze(1), attn_hidden)
# Compute the attention scores
scores = self.attn_net(encoder_seq_proj, attn_hidden, t)
# Dot product to create the context vector
context_vec = scores @ encoder_seq
context_vec = context_vec.squeeze(1)
# Concat Attention RNN output w. Context Vector & project
x = torch.cat([context_vec, attn_hidden], dim=1)
x = self.rnn_input(x)
# Compute first Residual RNN
rnn1_hidden_next, rnn1_cell = self.res_rnn1(x, (rnn1_hidden, rnn1_cell))
if self.training:
rnn1_hidden = self.zoneout(rnn1_hidden, rnn1_hidden_next)
else:
rnn1_hidden = rnn1_hidden_next
x = x + rnn1_hidden
# Compute second Residual RNN
rnn2_hidden_next, rnn2_cell = self.res_rnn2(x, (rnn2_hidden, rnn2_cell))
if self.training:
rnn2_hidden = self.zoneout(rnn2_hidden, rnn2_hidden_next)
else:
rnn2_hidden = rnn2_hidden_next
x = x + rnn2_hidden
# Project Mels
mels = self.mel_proj(x)
mels = mels.view(batch_size, self.n_mels, self.max_r)[:, :, :self.r]
hidden_states = (attn_hidden, rnn1_hidden, rnn2_hidden)
cell_states = (rnn1_cell, rnn2_cell)
return mels, scores, hidden_states, cell_states, context_vec
class Tacotron(nn.Module):
def __init__(self,
embed_dims: int,
num_chars: int,
encoder_dims: int,
decoder_dims: int,
n_mels: int,
postnet_dims: int,
encoder_k: int,
lstm_dims: int,
postnet_k: int,
num_highways: int,
dropout: float,
stop_threshold: float) -> None:
super().__init__()
self.n_mels = n_mels
self.lstm_dims = lstm_dims
self.decoder_dims = decoder_dims
self.encoder = Encoder(embed_dims, num_chars, encoder_dims,
encoder_k, num_highways, dropout)
self.encoder_proj = nn.Linear(decoder_dims, decoder_dims, bias=False)
self.decoder = Decoder(n_mels, decoder_dims, lstm_dims)
self.postnet = CBHG(postnet_k, n_mels, postnet_dims, [256, 80], num_highways)
self.post_proj = nn.Linear(postnet_dims * 2, n_mels, bias=False)
self.init_model()
self.register_buffer('step', torch.zeros(1, dtype=torch.long))
self.register_buffer('stop_threshold', torch.tensor(stop_threshold, dtype=torch.float32))
@property
def r(self) -> int:
return self.decoder.r.item()
@r.setter
def r(self, value: int) -> None:
self.decoder.r = self.decoder.r.new_tensor(value, requires_grad=False)
def forward(self, x: torch.tensor, m: torch.tensor) -> torch.tensor:
device = next(self.parameters()).device # use same device as parameters
if self.training:
self.step += 1
batch_size, _, steps = m.size()
# Initialise all hidden states and pack into tuple
attn_hidden = torch.zeros(batch_size, self.decoder_dims, device=device)
rnn1_hidden = torch.zeros(batch_size, self.lstm_dims, device=device)
rnn2_hidden = torch.zeros(batch_size, self.lstm_dims, device=device)
hidden_states = (attn_hidden, rnn1_hidden, rnn2_hidden)
# Initialise all lstm cell states and pack into tuple
rnn1_cell = torch.zeros(batch_size, self.lstm_dims, device=device)
rnn2_cell = torch.zeros(batch_size, self.lstm_dims, device=device)
cell_states = (rnn1_cell, rnn2_cell)
# <GO> Frame for start of decoder loop
go_frame = torch.zeros(batch_size, self.n_mels, device=device)
# Need an initial context vector
context_vec = torch.zeros(batch_size, self.decoder_dims, device=device)
# Project the encoder outputs to avoid
# unnecessary matmuls in the decoder loop
encoder_seq = self.encoder(x)
encoder_seq_proj = self.encoder_proj(encoder_seq)
# Need a couple of lists for outputs
mel_outputs, attn_scores = [], []
# Run the decoder loop
for t in range(0, steps, self.r):
prenet_in = m[:, :, t - 1] if t > 0 else go_frame
mel_frames, scores, hidden_states, cell_states, context_vec = \
self.decoder(encoder_seq, encoder_seq_proj, prenet_in,
hidden_states, cell_states, context_vec, t)
mel_outputs.append(mel_frames)
attn_scores.append(scores)
# Concat the mel outputs into sequence
mel_outputs = torch.cat(mel_outputs, dim=2)
# Post-Process for Linear Spectrograms
postnet_out = self.postnet(mel_outputs)
linear = self.post_proj(postnet_out)
linear = linear.transpose(1, 2)
# For easy visualisation
attn_scores = torch.cat(attn_scores, 1)
# attn_scores = attn_scores.cpu().data.numpy()
return mel_outputs, linear, attn_scores
def generate(self, x: torch.tensor, steps=2000) -> Tuple[torch.tensor, torch.tensor, torch.tensor]:
self.eval()
device = next(self.parameters()).device # use same device as parameters
batch_size = 1
        # Need to initialise all hidden states and pack into tuple for tidiness
attn_hidden = torch.zeros(batch_size, self.decoder_dims, device=device)
rnn1_hidden = torch.zeros(batch_size, self.lstm_dims, device=device)
rnn2_hidden = torch.zeros(batch_size, self.lstm_dims, device=device)
hidden_states = (attn_hidden, rnn1_hidden, rnn2_hidden)
# Need to initialise all lstm cell states and pack into tuple for tidyness
rnn1_cell = torch.zeros(batch_size, self.lstm_dims, device=device)
rnn2_cell = torch.zeros(batch_size, self.lstm_dims, device=device)
cell_states = (rnn1_cell, rnn2_cell)
# Need a <GO> Frame for start of decoder loop
go_frame = torch.zeros(batch_size, self.n_mels, device=device)
# Need an initial context vector
context_vec = torch.zeros(batch_size, self.decoder_dims, device=device)
# Project the encoder outputs to avoid
# unnecessary matmuls in the decoder loop
encoder_seq = self.encoder(x)
encoder_seq_proj = self.encoder_proj(encoder_seq)
# Need a couple of lists for outputs
mel_outputs, attn_scores = [], []
# Run the decoder loop
for t in range(0, steps, self.r):
prenet_in = mel_outputs[-1][:, :, -1] if t > 0 else go_frame
mel_frames, scores, hidden_states, cell_states, context_vec = \
self.decoder(encoder_seq, encoder_seq_proj, prenet_in,
hidden_states, cell_states, context_vec, t)
mel_outputs.append(mel_frames)
attn_scores.append(scores)
# Stop the loop if silent frames present
if (mel_frames < self.stop_threshold).all() and t > 10: break
# Concat the mel outputs into sequence
mel_outputs = torch.cat(mel_outputs, dim=2)
# Post-Process for Linear Spectrograms
postnet_out = self.postnet(mel_outputs)
linear = self.post_proj(postnet_out)
linear = linear.transpose(1, 2)[0].cpu().data.numpy()
mel_outputs = mel_outputs[0].cpu().data.numpy()
# For easy visualisation
attn_scores = torch.cat(attn_scores, 1)
attn_scores = attn_scores.cpu().data.numpy()[0]
self.train()
return mel_outputs, linear, attn_scores
def init_model(self):
for p in self.parameters():
if p.dim() > 1: nn.init.xavier_uniform_(p)
def get_step(self):
return self.step.data.item()
def reset_step(self):
# assignment to parameters or buffers is overloaded, updates internal dict entry
self.step = self.step.data.new_tensor(1)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> 'Tacotron':
model_config = config['tacotron']['model']
model_config['num_chars'] = len(phonemes)
model_config['n_mels'] = config['dsp']['num_mels']
return Tacotron(**model_config)
@classmethod
def from_checkpoint(cls, path: Union[Path, str]) -> 'Tacotron':
checkpoint = torch.load(path, map_location=torch.device('cpu'))
model = Tacotron.from_config(checkpoint['config'])
model.load_state_dict(checkpoint['model'])
return model |
coding_assignments/solutions/assignment_helper.py | evelynmitchell/qml-mooc | 200 | 12746871 | <reponame>evelynmitchell/qml-mooc
import itertools
import numpy as np
import scipy
import socket
import subprocess
import time
def get_free_port():
sock = socket.socket()
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.close()
return port
def init_qvm_and_quilc(qvm_executable="qvm", quilc_executable="quilc"):
qvm_port = get_free_port()
quilc_port = get_free_port()
qvm_server = subprocess.Popen([qvm_executable, "-S", "-p", str(qvm_port)])
quilc_server = subprocess.Popen([quilc_executable, "-R", "-p", str(quilc_port)])
fc = ForestConnection(sync_endpoint='http://127.0.0.1:' + str(qvm_port),
compiler_endpoint='tcp://127.0.0.1:' + str(quilc_port))
time.sleep(5)
return qvm_server, quilc_server, fc
def get_amplitudes(circuit):
if isinstance(circuit, qiskit.circuit.quantumcircuit.QuantumCircuit):
backend = Aer.get_backend('statevector_simulator')
job = execute(circuit, backend)
amplitudes = job.result().get_statevector(circuit)
elif isinstance(circuit, pyquil.quil.Program):
wf_sim = WavefunctionSimulator(connection=fc)
wavefunction = wf_sim.wavefunction(circuit)
amplitudes = wavefunction.amplitudes
else:
raise ValueError("Unknown circuit type")
return amplitudes
def get_counts(circuit, num_shots=100):
if isinstance(circuit, qiskit.circuit.quantumcircuit.QuantumCircuit):
backend = Aer.get_backend('qasm_simulator')
job = execute(circuit, backend, shots=num_shots)
result = job.result()
counts = result.get_counts(circuit)
elif isinstance(circuit, pyquil.quil.Program):
n_qubits = len(circuit.get_qubits())
circuit.wrap_in_numshots_loop(num_shots)
qc = get_qc(str(n_qubits) + 'q-qvm', connection=fc)
executable = qc.compile(circuit)
result = qc.run(executable)
classical_bits = get_classical_bits(circuit)
counts = {}
for bitstring in itertools.product(*[{1, 0} for _ in range(classical_bits)]):
key = "".join(str(i) for i in bitstring)
value = sum([tuple(d.tolist()) == bitstring for d in result])
counts[key] = value
else:
raise ValueError("Unknown circuit type")
return counts
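# --- Illustrative usage (editor's addition, not part of the original helper).
# A rough sketch of how these helpers are typically called from a notebook,
# assuming the Qiskit branch of the imports at the bottom of this file has
# succeeded; the circuit below is an arbitrary example.
#
#   qc = QuantumCircuit(1, 1)
#   qc.h(0)
#   get_amplitudes(qc)              # statevector amplitudes of |+>
#   qc.measure(0, 0)
#   get_counts(qc, num_shots=100)   # e.g. {'0': 52, '1': 48}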
def get_single_measurement_counts(circuit, num_shots=100):
if isinstance(circuit, qiskit.circuit.quantumcircuit.QuantumCircuit):
backend = Aer.get_backend('qasm_simulator')
job = execute(circuit, backend, shots=num_shots)
result = job.result()
counts = result.get_counts(circuit)
elif isinstance(circuit, pyquil.quil.Program):
n_qubits = len(circuit.get_qubits())
circuit.wrap_in_numshots_loop(num_shots)
qc = get_qc(str(n_qubits) + 'q-qvm', connection=fc)
executable = qc.compile(circuit)
result = qc.run(executable)
classical_bits = get_classical_bits(circuit)
counts = {}
for bitstring in itertools.product(*[{1, 0} for _ in range(classical_bits)]):
key = "".join(str(i) for i in bitstring)
counts[key] = 0
counts["0" * classical_bits] = (result == 0).sum()
counts["0" * (classical_bits-1) + "1"] = (result == 1).sum()
else:
raise ValueError("Unknown circuit type")
return counts
def get_classical_bits(circuit):
if isinstance(circuit, qiskit.circuit.quantumcircuit.QuantumCircuit):
classical_bits = circuit.cregs[0].size
elif isinstance(circuit, pyquil.quil.Program):
for instruction in circuit.instructions:
if isinstance(instruction, pyquil.quilbase.Declare):
classical_bits = instruction.memory_size
break
else:
raise ValueError("Unknown circuit type")
return classical_bits
def get_circuit_length(circuit):
if isinstance(circuit, qiskit.circuit.quantumcircuit.QuantumCircuit):
program_length = sum(circuit.count_ops().values())
elif isinstance(circuit, pyquil.quil.Program):
program_length = len(circuit.instructions)
else:
raise ValueError("Unknown circuit type")
return program_length
if __name__ == "__main__":
try:
import grove
import pyquil
from grove.pyvqe import vqe
from pyquil import Program, get_qc
from pyquil.paulis import PauliSum, PauliTerm, exponential_map, sZ
from pyquil.api import WavefunctionSimulator, ForestConnection
from pyquil.gates import *
try:
qvm_server, quilc_server, fc = init_qvm_and_quilc()
is_forest = True
except FileNotFoundError:
try:
prefix = "/home/local/bin/"
qvm_server, quilc_server, fc = init_qvm_and_quilc(prefix+"qvm",
prefix + "quilc")
is_forest = True
except FileNotFoundError:
is_forest = False
except ImportError:
is_forest = False
try:
import qiskit
import qiskit.aqua
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import execute
try:
from qiskit import Aer
except ImportError:
from qiskit import BasicAer as Aer
from qiskit.quantum_info import Pauli
from qiskit.aqua.operators import *
is_qiskit = True
except ImportError:
is_qiskit = False
try:
import dimod
import dwave_networkx
import minorminer
is_dwave = True
except ImportError:
is_dwave = False
if not (is_qiskit or is_forest):
raise RuntimeError("No quantum computing framework available!")
if not is_dwave:
raise RuntimeError("D-Wave Ocean is not available!")
print("Available frameworks:")
if is_forest:
print("Forest SDK")
if is_qiskit:
print("Qiskit")
if is_dwave:
print("D-Wave Ocean")
|
07. Chapter_7/pytorch/random_access.py | Mikma03/High-performance-Python | 223 | 12746882 | import time
from functools import partial
import torch
def timer(fxn, max_time=5):
N = 0
total_time = 0
fxn()
while total_time < max_time:
start = time.perf_counter()
fxn()
total_time += time.perf_counter() - start
N += 1
return total_time / N
def task(A, target):
result = 0
i = 0
N = 0
while result < target:
r = A[i]
result += r
i = A[i]
N += 1
return N
if __name__ == "__main__":
N = 1000
print(f"Testing with array of length {N}")
A_py = (torch.rand(N) * N).type(torch.int).to("cuda:0")
A_np = A_py.cpu().numpy()
t_py = timer(partial(task, A_py, 500))
t_np = timer(partial(task, A_np, 500))
print(f"PyTorch took: {t_py:0.3e}s")
print(f"Numpy took: {t_np:0.3e}s")
print(f"Numpy is {100 - t_np/t_py*100:0.2f}% faster")
|
tests/utils.py | pyGrowler/Growler | 806 | 12746895 | <filename>tests/utils.py
#
# tests/utils
#
"""
Useful functions for all tests
"""
import asyncio
import pytest
from growler.aio.http_protocol import GrowlerHTTPProtocol
import growler
def random_port():
from random import randint
return randint(1024, 2**16)
@asyncio.coroutine
def setup_test_server(unused_tcp_port, event_loop):
"""
Sets up a GrowlerProtocol server for testing
"""
    # proto = growler.protocol.GrowlerProtocol
    # NOTE: TestProtocol is not defined or imported in this module; it is
    # assumed to be provided by the test module that imports this helper.
    proto = TestProtocol
server = yield from event_loop.create_server(proto, '127.0.0.1', unused_tcp_port)
return server, unused_tcp_port
@asyncio.coroutine
def setup_http_server(loop, port):
"""
Sets up a GrowlerHTTPProtocol server for testing
"""
# proto = growler.protocol.GrowlerHTTPProtocol
app = growler.App()
def proto():
return GrowlerHTTPProtocol(app)
return (yield from loop.create_server(proto, '127.0.0.1', port))
def teardown_server(server, loop=asyncio.get_event_loop()):
"""
'Generic' tear down a server and wait on the loop for everything to close.
"""
server.close()
loop.run_until_complete(server.wait_closed())
|
release/stubs.min/Rhino/DocObjects/__init___parts/ObjectMaterialSource.py | htlcnn/ironpython-stubs | 182 | 12746896 | class ObjectMaterialSource(Enum,IComparable,IFormattable,IConvertible):
"""
Defines enumerated values for the source of material of single objects.
enum ObjectMaterialSource,values: MaterialFromLayer (0),MaterialFromObject (1),MaterialFromParent (3)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
MaterialFromLayer=None
MaterialFromObject=None
MaterialFromParent=None
value__=None
|
RecoLocalMuon/GEMRecHit/python/gemLocalReco_cff.py | ckamtsikis/cmssw | 852 | 12746912 | import FWCore.ParameterSet.Config as cms
from RecoLocalMuon.GEMRecHit.gemRecHits_cfi import *
from RecoLocalMuon.GEMSegment.gemSegments_cfi import *
gemLocalRecoTask = cms.Task(gemRecHits,gemSegments)
gemLocalReco = cms.Sequence(gemLocalRecoTask)
|
Potatso/Library/ShadowPath/ShadowPath/shadowsocks-libev/src/ssrlink.py | ilioner/WCPotatso | 168 | 12746919 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import traceback
import random
import getopt
import sys
import json
import base64
def to_bytes(s):
if bytes != str:
if type(s) == str:
return s.encode('utf-8')
return s
def to_str(s):
if bytes != str:
if type(s) == bytes:
return s.decode('utf-8')
return s
def b64decode(data):
if b':' in data:
return data
if len(data) % 4 == 2:
data += b'=='
elif len(data) % 4 == 3:
data += b'='
return base64.urlsafe_b64decode(data)
def fromlink(link):
if link[:6] == 'ssr://':
link = to_bytes(link[6:])
link = to_str(b64decode(link))
params_dict = {}
if '/' in link:
datas = link.split('/', 1)
link = datas[0]
param = datas[1]
pos = param.find('?')
if pos >= 0:
param = param[pos + 1:]
params = param.split('&')
for param in params:
part = param.split('=', 1)
if len(part) == 2:
if part[0] in ['obfsparam']:
params_dict[part[0]] = to_str(b64decode(to_bytes(part[1])))
else:
params_dict[part[0]] = part[1]
datas = link.split(':')
if len(datas) == 6:
host = datas[0]
port = int(datas[1])
protocol = datas[2]
method = datas[3]
obfs = datas[4]
passwd = to_str(b64decode(to_bytes(datas[5])))
result = {}
result['server'] = host
result['server_port'] = port
result['local_address'] = '0.0.0.0'
result['local_port'] = 1080
        result['password'] = passwd
result['protocol'] = protocol
result['method'] = method
result['obfs'] = obfs
result['timeout'] = 300
if 'obfsparam' in params_dict:
result['obfs_param'] = params_dict['obfsparam']
output = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
print(output)
def main():
link = sys.argv[1]
fromlink(link)
if __name__ == '__main__':
main()
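# --- Illustrative usage (editor's addition): the script expects a single
# ssr:// link as its only command-line argument and prints the decoded JSON
# config, e.g.
#
#   python ssrlink.py "ssr://<urlsafe-base64 of host:port:protocol:method:obfs:base64(password)/?obfsparam=...>"
#
# The field order shown here is the one parsed by fromlink() above.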
|
interpolation/cartesian.py | vishalbelsare/interpolation.py | 110 | 12746958 | """
Filename: cartesian.py
Authors: <NAME>
Implements cartesian products and regular cartesian grids.
"""
import numpy
from numba import njit
def cartesian(nodes, order="C"):
"""Cartesian product of a list of arrays
Parameters:
-----------
nodes: (list of 1d-arrays)
order: ('C' or 'F') order in which the product is enumerated
Returns:
--------
out: (2d-array) each line corresponds to one point of the product space
"""
nodes = [numpy.array(e) for e in nodes]
shapes = [e.shape[0] for e in nodes]
n = len(nodes)
l = numpy.prod(shapes)
out = numpy.zeros((l, n))
if order == "C":
repetitions = numpy.cumprod([1] + shapes[:-1])
else:
shapes.reverse()
sh = [1] + shapes[:-1]
repetitions = numpy.cumprod(sh)
repetitions = repetitions.tolist()
repetitions.reverse()
for i in range(n):
_repeat_1d(nodes[i], repetitions[i], out[:, i])
return out
def mlinspace(a, b, nums, order="C"):
"""Constructs a regular cartesian grid
Parameters:
-----------
a: (1d-array) lower bounds in each dimension
b: (1d-array) upper bounds in each dimension
nums: (1d-array) number of nodes along each dimension
order: ('C' or 'F') order in which the product is enumerated
Returns:
--------
out: (2d-array) each line corresponds to one point of the product space
"""
a = numpy.array(a, dtype="float64")
b = numpy.array(b, dtype="float64")
nums = numpy.array(nums, dtype="int64")
nodes = [numpy.linspace(a[i], b[i], nums[i]) for i in range(len(nums))]
return cartesian(nodes, order=order)
@njit(cache=True)
def _repeat_1d(x, K, out):
"""Repeats each element of a vector many times and repeats the whole result many times
Parameters
----------
x: (1d array) vector to be repeated
K: (int) number of times each element of x is repeated (inner iterations)
out: (1d array) placeholder for the result
Returns
-------
None
"""
N = x.shape[0]
L = out.shape[0] // (K * N) # number of outer iterations
# K # number of inner iterations
# the result out should enumerate in C-order the elements
# of a 3-dimensional array T of dimensions (K,N,L)
# such that for all k,n,l, we have T[k,n,l] == x[n]
for n in range(N):
val = x[n]
for k in range(K):
for l in range(L):
ind = k * N * L + n * L + l
out[ind] = val
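# --- Illustrative usage (editor's addition, not part of the original module).
# A minimal sketch of what `cartesian` and `mlinspace` return; the node values
# below are arbitrary examples.
if __name__ == "__main__":
    pts = cartesian([[0, 1], [10, 20, 30]])
    print(pts.shape)   # (6, 2): one row per point of the product, C-ordered
    grid = mlinspace([0.0, 0.0], [1.0, 1.0], [3, 3])
    print(grid.shape)  # (9, 2): a regular 3x3 grid on the unit square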
|
libs/box_utils/iou.py | khanfarhan10/R2CNN_Faster-RCNN_Tensorflow | 629 | 12746968 | <filename>libs/box_utils/iou.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def iou_calculate(boxes_1, boxes_2):
'''
:param boxes_1: [N, 4] [ymin, xmin, ymax, xmax]
    :param boxes_2: [M, 4] [ymin, xmin, ymax, xmax]
:return:
'''
    with tf.name_scope('iou_calculate'):
ymin_1, xmin_1, ymax_1, xmax_1 = tf.split(boxes_1, 4, axis=1) # ymin_1 shape is [N, 1]..
ymin_2, xmin_2, ymax_2, xmax_2 = tf.unstack(boxes_2, axis=1) # ymin_2 shape is [M, ]..
max_xmin = tf.maximum(xmin_1, xmin_2)
min_xmax = tf.minimum(xmax_1, xmax_2)
max_ymin = tf.maximum(ymin_1, ymin_2)
min_ymax = tf.minimum(ymax_1, ymax_2)
overlap_h = tf.maximum(0., min_ymax - max_ymin) # avoid h < 0
overlap_w = tf.maximum(0., min_xmax - max_xmin)
overlaps = overlap_h * overlap_w
area_1 = (xmax_1 - xmin_1) * (ymax_1 - ymin_1) # [N, 1]
area_2 = (xmax_2 - xmin_2) * (ymax_2 - ymin_2) # [M, ]
iou = overlaps / (area_1 + area_2 - overlaps)
return iou
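# --- Illustrative usage (editor's addition, not part of the original module).
# Two hand-made boxes in [ymin, xmin, ymax, xmax] order; under TF2 eager
# execution this prints the numeric IoU matrix, while under TF1 it prints a
# symbolic tensor that would still need a session run.
if __name__ == '__main__':
    boxes_a = tf.constant([[0., 0., 10., 10.]])
    boxes_b = tf.constant([[0., 0., 10., 10.], [5., 5., 15., 15.]])
    print(iou_calculate(boxes_a, boxes_b))  # IoU of 1.0 and ~0.143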
|
dirigible/sheet/dependency_graph.py | EnoX1/dirigible-spreadsheet | 168 | 12746982 | # Copyright (c) 2010 Resolver Systems Ltd, PythonAnywhere LLP
# See LICENSE.md
#
from threading import Lock
from .errors import report_cell_error, CycleError
class Node(object):
def __init__(self, location, children=None, parents=None):
self.location = location
self.children = children if children else set()
self.parents = parents if parents else set()
self.lock = Lock()
def __eq__(self, other):
return (
self.location == other.location and
self.children == other.children and
self.parents == other.parents
)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "<Node %d,%d children={%s} parents={%s}>" % (
self.location[0], self.location[1],
', '.join(str(i) for i in self.children),
', '.join(str(i) for i in self.parents))
def remove_from_parents(self, parent_nodes, leaf_queue):
for parent in parent_nodes:
parent.lock.acquire()
parent.children.remove(self.location)
if len(parent.children) == 0:
leaf_queue.put(parent.location)
parent.lock.release()
def build_dependency_graph(worksheet):
graph = {}
visited = set()
for loc in worksheet.keys():
try:
_generate_cell_subgraph(worksheet, graph, loc, visited, [])
except CycleError:
pass # Deal with escapees
leaves = []
for loc, deps in graph.iteritems():
if not deps.children:
leaves.append(loc)
return graph, leaves
def _generate_cell_subgraph(worksheet, graph, loc, completed, path):
if loc not in worksheet:
return
cell = worksheet[loc]
if loc in completed:
if type(cell.error) == CycleError:
raise cell.error
else:
return
if loc in path:
cycle_error = CycleError(path[path.index(loc):] + [loc])
report_cell_error(worksheet, loc, cycle_error)
completed.add(loc)
raise cycle_error
if cell.python_formula:
valid_dependencies = set()
for dep_loc in cell.dependencies:
dep_cell = worksheet[dep_loc]
try:
_generate_cell_subgraph(worksheet, graph, dep_loc, completed, path + [loc])
if dep_cell.error:
continue
if not dep_cell.python_formula:
continue
valid_dependencies.add(dep_loc)
except CycleError as cycle_error:
if not loc in completed:
report_cell_error(worksheet, loc, cycle_error)
if loc in cycle_error.path:
completed.add(loc)
raise cycle_error
_add_location_dependencies(graph, loc, valid_dependencies)
completed.add(loc)
def _add_location_dependencies(graph, location, dependencies):
if location not in graph:
graph[location] = Node(location)
graph[location].children |= dependencies
for dependency in dependencies:
if dependency not in graph:
graph[dependency] = Node(dependency)
graph[dependency].parents.add(location)
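# --- Illustrative usage (editor's addition): a rough sketch of how the helper
# above shapes the graph; locations are (col, row) tuples and the numbers here
# are arbitrary examples.
#
#   graph = {}
#   _add_location_dependencies(graph, (1, 1), {(2, 1)})
#   graph[(1, 1)].children   # -> set([(2, 1)])
#   graph[(2, 1)].parents    # -> set([(1, 1)])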
|
.github/workflows/get_revision.py | MrBartusek/TwitchIO | 514 | 12746983 | import requests
def getrev():
resp = requests.get("https://pypi.org/pypi/TwitchIO/json")
data = resp.json()["releases"]
pre = max(data).split("b")
final = f"{pre[0]}b{int(pre[1]) + 1}"
return final
print(getrev())
|
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lang/c/forward/TestForwardDeclaration.py | Polidea/SiriusObfuscator | 427 | 12746989 | <gh_stars>100-1000
"""Test that forward declaration of a data structure gets resolved correctly."""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
class ForwardDeclarationTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def test_and_run_command(self):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
self.build()
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the foo function which takes a bar_ptr argument.
lldbutil.run_break_set_by_symbol(
self, "foo", num_expected_locations=1, sym_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
# This should display correctly.
# Note that the member fields of a = 1 and b = 2 is by design.
self.expect(
"frame variable --show-types *bar_ptr",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
'(bar) *bar_ptr = ',
'(int) a = 1',
'(int) b = 2'])
# And so should this.
self.expect(
"expression --show-types -- *bar_ptr",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
'(bar)',
'(int) a = 1',
'(int) b = 2'])
|
tests/test_reindex.py | a1346054/tldr.py | 210 | 12746997 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import shutil
from basic import BasicTestCase
class TestReindex(BasicTestCase):
def setUp(self):
super(TestReindex, self).setUp()
# Add a new page.
self.page_path = path.join(self.repo_dir, 'pages')
self.new_page = path.join(self.page_path, 'linux', 'blabla.md')
shutil.copy(path.join(self.page_path, 'linux', 'tcpflow.md'),
self.new_page)
# Backup the index.json.
shutil.copy(path.join(self.page_path, 'index.json'),
path.join(self.page_path, 'index_bak.json'))
def tearDown(self):
super(TestReindex, self).tearDown()
if path.exists(self.new_page):
os.remove(self.new_page)
# Restore the index.json.
if path.exists(path.join(self.page_path, 'index_bak.json')):
shutil.move(path.join(self.page_path, 'index_bak.json'),
path.join(self.page_path, 'index.json'))
def test_reindex(self):
before_reindex = self.call_find_command('blabla', platform='')
assert 'Sorry' in before_reindex.output
self.call_reindex_command()
after_reindex = self.call_find_command('blabla', platform='')
assert 'tcpflow' in after_reindex.output
|
project/path.py | ProfesseurGibaud/TestSite | 304 | 12747013 | # coding: utf-8
"""
This module contains all the paths for this project's directories,
which we create dynamically.
All paths are absolute, without symlinks and in unicode.
We also add the 'apps' and 'libs' directories to the PYTHON PATH, which
will make the imports much easier.
"""
import sys
import os
import tempfile
from pathlib import Path
# This part is a bit complicated and is not mandatory for your project, but
# it renders it completely portable since all directory paths are dynamically
# generated instead of being hard coded.
# We get this file's path (the __file__ variable automatically contains
# the path of the current file) and wrap it in a pathlib.Path object,
# which takes care of non-ASCII characters and of platform differences
# (Windows, Mac or Linux) for us.
THIS_FILE = Path(__file__)
# We dynamically create these settings, giving us the absolute path
# to the project directory, the root directory containing all our work
# and any other directory we might need
PROJECT_DIR = THIS_FILE.absolute().resolve()
BASE_DIR = PROJECT_DIR.parent.parent
APPS_DIR = BASE_DIR / 'apps'
LIBS_DIR = BASE_DIR / 'ignore_this_directory'
TEMP_DIR = Path(tempfile.gettempdir())
# We add the apps and libs directory to the PYTHON PATH, so we can import each
# package without prefixing them with the parent package name. This mimic the
# behavior we would have if they were at the root directory or installed with
# pip.
#
# E.G: we can do from "app1_hello.views import hello" instead of
# "from apps.app1_hello.views import hello" or "import django" instead of
# "from libs import django"
#
# When you have a small project, you can avoid this and put all apps at the root
# dir like in the official Django tutorial, but in a big project with a lots of
# apps, you usually put them all in an "apps" dir like we did, so it's a good
# thing to know.
sys.path.append(str(LIBS_DIR))
sys.path.append(str(APPS_DIR))
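# --- Illustrative effect (editor's addition): after the two appends above,
# packages living under the apps and libs directories import without their
# parent prefix, e.g. (assuming an apps/app1_hello package exists, as in the
# example mentioned earlier):
#
#   from app1_hello.views import hello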
|
artemis/remote/file_system.py | peteroconnor-bc/artemis | 235 | 12747020 | from __future__ import print_function
import os
from ConfigParser import NoSectionError, NoOptionError
import paramiko
from artemis.config import get_artemis_config_value
from artemis.fileman.config_files import get_config_value
from artemis.remote.utils import get_ssh_connection
def check_config_file(ip_address,file_path=".artemisrc"):
'''
Makes sure all required fields are present in ~./artemisrc.
Also performs test for the different options if applicable
:param ip_address: The section to look for. Remote ip is assumed. Makes no sense for local ip.
:return:
'''
mandatory_options = ["username","python"]
artemisrc_path = os.path.expanduser("~/%s"%file_path)
for option in mandatory_options:
try:
get_artemis_config_value(section=ip_address,option=option)
except NoSectionError:
print("Section %s could not be found in %s. Please provide it." %(ip_address, artemisrc_path))
raise
except NoOptionError:
print("Section %s does not contain option %s. Please provide it in %s" %(ip_address, option, artemisrc_path))
raise
# optional_options = ["private_key"]
try:
private_key_path = get_artemis_config_value(section=ip_address,option="private_key")
assert os.path.isfile(private_key_path), "The path to the private_key for %s you specified in %s is not valid. You provided %s" %(ip_address, artemisrc_path, private_key_path)
except NoOptionError:
pass
# username & private key setup tests:
try:
get_ssh_connection(ip_address)
except paramiko.ssh_exception.AuthenticationException as e:
if "Authentication failed" in e.message:
print("An AuthenticationException is being raised. Make sure you have your private key set up correctly")
else:
print("An AuthenticationException is being raised. Did you specify the correct username for %s in %s? You provided the username %s"% (ip_address, artemisrc_path, get_artemis_config_value(section=ip_address,option="username")))
raise
except paramiko.ssh_exception.SSHException:
try:
private_key_path = get_artemis_config_value(section=ip_address,option="private_key")
print ("Something is wrong with the private_key you specified in %s for %s . You provided %s" % (artemisrc_path, ip_address, private_key_path))
raise
except NoOptionError:
private_key_path = os.path.join(os.path.expanduser("~"),".ssh/id_rsa")
print("You did not provide a private_key path in %s. The default path %s appears to be wrongly set up. "
"Please make sure you have correctly set up your private key for %s " %(artemisrc_path,private_key_path,ip_address))
#python tests:
python_path = get_artemis_config_value(section=ip_address,option="python")
command = "python -c 'import os; print(os.path.isfile(os.path.expanduser(\"%s\")))'"%python_path
ssh_conn = get_ssh_connection(ip_address)
_,stdout,stderr = ssh_conn.exec_command(command)
assert stdout.read().strip()=="True", "The provided path to the remote python installation on %s does not exist. You provided %s" %(ip_address, python_path)
command = "%s -c 'print(\"Success\")'" % python_path
_,stdout,stderr = ssh_conn.exec_command(command)
err = stderr.read().strip()
assert stdout.read().strip()=="Success" and not err, "The provided python path on %s does not seem to point to a python executable. " \
"You provided %s, which resulted in the following error on the remote machine: " %(ip_address, python_path, err)
def simple_rsync(local_path, remote_path, ip_address, verbose=False):
'''
This method synchronizes local_path and all subfolders with remote_path at the given address.
This method executes a system rsync call. This is not a general wrapper for rsync. The call is blocking.
:param local_path:
:param remote_path: Assumed to be relative to the home dir
:param ip_address:
:return:
'''
options = "-ah"
if verbose:
options += "v"
local_path = os.path.expanduser(local_path)
username = get_artemis_config_value(section=ip_address, option="username")
if remote_path.startswith("~"):
remote_path = remote_path[1:]
if remote_path.startswith(("/")):
remote_path = remote_path[1:]
# to_path = "%s@%s:/home/%s/%s" % (username, address, username, remote_path)
to_path = "%s@%s:~/%s" % (username, ip_address, remote_path)
return rsync(options, from_path=local_path, to_path=to_path)
def rsync(options, from_path, to_path):
'''
basic rsync wrapper
:param options:
:param from_path:
:param to_path:
:return:
'''
import subprocess
print ("Starting: rsync %s %s %s" % (options, from_path, to_path))
if not type(options) is list:
options = [options]
command = subprocess.Popen(["rsync"] + options + [from_path, to_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1)
if "v" in options:
while True:
line = command.stdout.readline()
if line != '':
print (line.rstrip())
else:
break
err = command.stderr.read().strip()
if err:
msg = "rsync received messages on stderr. This might indicate that the command failed or, if you transferred to a remote server," \
" it might just be some message received by the remote server. \n" \
"This is because rsync automatically forwards all messages by the remote server to stderr. \n" \
"If you are confident that the call succeeded although stderr received messages, then catch the RuntimeError accordingly.\n " \
"The messages received are: \n %s" % err
raise RuntimeError(msg)
print("rsync finished")
return True
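# --- Illustrative usage (editor's addition): a rough sketch of the helpers
# above; the host alias and paths are arbitrary examples and assume a matching
# section in ~/.artemisrc.
#
#   check_config_file('my-remote-box')
#   simple_rsync('~/experiments/', 'experiments/', 'my-remote-box', verbose=True)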
|
Grab/parse_onlinemultfilmy.py | DazEB2/SimplePyScripts | 117 | 12747050 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# Search for animated series rated 16+
# Example series: 'http://onlinemultfilmy.ru/bratya-ventura/'
import time
from grab import Grab
g = Grab()
# Iterate over the pages of cartoon series
for i in range(1, 82 + 1):
url_page = 'http://onlinemultfilmy.ru/multserialy/page/' + str(i)
print(url_page)
    # Load the page with cartoons
    g.go(url_page)
    # Iterate over and load the cartoons on the page
for url in g.doc.select('//div[@class="cat-post"]/a'):
g.go(url.attr('href'))
if g.doc.select('//*[@class="age_icon age_icon_16"]').count():
print(' ', url.attr('title'), url.attr('href'))
        # Sleep so the server does not treat this as a DoS attack
time.sleep(2)
|
Regression/Linear Regression.py | yanding/Stock-Analysis | 357 | 12747056 | <reponame>yanding/Stock-Analysis<filename>Regression/Linear Regression.py<gh_stars>100-1000
# Linear Regression for Stock based on Date
# Currently only works for month
# http://scikit-learn.org/stable/auto_examples/linear_model/plot_ols.html#example-linear-model-plot-ols-py
import csv
import numpy as np
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
close_prices = []
dates = []
f = raw_input('Enter filename: \n')
if not(f.endswith('.csv')):
f = f + '.csv'
with open(f, 'r') as file:
the_data = csv.reader(file)
the_data.next()
for row in the_data:
x = float(row[4])
close_prices.append(float("{0:.2f}".format(x)))
dates.append(int(row[0].split('/')[0]))
# convert lists to numpy arrays
length_of_dates = len(dates) + 1
day = []
day.extend(range(1, length_of_dates))
rev = raw_input('Is the newest price first? (y/n)')
if rev.startswith('y'):
close_prices.reverse()
prices_arr = np.reshape(close_prices, (len(close_prices), 1))
days_arr = np.reshape(day, (len(day), 1))
print close_prices
print day
# Creating lin reg object
regr = linear_model.LinearRegression()
regr.fit(days_arr, prices_arr)
print 'Coefficients: '
print regr.coef_
# Explained variance score: 1 is perfect prediction
print 'Variance score: %.2f' % regr.score(days_arr, prices_arr)
minpr = min(close_prices)
maxpr = max(close_prices)
maxdt = max(day)
mindt = min(day)
# Draw black dots representing prices
plt.figure(figsize=(15, 15), dpi=240)
plt.scatter(days_arr, prices_arr, color='black', label='Close Prices')
plt.plot(days_arr, regr.predict(days_arr), color='red',
linewidth=4, label='Estimated Linear Function')
plt.xlabel('Day')
plt.ylabel('Close Price')
plt.title('Linear Regression')
plt.ylim([minpr - 3, maxpr + 3])
plt.xlim([mindt - 1, maxdt + 5])
plt.subplots_adjust(bottom=0.13)
plt.subplots_adjust(top=0.92)
plt.subplots_adjust(left=0.07)
plt.subplots_adjust(right=0.96)
plt.legend()
plt.show()
|
samples/switch-commands/aci-show-vlan-ext.py | richardstrnad/acitoolkit | 351 | 12747080 | <gh_stars>100-1000
#!/usr/bin/env python
"""
This application replicates the switch CLI command 'show vlan info'
It largely uses raw queries to the APIC API
"""
from acitoolkit import Credentials, Session
from tabulate import tabulate
def show_vlan_brief(apic, node_ids):
"""
show vlan brief
:param apic: Session instance logged in to the APIC
:param node_ids: List of strings containing node ids
"""
for node_id in node_ids:
query_url = ('/api/mo/topology/pod-1/node-%s.json?query-target=subtree&'
'target-subtree-class=l2BD,l2RsPathDomAtt' % node_id)
resp = apic.get(query_url)
if not resp.ok:
print('Could not collect APIC data for switch %s.' % node_id)
return
l2bd_data = []
port_data = {}
for obj in resp.json()['imdata']:
if 'l2BD' in obj:
obj_attr = obj['l2BD']['attributes']
l2bd_data.append((int(obj_attr['id']), str(obj_attr['name']),
str(obj_attr['adminSt']), str(obj_attr['fabEncap'])))
else:
dn = obj['l2RsPathDomAtt']['attributes']['dn']
port_id = str(dn.rpartition('/path-[')[2].partition(']')[0])
port_bd_encap = str(dn.partition('/bd-[')[2].partition(']')[0])
if port_bd_encap not in port_data:
port_data[port_bd_encap] = port_id
port_data[port_bd_encap] += ', ' + port_id
output_data = []
for (l2bd_id, l2bd_name, l2bd_admin_state, l2bd_fab_encap) in l2bd_data:
try:
ports = port_data[str(l2bd_fab_encap)]
except KeyError:
ports = ''
output_data.append((l2bd_id, l2bd_name, l2bd_admin_state, ports))
output_data.sort(key=lambda tup: tup[0])
        print 'Switch:', node_id
print tabulate(output_data, headers=["VLAN", "Name", "Status", "Ports"])
def show_vlan_info(apic, node_ids):
"""
show vlan info
:param apic: Session instance logged in to the APIC
:param node_ids: List of strings containing node ids
"""
for node_id in node_ids:
query_url = '/api/mo/topology/pod-1/node-%s.json?query-target=subtree&target-subtree-class=l2BD' % node_id
resp = apic.get(query_url)
if not resp.ok:
print('Could not collect APIC data for switch %s.' % node_id)
return
data = []
for l2bd in resp.json()['imdata']:
l2bd_attr = l2bd['l2BD']['attributes']
encap = str(l2bd_attr['fabEncap'])
if str(l2bd_attr['accEncap']) != 'unknown':
encap += ', ' + str(l2bd_attr['accEncap'])
data.append((int(l2bd_attr['id']), 'enet', str(l2bd_attr['mode']), encap))
data.sort(key=lambda tup: tup[0])
        print 'Switch:', node_id
print tabulate(data, headers=["VLAN", "Type", "Vlan-mode", "Encap"])
def get_node_ids(apic, args):
"""
Get the list of node ids from the command line arguments.
If none, get all of the node ids
:param apic: Session instance logged in to the APIC
:param args: Command line arguments
:return: List of strings containing node ids
"""
if args.switch is not None:
names = [args.switch]
else:
names = []
query_url = '/api/node/class/fabricNode.json?query-target-filter=eq(fabricNode.role,"leaf")'
resp = apic.get(query_url)
if not resp.ok:
print('Could not get switch list from APIC.')
return
nodes = resp.json()['imdata']
for node in nodes:
names.append(str(node['fabricNode']['attributes']['id']))
return names
def main():
"""
Main common routine for show vlan ext, show vlan brief, and show vlan info
:return: None
"""
# Set up the command line options
creds = Credentials(['apic', 'nosnapshotfiles'],
description="This application replicates the switch CLI command 'show vlan extended'")
creds.add_argument('-s', '--switch',
type=str,
default=None,
help='Specify a particular switch id, e.g. "101"')
args = creds.get()
# Login to APIC
apic = Session(args.url, args.login, args.password)
if not apic.login().ok:
print('%% Could not login to APIC')
return
node_ids = get_node_ids(apic, args)
show_vlan_brief(apic, node_ids)
show_vlan_info(apic, node_ids)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
|
pymatgen/analysis/solar/tests/test_slme.py | exenGT/pymatgen | 921 | 12747083 | <gh_stars>100-1000
import os
import unittest
import warnings
from pymatgen.analysis.solar.slme import optics, slme
from pymatgen.util.testing import PymatgenTest
class SolarTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_slme_from_vasprun(self):
path = os.path.join(os.path.dirname(__file__), "vasprun.xml")
en, abz, dirgap, indirgap = optics(path)
abz = abz * 100.0
eff = slme(en, abz, indirgap, indirgap, plot_current_voltage=False)
self.assertAlmostEqual(eff, 27.728998512472298, places=5)
if __name__ == "__main__":
unittest.main()
|
hummingbot/core/data_type/order_book_row.py | BGTCapital/hummingbot | 3,027 | 12747097 | <reponame>BGTCapital/hummingbot
#!/usr/bin/env python
from collections import namedtuple
from decimal import Decimal
class OrderBookRow(namedtuple("_OrderBookRow", "price, amount, update_id")):
"""
    Used to apply changes to OrderBook. OrderBook classes use floats internally for better performance than Decimal.
"""
price: float
amount: float
update_id: int
class ClientOrderBookRow(namedtuple("_OrderBookRow", "price, amount, update_id")):
"""
Used in market classes where OrderBook values are converted to Decimal.
"""
price: Decimal
amount: Decimal
update_id: int
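# --- Illustrative usage (editor's addition, not part of the original module).
# A minimal sketch of constructing both row types; the values are arbitrary.
if __name__ == "__main__":
    raw_row = OrderBookRow(100.5, 2.0, 1)
    client_row = ClientOrderBookRow(Decimal("100.5"), Decimal("2.0"), 1)
    print(raw_row.price, client_row.price)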
|
kopf/_cogs/structs/__init__.py | tavaresrodrigo/kopf | 1,038 | 12747106 | """
All the functions to manipulate the resource fields, state changes, etc.
Grouped by the type of the fields and the purpose of the manipulation.
Used in the handling routines to check if there were significant changes at all
(i.e. not our own internal and system changes, like the uids, links, etc),
and to get the exact per-field diffs for the specific handler functions.
All the functions are purely data-manipulative and computational.
No external calls or any i/o activities are done here.
"""
|
test/test_tokens.py | robbm1/pyleri | 106 | 12747123 | <reponame>robbm1/pyleri
import unittest
import os
import sys
if os.environ.get('USELIB') != '1':
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from pyleri import (
KeywordError,
create_grammar,
Tokens,
) # nopep8
class TestTokens(unittest.TestCase):
def test_tokens(self):
spaced = '== != >= <= > <'
tokens = Tokens('== > != < >= <= ')
grammar = create_grammar(tokens)
self.assertEqual(spaced, str(tokens))
self.assertTrue(grammar.parse('==').is_valid)
self.assertTrue(grammar.parse('<=').is_valid)
self.assertTrue(grammar.parse('>').is_valid)
self.assertFalse(grammar.parse('').is_valid)
self.assertFalse(grammar.parse('=').is_valid)
self.assertEqual(
str(grammar.parse('')),
'error at position 0, expecting: == != >= <= > <'
)
if __name__ == '__main__':
unittest.main()
|
python-threatexchange/threatexchange/content_type/video.py | b-bold/ThreatExchange | 997 | 12747156 | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Wrapper around the video content type.
"""
import typing as t
from ..signal_type import raw_text, trend_query, url, md5, video_tmk_pdqf
from ..signal_type.signal_base import SignalType
from .content_base import ContentType
class VideoContent(ContentType):
"""
Content representing a sequence of images, giving the illusion of motion.
Examples might be:
* mp4
* avi
* gif animations
"""
@classmethod
def get_signal_types(cls) -> t.List[t.Type[SignalType]]:
return [md5.VideoMD5Signal, video_tmk_pdqf.VideoTmkPdqfSignal]
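# --- Illustrative usage (editor's addition): from code that imports this
# package, the supported signal types can be listed via the classmethod above:
#
#   VideoContent.get_signal_types()
#   # -> [md5.VideoMD5Signal, video_tmk_pdqf.VideoTmkPdqfSignal]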
|
ui/pypesvds/lib/extras/pdflib/pycdb.py | onfire73/pypeskg | 117 | 12747175 | #!/usr/bin/env python
#
# pycdb.py - Python implementation of cdb and tcdb
#
# by <NAME>
# * public domain *
#
import sys, os
from struct import pack, unpack
from array import array
# calc hash value with a given key
def cdbhash(s, n=5381L):
return reduce(lambda h,c: ((h*33) ^ ord(c)) & 0xffffffffL, s, n)
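# (Editor's note) cdbhash is the classic cdb hash by D. J. Bernstein: start at
# 5381, then for each byte compute h = (h * 33) XOR byte, masked to 32 bits.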
if pack('=i',1) == pack('>i',1):
# big endian
def decode(x):
a = array('I', x)
a.byteswap()
return a
def encode(a):
a.byteswap()
return a.tostring()
else:
# little endian
def decode(x):
a = array('I', x)
return a
def encode(a):
return a.tostring()
## CDB
##
# cdbiter
def cdbiter(fp, eod):
kloc = 2048
while kloc < eod:
fp.seek(kloc)
(klen, vlen) = unpack('<II', fp.read(8))
k = fp.read(klen)
v = fp.read(vlen)
kloc += 8+klen+vlen
yield (k,v)
fp.close()
return
# CDBReader
class CDBReader(object):
def __init__(self, cdbname, docache=1):
self.name = cdbname
self._fp = file(cdbname, 'rb')
hash0 = decode(self._fp.read(2048))
self._hash0 = [ (hash0[i], hash0[i+1]) for i in xrange(0, 512, 2) ]
self._hash1 = [ None ] * 256
self._eod = hash0[0]
self._docache = docache
self._cache = {}
self._keyiter = None
self._eachiter = None
return
def __repr__(self):
return '<CDBReader: %r>' % self.name
def __getstate__(self):
raise TypeError
def __setstate__(self, dict):
raise TypeError
def __getitem__(self, k):
k = str(k)
if k in self._cache: return self._cache[k]
h = cdbhash(k)
h1 = h & 0xff
(pos_bucket, ncells) = self._hash0[h1]
if ncells == 0: raise KeyError(k)
hs = self._hash1[h1]
if hs == None:
self._fp.seek(pos_bucket)
hs = decode(self._fp.read(ncells * 8))
self._hash1[h1] = hs
i = ((h >> 8) % ncells) * 2
n = ncells*2
for _ in xrange(ncells):
p1 = hs[i+1]
if p1 == 0: raise KeyError(k)
if hs[i] == h:
self._fp.seek(p1)
(klen, vlen) = unpack('<II', self._fp.read(8))
k1 = self._fp.read(klen)
if k1 == k:
v1 = self._fp.read(vlen)
if self._docache:
self._cache[k] = v1
return v1
i = (i+2) % n
raise KeyError(k)
def get(self, k, failed=None):
try:
return self.__getitem__(k)
except KeyError:
return failed
def has_key(self, k):
try:
self.__getitem__(k)
return True
except KeyError:
return False
def __contains__(self, k):
return self.has_key(k)
def firstkey(self):
self._keyiter = None
return self.nextkey()
def nextkey(self):
if not self._keyiter:
self._keyiter = ( k for (k,v) in cdbiter(self._fp, self._eod) )
try:
return self._keyiter.next()
except StopIteration:
return None
def each(self):
if not self._eachiter:
self._eachiter = cdbiter(self._fp, self._eod)
try:
return self._eachiter.next()
except StopIteration:
return None
def iterkeys(self):
return ( k for (k,v) in cdbiter(self._fp, self._eod) )
def itervalues(self):
return ( v for (k,v) in cdbiter(self._fp, self._eod) )
def iteritems(self):
return cdbiter(self._fp, self._eod)
# CDBMaker
class CDBMaker(object):
def __init__(self, cdbname, tmpname):
self.fn = cdbname
self.fntmp = tmpname
self.numentries = 0
self._fp = file(tmpname, 'wb')
self._pos = 2048 # sizeof((h,p))*256
self._bucket = [ array('I') for _ in xrange(256) ]
return
def __repr__(self):
return '<CDBMaker: %r, %r, %d ents>' % (self.fn, self.fntmp, self.numentries)
def __len__(self):
return self.numentries
def __getstate__(self):
raise TypeError
def __setstate__(self, dict):
raise TypeError
def add(self, k, v):
(k, v) = (str(k), str(v))
(klen, vlen) = (len(k), len(v))
self._fp.seek(self._pos)
self._fp.write(pack('<II', klen, vlen))
self._fp.write(k)
self._fp.write(v)
h = cdbhash(k)
b = self._bucket[h % 256]
b.append(h)
b.append(self._pos)
# sizeof(keylen)+sizeof(datalen)+sizeof(key)+sizeof(data)
self._pos += 8+klen+vlen
self.numentries += 1
return self
def finish(self):
self._fp.seek(self._pos)
pos_hash = self._pos
# write hashes
for b1 in self._bucket:
if not b1: continue
blen = len(b1)
a = array('I', [0]*blen*2)
for j in xrange(0, blen, 2):
(h,p) = (b1[j],b1[j+1])
i = ((h >> 8) % blen)*2
while a[i+1]: # is cell[i] already occupied?
i = (i+2) % len(a)
a[i] = h
a[i+1] = p
self._fp.write(encode(a))
# write header
self._fp.seek(0)
a = array('I')
for b1 in self._bucket:
a.append(pos_hash)
a.append(len(b1))
pos_hash += len(b1)*8
self._fp.write(encode(a))
# close
self._fp.close()
os.rename(self.fntmp, self.fn)
return
# txt2cdb
def txt2cdb(self, lines):
import re
HEAD = re.compile(r'^\+(\d+),(\d+):')
for line in lines:
m = HEAD.match(line)
if not m: break
(klen, vlen) = (int(m.group(1)), int(m.group(2)))
i = len(m.group(0))
k = line[i:i+klen]
i += klen
if line[i:i+2] != '->': raise ValueError('invalid separator: %r' % line)
i += 2
v = line[i:i+vlen]
self.add(k, v)
return self
# cdbdump
def cdbdump(cdbname):
fp = file(cdbname, 'rb')
(eor,) = unpack('<I', fp.read(4))
return cdbiter(fp, eor)
# cdbmerge
def cdbmerge(iters):
q = []
for it in iters:
try:
q.append((it.next(),it))
except StopIteration:
pass
k0 = None
vs = None
while q:
q.sort()
((k,v),it) = q.pop(0)
if k0 != k:
if vs: yield (k0,vs)
vs = []
vs.append(v)
k0 = k
try:
q.append((it.next(),it))
except StopIteration:
continue
if vs: yield (k0,vs)
return
# aliases
cdbmake = CDBMaker
init = CDBReader
## TCDB
##
# tcdbiter
def tcdbiter(fp, eor):
locs = {}
fp.seek(eor)
while 1:
x = fp.read(8)
if not x: break
(h, pos) = unpack('<II', x)
if pos: locs[pos] = h
pos = 2048
fp.seek(pos)
key = ()
parents = [0]
while pos < eor:
(klen, vlen) = unpack('<II', fp.read(8))
k = fp.read(klen)
v = fp.read(vlen)
h = locs[pos]
for (i,p) in enumerate(parents):
if cdbhash(k, p+5381L) == h:
parents = parents[:i+1]
key = key[:i]
break
key += (k,)
yield (key, v)
parents.append(pos)
pos += 8+klen+vlen
fp.close()
return
# TCDBMaker
class TCDBMaker(CDBMaker):
def __init__(self, cdbname, tmpname):
CDBMaker.__init__(self, cdbname, tmpname)
self._parent = 0
self._stack = [self._parent]
return
def put(self, depth, k, v):
if depth == len(self._stack)+1:
self._stack.append(self._parent)
elif depth < len(self._stack):
self._stack = self._stack[:depth]
elif depth != len(self._stack):
raise ValueError('invalid depth: %d' % depth)
#
(k, v) = (str(k), str(v))
(klen, vlen) = (len(k), len(v))
self._parent = self._pos
# sizeof(keylen)+sizeof(datalen)+sizeof(key)+sizeof(data)
self._fp.seek(self._pos)
self._fp.write(pack('<II', klen, vlen))
self._fp.write(k)
self._fp.write(v)
self._pos += 4+4+klen+vlen
h = cdbhash(k, self._stack[-1]+5381L)
b = self._bucket[h % 256]
b.append(h)
b.append(self._parent)
self.numentries += 1
return self
def txt2tcdb(self, lines):
import re
HEAD = re.compile(r'^(\++)(\d+),(\d+):')
for line in lines:
m = HEAD.match(line)
if not m: break
(depth, klen, vlen) = (len(m.group(1)), int(m.group(2)), int(m.group(3)))
i = len(m.group(0))
k = line[i:i+klen]
i += klen
if line[i:i+2] != '->': raise ValueError('invalid separator: %r' % line)
i += 2
v = line[i:i+vlen]
self.put(depth, k, v)
return self
# TCDBReader
class TCDBReader(CDBReader):
def lookup(self, seq, parent=0L):
r = []
for k in seq:
(v, parent) = self.lookup1(k, parent)
r.append(v)
return r
def lookup1(self, k, parent=0L):
k = str(k)
if self._docache and (parent,k) in self._cache:
return self._cache[(parent,k)]
h = cdbhash(k, parent+5381L)
self._fp.seek((h % 256) << 3)
(pos_bucket, ncells) = unpack('<II', self._fp.read(8))
if ncells == 0: raise KeyError(k)
start = (h >> 8) % ncells
for i in xrange(ncells):
self._fp.seek(pos_bucket + ((start+i) % ncells << 3))
(h1, p1) = unpack('<II', self._fp.read(8))
if p1 == 0: raise KeyError(k)
if h1 == h:
self._fp.seek(p1)
(klen, vlen) = unpack('<II', self._fp.read(8))
k1 = self._fp.read(klen)
if k1 == k:
v1 = self._fp.read(vlen)
if self._docache:
self._cache[(parent,k)] = (v1,p1)
return (v1,p1)
raise KeyError(k)
def iterkeys(self):
return ( k for (k,v) in tcdbiter(self._fp, self._eod) )
def itervalues(self):
return ( v for (k,v) in tcdbiter(self._fp, self._eod) )
def iteritems(self):
return tcdbiter(self._fp, self._eod)
# tcdbdump
def tcdbdump(cdbname):
fp = file(cdbname, 'rb')
(eor,) = unpack('<I', fp.read(4))
return tcdbiter(fp, eor)
# aliases
tcdbmake = TCDBMaker
tcdbinit = TCDBReader
tcdbmerge = cdbmerge
# main
def main(argv):
import getopt, fileinput
def usage():
print 'usage: %s {cmake,cget,cdump,cmerge} [options] cdbname [args ...]' % argv[0]
print 'usage: %s {tmake,tget,tdump,tmerge} [options] tcdbname [args ...]' % argv[0]
return 100
args = argv[1:]
if not args: return usage()
cmd = args.pop(0)
try:
(opts, args) = getopt.getopt(args, 'kv2')
except getopt.GetoptError:
return usage()
if not args: return usage()
dbname = args.pop(0)
# cdb
if cmd == 'cmake':
CDBMaker(dbname, dbname+'.tmp').txt2cdb(fileinput.input(args)).finish()
elif cmd == 'cget':
print repr(CDBReader(dbname).get(args[0]))
elif cmd == 'cdump':
f = (lambda k,v: '+%d,%d:%s->%s' % (len(k), len(v), k, v))
for (k, v) in opts:
if k == '-k': f = (lambda k,_: k)
elif k == '-v': f = (lambda _,v: v)
elif k == '-2': f = (lambda k,v: k+'\t'+v)
for (k,v) in cdbdump(dbname):
print f(k,v)
print
elif cmd == 'cmerge':
dbs = [ cdbdump(fname) for fname in args ]
m = CDBMaker(dbname, dbname+'.tmp')
for (k,vs) in tcdbmerge(dbs):
m.add(k, ' '.join(vs))
m.finish()
# tcdb
elif cmd == 'tmake':
TCDBMaker(dbname, dbname+'.tmp').txt2tcdb(fileinput.input(args)).finish()
elif cmd == 'tget':
print repr(TCDBReader(dbname).lookup(args))
elif cmd == 'tdump':
f = (lambda k,v: '%s%d,%d:%s->%s' % ('+'*len(k), len(k[-1]), len(v), k[-1], v))
for (k, v) in opts:
if k == '-k': f = (lambda k,_: '/'.join(k))
elif k == '-v': f = (lambda _,v: v)
elif k == '-2': f = (lambda k,v: '/'.join(k)+'\t'+v)
for (k,v) in tcdbdump(dbname):
print f(k,v)
print
elif cmd == 'tmerge':
dbs = [ tcdbdump(fname) for fname in args ]
m = TCDBMaker(dbname, dbname+'.tmp')
for (k,vs) in tcdbmerge(dbs):
m.put(len(k), k[-1], ' '.join(vs))
m.finish()
else:
return usage()
return
if __name__ == '__main__': sys.exit(main(sys.argv))
|
leonardo/module/media/widget/vectorgraphics/util.py | timgates42/django-leonardo | 102 | 12747193 | <gh_stars>100-1000
import xml.parsers.expat
from decimal import Decimal
from django.conf import settings
from django.template.loader import render_to_string
import xml.dom.minidom
import pprint
pp = pprint.PrettyPrinter(indent=1, width=20).pprint
def autofill_svg_size(sender, **kwargs):
if not kwargs['instance'].autofill_dimensions:
return
dims = {}
def start_element(name, attrs):
if name == 'svg':
if u'height' in attrs and u'width' in attrs:
dims['height'] = attrs[u'height']
dims['width'] = attrs[u'width']
p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = start_element
p.Parse(kwargs['instance'].file.read())
if 'height' in dims and 'width' in dims:
#attention! measure unit is deleted! need to find - what measure units can be used for size
#and how it can be implemented in svg-editor
kwargs['instance'].height = Decimal(dims['height'].rstrip('px').rstrip('em').rstrip('%'))
kwargs['instance'].width = Decimal(dims['width'].rstrip('px').rstrip('em').rstrip('%'))
class recompose(object):
def __init__(self, modify_type):
self.modify_type = modify_type
self.sizes = []
self.urls = []
def __call__(self, *args, **kwargs):
self.sizes = []
self.urls = []
content_tokens = []
content_attr_name = kwargs['sender'].__name__.split("_")[-1]
modified_contents = getattr(kwargs['instance'],
content_attr_name).filter(id__in=list(kwargs['pk_set']))
for modified_content in modified_contents:
content_tokens += {"orm_id":unicode(modified_content.id),
"orm_page_id":unicode(modified_content.parent.id),
"orm_region":unicode(modified_content.region),
"orm_ordering":unicode(modified_content.ordering)},
self.sizes += (modified_content.markup.height,
modified_content.markup.width),
self.urls += modified_content.markup.file.url,
return self.modify(markup_path=kwargs['instance'].markup.file.path,
content_tokens=content_tokens,
modify_type=self.modify_type,
sizes =self.sizes,)
def modify(self, markup_path="", content_tokens=[], sizes=[], modify_type=""):
def marked_elements(root, element_name, content_tokens):
for element in root.getElementsByTagName(element_name):
attrs = dict(((attr_node.name, attr_node.value) for\
attr_node in element._attrs.values()))
for content_token in content_tokens:
related_attrs = dict((a[0],a[1]) for a in attrs.items() if \
a[0] in content_token \
and a[1] == content_token[a[0]])
if len(related_attrs) == len(content_token):
yield element
def adder(doc):
if not len(list(marked_elements(doc, 'image', content_tokens))):
for content_token in content_tokens:
# print content_tokens.index(content_token)
# print self.urls
# print self.urls[content_tokens.index(content_token)]
image_node = xml.dom.minidom.parseString(render_to_string('svg/image_node.xml',
{'token':content_token,
'size':self.sizes[content_tokens.index(content_token)],
'url':self.urls[content_tokens.index(content_token)]})).\
childNodes[0].childNodes[0]
image_node.ownerDocument = None
image_node.parentNode = None
doc.childNodes[0].appendChild(image_node)
def remover(doc):
for element in marked_elements(doc, 'image', content_tokens):
element.parentNode.removeChild(element)
if modify_type == 'add':
modifier = adder
elif modify_type == 'remove':
modifier = remover
else:
raise ValueError("'modify_type' must be either 'add' or 'remove' string")
markup = open(markup_path)
doc = xml.dom.minidom.parse(markup)
markup.close()
#markup = open(markup_path, mode='w')
modifier(doc)
markup = file(markup_path, mode='w')
markup.write(doc.toprettyxml())
markup.close()
RECOMPOSE_MAP = {('pre_remove', "SVGComposerContentType_composers"):recompose('remove'),
('post_add', "SVGComposerContentType_composers"):recompose('add'),
('pre_remove', "SVGComposerContentType_components"):recompose('remove'),
('post_add', "SVGComposerContentType_components"):recompose('add'),}
def recompose_dispatcher(*args, **kwargs):
if kwargs['instance'].__class__.__name__ == 'SVGComposerContentType':
if (kwargs['action'],kwargs['sender'].__name__) in RECOMPOSE_MAP:
RECOMPOSE_MAP[(kwargs['action'],kwargs['sender'].__name__)](*args, **kwargs)
def create_markup_if_empty(*args, **kwargs):
if kwargs['sender'].__name__ == 'SVGComposerContentType' and\
not kwargs['instance'].markup and\
not hasattr(kwargs['instance'],'_markup_resetted'):
print "reset"
kwargs['instance'].reset_markup()
setattr(kwargs['instance'],'_markup_resetted', True)
kwargs['instance'].save()
|
geonotebook/vis/ktile/handler.py | irina694/earth-science-notebook | 1,076 | 12747210 | <filename>geonotebook/vis/ktile/handler.py
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime, timedelta
import json
from ModestMaps.Core import Coordinate
from notebook.base.handlers import IPythonHandler
from tornado import concurrent, ioloop
from tornado import gen
from tornado import web
from .utils import serialize_config, serialize_layer
class KTileAsyncClient(object):
__instance = None
def __new__(cls, *args, **kwargs):
if cls.__instance is None:
cls.__instance = super(
KTileAsyncClient, cls).__new__(cls, *args, **kwargs)
return cls.__instance
def __init__(self):
self.executor = ThreadPoolExecutor(max_workers=4)
self.io_loop = ioloop.IOLoop.current()
@concurrent.run_on_executor
def getTileResponse(self, layer, coord, extension):
return layer.getTileResponse(coord, extension)
class KtileHandler(IPythonHandler):
def check_xsrf_cookie(self):
# TODO: Find a way to correctly communicate XSRF secret to
        # the kernel so ingest requests can be properly authenticated
pass
def initialize(self, ktile_config_manager):
self.ktile_config_manager = ktile_config_manager
try:
if self.request.headers["Content-Type"].lower().startswith(
"application/json"):
try:
body = self.request.body.decode('utf-8')
except AttributeError:
body = self.request.body
self.request.json = json.loads(body)
except Exception:
self.request.json = None
def post(self, kernel_id):
# Note: needs paramater validation
kwargs = {} if self.request.json is None else self.request.json
self.ktile_config_manager.add_config(kernel_id, **kwargs)
self.log.info("Created config for {}".format(kernel_id))
self.finish()
def delete(self, kernel_id):
try:
del self.ktile_config_manager[kernel_id]
except KeyError:
raise web.HTTPError(404, u'Kernel %s not found' % kernel_id)
def get(self, kernel_id, **kwargs):
try:
config = self.ktile_config_manager[kernel_id]
except KeyError:
raise web.HTTPError(404, u'Kernel %s not found' % kernel_id)
self.finish(serialize_config(config))
class KtileLayerHandler(IPythonHandler):
def check_xsrf_cookie(self):
# TODO: Find a way to correctly communicate XSRF secret to
        # the kernel so ingest requests can be properly authenticated
pass
def initialize(self, ktile_config_manager):
self.ktile_config_manager = ktile_config_manager
def prepare(self):
try:
if self.request.headers["Content-Type"].lower().startswith(
"application/json"):
try:
body = self.request.body.decode('utf-8')
except AttributeError:
body = self.request.body
self.request.json = json.loads(body)
except Exception:
self.request.json = None
def post(self, kernel_id, layer_name):
        # Note: needs parameter validation
try:
self.ktile_config_manager.add_layer(
kernel_id, layer_name, self.request.json)
self.finish()
except Exception:
import sys
import traceback
t, v, tb = sys.exc_info()
self.log.error(''.join(traceback.format_exception(t, v, tb)))
self.clear()
self.set_status(500)
self.finish({'error': traceback.format_exception(t, v, tb)})
def get(self, kernel_id, layer_name, **kwargs):
try:
config = self.ktile_config_manager[kernel_id]
except KeyError:
raise web.HTTPError(400, u'Kernel %s not found' % kernel_id)
try:
layer = config.layers[layer_name]
except KeyError:
raise web.HTTPError(404, u'Layer %s not found' % layer_name)
self.finish(serialize_layer(layer))
class KtileTileHandler(IPythonHandler):
def initialize(self, ktile_config_manager):
self.client = KTileAsyncClient()
self.ktile_config_manager = ktile_config_manager
@gen.coroutine
def get(self, kernel_id, layer_name, x, y, z, extension, **kwargs):
config = self.ktile_config_manager[kernel_id]
layer = config.layers[layer_name]
coord = Coordinate(int(y), int(x), int(z))
# To run synchronously:
# status_code, headers, content = layer.getTileResponse(
# coord, extension)
status_code, headers, content = yield self.client.getTileResponse(
layer, coord, extension)
if layer.max_cache_age is not None:
expires = datetime.utcnow() + timedelta(
seconds=layer.max_cache_age)
headers['Expires'] = expires.strftime('%a %d %b %Y %H:%M:%S GMT')
headers['Cache-Control'] = 'public, max-age=%d' \
% layer.max_cache_age
else:
headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
headers['Pragma'] = 'no-cache'
headers['Expires'] = '0'
# Force allow cross origin access
headers["Access-Control-Allow-Origin"] = "*"
# Fill tornado handler properties with ktile code/header/content
for k, v in headers.items():
self.set_header(k, v)
self.set_status(status_code)
self.write(content)
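# --- Illustrative sketch (not part of the original module) ---
# Minimal, self-contained illustration of the same pattern KTileAsyncClient
# uses above: offload a blocking call onto a ThreadPoolExecutor from a
# coroutine via tornado's run_on_executor. BlockingTileSource and slow_render
# are hypothetical stand-ins for a KTile layer, not real KTile APIs.
class BlockingTileSource(object):
    executor = ThreadPoolExecutor(max_workers=2)
    io_loop = ioloop.IOLoop.current()
    @concurrent.run_on_executor
    def slow_render(self, x, y, z):
        # Pretend this is an expensive, blocking tile render.
        return "tile %d/%d/%d" % (z, x, y)
@gen.coroutine
def _demo_fetch_tile(source):
    # Yields control while the render runs on the executor thread.
    content = yield source.slow_render(1, 2, 3)
    raise gen.Return(content)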
|
jnpr/openclos/trapd.py | rohitt29/OpenClos | 114 | 12747233 | <gh_stars>100-1000
'''
Created on Nov. 06, 2014
@author: yunli
'''
from pysnmp.carrier.asynsock.dispatch import AsynsockDispatcher
from pysnmp.carrier.asynsock.dgram import udp
from pyasn1.codec.ber import decoder
from pysnmp.proto import api
from threading import Thread, Event
import logging
import util
import signal
import sys
import subprocess
import concurrent.futures
from devicePlugin import TwoStageConfigurator
from propLoader import OpenClosProperty, loadLoggingConfig
from exception import TrapDaemonError
moduleName = 'trapd'
loadLoggingConfig(appName = moduleName)
logger = logging.getLogger(moduleName)
DEFAULT_HOST = "0.0.0.0"
DEFAULT_PORT = 20162
DEFAULT_MAX_THREADS = 10
trapReceiver = None
def onTrap(transportDispatcher, transportDomain, transportAddress, wholeMsg):
# don't even log the trap PDU unless we are at DEBUG level
if logger.isEnabledFor(logging.DEBUG):
while wholeMsg:
msgVer = int(api.decodeMessageVersion(wholeMsg))
if msgVer in api.protoModules:
pMod = api.protoModules[msgVer]
else:
logger.error('Unsupported SNMP version %s' % msgVer)
return
reqMsg, wholeMsg = decoder.decode(
wholeMsg, asn1Spec=pMod.Message(),
)
logger.info('Notification message from %s:%s ' % (
transportAddress[0], transportAddress[1]
)
)
reqPDU = pMod.apiMessage.getPDU(reqMsg)
if reqPDU.isSameTypeWith(pMod.TrapPDU()):
if msgVer == api.protoVersion1:
logger.debug('Enterprise: %s' % (
pMod.apiTrapPDU.getEnterprise(reqPDU).prettyPrint()
)
)
logger.debug('Agent Address: %s' % (
pMod.apiTrapPDU.getAgentAddr(reqPDU).prettyPrint()
)
)
logger.debug('Generic Trap: %s' % (
pMod.apiTrapPDU.getGenericTrap(reqPDU).prettyPrint()
)
)
logger.debug('Specific Trap: %s' % (
pMod.apiTrapPDU.getSpecificTrap(reqPDU).prettyPrint()
)
)
logger.debug('Uptime: %s' % (
pMod.apiTrapPDU.getTimeStamp(reqPDU).prettyPrint()
)
)
varBinds = pMod.apiTrapPDU.getVarBindList(reqPDU)
else:
varBinds = pMod.apiPDU.getVarBindList(reqPDU)
logger.debug('Var-binds:')
for oid, val in varBinds:
logger.debug('%s = %s' % (oid.prettyPrint(), val.prettyPrint()))
# start the 2-stage configuration in a separate thread
if trapReceiver is not None:
# execute 2-stage configuration callback if there is one configured in openclos.yaml
callback = trapReceiver.twoStageConfigurationCallback
if callback is not None and len(callback) > 0:
proc = subprocess.Popen(callback, shell=True)
returnValue = proc.wait()
if returnValue != 0:
# 2-stage configuration callback returns non-zero value indicating we SHOULD NOT continue
logger.debug('twoStageConfigurationCallback "%s" returns %d, trap ignored' % (callback, returnValue))
return
configurator = TwoStageConfigurator(deviceIp=transportAddress[0], stopEvent=trapReceiver.stopEvent)
trapReceiver.executor.submit(configurator.start2StageConfiguration)
class TrapReceiver():
def __init__(self, conf = {}):
if conf is None or any(conf) == False:
self.__conf = OpenClosProperty(appName = moduleName).getProperties()
else:
self.__conf = conf
# default value
self.target = DEFAULT_HOST
self.port = DEFAULT_PORT
# validate required parameter
if 'snmpTrap' in self.__conf and 'openclos_trap_group' in self.__conf['snmpTrap'] and 'target' in self.__conf['snmpTrap']['openclos_trap_group']:
self.target = self.__conf['snmpTrap']['openclos_trap_group']['target']
else:
logger.info("snmpTrap:openclos_trap_group:target is missing from configuration. using %s" % (self.target))
if 'snmpTrap' in self.__conf and 'openclos_trap_group' in self.__conf['snmpTrap'] and 'port' in self.__conf['snmpTrap']['openclos_trap_group']:
self.port = int(self.__conf['snmpTrap']['openclos_trap_group']['port'])
else:
logger.info("snmpTrap:openclos_trap_group:port is missing from configuration. using %d" % (self.port))
if 'snmpTrap' in self.__conf and 'threadCount' in self.__conf['snmpTrap']:
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers = self.__conf['snmpTrap']['threadCount'])
else:
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers = DEFAULT_MAX_THREADS)
# event to stop from sleep
self.stopEvent = Event()
self.twoStageConfigurationCallback = util.getTwoStageConfigurationCallback(self.__conf)
def threadFunction(self):
self.transportDispatcher = AsynsockDispatcher()
self.transportDispatcher.registerRecvCbFun(onTrap)
# UDP/IPv4
self.transportDispatcher.registerTransport(
udp.domainName, udp.UdpSocketTransport().openServerMode((self.target, self.port))
)
self.transportDispatcher.jobStarted(1)
try:
# Dispatcher will never finish as job#1 never reaches zero
self.transportDispatcher.runDispatcher()
except Exception as exc:
logger.error("Encounted error '%s' on trap receiver %s:%d" % (exc, self.target, self.port))
self.transportDispatcher.closeDispatcher()
raise TrapDaemonError("Trap receiver %s:%d" % (self.target, self.port), exc)
else:
self.transportDispatcher.closeDispatcher()
def start(self):
logger.info("Starting trap receiver...")
self.thread = Thread(target=self.threadFunction, args=())
self.thread.start()
logger.info("Trap receiver started on %s:%d" % (self.target, self.port))
def stop(self):
logger.info("Stopping trap receiver...")
self.stopEvent.set()
self.executor.shutdown()
self.transportDispatcher.jobFinished(1)
self.thread.join()
logger.info("Trap receiver stopped")
def trap_receiver_signal_handler(signal, frame):
logger.debug("received signal %d" % signal)
trapReceiver.stop()
sys.exit(0)
def main():
signal.signal(signal.SIGINT, trap_receiver_signal_handler)
signal.signal(signal.SIGTERM, trap_receiver_signal_handler)
global trapReceiver
trapReceiver = TrapReceiver()
trapReceiver.start()
# Note we have to do this in order for signal to be properly caught by main thread
# We need to do the similar thing when we integrate this into sampleApplication.py
while True:
signal.pause()
if __name__ == '__main__':
main() |
CalibPPS/ESProducers/test/ppsTimingCalibrationAnalyzer_cfg.py | ckamtsikis/cmssw | 852 | 12747238 | <filename>CalibPPS/ESProducers/test/ppsTimingCalibrationAnalyzer_cfg.py
import FWCore.ParameterSet.Config as cms
process = cms.Process('test')
# minimum logging
process.MessageLogger = cms.Service("MessageLogger",
cerr = cms.untracked.PSet(
enable = cms.untracked.bool(False)
),
cout = cms.untracked.PSet(
enable = cms.untracked.bool(True),
threshold = cms.untracked.string('INFO')
)
)
process.source = cms.Source('EmptyIOVSource',
timetype = cms.string('runnumber'),
firstValue = cms.uint64(1),
lastValue = cms.uint64(1),
interval = cms.uint64(1)
)
# load calibrations from database
process.load('CondCore.CondDB.CondDB_cfi')
process.CondDB.connect = 'sqlite_file:ppsDiamondTiming_calibration.sqlite' # SQLite input
process.PoolDBESSource = cms.ESSource('PoolDBESSource',
process.CondDB,
DumpStats = cms.untracked.bool(True),
toGet = cms.VPSet(
cms.PSet(
record = cms.string('PPSTimingCalibrationRcd'),
tag = cms.string('PPSDiamondTimingCalibration')
)
)
)
process.ppsTimingCalibrationAnalyzer = cms.EDAnalyzer('PPSTimingCalibrationAnalyzer')
process.path = cms.Path(
process.ppsTimingCalibrationAnalyzer
)
|
supervisor/dbus/network/interface.py | pnjongang/supervisor | 597 | 12747257 | <filename>supervisor/dbus/network/interface.py
"""NetworkInterface object for Network Manager."""
from typing import Optional
from ...utils.dbus import DBus
from ..const import (
DBUS_ATTR_ACTIVE_CONNECTION,
DBUS_ATTR_DEVICE_INTERFACE,
DBUS_ATTR_DEVICE_TYPE,
DBUS_ATTR_DRIVER,
DBUS_ATTR_MANAGED,
DBUS_IFACE_DEVICE,
DBUS_NAME_NM,
DBUS_OBJECT_BASE,
DeviceType,
)
from ..interface import DBusInterfaceProxy
from .connection import NetworkConnection
from .setting import NetworkSetting
from .wireless import NetworkWireless
class NetworkInterface(DBusInterfaceProxy):
"""NetworkInterface object represents Network Manager Device objects.
https://developer.gnome.org/NetworkManager/stable/gdbus-org.freedesktop.NetworkManager.Device.html
"""
def __init__(self, nm_dbus: DBus, object_path: str) -> None:
"""Initialize NetworkConnection object."""
self.object_path = object_path
self.properties = {}
self.primary = False
self._connection: Optional[NetworkConnection] = None
self._settings: Optional[NetworkSetting] = None
self._wireless: Optional[NetworkWireless] = None
self._nm_dbus: DBus = nm_dbus
@property
def name(self) -> str:
"""Return interface name."""
return self.properties[DBUS_ATTR_DEVICE_INTERFACE]
@property
def type(self) -> int:
"""Return interface type."""
return self.properties[DBUS_ATTR_DEVICE_TYPE]
@property
def driver(self) -> str:
"""Return interface driver."""
return self.properties[DBUS_ATTR_DRIVER]
@property
def managed(self) -> bool:
"""Return interface driver."""
return self.properties[DBUS_ATTR_MANAGED]
@property
def connection(self) -> Optional[NetworkConnection]:
"""Return the connection used for this interface."""
return self._connection
@property
def settings(self) -> Optional[NetworkSetting]:
"""Return the connection settings used for this interface."""
return self._settings
@property
def wireless(self) -> Optional[NetworkWireless]:
"""Return the wireless data for this interface."""
return self._wireless
async def connect(self) -> None:
"""Get device information."""
self.dbus = await DBus.connect(DBUS_NAME_NM, self.object_path)
self.properties = await self.dbus.get_properties(DBUS_IFACE_DEVICE)
# Abort if device is not managed
if not self.managed:
return
# If active connection exists
if self.properties[DBUS_ATTR_ACTIVE_CONNECTION] != DBUS_OBJECT_BASE:
self._connection = NetworkConnection(
self.properties[DBUS_ATTR_ACTIVE_CONNECTION]
)
await self._connection.connect()
# Attach settings
if self.connection and self.connection.setting_object != DBUS_OBJECT_BASE:
self._settings = NetworkSetting(self.connection.setting_object)
await self._settings.connect()
# Wireless
if self.type == DeviceType.WIRELESS:
self._wireless = NetworkWireless(self.object_path)
await self._wireless.connect()
|
src/apifuzz.py | Boyploy/IMF | 108 | 12747259 | # Copyright (c) 2017 <NAME> and <NAME> at SoftSec, KAIST
#
# See the file LICENCE for copying permission.
import basic
import hook
import utils
import log
import const
import sys
import time
import argparse
import os
from multiprocessing import Pool
from model import Model
class ApiFuzz:
def __init__(self):
self.apis = {}
self.apisets = {}
self.apis= basic.load_apis()
def load_apilog(self, log_fname, limit):
with open(log_fname, 'rb') as f:
data = f.read().split('\n')[:-1]
if len(data) %2 !=0:
data = data[:-1]
idx = 0
apilogs = []
while idx < len(data) and idx < limit*2:
if data[idx][:2] == 'IN':
il = utils.evaluate(data[idx][2:])
else:
utils.error('load_apilog: parse IN error')
if data[idx+1][:3] == 'OUT' :
ol = utils.evaluate(data[idx+1][3:])
else:
utils.error('load_apilog: parse OUT error')
apilog = log.ApiLog(self.apis[il[0]], il, ol)
apilogs.append(apilog)
idx+=2
return apilogs
def make_model(self, fnames, limit, path, core):
apisets = utils.multiproc(self.load_apilog_multi(limit), fnames, core)
model = Model(apisets)
with open(path, 'wb') as f:
code = model.fuzz(const.CODE_HEAD, const.CODE_TAIL)
f.write(code)
def load_apilog_multi(self, limit):
def func(fname):
apiseq = self.load_apilog(fname, limit)
return apiseq
return func
def show_help():
print './gen-fuzz [filtered logs path] [output(fuzzer code) path] [# of core]'
def get_limit(logs):
limit = None
for log in logs:
with open(log, 'rb') as f:
n = (len(f.read().split('\n'))-1)/2
if limit == None :
limit = n
elif limit != n:
utils.error('Invalid triaged logs')
return limit
if __name__== '__main__':
if len(sys.argv) != 4:
show_help()
sys.exit(-1)
fuzz = ApiFuzz()
log_dir = sys.argv[1]
logs = []
for fname in os.listdir(log_dir):
logs.append(os.path.join(log_dir, fname))
limit = get_limit(logs)
core = int(sys.argv[3])
fuzz.make_model(logs, limit, sys.argv[2], core)
|
src/app/beer_garden/api/http/handlers/v1/admin.py | ExpressHermes/beer-garden | 230 | 12747272 | # -*- coding: utf-8 -*-
from brewtils.errors import ModelValidationError
from brewtils.models import Operation
from brewtils.schema_parser import SchemaParser
from beer_garden.api.http.base_handler import BaseHandler
class AdminAPI(BaseHandler):
async def patch(self):
"""
---
summary: Initiate administrative actions
description: |
The body of the request needs to contain a set of instructions
detailing the operations to perform.
Currently the supported operations are `rescan`:
```JSON
[
{ "operation": "rescan" }
]
```
* Will remove from the registry and database any currently stopped
            plugins whose directory has been removed.
* Will add and start any new plugin directories.
And reloading the plugin logging configuration:
```JSON
[
{
"operation": "reload",
"path": "/config/logging/plugin"
}
]
```
parameters:
- name: patch
in: body
required: true
description: Instructions for operations
schema:
$ref: '#/definitions/Patch'
responses:
204:
description: Operation successfully initiated
50x:
$ref: '#/definitions/50xError'
tags:
- Admin
"""
operations = SchemaParser.parse_patch(
self.request.decoded_body, many=True, from_string=True
)
for op in operations:
if op.operation == "rescan":
await self.client(Operation(operation_type="RUNNER_RESCAN"))
elif op.operation == "reload":
if op.path == "/config/logging/plugin":
await self.client(Operation(operation_type="PLUGIN_LOG_RELOAD"))
else:
raise ModelValidationError(f"Unsupported path '{op.path}'")
else:
raise ModelValidationError(f"Unsupported operation '{op.operation}'")
self.set_status(204)
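# --- Illustrative sketch (not part of the original handler) ---
# How a PATCH body like the documented "rescan" example gets parsed into patch
# operations; the JSON literal mirrors the docstring above, and
# _demo_parse_patch itself is a made-up helper, not part of beer-garden.
def _demo_parse_patch():
    body = '[{"operation": "rescan"}]'
    operations = SchemaParser.parse_patch(body, many=True, from_string=True)
    return [op.operation for op in operations]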
|
corehq/apps/user_importer/helpers.py | andyasne/commcare-hq | 471 | 12747277 | from dimagi.utils.parsing import string_to_boolean
from corehq.apps.custom_data_fields.models import PROFILE_SLUG
from corehq.apps.user_importer.exceptions import UserUploadError
from corehq.apps.users.audit.change_messages import UserChangeMessage
from corehq.apps.users.model_log import UserModelAction
from corehq.apps.users.util import log_user_change
def spec_value_to_boolean_or_none(user_spec_dict, key):
value = user_spec_dict.get(key, None)
if value and isinstance(value, str):
return string_to_boolean(value)
elif isinstance(value, bool):
return value
else:
return None
class UserChangeLogger(object):
"""
User change logger to record
- changes to user properties
- text messages for changes
- useful info for changes to associated data models like role/locations
"""
def __init__(self, upload_domain, user_domain, user, is_new_user, changed_by_user, changed_via,
upload_record_id, user_domain_required_for_log=True):
self.upload_domain = upload_domain
self.user_domain = user_domain
self.user = user
self.is_new_user = is_new_user
self.changed_by_user = changed_by_user
self.changed_via = changed_via
self.upload_record_id = upload_record_id
self.user_domain_required_for_log = user_domain_required_for_log
if not is_new_user:
self.original_user_doc = self.user.to_json()
else:
self.original_user_doc = None
self.fields_changed = {}
self.change_messages = {}
self._save = False # flag to check if log needs to be saved for updates
def add_changes(self, changes):
"""
Add changes to user properties.
Ignored for new user since the whole user doc is logged for a new user
:param changes: dict of property mapped to it's new value
"""
if self.is_new_user:
return
for name, new_value in changes.items():
if self.original_user_doc[name] != new_value:
self.fields_changed[name] = new_value
self._save = True
def add_change_message(self, message):
"""
        Add change message for a change in a user property that is in the form of a UserChangeMessage
Ignored for new user since the whole user doc is logged for a new user
:param message: text message for the change like 'Password reset' / 'Added as web user to domain foo'
"""
if self.is_new_user:
return
self._update_change_messages(message)
self._save = True
def _update_change_messages(self, change_messages):
for slug in change_messages:
if slug in self.change_messages:
raise UserUploadError(f"Double Entry for {slug}")
self.change_messages.update(change_messages)
def add_info(self, change_message):
"""
        Add change message for a change to the user that is in the form of a UserChangeMessage
"""
self._update_change_messages(change_message)
self._save = True
def save(self):
if self.is_new_user or self._save:
action = UserModelAction.CREATE if self.is_new_user else UserModelAction.UPDATE
fields_changed = None if self.is_new_user else self.fields_changed
log_user_change(
by_domain=self.upload_domain,
for_domain=self.user_domain,
couch_user=self.user,
changed_by_user=self.changed_by_user,
changed_via=self.changed_via,
change_messages=self.change_messages,
action=action,
fields_changed=fields_changed,
bulk_upload_record_id=self.upload_record_id,
for_domain_required_for_log=self.user_domain_required_for_log,
)
class BaseUserImporter(object):
"""
Imports a Web/CommCareUser via bulk importer and also handles the logging
save_log should be called explicitly to save logs, after user is saved
"""
def __init__(self, upload_domain, user_domain, user, upload_user, is_new_user, via, upload_record_id):
"""
:param upload_domain: domain on which the bulk upload is being done
:param user_domain: domain user is being updated for
:param user: user to update
:param upload_user: user doing the upload
:param is_new_user: if user is a new user
:param via: USER_CHANGE_VIA_BULK_IMPORTER
:param upload_record_id: ID of the bulk upload record
"""
self.user_domain = user_domain
self.user = user
self.upload_user = upload_user
self.logger = UserChangeLogger(upload_domain=upload_domain, user_domain=user_domain, user=user,
is_new_user=is_new_user,
changed_by_user=upload_user, changed_via=via,
upload_record_id=upload_record_id)
self.role_updated = False
def update_role(self, role_qualified_id):
user_current_role = self.user.get_role(domain=self.user_domain)
self.role_updated = not (user_current_role
and user_current_role.get_qualified_id() == role_qualified_id)
if self.role_updated:
self.user.set_role(self.user_domain, role_qualified_id)
def save_log(self):
# Tracking for role is done post save to have role setup correctly on save
if self.role_updated:
new_role = self.user.get_role(domain=self.user_domain)
self.logger.add_info(UserChangeMessage.role_change(new_role))
self._include_user_data_changes()
self.logger.save()
def _include_user_data_changes(self):
# ToDo: consider putting just the diff
if self.logger.original_user_doc and self.logger.original_user_doc['user_data'] != self.user.user_data:
self.logger.add_changes({'user_data': self.user.user_data})
class CommCareUserImporter(BaseUserImporter):
def update_password(self, password):
self.user.set_password(password)
self.logger.add_change_message(UserChangeMessage.password_reset())
def update_phone_numbers(self, phone_numbers):
"""
The first item in 'phone_numbers' will be the default
"""
old_user_phone_numbers = self.user.phone_numbers
fmt_phone_numbers = [_fmt_phone(n) for n in phone_numbers]
if any(fmt_phone_numbers):
self.user.set_phone_numbers(fmt_phone_numbers, default_number=fmt_phone_numbers[0])
else:
self.user.set_phone_numbers([])
self._log_phone_number_changes(old_user_phone_numbers, fmt_phone_numbers)
def update_name(self, name):
self.user.set_full_name(str(name))
self.logger.add_changes({'first_name': self.user.first_name, 'last_name': self.user.last_name})
def update_user_data(self, data, uncategorized_data, profile, domain_info):
# Add in existing data. Don't use metadata - we don't want to add profile-controlled fields.
current_profile_id = self.user.user_data.get(PROFILE_SLUG)
for key, value in self.user.user_data.items():
if key not in data:
data[key] = value
if profile:
profile_obj = domain_info.profiles_by_name[profile]
data[PROFILE_SLUG] = profile_obj.id
for key in profile_obj.fields.keys():
self.user.pop_metadata(key)
try:
self.user.update_metadata(data)
except ValueError as e:
raise UserUploadError(str(e))
if uncategorized_data:
self.user.update_metadata(uncategorized_data)
# Clear blank user data so that it can be purged by remove_unused_custom_fields_from_users_task
for key in dict(data, **uncategorized_data):
value = self.user.metadata[key]
if value is None or value == '':
self.user.pop_metadata(key)
new_profile_id = self.user.user_data.get(PROFILE_SLUG)
if new_profile_id and new_profile_id != current_profile_id:
profile_name = domain_info.profile_name_by_id[new_profile_id]
self.logger.add_info(UserChangeMessage.profile_info(new_profile_id, profile_name))
def update_language(self, language):
self.user.language = language
self.logger.add_changes({'language': language})
def update_email(self, email):
self.user.email = email.lower()
self.logger.add_changes({'email': self.user.email})
def update_status(self, is_active):
self.user.is_active = is_active
self.logger.add_changes({'is_active': is_active})
def update_locations(self, location_codes, domain_info):
from corehq.apps.user_importer.importer import (
check_modified_user_loc,
find_location_id,
get_location_from_site_code
)
location_ids = find_location_id(location_codes, domain_info.location_cache)
user_current_primary_location_id = self.user.location_id
locations_updated, primary_loc_removed = check_modified_user_loc(location_ids,
self.user.location_id,
self.user.assigned_location_ids)
if primary_loc_removed:
self.user.unset_location(commit=False)
if locations_updated:
self.user.reset_locations(location_ids, commit=False)
self.logger.add_changes({'assigned_location_ids': location_ids})
if location_ids:
locations = [get_location_from_site_code(code, domain_info.location_cache)
for code in location_codes]
self.logger.add_info(
UserChangeMessage.assigned_locations_info(locations))
else:
self.logger.add_info(UserChangeMessage.assigned_locations_info([]))
# log this after assigned locations are updated, which can re-set primary location
if self.user.location_id != user_current_primary_location_id:
self.logger.add_changes({'location_id': self.user.location_id})
if self.user.location_id:
self.logger.add_info(
UserChangeMessage.primary_location_info(
self.user.get_sql_location(self.user_domain)
)
)
else:
self.logger.add_info(UserChangeMessage.primary_location_removed())
def _log_phone_number_changes(self, old_phone_numbers, new_phone_numbers):
(items_added, items_removed) = find_differences_in_list(
target=new_phone_numbers,
source=old_phone_numbers
)
change_messages = {}
if items_added:
change_messages.update(UserChangeMessage.phone_numbers_added(list(items_added))["phone_numbers"])
if items_removed:
change_messages.update(UserChangeMessage.phone_numbers_removed(list(items_removed))["phone_numbers"])
if change_messages:
self.logger.add_change_message({'phone_numbers': change_messages})
def _fmt_phone(phone_number):
if phone_number and not isinstance(phone_number, str):
phone_number = str(int(phone_number))
return phone_number.lstrip("+")
class WebUserImporter(BaseUserImporter):
def add_to_domain(self, role_qualified_id, location_id):
self.user.add_as_web_user(self.user_domain, role=role_qualified_id, location_id=location_id)
self.role_updated = bool(role_qualified_id)
self.logger.add_info(UserChangeMessage.added_as_web_user(self.user_domain))
if location_id:
self._log_primary_location_info()
def _log_primary_location_info(self):
primary_location = self.user.get_sql_location(self.user_domain)
self.logger.add_info(UserChangeMessage.primary_location_info(primary_location))
def update_primary_location(self, location_id):
current_primary_location_id = get_user_primary_location_id(self.user, self.user_domain)
if location_id:
self.user.set_location(self.user_domain, location_id)
if current_primary_location_id != location_id:
self._log_primary_location_info()
else:
self.user.unset_location(self.user_domain)
# if there was a location before, log that it was cleared
if current_primary_location_id:
self.logger.add_info(UserChangeMessage.primary_location_removed())
def update_locations(self, location_codes, membership, domain_info):
from corehq.apps.user_importer.importer import (
check_modified_user_loc,
find_location_id,
get_location_from_site_code
)
location_ids = find_location_id(location_codes, domain_info.location_cache)
user_current_primary_location_id = membership.location_id
locations_updated, primary_loc_removed = check_modified_user_loc(location_ids,
membership.location_id,
membership.assigned_location_ids)
if primary_loc_removed:
self.user.unset_location(self.user_domain, commit=False)
if locations_updated:
self.user.reset_locations(self.user_domain, location_ids, commit=False)
if location_ids:
locations = [get_location_from_site_code(code, domain_info.location_cache)
for code in location_codes]
else:
locations = []
self.logger.add_info(UserChangeMessage.assigned_locations_info(locations))
# log this after assigned locations are updated, which can re-set primary location
user_updated_primary_location_id = get_user_primary_location_id(self.user, self.user_domain)
if user_updated_primary_location_id != user_current_primary_location_id:
if user_updated_primary_location_id:
self._log_primary_location_info()
else:
self.logger.add_info(UserChangeMessage.primary_location_removed())
def get_user_primary_location_id(user, domain):
primary_location = user.get_sql_location(domain)
if primary_location:
return primary_location.location_id
def get_user_primary_location_name(user, domain):
primary_location = user.get_sql_location(domain)
if primary_location:
return primary_location.name
def find_differences_in_list(target: list, source: list):
"""
Find the differences between 'source' and 'target' and
return (added_items, removed_items)
'added_items': items that are in 'target' but not in 'source'
'removed_items': items that are in 'source' but not 'target'
>>> find_differences_in_list(list_to_compare=[3,4,5,6], reference_list=[1,2,3,5])
({4, 6}, {1, 2})
"""
shared_items = set(target).intersection(source)
added_items = set(target).difference(shared_items)
removed_items = set(source).difference(shared_items)
return added_items, removed_items
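# --- Illustrative sketch (not part of the original module) ---
# Quick self-contained check of find_differences_in_list using the same
# made-up numbers as the docstring example above.
def _demo_find_differences():
    added, removed = find_differences_in_list(target=[3, 4, 5, 6], source=[1, 2, 3, 5])
    assert added == {4, 6}
    assert removed == {1, 2}
    return added, removed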
|
c2nl/config.py | kopf-yhs/ncscos | 131 | 12747282 | """ Implementation of all available options """
from __future__ import print_function
"""Model architecture/optimization options for Seq2seq architecture."""
import argparse
import logging
logger = logging.getLogger(__name__)
# Index of arguments concerning the core model architecture
MODEL_ARCHITECTURE = {
'model_type',
'emsize',
'rnn_type',
'nhid',
'nlayers',
'use_all_enc_layers',
'bidirection',
'src_pos_emb',
'tgt_pos_emb',
'max_relative_pos',
'use_neg_dist',
'd_ff',
'd_k',
'd_v',
'num_head',
'trans_drop',
'n_characters',
'char_emsize',
'filter_size',
'nfilters'
}
SEQ2SEQ_ARCHITECTURE = {
'attn_type',
'coverage_attn',
'copy_attn',
'review_attn',
'force_copy',
'layer_wise_attn',
'split_decoder',
'reuse_copy_attn',
'reload_decoder_state',
'share_decoder_embeddings',
'conditional_decoding'
}
DATA_OPTIONS = {
'use_src_char',
'use_tgt_char',
'use_src_word',
'use_tgt_word',
'max_src_len',
'max_tgt_len',
'src_vocab_size',
'tgt_vocab_size',
'num_train_examples',
'batch_size',
'use_code_type',
'code_tag_type',
'uncase',
'max_characters_per_token',
'dataset_weights'
}
# Index of arguments concerning the model optimizer/training
MODEL_OPTIMIZER = {
'optimizer',
'fix_embeddings',
'learning_rate',
'momentum',
'weight_decay',
'rnn_padding',
'dropout_rnn',
'dropout',
'dropout_emb',
'cuda',
'grad_clipping',
'lr_decay',
'warmup_steps',
'num_epochs',
'parallel'
}
def str2bool(v):
return v.lower() in ('yes', 'true', 't', '1', 'y')
def add_model_args(parser):
parser.register('type', 'bool', str2bool)
# Data options
data = parser.add_argument_group('Data parameters')
data.add_argument('--max_src_len', type=int, default=100,
help='Maximum allowed length for the source sequence')
data.add_argument('--max_tgt_len', type=int, default=50,
help='Maximum allowed length for the target sequence')
data.add_argument('--use_code_type', type='bool', default=False,
help='Use code type as additional feature for feature representations')
data.add_argument('--code_tag_type', type=str, default='subtoken',
help='Use code type as additional feature for feature representations')
# Model architecture
model = parser.add_argument_group('Summary Generator')
model.add_argument('--model_type', type=str, default='rnn',
choices=['rnn', 'transformer'],
help='Model architecture type')
model.add_argument('--emsize', type=int, default=300,
help='Embedding size if embedding_file is not given')
model.add_argument('--rnn_type', type=str, default='LSTM',
help='RNN type: LSTM, GRU')
model.add_argument('--nhid', type=int, default=200,
help='Hidden size of RNN units')
model.add_argument('--bidirection', type='bool', default=True,
help='use bidirectional recurrent unit')
model.add_argument('--nlayers', type=int, default=2,
help='Number of encoding layers')
model.add_argument('--use_all_enc_layers', type='bool', default=False,
help='Use a weighted average of all encoder layers\' '
'representation as the contextual representation')
# Transformer specific params
model.add_argument('--src_pos_emb', type='bool', default=True,
help='Use positional embeddings in encoder')
model.add_argument('--tgt_pos_emb', type='bool', default=True,
help='Use positional embeddings in decoder')
model.add_argument('--max_relative_pos', nargs='+', type=int,
default=0, help='Max value for relative position representations')
model.add_argument('--use_neg_dist', type='bool', default=True,
help='Use negative Max value for relative position representations')
model.add_argument('--d_ff', type=int, default=2048,
help='Number of units in position-wise FFNN')
model.add_argument('--d_k', type=int, default=64,
help='Hidden size of heads in multi-head attention')
model.add_argument('--d_v', type=int, default=64,
help='Hidden size of heads in multi-head attention')
model.add_argument('--num_head', type=int, default=8,
help='Number of heads in Multi-Head Attention')
model.add_argument('--trans_drop', type=float, default=0.2,
help='Dropout for transformer')
model.add_argument('--layer_wise_attn', type='bool', default=False,
help='Use layer-wise attention in Transformer')
# Input representation specific details
model.add_argument('--use_src_char', type='bool', default=False,
help='Use character embedding in the source')
model.add_argument('--use_tgt_char', type='bool', default=False,
help='Use character embedding in the target')
model.add_argument('--use_src_word', type='bool', default=True,
help='Use word embedding in the input')
model.add_argument('--use_tgt_word', type='bool', default=True,
help='Use word embedding in the input')
model.add_argument('--n_characters', type=int, default=260,
help='Character vocabulary size')
model.add_argument('--char_emsize', type=int, default=16,
help='Character embedding size')
model.add_argument('--filter_size', nargs='+', type=int,
default=5, help='Char convolution filter sizes')
model.add_argument('--nfilters', nargs='+', type=int,
default=100, help='Number of char convolution filters')
seq2seq = parser.add_argument_group('Seq2seq Model Specific Params')
seq2seq.add_argument('--attn_type', type=str, default='general',
help='Attention type for the seq2seq [dot, general, mlp]')
seq2seq.add_argument('--coverage_attn', type='bool', default=False,
help='Use coverage attention')
seq2seq.add_argument('--copy_attn', type='bool', default=False,
help='Use copy attention')
seq2seq.add_argument('--review_attn', type='bool', default=False,
help='Use review attention')
seq2seq.add_argument('--force_copy', type='bool', default=False,
help='Apply force copying')
seq2seq.add_argument('--reuse_copy_attn', type='bool', default=False,
help='Reuse encoder attention')
seq2seq.add_argument('--share_decoder_embeddings', type='bool', default=False,
help='Share decoder embeddings weight with softmax layer')
seq2seq.add_argument('--split_decoder', type='bool', default=False,
help='Split the decoder into two for copying and generation')
seq2seq.add_argument('--reload_decoder_state', type=str, default=None,
help='Reload decoder states for the seq2seq')
seq2seq.add_argument('--conditional_decoding', type='bool', default=False,
help='Conditional decoding applied to Seq2seq')
# Optimization details
optim = parser.add_argument_group('Neural QA Reader Optimization')
optim.add_argument('--optimizer', type=str, default='adam',
choices=['sgd', 'adam', 'adamW'],
help='Name of the optimizer')
optim.add_argument('--dropout_emb', type=float, default=0.2,
help='Dropout rate for word embeddings')
optim.add_argument('--dropout_rnn', type=float, default=0.2,
help='Dropout rate for RNN states')
optim.add_argument('--dropout', type=float, default=0.2,
help='Dropout for NN layers')
optim.add_argument('--learning_rate', type=float, default=0.001,
help='Learning rate for the optimizer')
parser.add_argument('--lr_decay', type=float, default=0.99,
help='Decay ratio for learning rate')
optim.add_argument('--grad_clipping', type=float, default=5.0,
help='Gradient clipping')
parser.add_argument('--early_stop', type=int, default=5,
help='Stop training if performance doesn\'t improve')
optim.add_argument('--weight_decay', type=float, default=0,
help='Weight decay factor')
optim.add_argument('--momentum', type=float, default=0,
help='Momentum factor')
optim.add_argument('--fix_embeddings', type='bool', default=True,
help='Keep word embeddings fixed (use pretrained)')
optim.add_argument('--warmup_steps', type=int, default=10000,
help='Number of of warmup steps')
optim.add_argument('--warmup_epochs', type=int, default=0,
help='Number of of warmup steps')
def get_model_args(args):
"""Filter args for model ones.
From a args Namespace, return a new Namespace with *only* the args specific
to the model architecture or optimization. (i.e. the ones defined here.)
"""
global MODEL_ARCHITECTURE, MODEL_OPTIMIZER, SEQ2SEQ_ARCHITECTURE, DATA_OPTIONS
required_args = MODEL_ARCHITECTURE | MODEL_OPTIMIZER | SEQ2SEQ_ARCHITECTURE | DATA_OPTIONS
arg_values = {k: v for k, v in vars(args).items() if k in required_args}
return argparse.Namespace(**arg_values)
def override_model_args(old_args, new_args):
"""Set args to new parameters.
Decide which model args to keep and which to override when resolving a set
of saved args and new args.
We keep the new optimization or RL setting, and leave the model architecture alone.
"""
global MODEL_OPTIMIZER
old_args, new_args = vars(old_args), vars(new_args)
for k in old_args.keys():
if k in new_args and old_args[k] != new_args[k]:
if k in MODEL_OPTIMIZER:
logger.info('Overriding saved %s: %s --> %s' %
(k, old_args[k], new_args[k]))
old_args[k] = new_args[k]
else:
logger.info('Keeping saved %s: %s' % (k, old_args[k]))
return argparse.Namespace(**old_args)
def add_new_model_args(old_args, new_args):
"""Set args to new parameters.
Decide which model args to keep and which to override when resolving a set
of saved args and new args.
We keep the new optimization or RL setting, and leave the model architecture alone.
"""
global ADVANCED_OPTIONS
old_args, new_args = vars(old_args), vars(new_args)
for k in new_args.keys():
if k not in old_args:
if k in ADVANCED_OPTIONS:
logger.info('Adding arg %s: %s' % (k, new_args[k]))
old_args[k] = new_args[k]
return argparse.Namespace(**old_args)
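# --- Illustrative sketch (not part of the original module) ---
# Hedged example of how these helpers fit together: build a parser with the
# model arguments, parse the built-in defaults, then filter the namespace down
# to the model/optimizer keys. The empty argv list and the helper name are
# demo-only assumptions.
def _demo_config_roundtrip():
    demo_parser = argparse.ArgumentParser('config demo')
    add_model_args(demo_parser)
    demo_args = demo_parser.parse_args([])      # rely on the declared defaults
    model_args = get_model_args(demo_args)      # keep only the whitelisted keys
    return sorted(vars(model_args).keys())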
|
tools/mkowners/mkowners.py | txl0591/grpc | 117 | 12747287 | #!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import operator
import os
import re
import subprocess
#
# Find the root of the git tree
#
git_root = (subprocess
.check_output(['git', 'rev-parse', '--show-toplevel'])
.decode('utf-8')
.strip())
#
# Parse command line arguments
#
default_out = os.path.join(git_root, '.github', 'CODEOWNERS')
argp = argparse.ArgumentParser('Generate .github/CODEOWNERS file')
argp.add_argument('--out', '-o',
type=str,
default=default_out,
help='Output file (default %s)' % default_out)
args = argp.parse_args()
#
# Walk git tree to locate all OWNERS files
#
owners_files = [os.path.join(root, 'OWNERS')
for root, dirs, files in os.walk(git_root)
if 'OWNERS' in files]
#
# Parse owners files
#
Owners = collections.namedtuple('Owners', 'parent directives dir')
Directive = collections.namedtuple('Directive', 'who globs')
def parse_owners(filename):
with open(filename) as f:
src = f.read().splitlines()
parent = True
directives = []
for line in src:
line = line.strip()
# line := directive | comment
if not line: continue
if line[0] == '#': continue
# it's a directive
directive = None
if line == 'set noparent':
parent = False
elif line == '*':
directive = Directive(who='*', globs=[])
elif ' ' in line:
(who, globs) = line.split(' ', 1)
globs_list = [glob
for glob in globs.split(' ')
if glob]
directive = Directive(who=who, globs=globs_list)
else:
directive = Directive(who=line, globs=[])
if directive:
directives.append(directive)
return Owners(parent=parent,
directives=directives,
dir=os.path.relpath(os.path.dirname(filename), git_root))
owners_data = sorted([parse_owners(filename)
for filename in owners_files],
key=operator.attrgetter('dir'))
#
# Modify owners so that parented OWNERS files point to the actual
# Owners tuple with their parent field
#
new_owners_data = []
for owners in owners_data:
if owners.parent == True:
best_parent = None
best_parent_score = None
for possible_parent in owners_data:
if possible_parent is owners: continue
rel = os.path.relpath(owners.dir, possible_parent.dir)
# '..' ==> we had to walk up from possible_parent to get to owners
# ==> not a parent
if '..' in rel: continue
depth = len(rel.split(os.sep))
if not best_parent or depth < best_parent_score:
best_parent = possible_parent
best_parent_score = depth
if best_parent:
owners = owners._replace(parent = best_parent.dir)
else:
owners = owners._replace(parent = None)
new_owners_data.append(owners)
owners_data = new_owners_data
#
# In bottom to top order, process owners data structures to build up
# a CODEOWNERS file for GitHub
#
def full_dir(rules_dir, sub_path):
return os.path.join(rules_dir, sub_path) if rules_dir != '.' else sub_path
# glob using git
gg_cache = {}
def git_glob(glob):
global gg_cache
if glob in gg_cache: return gg_cache[glob]
r = set(subprocess
.check_output(['git', 'ls-files', os.path.join(git_root, glob)])
.decode('utf-8')
.strip()
.splitlines())
gg_cache[glob] = r
return r
def expand_directives(root, directives):
globs = collections.OrderedDict()
# build a table of glob --> owners
for directive in directives:
for glob in directive.globs or ['**']:
if glob not in globs:
globs[glob] = []
if directive.who not in globs[glob]:
globs[glob].append(directive.who)
# expand owners for intersecting globs
sorted_globs = sorted(globs.keys(),
key=lambda g: len(git_glob(full_dir(root, g))),
reverse=True)
out_globs = collections.OrderedDict()
for glob_add in sorted_globs:
who_add = globs[glob_add]
pre_items = [i for i in out_globs.items()]
out_globs[glob_add] = who_add.copy()
for glob_have, who_have in pre_items:
files_add = git_glob(full_dir(root, glob_add))
files_have = git_glob(full_dir(root, glob_have))
intersect = files_have.intersection(files_add)
if intersect:
for f in sorted(files_add): # sorted to ensure merge stability
if f not in intersect:
out_globs[os.path.relpath(f, start=root)] = who_add
for who in who_have:
if who not in out_globs[glob_add]:
out_globs[glob_add].append(who)
return out_globs
def add_parent_to_globs(parent, globs, globs_dir):
if not parent: return
for owners in owners_data:
if owners.dir == parent:
owners_globs = expand_directives(owners.dir, owners.directives)
for oglob, oglob_who in owners_globs.items():
for gglob, gglob_who in globs.items():
files_parent = git_glob(full_dir(owners.dir, oglob))
files_child = git_glob(full_dir(globs_dir, gglob))
intersect = files_parent.intersection(files_child)
gglob_who_orig = gglob_who.copy()
if intersect:
for f in sorted(files_child): # sorted to ensure merge stability
if f not in intersect:
who = gglob_who_orig.copy()
globs[os.path.relpath(f, start=globs_dir)] = who
for who in oglob_who:
if who not in gglob_who:
gglob_who.append(who)
add_parent_to_globs(owners.parent, globs, globs_dir)
return
assert(False)
todo = owners_data.copy()
done = set()
with open(args.out, 'w') as out:
out.write('# Auto-generated by the tools/mkowners/mkowners.py tool\n')
out.write('# Uses OWNERS files in different modules throughout the\n')
out.write('# repository as the source of truth for module ownership.\n')
written_globs = []
while todo:
head, *todo = todo
if head.parent and not head.parent in done:
todo.append(head)
continue
globs = expand_directives(head.dir, head.directives)
add_parent_to_globs(head.parent, globs, head.dir)
for glob, owners in globs.items():
skip = False
for glob1, owners1, dir1 in reversed(written_globs):
files = git_glob(full_dir(head.dir, glob))
files1 = git_glob(full_dir(dir1, glob1))
intersect = files.intersection(files1)
if files == intersect:
if sorted(owners) == sorted(owners1):
skip = True # nothing new in this rule
break
elif intersect:
# continuing would cause a semantic change since some files are
# affected differently by this rule and CODEOWNERS is order dependent
break
if not skip:
out.write('/%s %s\n' % (
full_dir(head.dir, glob), ' '.join(owners)))
written_globs.append((glob, owners, head.dir))
done.add(head.dir)
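# --- Illustrative sketch (not part of the original script) ---
# Rough demonstration of the OWNERS grammar that parse_owners() accepts, using
# a throwaway temporary file; the usernames and globs below are made up and the
# helper is never called by the script itself.
def _demo_parse_owners():
    import tempfile
    demo_lines = [
        '# comments and blank lines are ignored',
        'set noparent',
        '*',
        '@someuser *.py docs/*',
        '@anotheruser',
    ]
    with tempfile.NamedTemporaryFile('w', suffix='OWNERS', delete=False) as f:
        f.write('\n'.join(demo_lines) + '\n')
        demo_path = f.name
    return parse_owners(demo_path)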
|
third_party/boto/manage/test_manage.py | stdft112/depot_tools | 2,151 | 12747291 | from boto.manage.server import Server
from boto.manage.volume import Volume
import time
print '--> Creating New Volume'
volume = Volume.create()
print volume
print '--> Creating New Server'
server_list = Server.create()
server = server_list[0]
print server
print '----> Waiting for Server to start up'
while server.status != 'running':
print '*'
time.sleep(10)
print '----> Server is running'
print '--> Run "df -k" on Server'
status = server.run('df -k')
print status[1]
print '--> Now run volume.make_ready to make the volume ready to use on server'
volume.make_ready(server)
print '--> Run "df -k" on Server'
status = server.run('df -k')
print status[1]
print '--> Do an "ls -al" on the new filesystem'
status = server.run('ls -al %s' % volume.mount_point)
print status[1]
|
tests/test_snapshot.py | kklein/icontract | 244 | 12747300 | # pylint: disable=missing-docstring
# pylint: disable=invalid-name
# pylint: disable=unnecessary-lambda
# pylint: disable=unused-argument
# pylint: disable=no-self-use
import textwrap
import unittest
from typing import List, Optional # pylint: disable=unused-import
import icontract
import tests.error
class TestOK(unittest.TestCase):
def test_without_argument(self) -> None:
z = [1]
@icontract.snapshot(lambda: z[:], name="z")
@icontract.ensure(lambda OLD, val: OLD.z + [val] == z)
def some_func(val: int) -> None:
z.append(val)
some_func(2)
def test_with_name_same_for_single_argument(self) -> None:
@icontract.snapshot(lambda lst: lst[:])
@icontract.ensure(lambda OLD, val, lst: OLD.lst + [val] == lst)
def some_func(lst: List[int], val: int) -> None:
lst.append(val)
# Expected to pass
some_func([1], 2)
def test_with_custom_name_for_single_argument(self) -> None:
@icontract.snapshot(lambda lst: len(lst), name="len_lst")
@icontract.ensure(lambda OLD, val, lst: OLD.len_lst + 1 == len(lst))
def some_func(lst: List[int], val: int) -> None:
lst.append(val)
# Expected to pass
some_func([1], 2)
def test_with_multiple_arguments(self) -> None:
@icontract.snapshot(lambda lst_a, lst_b: set(lst_a).union(lst_b), name="union")
@icontract.ensure(lambda OLD, lst_a, lst_b: set(lst_a).union(lst_b) == OLD.union)
def some_func(lst_a: List[int], lst_b: List[int]) -> None:
pass
# Expected to pass
some_func(lst_a=[1, 2], lst_b=[3, 4])
class TestViolation(unittest.TestCase):
def test_with_name_same_as_argument(self) -> None:
@icontract.snapshot(lambda lst: lst[:])
@icontract.ensure(lambda OLD, val, lst: OLD.lst + [val] == lst)
def some_func(lst: List[int], val: int) -> None:
lst.append(val)
lst.append(1984)
violation_error = None # type: Optional[icontract.ViolationError]
try:
some_func([1], 2)
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual(
textwrap.dedent("""\
OLD.lst + [val] == lst:
OLD was a bunch of OLD values
OLD.lst was [1]
lst was [1, 2, 1984]
result was None
val was 2"""), tests.error.wo_mandatory_location(str(violation_error)))
def test_with_custom_name(self) -> None:
@icontract.snapshot(lambda lst: len(lst), name="len_lst")
@icontract.ensure(lambda OLD, val, lst: OLD.len_lst + 1 == len(lst))
def some_func(lst: List[int], val: int) -> None:
lst.append(val)
lst.append(1984)
violation_error = None # type: Optional[icontract.ViolationError]
try:
some_func([1], 2)
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual(
textwrap.dedent("""\
OLD.len_lst + 1 == len(lst):
OLD was a bunch of OLD values
OLD.len_lst was 1
len(lst) was 3
lst was [1, 2, 1984]
result was None
val was 2"""), tests.error.wo_mandatory_location(str(violation_error)))
def test_with_multiple_arguments(self) -> None:
@icontract.snapshot(lambda lst_a, lst_b: set(lst_a).union(lst_b), name="union")
@icontract.ensure(lambda OLD, lst_a, lst_b: set(lst_a).union(lst_b) == OLD.union)
def some_func(lst_a: List[int], lst_b: List[int]) -> None:
lst_a.append(1984) # bug
violation_error = None # type: Optional[icontract.ViolationError]
try:
some_func(lst_a=[1, 2], lst_b=[3, 4])
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual(
textwrap.dedent('''\
set(lst_a).union(lst_b) == OLD.union:
OLD was a bunch of OLD values
OLD.union was {1, 2, 3, 4}
lst_a was [1, 2, 1984]
lst_b was [3, 4]
result was None
set(lst_a) was {1, 2, 1984}
set(lst_a).union(lst_b) was {1, 2, 3, 4, 1984}'''),
tests.error.wo_mandatory_location(str(violation_error)))
class TestInvalid(unittest.TestCase):
def test_missing_snapshot_but_old_in_postcondition(self) -> None:
@icontract.ensure(lambda OLD, val, lst: OLD.len_lst + 1 == len(lst))
def some_func(lst: List[int], val: int) -> None:
lst.append(val)
type_error = None # type: Optional[TypeError]
try:
some_func([1], 2)
except TypeError as err:
type_error = err
self.assertIsNotNone(type_error)
self.assertEqual("The argument(s) of the contract condition have not been set: ['OLD']. "
"Does the original function define them? Did you supply them in the call? "
"Did you decorate the function with a snapshot to capture OLD values?",
tests.error.wo_mandatory_location(str(type_error)))
def test_conflicting_snapshots_with_argument_name(self) -> None:
value_error = None # type: Optional[ValueError]
try:
# pylint: disable=unused-variable
@icontract.snapshot(lambda lst: lst[:])
@icontract.snapshot(lambda lst: lst[:])
@icontract.ensure(lambda OLD, val, lst: len(OLD.lst) + 1 == len(lst))
def some_func(lst: List[int], val: int) -> None:
lst.append(val)
except ValueError as err:
value_error = err
self.assertIsNotNone(value_error)
self.assertEqual("There are conflicting snapshots with the name: 'lst'", str(value_error))
def test_conflicting_snapshots_with_custom_name(self) -> None:
value_error = None # type: Optional[ValueError]
try:
# pylint: disable=unused-variable
@icontract.snapshot(lambda lst: len(lst), name='len_lst')
@icontract.snapshot(lambda lst: len(lst), name='len_lst')
@icontract.ensure(lambda OLD, val, lst: OLD.len_lst + 1 == len(lst))
def some_func(lst: List[int], val: int) -> None:
lst.append(val)
except ValueError as err:
value_error = err
self.assertIsNotNone(value_error)
self.assertEqual("There are conflicting snapshots with the name: 'len_lst'", str(value_error))
def test_with_invalid_argument(self) -> None:
# lst versus a_list
type_error = None # type: Optional[TypeError]
try:
@icontract.snapshot(lambda lst: len(lst), name='len_lst')
@icontract.ensure(lambda OLD, val, a_list: OLD.len_lst + 1 == len(a_list))
def some_func(a_list: List[int], val: int) -> None:
a_list.append(val)
some_func([1], 2)
except TypeError as err:
type_error = err
self.assertIsNotNone(type_error)
self.assertEqual("The argument(s) of the snapshot have not been set: ['lst']. "
"Does the original function define them? Did you supply them in the call?",
tests.error.wo_mandatory_location(str(type_error)))
def test_with_no_arguments_and_no_name(self) -> None:
z = [1]
value_error = None # type: Optional[ValueError]
try:
# pylint: disable=unused-variable
@icontract.snapshot(lambda: z[:])
@icontract.ensure(lambda OLD, val: OLD.z + [val] == z)
def some_func(val: int) -> None:
z.append(val)
except ValueError as err:
value_error = err
self.assertIsNotNone(value_error)
self.assertEqual("You must name a snapshot if no argument was given in the capture function.", str(value_error))
def test_with_multiple_arguments_and_no_name(self) -> None:
value_error = None # type: Optional[ValueError]
try:
# pylint: disable=unused-variable
@icontract.snapshot(lambda lst_a, lst_b: set(lst_a).union(lst_b))
@icontract.ensure(lambda OLD, lst_a, lst_b: set(lst_a).union(lst_b) == OLD.union)
def some_func(lst_a: List[int], lst_b: List[int]) -> None:
pass
except ValueError as err:
value_error = err
self.assertIsNotNone(value_error)
self.assertEqual("You must name a snapshot if multiple arguments were given in the capture function.",
str(value_error))
def test_with_no_postcondition(self) -> None:
value_error = None # type: Optional[ValueError]
try:
# pylint: disable=unused-variable
@icontract.snapshot(lambda lst: lst[:])
def some_func(lst: List[int]) -> None:
return
except ValueError as err:
value_error = err
self.assertIsNotNone(value_error)
self.assertEqual("You are decorating a function with a snapshot, "
"but no postcondition was defined on the function before.", str(value_error))
def test_missing_old_attribute(self) -> None:
@icontract.snapshot(lambda lst: lst[:])
@icontract.ensure(lambda OLD, lst: OLD.len_list == lst) # We miss len_lst in OLD here!
def some_func(lst: List[int]) -> None:
return
attribute_error = None # type: Optional[AttributeError]
try:
some_func(lst=[1, 2, 3])
except AttributeError as error:
attribute_error = error
assert attribute_error is not None
self.assertEqual("The snapshot with the name 'len_list' is not available in the OLD of a postcondition. "
"Have you decorated the function with a corresponding snapshot decorator?",
str(attribute_error))
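# --- Illustrative sketch (not part of the original test module) ---
# A minimal standalone use of the snapshot/ensure pair exercised by the tests
# above: capture len(stack) before the call and check it grew by exactly one.
# _demo_push is a made-up helper; calling _demo_push([1, 2], 3) passes, while
# appending twice inside the body would raise a ViolationError.
@icontract.snapshot(lambda stack: len(stack), name="old_len")
@icontract.ensure(lambda OLD, stack: len(stack) == OLD.old_len + 1)
def _demo_push(stack: List[int], item: int) -> None:
    stack.append(item)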
if __name__ == '__main__':
unittest.main()
|
polylearn/kernels.py | JDechery/polylearn | 237 | 12747310 | # Author: <NAME> <<EMAIL>>
# License: Simplified BSD
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.utils.extmath import safe_sparse_dot
from scipy.sparse import issparse
import numpy as np
def safe_power(X, degree=2):
"""Element-wise power supporting both sparse and dense data.
Parameters
----------
X : ndarray or sparse
The array whose entries to raise to the power.
degree : int, default: 2
The power to which to raise the elements.
Returns
-------
X_ret : ndarray or sparse
Same shape as X, but (x_ret)_ij = (x)_ij ^ degree
"""
if issparse(X):
if hasattr(X, 'power'):
return X.power(degree)
else:
# old scipy
X = X.copy()
X.data **= degree
return X
else:
return X ** degree
def _D(X, P, degree=2):
"""The "replacement" part of the homogeneous polynomial kernel.
D[i, j] = sum_k [(X_ik * P_jk) ** degree]
"""
return safe_sparse_dot(safe_power(X, degree), P.T ** degree)
def homogeneous_kernel(X, P, degree=2):
"""Convenience alias for homogeneous polynomial kernel between X and P::
K_P(x, p) = <x, p> ^ degree
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
    P : ndarray of shape (n_samples_2, n_features)
degree : int, default 2
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
return polynomial_kernel(X, P, degree=degree, gamma=1, coef0=0)
def anova_kernel(X, P, degree=2):
"""ANOVA kernel between X and P::
K_A(x, p) = sum_i1>i2>...>id x_i1 p_i1 x_i2 p_i2 ... x_id p_id
See <NAME> and <NAME>,
Kernel Methods for Pattern Analysis section 9.2.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
    P : ndarray of shape (n_samples_2, n_features)
degree : int, default 2
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
if degree == 2:
K = homogeneous_kernel(X, P, degree=2)
K -= _D(X, P, degree=2)
K /= 2
elif degree == 3:
K = homogeneous_kernel(X, P, degree=3)
K -= 3 * _D(X, P, degree=2) * _D(X, P, degree=1)
K += 2 * _D(X, P, degree=3)
K /= 6
else:
raise NotImplementedError("ANOVA kernel for degree >= 4 not yet "
"implemented efficiently.")
return K
def _poly_predict(X, P, lams, kernel, degree=2):
if kernel == "anova":
K = anova_kernel(X, P, degree)
elif kernel == "poly":
K = homogeneous_kernel(X, P, degree)
else:
raise ValueError(("Unsuppported kernel: {}. Use one "
"of {{'anova'|'poly'}}").format(kernel))
return np.dot(K, lams)
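# --- Illustrative sketch (not part of the original module) ---
# Sanity check of the degree-2 ANOVA kernel against the identity
#   K_A(x, p) = (<x, p>**2 - sum_k (x_k * p_k)**2) / 2
# on small random dense data; the shapes and seed are arbitrary demo choices.
def _demo_anova_degree2():
    rng = np.random.RandomState(0)
    X = rng.randn(5, 4)
    P = rng.randn(3, 4)
    K = anova_kernel(X, P, degree=2)
    brute = np.empty((5, 3))
    for i in range(5):
        for j in range(3):
            dot = np.dot(X[i], P[j])
            brute[i, j] = (dot ** 2 - np.sum((X[i] * P[j]) ** 2)) / 2.0
    assert np.allclose(K, brute)
    return K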
|
python/ql/test/library-tests/examples/custom-sanitizer/test.py | vadi2/codeql | 4,036 | 12747315 | <filename>python/ql/test/library-tests/examples/custom-sanitizer/test.py
def random_choice():
return bool(GLOBAL_UNKOWN_VAR)
def is_safe(arg):
return UNKNOWN_FUNC(arg)
def true_func():
return True
def test_basic():
s = TAINTED_STRING
if is_safe(s):
ensure_not_tainted(s)
else:
ensure_tainted(s)
if not is_safe(s):
ensure_tainted(s)
else:
ensure_not_tainted(s)
def test_or():
s = TAINTED_STRING
# x or y
if is_safe(s) or random_choice():
ensure_tainted(s) # might be tainted
else:
ensure_tainted(s) # must be tainted
# not (x or y)
if not(is_safe(s) or random_choice()):
ensure_tainted(s) # must be tainted
else:
ensure_tainted(s) # might be tainted
# not (x or y) == not x and not y [de Morgan's laws]
if not is_safe(s) and not random_choice():
ensure_tainted(s) # must be tainted
else:
ensure_tainted(s) # might be tainted
def test_and():
s = TAINTED_STRING
# x and y
if is_safe(s) and random_choice():
ensure_not_tainted(s) # must not be tainted
else:
ensure_tainted(s) # might be tainted
# not (x and y)
if not(is_safe(s) and random_choice()):
ensure_tainted(s) # might be tainted
else:
ensure_not_tainted(s)
# not (x and y) == not x or not y [de Morgan's laws]
if not is_safe(s) or not random_choice():
ensure_tainted(s) # might be tainted
else:
ensure_not_tainted(s)
def test_tricky():
s = TAINTED_STRING
x = is_safe(s)
if x:
ensure_not_tainted(s) # FP
s_ = s
if is_safe(s):
ensure_not_tainted(s_) # FP
def test_nesting_not():
s = TAINTED_STRING
if not(not(is_safe(s))):
ensure_not_tainted(s)
else:
ensure_tainted(s)
if not(not(not(is_safe(s)))):
ensure_tainted(s)
else:
ensure_not_tainted(s)
# Adding `and True` makes the sanitizer trigger when it would otherwise not. See output in
# SanitizedEdges.expected and compare with `test_nesting_not` and `test_basic`
def test_nesting_not_with_and_true():
s = TAINTED_STRING
if not(is_safe(s) and True):
ensure_tainted(s)
else:
ensure_not_tainted(s)
if not(not(is_safe(s) and True)):
ensure_not_tainted(s)
else:
ensure_tainted(s)
if not(not(not(is_safe(s) and True))):
ensure_tainted(s)
else:
ensure_not_tainted(s)
|
pydis_site/apps/resources/apps.py | Robin5605/site | 700 | 12747322 | <reponame>Robin5605/site
from django.apps import AppConfig
class ResourcesConfig(AppConfig):
"""AppConfig instance for Resources app."""
name = 'resources'
|
spikeinterface/core/unitsaggregationsorting.py | khl02007/spikeinterface | 116 | 12747324 | <filename>spikeinterface/core/unitsaggregationsorting.py
from typing import List, Union
import numpy as np
from .basesorting import BaseSorting, BaseSortingSegment
class UnitsAggregationSorting(BaseSorting):
"""
Class that handles aggregating units from different sortings, e.g. from different channel groups.
Do not use this class directly but use `si.aggregate_units(...)`
"""
def __init__(self, sorting_list, renamed_unit_ids=None):
unit_map = {}
num_all_units = sum([sort.get_num_units() for sort in sorting_list])
if renamed_unit_ids is not None:
            assert len(np.unique(renamed_unit_ids)) == num_all_units, "'renamed_unit_ids' doesn't have the right size " \
                                                                      "or has duplicates!"
unit_ids = list(renamed_unit_ids)
else:
unit_ids = list(np.arange(num_all_units))
        # unit_map maps each new unit id to the (sorting index, original unit id) pair used to fetch spike trains
u_id = 0
for s_i, sorting in enumerate(sorting_list):
single_unit_ids = sorting.get_unit_ids()
for unit_id in single_unit_ids:
unit_map[unit_ids[u_id]] = {'sorting_id': s_i, 'unit_id': unit_id}
u_id += 1
sampling_frequency = sorting_list[0].get_sampling_frequency()
num_segments = sorting_list[0].get_num_segments()
ok1 = all(sampling_frequency == sort.get_sampling_frequency() for sort in sorting_list)
ok2 = all(num_segments == sort.get_num_segments() for sort in sorting_list)
if not (ok1 and ok2):
raise ValueError("Sortings don't have the same sampling_frequency/num_segments")
BaseSorting.__init__(self, sampling_frequency, unit_ids)
property_keys = sorting_list[0].get_property_keys()
property_dict = {}
for prop_name in property_keys:
if all([prop_name in sort.get_property_keys() for sort in sorting_list]):
for i_s, sort in enumerate(sorting_list):
prop_value = sort.get_property(prop_name)
if i_s == 0:
property_dict[prop_name] = prop_value
else:
try:
property_dict[prop_name] = np.concatenate((property_dict[prop_name],
sort.get_property(prop_name)))
                        except Exception:
                            print(f"Skipping property '{prop_name}' due to shape inconsistency")
del property_dict[prop_name]
break
for prop_name, prop_values in property_dict.items():
self.set_property(key=prop_name, values=prop_values)
# add segments
for i_seg in range(num_segments):
parent_segments = [sort._sorting_segments[i_seg] for sort in sorting_list]
sub_segment = UnitsAggregationSortingSegment(unit_map, parent_segments)
self.add_sorting_segment(sub_segment)
self._sortings = sorting_list
self._kwargs = {'sorting_list': [sort.to_dict() for sort in sorting_list],
'renamed_unit_ids': renamed_unit_ids}
@property
def sortings(self):
return self._sortings
class UnitsAggregationSortingSegment(BaseSortingSegment):
def __init__(self, unit_map, parent_segments):
BaseSortingSegment.__init__(self)
self._unit_map = unit_map
self._parent_segments = parent_segments
def get_unit_spike_train(self,
unit_id,
start_frame: Union[int, None] = None,
end_frame: Union[int, None] = None,
) -> np.ndarray:
sorting_id = self._unit_map[unit_id]['sorting_id']
unit_id_sorting = self._unit_map[unit_id]['unit_id']
times = self._parent_segments[sorting_id].get_unit_spike_train(unit_id_sorting, start_frame, end_frame)
return times
def aggregate_units(sorting_list, renamed_unit_ids=None):
"""
Aggregates units of multiple sortings into a single sorting object
Parameters
----------
sorting_list: list
List of BaseSorting objects to aggregate
renamed_unit_ids: array-like
If given, unit ids are renamed as provided. If None, unit ids are sequential integers.
Returns
-------
aggregate_sorting: UnitsAggregationSorting
The aggregated sorting object
"""
return UnitsAggregationSorting(sorting_list, renamed_unit_ids)
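# Hedged usage sketch (not part of the original module): given two existing
# BaseSorting objects, e.g. one per channel group, aggregation is simply
#
#     aggregated = aggregate_units([sorting_group0, sorting_group1])
#     aggregated.get_unit_ids()                               # sequential integer ids by default
#     aggregated.get_unit_spike_train(unit_id=0, segment_index=0)
#
# The names sorting_group0 / sorting_group1 are placeholders, and the
# segment_index keyword is assumed from the BaseSorting API used above.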
|
worldengine/generation.py | ctittel/worldengine | 946 | 12747325 | import numpy
from noise import snoise2
from worldengine.model.world import Step
from worldengine.simulations.basic import find_threshold_f
from worldengine.simulations.hydrology import WatermapSimulation
from worldengine.simulations.irrigation import IrrigationSimulation
from worldengine.simulations.humidity import HumiditySimulation
from worldengine.simulations.temperature import TemperatureSimulation
from worldengine.simulations.permeability import PermeabilitySimulation
from worldengine.simulations.erosion import ErosionSimulation
from worldengine.simulations.precipitation import PrecipitationSimulation
from worldengine.simulations.biome import BiomeSimulation
from worldengine.simulations.icecap import IcecapSimulation
from worldengine.common import anti_alias, get_verbose
# ------------------
# Initial generation
# ------------------
def center_land(world):
"""Translate the map horizontally and vertically to put as much ocean as
    possible at the borders. It operates on the elevation and plates maps."""
y_sums = world.layers['elevation'].data.sum(1) # 1 == sum along x-axis
y_with_min_sum = y_sums.argmin()
if get_verbose():
print("geo.center_land: height complete")
x_sums = world.layers['elevation'].data.sum(0) # 0 == sum along y-axis
x_with_min_sum = x_sums.argmin()
if get_verbose():
print("geo.center_land: width complete")
latshift = 0
world.layers['elevation'].data = numpy.roll(numpy.roll(world.layers['elevation'].data, -y_with_min_sum + latshift, axis=0), - x_with_min_sum, axis=1)
world.layers['plates'].data = numpy.roll(numpy.roll(world.layers['plates'].data, -y_with_min_sum + latshift, axis=0), - x_with_min_sum, axis=1)
if get_verbose():
print("geo.center_land: width complete")
def place_oceans_at_map_borders(world):
"""
Lower the elevation near the border of the map
"""
ocean_border = int(min(30, max(world.width / 5, world.height / 5)))
def place_ocean(x, y, i):
world.layers['elevation'].data[y, x] = \
(world.layers['elevation'].data[y, x] * i) / ocean_border
for x in range(world.width):
for i in range(ocean_border):
place_ocean(x, i, i)
place_ocean(x, world.height - i - 1, i)
for y in range(world.height):
for i in range(ocean_border):
place_ocean(i, y, i)
place_ocean(world.width - i - 1, y, i)
def add_noise_to_elevation(world, seed):
octaves = 8
freq = 16.0 * octaves
for y in range(world.height):
for x in range(world.width):
n = snoise2(x / freq * 2, y / freq * 2, octaves, base=seed)
world.layers['elevation'].data[y, x] += n
def fill_ocean(elevation, sea_level):#TODO: Make more use of numpy?
height, width = elevation.shape
ocean = numpy.zeros(elevation.shape, dtype=bool)
to_expand = []
for x in range(width):#handle top and bottom border of the map
if elevation[0, x] <= sea_level:
to_expand.append((x, 0))
if elevation[height - 1, x] <= sea_level:
to_expand.append((x, height - 1))
for y in range(height):#handle left- and rightmost border of the map
if elevation[y, 0] <= sea_level:
to_expand.append((0, y))
if elevation[y, width - 1] <= sea_level:
to_expand.append((width - 1, y))
for t in to_expand:
tx, ty = t
if not ocean[ty, tx]:
ocean[ty, tx] = True
for px, py in _around(tx, ty, width, height):
if not ocean[py, px] and elevation[py, px] <= sea_level:
to_expand.append((px, py))
return ocean
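# Hedged illustration (not part of the original module): on a tiny 5x5 map the
# flood fill above marks only border-connected cells at or below sea level as
# ocean; a depressed cell enclosed by a ring of high land stays dry.
def _fill_ocean_example():
    demo = numpy.array([[0.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 2.0, 2.0, 2.0, 0.0],
                        [0.0, 2.0, 0.5, 2.0, 0.0],
                        [0.0, 2.0, 2.0, 2.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0]])
    ocean = fill_ocean(demo, sea_level=1.0)
    assert ocean[0, 0] and not ocean[2, 2]  # border is ocean, the enclosed cell is not
    return ocean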
def initialize_ocean_and_thresholds(world, ocean_level=1.0):
"""
Calculate the ocean, the sea depth and the elevation thresholds
:param world: a world having elevation but not thresholds
:param ocean_level: the elevation representing the ocean level
:return: nothing, the world will be changed
"""
e = world.layers['elevation'].data
ocean = fill_ocean(e, ocean_level)
hl = find_threshold_f(e, 0.10) # the highest 10% of all (!) land are declared hills
ml = find_threshold_f(e, 0.03) # the highest 3% are declared mountains
e_th = [('sea', ocean_level),
('plain', hl),
('hill', ml),
('mountain', None)]
harmonize_ocean(ocean, e, ocean_level)
world.ocean = ocean
world.elevation = (e, e_th)
world.sea_depth = sea_depth(world, ocean_level)
def harmonize_ocean(ocean, elevation, ocean_level):
"""
The goal of this function is to make the ocean floor less noisy.
The underwater erosion should cause the ocean floor to be more uniform
"""
shallow_sea = ocean_level * 0.85
midpoint = shallow_sea / 2.0
ocean_points = numpy.logical_and(elevation < shallow_sea, ocean)
shallow_ocean = numpy.logical_and(elevation < midpoint, ocean_points)
elevation[shallow_ocean] = midpoint - ((midpoint - elevation[shallow_ocean]) / 5.0)
deep_ocean = numpy.logical_and(elevation > midpoint, ocean_points)
elevation[deep_ocean] = midpoint + ((elevation[deep_ocean] - midpoint) / 5.0)
# ----
# Misc
# ----
def sea_depth(world, sea_level):
# a dynamic programming approach to gather how far the next land is
# from a given coordinate up to a maximum distance of max_radius
# result is 0 for land coordinates and -1 for coordinates further than
# max_radius away from land
# there might be even faster ways but it does the trick
def next_land_dynamic(ocean, max_radius=5):
next_land = numpy.full(ocean.shape, -1, int)
# non ocean tiles are zero distance away from next land
next_land[numpy.logical_not(ocean)]=0
height, width = ocean.shape
for dist in range(max_radius):
indices = numpy.transpose(numpy.where(next_land==dist))
for y, x in indices:
for dy in range(-1, 2):
ny = y + dy
if 0 <= ny < height:
for dx in range(-1, 2):
nx = x + dx
if 0 <= nx < width:
if next_land[ny,nx] == -1:
next_land[ny,nx] = dist + 1
return next_land
# We want to multiply the raw sea_depth by one of these factors
# depending on the distance from the next land
# possible TODO: make this a parameter
factors = [0.0, 0.3, 0.5, 0.7, 0.9]
next_land = next_land_dynamic(world.layers['ocean'].data)
sea_depth = sea_level - world.layers['elevation'].data
for y in range(world.height):
for x in range(world.width):
dist_to_next_land = next_land[y,x]
if dist_to_next_land > 0:
sea_depth[y,x]*=factors[dist_to_next_land-1]
sea_depth = anti_alias(sea_depth, 10)
min_depth = sea_depth.min()
max_depth = sea_depth.max()
sea_depth = (sea_depth - min_depth) / (max_depth - min_depth)
return sea_depth
def _around(x, y, width, height):
ps = []
for dx in range(-1, 2):
nx = x + dx
if 0 <= nx < width:
for dy in range(-1, 2):
ny = y + dy
if 0 <= ny < height and (dx != 0 or dy != 0):
ps.append((nx, ny))
return ps
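# Hedged note (not part of the original module): _around() returns the in-bounds
# neighbours of a cell, diagonals included, e.g. _around(0, 0, 5, 5) gives
# [(0, 1), (1, 0), (1, 1)].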
def generate_world(w, step):
if isinstance(step, str):
step = Step.get_by_name(step)
if not step.include_precipitations:
return w
# Prepare sufficient seeds for the different steps of the generation
rng = numpy.random.RandomState(w.seed) # create a fresh RNG in case the global RNG is compromised (i.e. has been queried an indefinite amount of times before generate_world() was called)
sub_seeds = rng.randint(0, numpy.iinfo(numpy.int32).max, size=100) # choose lowest common denominator (32 bit Windows numpy cannot handle a larger value)
seed_dict = {
'PrecipitationSimulation': sub_seeds[ 0], # after 0.19.0 do not ever switch out the seeds here to maximize seed-compatibility
'ErosionSimulation': sub_seeds[ 1],
'WatermapSimulation': sub_seeds[ 2],
'IrrigationSimulation': sub_seeds[ 3],
'TemperatureSimulation': sub_seeds[ 4],
'HumiditySimulation': sub_seeds[ 5],
'PermeabilitySimulation': sub_seeds[ 6],
'BiomeSimulation': sub_seeds[ 7],
'IcecapSimulation': sub_seeds[ 8],
'': sub_seeds[99]
}
TemperatureSimulation().execute(w, seed_dict['TemperatureSimulation'])
# Precipitation with thresholds
PrecipitationSimulation().execute(w, seed_dict['PrecipitationSimulation'])
if not step.include_erosion:
return w
ErosionSimulation().execute(w, seed_dict['ErosionSimulation']) # seed not currently used
if get_verbose():
print("...erosion calculated")
WatermapSimulation().execute(w, seed_dict['WatermapSimulation']) # seed not currently used
# FIXME: create setters
IrrigationSimulation().execute(w, seed_dict['IrrigationSimulation']) # seed not currently used
HumiditySimulation().execute(w, seed_dict['HumiditySimulation']) # seed not currently used
PermeabilitySimulation().execute(w, seed_dict['PermeabilitySimulation'])
cm, biome_cm = BiomeSimulation().execute(w, seed_dict['BiomeSimulation']) # seed not currently used
for cl in cm.keys():
count = cm[cl]
if get_verbose():
print("%s = %i" % (str(cl), count))
if get_verbose():
print('') # empty line
print('Biome obtained:')
for cl in biome_cm.keys():
count = biome_cm[cl]
if get_verbose():
print(" %30s = %7i" % (str(cl), count))
IcecapSimulation().execute(w, seed_dict['IcecapSimulation']) # makes use of temperature-map
return w
|
components/isceobj/Scene/Track.py | vincentschut/isce2 | 1,133 | 12747336 | <reponame>vincentschut/isce2<filename>components/isceobj/Scene/Track.py
#!/usr/bin/env python3
#
#Copyright 2010, by the California Institute of Technology.
#ALL RIGHTS RESERVED.
#United States Government Sponsorship acknowledged.
#Any commercial use must be negotiated with the Office of
#Technology Transfer at the California Institute of Technology.
#
#This software may be subject to U.S. export control laws. By
#accepting this software, the user agrees to comply with all applicable
#U.S. export laws and regulations. User has the responsibility to obtain
#export licenses, or other export authority as may be required before
#exporting such information to foreign countries or providing access
#to foreign persons.
#
import isce
import sys
import os
from sys import float_info
import logging
import datetime
from isceobj.Scene.Frame import Frame
from isceobj.Orbit.Orbit import Orbit
from isceobj.Attitude.Attitude import Attitude
from iscesys.DateTimeUtil.DateTimeUtil import DateTimeUtil as DTU
from isceobj.Util.decorators import type_check, logged, pickled
import isceobj
import tempfile
@pickled
class Track(object):
"""A class to represent a collection of temporally continuous radar frame
objects"""
logging_name = "isce.Scene.Track"
@logged
def __init__(self):
# These are attributes representing the starting time and stopping
# time of the track
# As well as the early and late times (range times) of the track
self._startTime = datetime.datetime(year=datetime.MAXYEAR,month=1,day=1)
self._stopTime = datetime.datetime(year=datetime.MINYEAR,month=1,day=1)
# Hopefully this number is large
# enough, Python doesn't appear to have a MAX_FLT variable
self._nearRange = float_info.max
self._farRange = 0.0
self._frames = []
self._frame = Frame()
self._lastFile = ''
return None
def combineFrames(self, output, frames):
attitudeOk = True
for frame in frames:
self.addFrame(frame)
if hasattr(frame,'_attitude'):
att = frame.getAttitude()
if not att:
attitudeOk = False
self.createInstrument()
self.createTrack(output)
self.createOrbit()
if attitudeOk:
self.createAttitude()
return self._frame
def createAuxFile(self, fileList, output):
import struct
from operator import itemgetter
import os
import array
import copy
dateIndx = []
cnt = 0
        #first sort the files from earliest to latest. use the first element
for name in fileList:
with open(name,'rb') as fp: date = fp.read(16)
day, musec = struct.unpack('<dd',date)
dateIndx.append([day,musec,name])
cnt += 1
sortedDate = sorted(dateIndx, key=itemgetter(0,1))
        #we need to make sure that there are no duplicate points in the orbit since some frames overlap
allL = array.array('d')
allL1 = array.array('d')
name = sortedDate[0][2]
size = os.path.getsize(name)//8
with open(name,'rb') as fp1: allL.fromfile(fp1,size)
lastDay = allL[-2]
lastMusec = allL[-1]
for j in range(1, len(sortedDate)):
name = sortedDate[j][2]
size = os.path.getsize(name)//8
with open(name,'rb') as fp1: allL1.fromfile(fp1, size)
indxFound = None
avgPRI = 0
cnt = 0
for i in range(len(allL1)//2):
if i > 0:
avgPRI += allL1[2*i+1] - allL1[2*i-1]
cnt += 1
if allL1[2*i] >= lastDay and allL1[2*i+1] > lastMusec:
avgPRI //= (cnt-1)
                    if (allL1[2*i+1] - lastMusec) > avgPRI/2:  # make sure that the distance in pulses is at least 1/2 PRI
indxFound = 2*i
else:#if not take the next
indxFound = 2*(i+1)
pass
break
if not indxFound is None:
allL.extend(allL1[indxFound:])
lastDay = allL[-2]
lastMusec = allL[-1]
pass
pass
with open(output,'wb') as fp: allL.tofile(fp)
return
# Add an additional Frame object to the track
@type_check(Frame)
def addFrame(self, frame):
self.logger.info("Adding Frame to Track")
self._updateTrackTimes(frame)
self._frames.append(frame)
return None
def createOrbit(self):
orbitAll = Orbit()
for i in range(len(self._frames)):
orbit = self._frames[i].getOrbit()
#remember that everything is by reference, so the changes applied to orbitAll will be made to the Orbit
#object in self.frame
for sv in orbit._stateVectors:
orbitAll.addStateVector(sv)
        # sort the orbit state vectors according to time
orbitAll._stateVectors.sort(key=lambda sv: sv.time)
self.removeDuplicateVectors(orbitAll._stateVectors)
self._frame.setOrbit(orbitAll)
def removeDuplicateVectors(self,stateVectors):
i1 = 0
#remove duplicate state vectors
while True:
if i1 >= len(stateVectors) - 1:
break
if stateVectors[i1].time == stateVectors[i1+1].time:
stateVectors.pop(i1+1)
            # since the list is sorted by time, if the times are not equal we can move on to the next entry
else:
i1 += 1
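    # Hedged illustration (not part of the original class): removeDuplicateVectors()
    # expects a time-sorted list of objects exposing a `time` attribute and drops,
    # in place, any entry whose time equals the previous one, e.g.
    #
    #     svs = [sv_t0, sv_t0_duplicate, sv_t1]   # hypothetical state vectors
    #     track.removeDuplicateVectors(svs)       # svs becomes [sv_t0, sv_t1]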
def createAttitude(self):
attitudeAll = Attitude()
for i in range(len(self._frames)):
attitude = self._frames[i].getAttitude()
#remember that everything is by reference, so the changes applied to attitudeAll will be made to the Attitude object in self.frame
for sv in attitude._stateVectors:
attitudeAll.addStateVector(sv)
        # sort the attitude state vectors according to time
attitudeAll._stateVectors.sort(key=lambda sv: sv.time)
self.removeDuplicateVectors(attitudeAll._stateVectors)
self._frame.setAttitude(attitudeAll)
def createInstrument(self):
# the platform is already part of the instrument
ins = self._frames[0].getInstrument()
self._frame.setInstrument(ins)
    # sometimes the startLine computed below from the sensingStart is not
    # precise and the images are misaligned.
    # for each pair do an exact match by comparing the lines around lineStart
    # file1,2 are the input files; line1 is the estimated start line in the first file
    # (i.e. the last line used in the first file)
    # width = width of the files
    # frameNum1,2 are the numbers of the frames in the sequence of frames to stitch
    # returns a more accurate line1
def findOverlapLine(self, file1, file2, line1,width,frameNum1,frameNum2):
import numpy as np
import array
fin2 = open(file2,'rb')
arr2 = array.array('b')
#read full line at the beginning of second file
arr2.fromfile(fin2,width)
buf2 = np.array(arr2,dtype = np.int8)
numTries = 30
# start around line1 and try numTries around line1
# see searchlist to see which lines it searches
searchNumLines = 2
        #make a sliding window that searches for the searchSize samples inside buf2
searchSize = 500
max = 0
indx = None
fin1 = open(file1,'rb')
for i in range(numTries):
            # example: line1 = 0, searchNumLines = 2; for i = 0, search = [-2,-1,0,1]; for i = 1, search = [-4,-3,2,3]
search = list(range(line1 - (i+1)*searchNumLines,line1 - i*searchNumLines))
search.extend(list(range(line1 + i*searchNumLines,line1 + (i+1)*searchNumLines)))
for k in search:
arr1 = array.array('b')
#seek to the line k and read +- searchSize/2 samples from the middle of the line
fin1.seek(k*width + (width - searchSize)//2,0)
arr1.fromfile(fin1,searchSize)
buf1 = np.array(arr1,dtype = np.int8)
found = False
                for j in np.arange(width - searchSize):
                    lenSame = len(np.nonzero(buf1 == buf2[j:j+searchSize])[0])
if lenSame > max:
max = lenSame
indx = k
if(lenSame == searchSize):
found = True
break
if(found):
break
if(found):
break
if not found:
self.logger.warning("Cannot find perfect overlap between frame %d and frame %d. Using acquisition time to find overlap position."%(frameNum1,frameNum2))
fin1.close()
fin2.close()
        print('Best overlap line found: ', indx)
return indx
def reAdjustStartLine(self, sortedList, width):
""" Computed the adjusted starting lines based on matching in overlapping regions """
from operator import itemgetter
import os
#first one always starts from zero
startLine = [sortedList[0][0]]
outputs = [sortedList[0][1]]
for i in range(1,len(sortedList)):
# endLine of the first file. we use all the lines of the first file up to endLine
endLine = sortedList[i][0] - sortedList[i-1][0]
indx = self.findOverlapLine(sortedList[i-1][1],sortedList[i][1],endLine,width,i-1,i)
            #if indx is not None then indx is the new start line
#otherwise we use startLine computed from acquisition time
if (indx is not None) and (indx + sortedList[i-1][0] != sortedList[i][0]):
startLine.append(indx + sortedList[i-1][0])
outputs.append(sortedList[i][1])
self.logger.info("Changing starting line for frame %d from %d to %d"%(i,endLine,indx))
else:
startLine.append(sortedList[i][0])
outputs.append(sortedList[i][1])
return startLine,outputs
# Create the actual Track data by concatenating data from
# all of the Frames objects together
def createTrack(self,output):
import os
from operator import itemgetter
from isceobj import Constants as CN
from ctypes import cdll, c_char_p, c_int, c_ubyte,byref
lib = cdll.LoadLibrary(os.path.dirname(__file__)+'/concatenate.so')
# Perhaps we should check to see if Xmin is 0, if it is not, strip off the header
self.logger.info("Adjusting Sampling Window Start Times for all Frames")
# Iterate over each frame object, and calculate the number of samples with which to pad it on the left and right
outputs = []
totalWidth = 0
auxList = []
for frame in self._frames:
# Calculate the amount of padding
thisNearRange = frame.getStartingRange()
thisFarRange = frame.getFarRange()
left_pad = int(round(
(thisNearRange - self._nearRange)*
frame.getInstrument().getRangeSamplingRate()/(CN.SPEED_OF_LIGHT/2.0)))*2
right_pad = int(round((self._farRange - thisFarRange)*frame.getInstrument().getRangeSamplingRate()/(CN.SPEED_OF_LIGHT/2.0)))*2
width = frame.getImage().getXmax()
if width - int(width) != 0:
raise ValueError("frame Xmax is not an integer")
else:
width = int(width)
input = frame.getImage().getFilename()
# tempOutput = os.path.basename(os.tmpnam()) # Some temporary filename
with tempfile.NamedTemporaryFile(dir='.') as f:
tempOutput = f.name
pad_value = int(frame.getInstrument().getInPhaseValue())
if totalWidth < left_pad + width + right_pad:
totalWidth = left_pad + width + right_pad
# Resample this frame with swst_resample
input_c = c_char_p(bytes(input,'utf-8'))
output_c = c_char_p(bytes(tempOutput,'utf-8'))
width_c = c_int(width)
left_pad_c = c_int(left_pad)
right_pad_c = c_int(right_pad)
pad_value_c = c_ubyte(pad_value)
lib.swst_resample(input_c,output_c,byref(width_c),byref(left_pad_c),byref(right_pad_c),byref(pad_value_c))
outputs.append(tempOutput)
auxList.append(frame.auxFile)
        # this step constructs the aux file with the pulse-time info for the whole set of frames
self.createAuxFile(auxList,output + '.aux')
# This assumes that all of the frames to be concatenated are sampled at the same PRI
prf = self._frames[0].getInstrument().getPulseRepetitionFrequency()
# Calculate the starting output line of each scene
i = 0
lineSort = []
        # each element of lineSort has 2 entries: a start line number, which is the position of that
        # specific frame computed from the acquisition time, and the corresponding file name
for frame in self._frames:
startLine = int(round(DTU.timeDeltaToSeconds(frame.getSensingStart()-self._startTime)*prf))
lineSort.append([startLine,outputs[i]])
i += 1
sortedList = sorted(lineSort, key=itemgetter(0)) # sort by line number i.e. acquisition time
startLines, outputs = self.reAdjustStartLine(sortedList,totalWidth)
self.logger.info("Concatenating Frames along Track")
        # this is a hack since the actual file length can differ from the one computed using the start and stop times; only the last frame added matters here
import os
fileSize = os.path.getsize(outputs[-1])
numLines = fileSize//totalWidth + startLines[-1]
totalLines_c = c_int(numLines)
# Next, call frame_concatenate
width_c = c_int(totalWidth) # Width of each frame (with the padding added in swst_resample)
numberOfFrames_c = c_int(len(self._frames))
inputs_c = (c_char_p * len(outputs))() # These are the inputs to frame_concatenate, but the outputs from swst_resample
for kk in range(len(outputs)):
inputs_c[kk] = bytes(outputs[kk],'utf-8')
output_c = c_char_p(bytes(output,'utf-8'))
startLines_c = (c_int * len(startLines))()
startLines_c[:] = startLines
lib.frame_concatenate(output_c,byref(width_c),byref(totalLines_c),byref(numberOfFrames_c),inputs_c,startLines_c)
# Clean up the temporary output files from swst_resample
for file in outputs:
os.unlink(file)
orbitNum = self._frames[0].getOrbitNumber()
first_line_utc = self._startTime
last_line_utc = self._stopTime
centerTime = DTU.timeDeltaToSeconds(last_line_utc-first_line_utc)/2.0
center_line_utc = first_line_utc + datetime.timedelta(microseconds=int(centerTime*1e6))
procFac = self._frames[0].getProcessingFacility()
procSys = self._frames[0].getProcessingSystem()
procSoft = self._frames[0].getProcessingSoftwareVersion()
pol = self._frames[0].getPolarization()
xmin = self._frames[0].getImage().getXmin()
self._frame.setOrbitNumber(orbitNum)
self._frame.setSensingStart(first_line_utc)
self._frame.setSensingMid(center_line_utc)
self._frame.setSensingStop(last_line_utc)
self._frame.setStartingRange(self._nearRange)
self._frame.setFarRange(self._farRange)
self._frame.setProcessingFacility(procFac)
self._frame.setProcessingSystem(procSys)
self._frame.setProcessingSoftwareVersion(procSoft)
self._frame.setPolarization(pol)
self._frame.setNumberOfLines(numLines)
self._frame.setNumberOfSamples(width)
# add image to frame
rawImage = isceobj.createRawImage()
rawImage.setByteOrder('l')
rawImage.setFilename(output)
rawImage.setAccessMode('r')
rawImage.setWidth(totalWidth)
rawImage.setXmax(totalWidth)
rawImage.setXmin(xmin)
self._frame.setImage(rawImage)
# Extract the early, late, start and stop times from a Frame object
    # and use this information to update the track's time and range extents
def _updateTrackTimes(self,frame):
if (frame.getSensingStart() < self._startTime):
self._startTime = frame.getSensingStart()
if (frame.getSensingStop() > self._stopTime):
self._stopTime = frame.getSensingStop()
if (frame.getStartingRange() < self._nearRange):
self._nearRange = frame.getStartingRange()
if (frame.getFarRange() > self._farRange):
self._farRange = frame.getFarRange()
pass
pass
pass
def main():
tr = Track()
file1 = sys.argv[1]
file2 = sys.argv[2]
line1 = 17731
width = 21100
indx = tr.findOverlapLine(file1, file2, line1,width,0,1)
if __name__ == '__main__':
sys.exit(main())
|
ShuffleNetV2.ExLarge/network.py | PeterouZh/ShuffleNet-Series | 1,382 | 12747360 | <reponame>PeterouZh/ShuffleNet-Series
import torch
import torch.nn as nn
class Conv_BN_ReLU(nn.Module):
def __init__(self, in_channel, out_channel, k_size, stride=1, padding=0, groups=1,
has_bn=True, has_relu=True, gaussian_init=False):
super(Conv_BN_ReLU, self).__init__()
self.conv = nn.Conv2d(in_channel, out_channel, kernel_size=k_size,
stride=stride, padding=padding,
groups=groups, bias=False)
if gaussian_init:
nn.init.normal_(self.conv.weight.data, 0, 0.01)
if has_bn:
self.bn = nn.BatchNorm2d(out_channel)
self.has_bn = has_bn
self.has_relu = has_relu
if has_relu:
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
if self.has_bn:
x = self.bn(x)
if self.has_relu:
x = self.relu(x)
return x
class FC(nn.Module):
def __init__(self, in_channels, out_channels):
super(FC, self).__init__()
self.fc = nn.Linear(in_channels, out_channels)
nn.init.normal_(self.fc.weight.data, 0, 0.01)
def forward(self, x):
return self.fc(x)
class ExtraLabelPredict(nn.Module):
def __init__(self, in_channels, out_channels, num_classes=1000):
super(ExtraLabelPredict, self).__init__()
self.num_classes = num_classes
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv = nn.Sequential(
Conv_BN_ReLU(in_channels, out_channels, 1, 1, 0),
Conv_BN_ReLU(out_channels, out_channels, 3, 1, 1)
)
self.globalpool = nn.AdaptiveAvgPool2d(output_size=1)
self.fc = nn.Linear(out_channels, num_classes)
def forward(self, inputs):
inputs = self.maxpool(inputs)
inputs = self.conv(inputs)
inputs = self.globalpool(inputs)
inputs = inputs.view(inputs.size(0), -1)
inputs = self.fc(inputs)
return inputs
class ShuffleV2Block(nn.Module):
def __init__(self, in_channels, out_channels, mid_channels, stride, groups, has_proj=False, has_se=False):
super(ShuffleV2Block, self).__init__()
self.stride = stride
assert stride in [1, 2]
self.has_proj = has_proj
self.has_se = has_se
self.relu = nn.ReLU(inplace=True)
if has_proj:
self.proj = Conv_BN_ReLU(in_channels, out_channels - mid_channels, k_size=3, stride=stride, padding=1,
has_bn=True, has_relu=True)
self.branch_main = nn.Sequential(
Conv_BN_ReLU(in_channels, out_channels, k_size=1, stride=1, padding=0,
has_bn=True, has_relu=True),
Conv_BN_ReLU(out_channels, out_channels, k_size=3, stride=stride, padding=1, groups=groups,
has_bn=True, has_relu=True),
Conv_BN_ReLU(out_channels, out_channels, k_size=3, stride=1, padding=1, groups=out_channels,
has_bn=True, has_relu=False),
Conv_BN_ReLU(out_channels, mid_channels, k_size=1, stride=1, padding=0,
has_bn=True, has_relu=False),
)
if has_se:
self.se_globalpool = nn.AdaptiveAvgPool2d(output_size=1)
self.se_fc1 = FC(mid_channels, mid_channels // 4)
self.se_fc2 = FC(mid_channels // 4, mid_channels)
se_block = [
self.se_fc1,
nn.ReLU(inplace=True),
self.se_fc2,
nn.Sigmoid(),
]
self.se_block = nn.Sequential(*se_block)
def forward(self, old_x):
if self.has_proj:
proj, x = old_x, old_x
else:
proj, x = self.channel_shuffle(old_x)
x_proj = x
if self.has_proj:
proj = self.proj(proj)
x = self.branch_main(x)
if self.has_se:
se_scale = self.se_globalpool(x).view(x.size(0), -1)
se_scale = self.se_block(se_scale).unsqueeze(-1).unsqueeze(-1)
x = x * se_scale
if not self.has_proj:
x = self.relu(x_proj + x)
x = torch.cat((proj, x), dim=1)
return x
def channel_shuffle(self, x):
batchsize, num_channels, height, width = x.data.size()
assert (num_channels % 4 == 0)
x = x.reshape(batchsize * num_channels // 2, 2, height * width)
x = x.permute(1, 0, 2)
x = x.reshape(2, -1, num_channels // 2, height, width)
return x[0], x[1]
class ShuffleNetV2(nn.Module):
def __init__(self, n_class=1000, model_size='ExLarge'):
super(ShuffleNetV2, self).__init__()
self.stage_repeats = [4, 8, 4]
self.model_size = model_size
if model_size == 'ExLarge':
self.pre = [2, 3, 4, 5]
self.stage_repeats = [8, 16, 36, 10]
self.outputs = [320, 640, 1280, 2560]
self.enable_stride = [False, True, True, True]
else:
raise NotImplementedError
self.first_conv = nn.Sequential(
Conv_BN_ReLU(3, 64, k_size=3, stride=2, padding=1),
Conv_BN_ReLU(64, 128, k_size=3, stride=1, padding=1),
Conv_BN_ReLU(128, 256, k_size=3, stride=1, padding=1),
)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.features = nn.ModuleList()
input_channel = 256
if model_size == 'ExLarge':
for p, s, o, es in zip(self.pre, self.stage_repeats, self.outputs, self.enable_stride):
feature = []
for i in range(s):
prefix = "{}{}".format(p, str(i))
stride = 1 if not es or i > 0 else 2
has_proj = False if i > 0 else True
feature.append(ShuffleV2Block(in_channels=input_channel, out_channels=o, mid_channels=o // 2,
stride=stride, groups=16, has_proj=has_proj, has_se=True))
input_channel = o // 2
feature.append(Conv_BN_ReLU(o, o, k_size=1, stride=1, padding=0))
input_channel = o
feature = nn.Sequential(*feature)
self.features.append(feature)
if p == 2:
self.predict_56 = ExtraLabelPredict(in_channels=320, out_channels=256)
elif p == 3:
self.predict_28 = ExtraLabelPredict(in_channels=640, out_channels=512)
elif p == 4:
self.predict_14 = ExtraLabelPredict(in_channels=1280, out_channels=1024)
self.globalpool = nn.AvgPool2d(7)
if self.model_size == 'ExLarge':
self.dropout = nn.Dropout(0.2)
self.fc = FC(2560, n_class)
self._initialize_weights()
def _initialize_weights(self):
for name, m in self.named_modules():
if isinstance(m, nn.Conv2d):
if 'first' in name:
nn.init.normal_(m.weight, 0, 0.01)
else:
nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0001)
nn.init.constant_(m.running_mean, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0001)
nn.init.constant_(m.running_mean, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.first_conv(x)
x = self.maxpool(x)
# 1 * 256 * 56 * 56
x = self.features[0](x)
# 1 * 320 * 56 * 56
if self.training:
predict_56 = self.predict_56(x)
x = self.features[1](x)
# 1 * 640 * 28 * 28
if self.training:
predict_28 = self.predict_28(x)
x = self.features[2](x)
# 1 * 1280 * 14 * 14
if self.training:
predict_14 = self.predict_14(x)
x = self.features[3](x)
# 1 * 2560 * 7 * 7
x = self.globalpool(x)
if self.model_size == 'ExLarge':
x = self.dropout(x)
x = x.reshape(x.size(0), -1)
x = self.fc(x)
if self.training:
# Loss is scaled by 1.0, 0.7, 0.5, 0.3
return x, predict_14, predict_28, predict_56
else:
return x
def create_network():
model = ShuffleNetV2()
return model
if __name__ == "__main__":
create_network()
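    # Hedged smoke test (not in the original file): run a dummy forward pass in
    # eval mode to check the output shape; assumes a CPU run is acceptable.
    model = create_network()
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([1, 1000])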
|
sdk/python/pulumi_azure/kusto/_inputs.py | henriktao/pulumi-azure | 109 | 12747404 | <filename>sdk/python/pulumi_azure/kusto/_inputs.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'AttachedDatabaseConfigurationSharingArgs',
'ClusterIdentityArgs',
'ClusterOptimizedAutoScaleArgs',
'ClusterSkuArgs',
'ClusterVirtualNetworkConfigurationArgs',
]
@pulumi.input_type
class AttachedDatabaseConfigurationSharingArgs:
def __init__(__self__, *,
external_tables_to_excludes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
external_tables_to_includes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
materialized_views_to_excludes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
materialized_views_to_includes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tables_to_excludes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tables_to_includes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input[str]]] external_tables_to_excludes: List of external tables exclude from the follower database.
:param pulumi.Input[Sequence[pulumi.Input[str]]] external_tables_to_includes: List of external tables to include in the follower database.
:param pulumi.Input[Sequence[pulumi.Input[str]]] materialized_views_to_excludes: List of materialized views exclude from the follower database.
:param pulumi.Input[Sequence[pulumi.Input[str]]] materialized_views_to_includes: List of materialized views to include in the follower database.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tables_to_excludes: List of tables to exclude from the follower database.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tables_to_includes: List of tables to include in the follower database.
"""
if external_tables_to_excludes is not None:
pulumi.set(__self__, "external_tables_to_excludes", external_tables_to_excludes)
if external_tables_to_includes is not None:
pulumi.set(__self__, "external_tables_to_includes", external_tables_to_includes)
if materialized_views_to_excludes is not None:
pulumi.set(__self__, "materialized_views_to_excludes", materialized_views_to_excludes)
if materialized_views_to_includes is not None:
pulumi.set(__self__, "materialized_views_to_includes", materialized_views_to_includes)
if tables_to_excludes is not None:
pulumi.set(__self__, "tables_to_excludes", tables_to_excludes)
if tables_to_includes is not None:
pulumi.set(__self__, "tables_to_includes", tables_to_includes)
@property
@pulumi.getter(name="externalTablesToExcludes")
def external_tables_to_excludes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of external tables exclude from the follower database.
"""
return pulumi.get(self, "external_tables_to_excludes")
@external_tables_to_excludes.setter
def external_tables_to_excludes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "external_tables_to_excludes", value)
@property
@pulumi.getter(name="externalTablesToIncludes")
def external_tables_to_includes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of external tables to include in the follower database.
"""
return pulumi.get(self, "external_tables_to_includes")
@external_tables_to_includes.setter
def external_tables_to_includes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "external_tables_to_includes", value)
@property
@pulumi.getter(name="materializedViewsToExcludes")
def materialized_views_to_excludes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of materialized views exclude from the follower database.
"""
return pulumi.get(self, "materialized_views_to_excludes")
@materialized_views_to_excludes.setter
def materialized_views_to_excludes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "materialized_views_to_excludes", value)
@property
@pulumi.getter(name="materializedViewsToIncludes")
def materialized_views_to_includes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of materialized views to include in the follower database.
"""
return pulumi.get(self, "materialized_views_to_includes")
@materialized_views_to_includes.setter
def materialized_views_to_includes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "materialized_views_to_includes", value)
@property
@pulumi.getter(name="tablesToExcludes")
def tables_to_excludes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of tables to exclude from the follower database.
"""
return pulumi.get(self, "tables_to_excludes")
@tables_to_excludes.setter
def tables_to_excludes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tables_to_excludes", value)
@property
@pulumi.getter(name="tablesToIncludes")
def tables_to_includes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of tables to include in the follower database.
"""
return pulumi.get(self, "tables_to_includes")
@tables_to_includes.setter
def tables_to_includes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tables_to_includes", value)
@pulumi.input_type
class ClusterIdentityArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
identity_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
principal_id: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] type: Specifies the type of Managed Service Identity that is configured on this Kusto Cluster. Possible values are: `SystemAssigned`, `UserAssigned` and `SystemAssigned, UserAssigned`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] identity_ids: A list of IDs for User Assigned Managed Identity resources to be assigned.
:param pulumi.Input[str] principal_id: The Principal ID associated with this System Assigned Managed Service Identity.
:param pulumi.Input[str] tenant_id: The Tenant ID associated with this System Assigned Managed Service Identity.
"""
pulumi.set(__self__, "type", type)
if identity_ids is not None:
pulumi.set(__self__, "identity_ids", identity_ids)
if principal_id is not None:
pulumi.set(__self__, "principal_id", principal_id)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Specifies the type of Managed Service Identity that is configured on this Kusto Cluster. Possible values are: `SystemAssigned`, `UserAssigned` and `SystemAssigned, UserAssigned`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="identityIds")
def identity_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of IDs for User Assigned Managed Identity resources to be assigned.
"""
return pulumi.get(self, "identity_ids")
@identity_ids.setter
def identity_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "identity_ids", value)
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> Optional[pulumi.Input[str]]:
"""
The Principal ID associated with this System Assigned Managed Service Identity.
"""
return pulumi.get(self, "principal_id")
@principal_id.setter
def principal_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "principal_id", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
The Tenant ID associated with this System Assigned Managed Service Identity.
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@pulumi.input_type
class ClusterOptimizedAutoScaleArgs:
def __init__(__self__, *,
maximum_instances: pulumi.Input[int],
minimum_instances: pulumi.Input[int]):
"""
        :param pulumi.Input[int] maximum_instances: The maximum number of allowed instances. Must be between `0` and `1000`.
        :param pulumi.Input[int] minimum_instances: The minimum number of allowed instances. Must be between `0` and `1000`.
"""
pulumi.set(__self__, "maximum_instances", maximum_instances)
pulumi.set(__self__, "minimum_instances", minimum_instances)
@property
@pulumi.getter(name="maximumInstances")
def maximum_instances(self) -> pulumi.Input[int]:
"""
        The maximum number of allowed instances. Must be between `0` and `1000`.
"""
return pulumi.get(self, "maximum_instances")
@maximum_instances.setter
def maximum_instances(self, value: pulumi.Input[int]):
pulumi.set(self, "maximum_instances", value)
@property
@pulumi.getter(name="minimumInstances")
def minimum_instances(self) -> pulumi.Input[int]:
"""
        The minimum number of allowed instances. Must be between `0` and `1000`.
"""
return pulumi.get(self, "minimum_instances")
@minimum_instances.setter
def minimum_instances(self, value: pulumi.Input[int]):
pulumi.set(self, "minimum_instances", value)
@pulumi.input_type
class ClusterSkuArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
capacity: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[str] name: The name of the SKU. Valid values are: `Dev(No SLA)_Standard_D11_v2`, `Dev(No SLA)_Standard_E2a_v4`, `Standard_D11_v2`, `Standard_D12_v2`, `Standard_D13_v2`, `Standard_D14_v2`, `Standard_DS13_v2+1TB_PS`, `Standard_DS13_v2+2TB_PS`, `Standard_DS14_v2+3TB_PS`, `Standard_DS14_v2+4TB_PS`, `Standard_E16as_v4+3TB_PS`, `Standard_E16as_v4+4TB_PS`, `Standard_E16a_v4`, `Standard_E2a_v4`, `Standard_E4a_v4`, `Standard_E64i_v3`, `Standard_E8as_v4+1TB_PS`, `Standard_E8as_v4+2TB_PS`, `Standard_E8a_v4`, `Standard_L16s`, `Standard_L4s`, `Standard_L8s`, `Standard_L16s_v2` and `Standard_L8s_v2`.
:param pulumi.Input[int] capacity: Specifies the node count for the cluster. Boundaries depend on the sku name.
"""
pulumi.set(__self__, "name", name)
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the SKU. Valid values are: `Dev(No SLA)_Standard_D11_v2`, `Dev(No SLA)_Standard_E2a_v4`, `Standard_D11_v2`, `Standard_D12_v2`, `Standard_D13_v2`, `Standard_D14_v2`, `Standard_DS13_v2+1TB_PS`, `Standard_DS13_v2+2TB_PS`, `Standard_DS14_v2+3TB_PS`, `Standard_DS14_v2+4TB_PS`, `Standard_E16as_v4+3TB_PS`, `Standard_E16as_v4+4TB_PS`, `Standard_E16a_v4`, `Standard_E2a_v4`, `Standard_E4a_v4`, `Standard_E64i_v3`, `Standard_E8as_v4+1TB_PS`, `Standard_E8as_v4+2TB_PS`, `Standard_E8a_v4`, `Standard_L16s`, `Standard_L4s`, `Standard_L8s`, `Standard_L16s_v2` and `Standard_L8s_v2`.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def capacity(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the node count for the cluster. Boundaries depend on the sku name.
"""
return pulumi.get(self, "capacity")
@capacity.setter
def capacity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "capacity", value)
@pulumi.input_type
class ClusterVirtualNetworkConfigurationArgs:
def __init__(__self__, *,
data_management_public_ip_id: pulumi.Input[str],
engine_public_ip_id: pulumi.Input[str],
subnet_id: pulumi.Input[str]):
"""
:param pulumi.Input[str] data_management_public_ip_id: Data management's service public IP address resource id.
:param pulumi.Input[str] engine_public_ip_id: Engine service's public IP address resource id.
:param pulumi.Input[str] subnet_id: The subnet resource id.
"""
pulumi.set(__self__, "data_management_public_ip_id", data_management_public_ip_id)
pulumi.set(__self__, "engine_public_ip_id", engine_public_ip_id)
pulumi.set(__self__, "subnet_id", subnet_id)
@property
@pulumi.getter(name="dataManagementPublicIpId")
def data_management_public_ip_id(self) -> pulumi.Input[str]:
"""
Data management's service public IP address resource id.
"""
return pulumi.get(self, "data_management_public_ip_id")
@data_management_public_ip_id.setter
def data_management_public_ip_id(self, value: pulumi.Input[str]):
pulumi.set(self, "data_management_public_ip_id", value)
@property
@pulumi.getter(name="enginePublicIpId")
def engine_public_ip_id(self) -> pulumi.Input[str]:
"""
Engine service's public IP address resource id.
"""
return pulumi.get(self, "engine_public_ip_id")
@engine_public_ip_id.setter
def engine_public_ip_id(self, value: pulumi.Input[str]):
pulumi.set(self, "engine_public_ip_id", value)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> pulumi.Input[str]:
"""
The subnet resource id.
"""
return pulumi.get(self, "subnet_id")
@subnet_id.setter
def subnet_id(self, value: pulumi.Input[str]):
pulumi.set(self, "subnet_id", value)
|
hubspot/crm/quotes/api/__init__.py | fakepop/hubspot-api-python | 117 | 12747414 | from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from hubspot.crm.quotes.api.associations_api import AssociationsApi
from hubspot.crm.quotes.api.basic_api import BasicApi
from hubspot.crm.quotes.api.batch_api import BatchApi
from hubspot.crm.quotes.api.search_api import SearchApi
|
tests/test_queries.py | matchup-ir/whooshy | 270 | 12747416 | <gh_stars>100-1000
from __future__ import with_statement
import copy
import pytest
from whoosh import fields, qparser, query
from whoosh.compat import b, u
from whoosh.filedb.filestore import RamStorage
from whoosh.qparser import QueryParser
from whoosh.query import And
from whoosh.query import AndMaybe
from whoosh.query import ConstantScoreQuery
from whoosh.query import DateRange
from whoosh.query import DisjunctionMax
from whoosh.query import Every
from whoosh.query import FuzzyTerm
from whoosh.query import Not
from whoosh.query import NullQuery
from whoosh.query import NumericRange
from whoosh.query import Or
from whoosh.query import Phrase
from whoosh.query import Prefix
from whoosh.query import Require
from whoosh.query import Term
from whoosh.query import TermRange
from whoosh.query import Variations
from whoosh.query import Wildcard
from whoosh.query.spans import SpanContains
from whoosh.query.spans import SpanFirst
from whoosh.query.spans import SpanNear
from whoosh.query.spans import SpanNot
from whoosh.query.spans import SpanOr
from whoosh.util.testing import TempIndex
def test_all_terms():
q = QueryParser("a", None).parse(u('hello b:there c:"my friend"'))
ts = q.all_terms(phrases=False)
assert sorted(ts) == [("a", "hello"), ("b", "there")]
ts = q.all_terms(phrases=True)
assert sorted(ts) == [("a", "hello"), ("b", "there"), ("c", "friend"),
("c", "my")]
def test_existing_terms():
s = fields.Schema(key=fields.ID, value=fields.TEXT)
ix = RamStorage().create_index(s)
w = ix.writer()
w.add_document(key=u("a"), value=u("alfa bravo charlie delta echo"))
w.add_document(key=u("b"), value=u("foxtrot golf hotel india juliet"))
w.commit()
r = ix.reader()
q = QueryParser("value", None).parse(u('alfa hotel tango "sierra bravo"'))
ts = q.existing_terms(r, phrases=False)
assert sorted(ts) == [("value", b("alfa")), ("value", b("hotel"))]
ts = q.existing_terms(r)
assert sorted(ts) == [("value", b("alfa")), ("value", b("bravo")), ("value", b("hotel"))]
def test_wildcard_existing_terms():
s = fields.Schema(key=fields.ID, value=fields.TEXT)
ix = RamStorage().create_index(s)
w = ix.writer()
w.add_document(key=u("a"), value=u("alfa bravo bear charlie delta"))
w.add_document(key=u("a"), value=u("boggle echo render rendering renders"))
w.commit()
r = ix.reader()
qp = QueryParser("value", ix.schema)
def words(terms):
z = []
for t in terms:
assert t[0] == "value"
z.append(t[1])
return b(" ").join(sorted(z))
q = qp.parse(u("b*"))
ts = q.existing_terms(r)
assert ts == set()
ts = q.existing_terms(r, expand=True)
assert words(ts) == b("bear boggle bravo")
q = qp.parse(u("[a TO f]"))
ts = q.existing_terms(r)
assert ts == set()
ts = q.existing_terms(r, expand=True)
assert words(ts) == b("alfa bear boggle bravo charlie delta echo")
q = query.Variations("value", "render")
ts = q.existing_terms(r, expand=False)
assert ts == set([("value", b("render"))])
ts = q.existing_terms(r, expand=True)
assert words(ts) == b("render rendering renders")
def test_replace():
q = And([Or([Term("a", "b"), Term("b", "c")], boost=1.2),
Variations("a", "b", boost=2.0)])
q = q.replace("a", "b", "BB")
assert q == And([Or([Term("a", "BB"), Term("b", "c")], boost=1.2),
Variations("a", "BB", boost=2.0)])
def test_apply():
def visit(q):
if isinstance(q, (Term, Variations, FuzzyTerm)):
q.text = q.text.upper()
return q
return q.apply(visit)
before = And([Not(Term("a", u("b"))), Variations("a", u("c")),
Not(FuzzyTerm("a", u("d")))])
after = visit(before)
assert after == And([Not(Term("a", u("B"))), Variations("a", u("C")),
Not(FuzzyTerm("a", u("D")))])
def term2var(q):
if isinstance(q, Term):
return Variations(q.fieldname, q.text)
else:
return q.apply(term2var)
q = And([Term("f", "alfa"), Or([Term("f", "bravo"),
Not(Term("f", "charlie"))])])
q = term2var(q)
assert q == And([Variations('f', 'alfa'),
Or([Variations('f', 'bravo'),
Not(Variations('f', 'charlie'))])])
def test_accept():
def boost_phrases(q):
if isinstance(q, Phrase):
q.boost *= 2.0
return q
before = And([Term("a", u("b")), Or([Term("c", u("d")),
Phrase("a", [u("e"), u("f")])]),
Phrase("a", [u("g"), u("h")], boost=0.25)])
after = before.accept(boost_phrases)
assert after == And([Term("a", u("b")),
Or([Term("c", u("d")), Phrase("a", [u("e"), u("f")], boost=2.0)]),
Phrase("a", [u("g"), u("h")], boost=0.5)])
before = Phrase("a", [u("b"), u("c")], boost=2.5)
after = before.accept(boost_phrases)
assert after == Phrase("a", [u("b"), u("c")], boost=5.0)
def test_simplify():
s = fields.Schema(k=fields.ID, v=fields.TEXT)
ix = RamStorage().create_index(s)
w = ix.writer()
w.add_document(k=u("1"), v=u("aardvark apple allan alfa bear bee"))
w.add_document(k=u("2"), v=u("brie glue geewhiz goop julia"))
w.commit()
r = ix.reader()
q1 = And([Prefix("v", "b", boost=2.0), Term("v", "juliet")])
q2 = And([Or([Term('v', 'bear', boost=2.0),
Term('v', 'bee', boost=2.0),
Term('v', 'brie', boost=2.0)]),
Term('v', 'juliet')])
assert q1.simplify(r) == q2
def test_merge_ranges():
q = And([TermRange("f1", u("a"), None), TermRange("f1", None, u("z"))])
assert q.normalize() == TermRange("f1", u("a"), u("z"))
q = And([NumericRange("f1", None, u("aaaaa")),
NumericRange("f1", u("zzzzz"), None)])
assert q.normalize() == q
q = And([TermRange("f1", u("a"), u("z")), TermRange("f1", "b", "x")])
assert q.normalize() == TermRange("f1", u("a"), u("z"))
q = And([TermRange("f1", u("a"), u("m")), TermRange("f1", u("f"), u("q"))])
assert q.normalize() == TermRange("f1", u("f"), u("m"))
q = Or([TermRange("f1", u("a"), u("m")), TermRange("f1", u("f"), u("q"))])
assert q.normalize() == TermRange("f1", u("a"), u("q"))
q = Or([TermRange("f1", u("m"), None), TermRange("f1", None, u("n"))])
assert q.normalize() == Every("f1")
q = And([Every("f1"), Term("f1", "a"), Variations("f1", "b")])
assert q.normalize() == Every("f1")
q = Or([Term("f1", u("q")), TermRange("f1", u("m"), None),
TermRange("f1", None, u("n"))])
assert q.normalize() == Every("f1")
q = And([Or([Term("f1", u("a")), Term("f1", u("b"))]), Every("f1")])
assert q.normalize() == Every("f1")
q = And([Term("f1", u("a")), And([Or([Every("f1")])])])
assert q.normalize() == Every("f1")
def test_normalize_compound():
def oq():
return Or([Term("a", u("a")), Term("a", u("b"))])
def nq(level):
if level == 0:
return oq()
else:
return Or([nq(level - 1), nq(level - 1), nq(level - 1)])
q = nq(5)
q = q.normalize()
assert q == Or([Term("a", u("a")), Term("a", u("b"))])
def test_duplicates():
q = And([Term("a", u("b")), Term("a", u("b"))])
assert q.normalize() == Term("a", u("b"))
q = And([Prefix("a", u("b")), Prefix("a", u("b"))])
assert q.normalize() == Prefix("a", u("b"))
q = And([Variations("a", u("b")), And([Variations("a", u("b")),
Term("a", u("b"))])])
assert q.normalize() == And([Variations("a", u("b")), Term("a", u("b"))])
q = And([Term("a", u("b")), Prefix("a", u("b")),
Term("a", u("b"), boost=1.1)])
assert q.normalize() == q
# Wildcard without * or ? normalizes to Term
q = And([Wildcard("a", u("b")),
And([Wildcard("a", u("b")), Term("a", u("b"))])])
assert q.normalize() == Term("a", u("b"))
# TODO: FIX THIS
def test_query_copy_hash():
def do(q1, q2):
q1a = copy.deepcopy(q1)
assert q1 == q1a
assert hash(q1) == hash(q1a)
assert q1 != q2
do(Term("a", u("b"), boost=1.1), Term("a", u("b"), boost=1.5))
do(And([Term("a", u("b")), Term("c", u("d"))], boost=1.1),
And([Term("a", u("b")), Term("c", u("d"))], boost=1.5))
do(Or([Term("a", u("b"), boost=1.1), Term("c", u("d"))]),
Or([Term("a", u("b"), boost=1.8), Term("c", u("d"))], boost=1.5))
do(DisjunctionMax([Term("a", u("b"), boost=1.8), Term("c", u("d"))]),
DisjunctionMax([Term("a", u("b"), boost=1.1), Term("c", u("d"))],
boost=1.5))
do(Not(Term("a", u("b"), boost=1.1)), Not(Term("a", u("b"), boost=1.5)))
do(Prefix("a", u("b"), boost=1.1), Prefix("a", u("b"), boost=1.5))
do(Wildcard("a", u("b*x?"), boost=1.1), Wildcard("a", u("b*x?"),
boost=1.5))
do(FuzzyTerm("a", u("b"), constantscore=True),
FuzzyTerm("a", u("b"), constantscore=False))
do(FuzzyTerm("a", u("b"), boost=1.1), FuzzyTerm("a", u("b"), boost=1.5))
do(TermRange("a", u("b"), u("c")), TermRange("a", u("b"), u("d")))
do(TermRange("a", None, u("c")), TermRange("a", None, None))
do(TermRange("a", u("b"), u("c"), boost=1.1),
TermRange("a", u("b"), u("c"), boost=1.5))
do(TermRange("a", u("b"), u("c"), constantscore=True),
TermRange("a", u("b"), u("c"), constantscore=False))
do(NumericRange("a", 1, 5), NumericRange("a", 1, 6))
do(NumericRange("a", None, 5), NumericRange("a", None, None))
do(NumericRange("a", 3, 6, boost=1.1), NumericRange("a", 3, 6, boost=1.5))
do(NumericRange("a", 3, 6, constantscore=True),
NumericRange("a", 3, 6, constantscore=False))
# do(DateRange)
do(Variations("a", u("render")), Variations("a", u("renders")))
do(Variations("a", u("render"), boost=1.1),
Variations("a", u("renders"), boost=1.5))
do(Phrase("a", [u("b"), u("c"), u("d")]),
Phrase("a", [u("b"), u("c"), u("e")]))
do(Phrase("a", [u("b"), u("c"), u("d")], boost=1.1),
Phrase("a", [u("b"), u("c"), u("d")], boost=1.5))
do(Phrase("a", [u("b"), u("c"), u("d")], slop=1),
Phrase("a", [u("b"), u("c"), u("d")], slop=2))
# do(Ordered)
do(Every(), Every("a"))
do(Every("a"), Every("b"))
do(Every("a", boost=1.1), Every("a", boost=1.5))
do(NullQuery, Term("a", u("b")))
do(ConstantScoreQuery(Term("a", u("b"))),
ConstantScoreQuery(Term("a", u("c"))))
do(ConstantScoreQuery(Term("a", u("b")), score=2.0),
ConstantScoreQuery(Term("a", u("c")), score=2.1))
do(Require(Term("a", u("b")), Term("c", u("d"))),
Require(Term("a", u("b"), boost=1.1), Term("c", u("d"))))
# do(Require)
# do(AndMaybe)
# do(AndNot)
# do(Otherwise)
do(SpanFirst(Term("a", u("b")), limit=1), SpanFirst(Term("a", u("b")),
limit=2))
do(SpanNear(Term("a", u("b")), Term("c", u("d"))),
SpanNear(Term("a", u("b")), Term("c", u("e"))))
do(SpanNear(Term("a", u("b")), Term("c", u("d")), slop=1),
SpanNear(Term("a", u("b")), Term("c", u("d")), slop=2))
do(SpanNear(Term("a", u("b")), Term("c", u("d")), mindist=1),
SpanNear(Term("a", u("b")), Term("c", u("d")), mindist=2))
do(SpanNear(Term("a", u("b")), Term("c", u("d")), ordered=True),
SpanNear(Term("a", u("b")), Term("c", u("d")), ordered=False))
do(SpanNot(Term("a", u("b")), Term("a", u("c"))),
SpanNot(Term("a", u("b")), Term("a", u("d"))))
do(SpanOr([Term("a", u("b")), Term("a", u("c")), Term("a", u("d"))]),
SpanOr([Term("a", u("b")), Term("a", u("c")), Term("a", u("e"))]))
do(SpanContains(Term("a", u("b")), Term("a", u("c"))),
SpanContains(Term("a", u("b")), Term("a", u("d"))))
# do(SpanBefore)
# do(SpanCondition)
def test_requires():
a = Term("f", u("a"))
b = Term("f", u("b"))
assert And([a, b]).requires() == set([a, b])
assert Or([a, b]).requires() == set()
assert AndMaybe(a, b).requires() == set([a])
assert a.requires() == set([a])
def test_highlight_daterange():
from datetime import datetime
schema = fields.Schema(id=fields.ID(unique=True, stored=True),
title=fields.TEXT(stored=True),
content=fields.TEXT(stored=True),
released=fields.DATETIME(stored=True))
ix = RamStorage().create_index(schema)
w = ix.writer()
w.update_document(
id=u('1'),
title=u('Life Aquatic'),
content=u('A nautic film crew sets out to kill a gigantic shark.'),
released=datetime(2004, 12, 25)
)
w.update_document(
id=u('2'),
title=u('Darjeeling Limited'),
content=u('Three brothers meet in India for a life changing train ' +
'journey.'),
released=datetime(2007, 10, 27)
)
w.commit()
s = ix.searcher()
r = s.search(Term('content', u('train')), terms=True)
assert len(r) == 1
assert r[0]["id"] == "2"
assert (r[0].highlights("content")
== 'for a life changing <b class="match term0">train</b> journey')
r = s.search(DateRange('released', datetime(2007, 1, 1), None))
assert len(r) == 1
assert r[0].highlights("content") == ''
def test_patterns():
domain = u("aaron able acre adage aether after ago ahi aim ajax akimbo "
"alembic all amiga amount ampere").split()
schema = fields.Schema(word=fields.KEYWORD(stored=True))
ix = RamStorage().create_index(schema)
with ix.writer() as w:
for word in domain:
w.add_document(word=word)
with ix.reader() as r:
assert list(r.field_terms("word")) == domain
assert list(r.expand_prefix("word", "al")) == [b("alembic"), b("all")]
q = query.Prefix("word", "al")
assert q.simplify(r).__unicode__() == "(word:alembic OR word:all)"
q = query.Wildcard("word", "a*[ae]")
assert q.simplify(r).__unicode__() == "(word:able OR word:acre OR word:adage OR word:amiga OR word:ampere)"
assert q._find_prefix(q.text) == "a"
q = query.Regex("word", "am.*[ae]")
assert q.simplify(r).__unicode__() == "(word:amiga OR word:ampere)"
assert q._find_prefix(q.text) == "am"
q = query.Regex("word", "able|ago")
assert q.simplify(r).__unicode__() == "(word:able OR word:ago)"
assert q._find_prefix(q.text) == ""
        # special case: ? may mean "zero occurrences"
q = query.Regex("word", "ah?i")
assert q.simplify(r).__unicode__() == "(word:ahi OR word:aim)"
assert q._find_prefix(q.text) == "a"
        # special case: * may mean "zero occurrences"
q = query.Regex("word", "ah*i")
assert q.simplify(r).__unicode__() == "(word:ahi OR word:aim)"
assert q._find_prefix(q.text) == "a"
def test_or_nots1():
# Issue #285
schema = fields.Schema(a=fields.KEYWORD(stored=True),
b=fields.KEYWORD(stored=True))
st = RamStorage()
ix = st.create_index(schema)
with ix.writer() as w:
w.add_document(a=u("alfa"), b=u("charlie"))
with ix.searcher() as s:
q = query.And([query.Term("a", "alfa"),
query.Or([query.Not(query.Term("b", "bravo")),
query.Not(query.Term("b", "charlie"))
])
])
r = s.search(q)
assert len(r) == 1
def test_or_nots2():
# Issue #286
schema = fields.Schema(a=fields.KEYWORD(stored=True),
b=fields.KEYWORD(stored=True))
st = RamStorage()
ix = st.create_index(schema)
with ix.writer() as w:
w.add_document(b=u("bravo"))
with ix.searcher() as s:
q = query.Or([query.Term("a", "alfa"),
query.Not(query.Term("b", "alfa"))
])
r = s.search(q)
assert len(r) == 1
def test_or_nots3():
schema = fields.Schema(title=fields.TEXT(stored=True),
itemtype=fields.ID(stored=True))
with TempIndex(schema, "ornot") as ix:
w = ix.writer()
w.add_document(title=u("a1"), itemtype=u("a"))
w.add_document(title=u("a2"), itemtype=u("a"))
w.add_document(title=u("b1"), itemtype=u("b"))
w.commit()
q = Term('itemtype', 'a') | Not(Term('itemtype', 'a'))
with ix.searcher() as s:
r = " ".join([hit["title"] for hit in s.search(q)])
assert r == "a1 a2 b1"
def test_ornot_andnot():
schema = fields.Schema(id=fields.NUMERIC(stored=True), a=fields.KEYWORD())
st = RamStorage()
ix = st.create_index(schema)
with ix.writer() as w:
w.add_document(id=0, a=u("word1 word1"))
w.add_document(id=1, a=u("word1 word2"))
w.add_document(id=2, a=u("word1 foo"))
w.add_document(id=3, a=u("foo word2"))
w.add_document(id=4, a=u("foo bar"))
with ix.searcher() as s:
qp = qparser.QueryParser("a", ix.schema)
q1 = qp.parse(u("NOT word1 NOT word2"))
q2 = qp.parse(u("NOT (word1 OR word2)"))
r1 = [hit["id"] for hit in s.search(q1, sortedby="id")]
r2 = [hit["id"] for hit in s.search(q2, sortedby="id")]
assert r1 == r2 == [4]
def test_none_in_compounds():
with pytest.raises(query.QueryError):
_ = query.And([query.Term("a", "b"), None, query.Term("c", "d")])
def test_issue_355():
schema = fields.Schema(seats=fields.NUMERIC(bits=8, stored=True))
ix = RamStorage().create_index(schema)
with ix.writer() as w:
w.add_document(seats=0)
w.add_document(seats=10)
w.add_document(seats=20)
with ix.searcher() as s:
# Passing a bytestring for a numeric field
q = Term("seats", b("maker"))
r1 = [hit["seats"] for hit in s.search(q, limit=5)]
# Passing a unicode string for a numeric field
q = Term("seats", u("maker"))
r2 = [hit["seats"] for hit in s.search(q, limit=5)]
# Passing a value too large for the numeric field
q = Term("seats", 260)
r3 = [hit["seats"] for hit in s.search(q, limit=5)]
assert r1 == r2 == r3 == []
def test_sequence():
schema = fields.Schema(id=fields.STORED, text=fields.TEXT)
ix = RamStorage().create_index(schema)
with ix.writer() as w:
w.add_document(id=0, text=u("alfa bravo charlie delta echo"))
w.add_document(id=1, text=u("bravo charlie delta echo alfa"))
w.add_document(id=2, text=u("charlie delta echo bravo"))
w.add_document(id=3, text=u("delta echo charlie"))
w.add_document(id=4, text=u("echo delta"))
with ix.searcher() as s:
seq = query.Sequence([query.Term("text", u("echo")),
query.Term("text", u("alfa"))])
q = query.And([query.Term("text", "bravo"), seq])
r = s.search(q, limit=4)
assert len(r) == 1
assert r[0]["id"] == 1
def test_andmaybe():
schema = fields.Schema(id=fields.STORED, text=fields.TEXT)
ix = RamStorage().create_index(schema)
with ix.writer() as w:
w.add_document(id=0, text=u("alfa bravo charlie delta echo"))
w.add_document(id=1, text=u("bravo charlie delta echo alfa"))
w.add_document(id=2, text=u("charlie delta echo bravo"))
w.add_document(id=3, text=u("delta echo charlie"))
w.add_document(id=4, text=u("echo delta"))
qp = qparser.QueryParser("text", schema)
q = qp.parse(u('bravo ANDMAYBE "echo alfa"'))
with ix.searcher() as s:
r = s.search(q)
assert len(r) == 3
assert [hit["id"] for hit in r] == [1, 2, 0]
def test_numeric_filter():
schema = fields.Schema(status=fields.NUMERIC, tags=fields.TEXT)
ix = RamStorage().create_index(schema)
# Add a single document with status = -2
with ix.writer() as w:
w.add_document(status=-2, tags=u"alfa bravo")
with ix.searcher() as s:
# No document should match the filter
fq = query.NumericRange("status", 0, 2)
fr = s.search(fq)
assert fr.scored_length() == 0
# Make sure the query would otherwise match
q = query.Term("tags", u"alfa")
r = s.search(q)
assert r.scored_length() == 1
# Check the query doesn't match with the filter
r = s.search(q, filter=fq)
assert r.scored_length() == 0
def test_andnot_reverse():
# Bitbucket issue 419
docs = ['ruby', 'sapphire', 'ruby + sapphire']
schema = fields.Schema(name=fields.TEXT(stored=True))
q = query.AndNot(query.Term('name', 'ruby'), query.Term('name', 'sapphire'))
with TempIndex(schema) as ix:
with ix.writer() as w:
for name in docs:
w.add_document(name=u(name))
with ix.searcher() as s:
names_fw = [hit["name"] for hit in s.search(q, limit=None)]
with TempIndex(schema) as ix:
with ix.writer() as w:
for name in reversed(docs):
w.add_document(name=u(name))
with ix.searcher() as s:
names_rv = [hit["name"] for hit in s.search(q, limit=None)]
assert len(names_fw) == len(names_rv) == 1
assert names_fw == names_rv
|
src/imitation_frames.py | akolishchak/doom-net-pytorch | 143 | 12747435 | #
# imitation_frames.py, doom-net
#
# Created by <NAME> on 01/21/17.
#
import os
import time
import h5py
import torch
import torch.nn as nn
import torch.optim as optim
from device import device
import argparse
from doom_instance import *
from aac import BaseModel
def data_generator(args, screens, variables, labels, episodes, step_size):
# remove short episodes
episode_min_size = args.episode_size*step_size
episodes = episodes[episodes[:, 1]-episodes[:, 0] > episode_min_size]
episodes_num = len(episodes)
#
step_idx = episodes[:, 0].copy() + np.random.randint(step_size, size=episodes_num)
step_screens = np.ndarray(shape=(args.batch_size, *screens.shape[1:]), dtype=np.float32)
step_variables = np.ndarray(shape=(args.batch_size, *variables.shape[1:]), dtype=np.float32)
step_labels = np.ndarray(shape=(args.batch_size,), dtype=np.int)
step_terminals = np.ones(shape=(args.batch_size,), dtype=np.float32)
# select episodes for the initial batch
batch_episodes = np.random.randint(episodes_num, size=args.batch_size)
while True:
for i in range(args.batch_size):
idx = batch_episodes[i]
step_screens[i, :] = screens[step_idx[idx]] / 127.5 - 1.0
step_variables[i, :] = variables[step_idx[idx]] / 100
step_labels[i] = labels[step_idx[idx]]
step_idx[idx] += step_size
if step_idx[idx] > episodes[idx][1]:
step_idx[idx] = episodes[idx][0] + np.random.randint(step_size)
step_terminals[i] = 0
# reached terminal state, select a new episode
batch_episodes[i] = np.random.randint(episodes_num)
else:
step_terminals[i] = 1
yield torch.from_numpy(step_screens), \
torch.from_numpy(step_variables), \
torch.from_numpy(step_labels), \
torch.from_numpy(step_terminals)
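# Editorial note (not part of the original source): data_generator above is an
# infinite generator. Each yielded tuple is (screens, variables, labels,
# terminals), all with leading dimension args.batch_size; terminals[i] == 0
# marks batch slots where a new episode was just selected, and 1 otherwise.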
def train(args):
data_file = h5py.File(args.h5_path, 'r')
screens = data_file['screens']
variables = data_file['variables']
labels = data_file['action_labels']
print('Dataset size =', len(screens))
action_sets = data_file['action_sets'][:]
episodes = data_file['episodes'][:]
input_shape = screens[0].shape
train_generator = data_generator(args, screens, variables, labels, episodes, args.skiprate)
np.save('action_set', action_sets)
model = BaseModel(input_shape[0]*args.frame_num, len(action_sets), variables.shape[1], args.frame_num).to(device)
if args.load is not None and os.path.isfile(args.load):
print("loading model parameters {}".format(args.load))
source_model = torch.load(args.load)
model.load_state_dict(source_model.state_dict())
del source_model
criterion = nn.CrossEntropyLoss()
optimizer = optim.AdamW(model.parameters(), lr=5e-4)
optimizer.zero_grad()
running_loss = 0
running_accuracy = 0
batch_time = time.time()
for batch, (screens, variables, labels, terminals) in enumerate(train_generator):
labels = labels.to(device)
outputs, _ = model(*model.transform_input(screens, variables))
loss = criterion(outputs, labels)
model.set_terminal(terminals)
running_loss += loss.item()
_, pred = outputs.max(1)
accuracy = (pred == labels).float().mean()
running_accuracy += accuracy
loss.backward()
optimizer.step()
optimizer.zero_grad()
if batch % args.episode_length == args.episode_length - 1:
running_loss /= args.episode_length
running_accuracy /= args.episode_length
print(
'[{:d}] loss: {:.3f}, accuracy: {:.3f}, time: {:.6f}'.format(
batch + 1, running_loss, running_accuracy, time.time()-batch_time
)
)
running_loss = 0
running_accuracy = 0
batch_time = time.time()
if batch % args.checkpoint_rate == args.checkpoint_rate - 1:
torch.save(model, args.checkpoint_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Doom Recorder')
parser.add_argument('--episode_size', type=int, default=20, help='number of steps in an episode')
parser.add_argument('--batch_size', type=int, default=64, help='number of game instances running in parallel')
parser.add_argument('--load', default=None, help='path to model file')
parser.add_argument('--h5_path', default=os.path.expanduser('~') + '/test/datasets/vizdoom/cig_map01/flat.h5',
help='hd5 file path')
parser.add_argument('--skiprate', type=int, default=2, help='number of skipped frames')
parser.add_argument('--episode_length', type=int, default=30, help='episode length')
parser.add_argument('--frame_num', type=int, default=4, help='number of frames per input')
parser.add_argument('--checkpoint_file', default=None, help='check point file name')
    parser.add_argument('--checkpoint_rate', type=int, default=5000, help='number of batches per checkpoint')
args = parser.parse_args()
train(args)
|
main/signals.py | curenamo/ssmreleva | 123 | 12747459 | <reponame>curenamo/ssmreleva
from django.contrib.auth.models import User
from django.db.models.signals import post_save
def set_api_permissions(sender, instance=None, created=False, **kwargs):
from utils.user_auth import set_api_permissions_for_user
if created:
set_api_permissions_for_user(instance)
post_save.connect(set_api_permissions, sender=User)
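# Editorial note (not part of the original module): the connection above runs
# set_api_permissions after every User save; because of the `created` check it
# only grants API permissions, via set_api_permissions_for_user, to newly
# created users.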
|
monai/losses/tversky.py | dylanbuchi/MONAI | 2,971 | 12747475 | <reponame>dylanbuchi/MONAI<gh_stars>1000+
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Callable, List, Optional, Union
import torch
from torch.nn.modules.loss import _Loss
from monai.networks import one_hot
from monai.utils import LossReduction
class TverskyLoss(_Loss):
"""
Compute the Tversky loss defined in:
Sadegh et al. (2017) Tversky loss function for image segmentation
using 3D fully convolutional deep networks. (https://arxiv.org/abs/1706.05721)
Adapted from:
https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L631
"""
def __init__(
self,
include_background: bool = True,
to_onehot_y: bool = False,
sigmoid: bool = False,
softmax: bool = False,
other_act: Optional[Callable] = None,
alpha: float = 0.5,
beta: float = 0.5,
reduction: Union[LossReduction, str] = LossReduction.MEAN,
smooth_nr: float = 1e-5,
smooth_dr: float = 1e-5,
batch: bool = False,
) -> None:
"""
Args:
            include_background: If False, channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: If True, apply a sigmoid function to the prediction.
softmax: If True, apply a softmax function to the prediction.
            other_act: if you don't want to use `sigmoid` or `softmax`, use another callable function to
                execute other activation layers. Defaults to ``None``. For example:
                `other_act = torch.tanh`.
alpha: weight of false positives
beta: weight of false negatives
reduction: {``"none"``, ``"mean"``, ``"sum"``}
Specifies the reduction to apply to the output. Defaults to ``"mean"``.
- ``"none"``: no reduction will be applied.
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
- ``"sum"``: the output will be summed.
smooth_nr: a small constant added to the numerator to avoid zero.
smooth_dr: a small constant added to the denominator to avoid nan.
batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
Defaults to False, a Dice loss value is computed independently from each item in the batch
before any `reduction`.
Raises:
TypeError: When ``other_act`` is not an ``Optional[Callable]``.
ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
Incompatible values.
"""
super().__init__(reduction=LossReduction(reduction).value)
if other_act is not None and not callable(other_act):
raise TypeError(f"other_act must be None or callable but is {type(other_act).__name__}.")
if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
raise ValueError("Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].")
self.include_background = include_background
self.to_onehot_y = to_onehot_y
self.sigmoid = sigmoid
self.softmax = softmax
self.other_act = other_act
self.alpha = alpha
self.beta = beta
self.smooth_nr = float(smooth_nr)
self.smooth_dr = float(smooth_dr)
self.batch = batch
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""
Args:
input: the shape should be BNH[WD].
target: the shape should be BNH[WD].
Raises:
ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
"""
if self.sigmoid:
input = torch.sigmoid(input)
n_pred_ch = input.shape[1]
if self.softmax:
if n_pred_ch == 1:
warnings.warn("single channel prediction, `softmax=True` ignored.")
else:
input = torch.softmax(input, 1)
if self.other_act is not None:
input = self.other_act(input)
if self.to_onehot_y:
if n_pred_ch == 1:
warnings.warn("single channel prediction, `to_onehot_y=True` ignored.")
else:
target = one_hot(target, num_classes=n_pred_ch)
if not self.include_background:
if n_pred_ch == 1:
warnings.warn("single channel prediction, `include_background=False` ignored.")
else:
# if skipping background, removing first channel
target = target[:, 1:]
input = input[:, 1:]
if target.shape != input.shape:
raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
p0 = input
p1 = 1 - p0
g0 = target
g1 = 1 - g0
# reducing only spatial dimensions (not batch nor channels)
reduce_axis: List[int] = torch.arange(2, len(input.shape)).tolist()
if self.batch:
# reducing spatial dimensions and batch
reduce_axis = [0] + reduce_axis
tp = torch.sum(p0 * g0, reduce_axis)
fp = self.alpha * torch.sum(p0 * g1, reduce_axis)
fn = self.beta * torch.sum(p1 * g0, reduce_axis)
numerator = tp + self.smooth_nr
denominator = tp + fp + fn + self.smooth_dr
score: torch.Tensor = 1.0 - numerator / denominator
if self.reduction == LossReduction.SUM.value:
return torch.sum(score) # sum over the batch and channel dims
if self.reduction == LossReduction.NONE.value:
return score # returns [N, num_classes] losses
if self.reduction == LossReduction.MEAN.value:
return torch.mean(score)
raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')
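# Illustrative usage sketch (editorial addition, not part of the original MONAI
# source). The tensor shapes and parameter values below are assumptions chosen
# only to exercise TverskyLoss as defined above; run the module directly to see
# the scalar loss.
if __name__ == "__main__":
    _logits = torch.randn(2, 3, 16, 16)             # B=2, C=3, 16x16 predictions
    _labels = torch.randint(0, 3, (2, 1, 16, 16))   # integer class index per pixel
    _loss_fn = TverskyLoss(softmax=True, to_onehot_y=True, alpha=0.3, beta=0.7)
    print(_loss_fn(_logits, _labels))               # scalar, since reduction="mean"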
|
verde/tests/test_scipy.py | fatiando/verde | 415 | 12747489 | <reponame>fatiando/verde
# Copyright (c) 2017 The Verde Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
Test the scipy based interpolator.
"""
import warnings
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from ..coordinates import grid_coordinates
from ..scipygridder import ScipyGridder
from ..synthetic import CheckerBoard
def test_scipy_gridder_same_points():
"See if the gridder recovers known points."
region = (1000, 5000, -8000, -7000)
synth = CheckerBoard(region=region)
data = synth.scatter(size=1000, random_state=0)
coords = (data.easting, data.northing)
# The interpolation should be perfect on top of the data points
for method in ["nearest", "linear", "cubic"]:
grd = ScipyGridder(method=method)
grd.fit(coords, data.scalars)
predicted = grd.predict(coords)
npt.assert_allclose(predicted, data.scalars)
npt.assert_allclose(grd.score(coords, data.scalars), 1)
def test_scipy_gridder():
"See if the gridder recovers known points."
synth = CheckerBoard(region=(1000, 5000, -8000, -6000))
data = synth.scatter(size=20000, random_state=0)
coords = (data.easting, data.northing)
pt_coords = (3000, -7000)
true_data = synth.predict(pt_coords)
# nearest will never be too close to the truth
grd = ScipyGridder("cubic").fit(coords, data.scalars)
npt.assert_almost_equal(grd.predict(pt_coords), true_data, decimal=2)
grd = ScipyGridder("linear").fit(coords, data.scalars)
npt.assert_almost_equal(grd.predict(pt_coords), true_data, decimal=1)
def test_scipy_gridder_region():
"See if the region is gotten from the data is correct."
region = (1000, 5000, -8000, -6000)
synth = CheckerBoard(region=region)
# Test using xarray objects
grid = synth.grid(shape=(101, 101))
coords = grid_coordinates(region, grid.scalars.shape)
grd = ScipyGridder().fit(coords, grid.scalars)
npt.assert_allclose(grd.region_, region)
# Test using pandas objects
data = pd.DataFrame(
{
"easting": coords[0].ravel(),
"northing": coords[1].ravel(),
"scalars": grid.scalars.values.ravel(),
}
)
grd = ScipyGridder().fit((data.easting, data.northing), data.scalars)
npt.assert_allclose(grd.region_, region)
def test_scipy_gridder_extra_args():
"Passing in extra arguments to scipy"
data = CheckerBoard().scatter(random_state=100)
coords = (data.easting, data.northing)
grd = ScipyGridder(method="linear", extra_args=dict(rescale=True))
grd.fit(coords, data.scalars)
predicted = grd.predict(coords)
npt.assert_allclose(predicted, data.scalars)
def test_scipy_gridder_fails():
"fit should fail for invalid method name"
data = CheckerBoard().scatter(random_state=0)
grd = ScipyGridder(method="some invalid method name")
with pytest.raises(ValueError):
grd.fit((data.easting, data.northing), data.scalars)
def test_scipy_gridder_warns():
"Check that a warning is issued when using weights."
data = CheckerBoard().scatter(random_state=100)
weights = np.ones_like(data.scalars)
grd = ScipyGridder()
msg = "ScipyGridder does not support weights and they will be ignored."
with warnings.catch_warnings(record=True) as warn:
grd.fit((data.easting, data.northing), data.scalars, weights=weights)
assert len(warn) == 1
assert issubclass(warn[-1].category, UserWarning)
assert str(warn[-1].message) == msg
|
setup.py | DataCanvasIO/Cooka | 222 | 12747521 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os.path import join as pjoin
import shutil
from setuptools import setup, find_packages
import distutils.cmd
import distutils.log
import subprocess
from os import path as P
try:
execfile
except NameError:
def execfile(fname, globs, locs=None):
locs = locs or globs
exec(compile(open(fname).read(), fname, "exec"), globs, locs)
HERE = P.dirname((P.abspath(__file__)))
version_ns = {}
execfile(P.join(HERE, 'cooka', '_version.py'), version_ns)
version = version_ns['__version__']
print("__version__=" + version)
with open(P.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
class BuildJSCommand(distutils.cmd.Command):
    description = 'Build the frontend written in JavaScript'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# 1. check files
frontend_home = pjoin(HERE, 'packages')
backend_assets = pjoin(HERE, 'cooka', 'assets')
if P.exists(backend_assets):
raise RuntimeError(f"Assets path {backend_assets} already exists")
# 2. install deps by yarn
yarn_executable = 'yarn'
self.announce("yarn install ", distutils.log.INFO)
subprocess.call([yarn_executable, 'install'], cwd=frontend_home)
# 3. build assets
self.announce("yarn build ", distutils.log.INFO)
subprocess.call([yarn_executable, 'build'], cwd=frontend_home)
# 4. copy to python package
frontend_dist = pjoin(frontend_home, 'dist')
shutil.copytree(frontend_dist, backend_assets)
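# Editorial usage note (assumption, not part of the original setup.py): with the
# cmdclass registration below, the frontend build step would typically be run as
#
#     python setup.py buildjs
#
# before packaging, so the compiled assets land in `cooka/assets` and are picked
# up through package_data.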
if __name__ == '__main__':
setup(
name="cooka",
version=version,
description="A lightweight AutoML system.",
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(exclude=["test.*", "test"]),
author="DataCanvas Community",
author_email="<EMAIL>",
cmdclass={'buildjs': BuildJSCommand},
python_requires='>=3.6.*',
license='Apache License 2.0',
install_requires=[
'numpy',
'pandas',
'scikit-learn>=0.22.1',
'requests',
'SQLAlchemy>=1.3.18',
'tornado==6.0.4',
'jinja2',
'deeptables==0.1.13',
'hypergbm==0.2.2',
'traitlets',
],
# extras_require={
# 'notebook': [
# 'shap',
# 'jupyterlab',
# 'matplotlib'
# 'pyecharts'
# ]
# },
zip_safe=False,
platforms="Linux, Mac OS X",
classifiers=[
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: Apache Software License',
],
entry_points={
'console_scripts': [
'cooka = cooka.cli:main',
]
},
include_package_data=True,
package_data={
            'cooka': ['core/train_template/*.jinja2', '*.template', 'assets/*', 'assets/static/*'],  # cannot include a directory recursively
}
)
|
tests/riscv/APIs/api_getPageInfo_01_force.py | noahsherrill/force-riscv | 111 | 12747523 | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from base.Sequence import Sequence
class MainSequence(Sequence):
"""Exercise different combinations of values for the parameters for
the genPA instruction. Focus in this test is to try values of the Size,
Align and CanAlias parameters. Type is always 'D'; Bank is always '0'.
"""
def generate(self, **kargs):
ldstr_byte_ops = ["LB##RISCV", "SB##RISCV"]
ldstr_half_ops = ["LH##RISCV", "SH##RISCV"]
ldstr_word_ops = ["LW##RISCV", "SW##RISCV"]
ldstr_double_ops = ["LD##RISCV", "SD##RISCV"]
theType = "D"
theBank = 0
theCanAlias = 0
loopCount = 2
# Iterate through Size and Align values. Force requires Align to
# be a power of 2. This 1st block tests smaller values of size -
# 1 byte to 32 bytes.
for theSize in [2 ** x for x in range(0, 5)]:
for theAlign in [2 ** x for x in range(0, 6)]:
if theAlign < theSize:
continue
for _ in range(loopCount):
rand_PA = self.genPA(
Size=theSize,
Align=theAlign,
Type=theType,
Bank=theBank,
CanAlias=theCanAlias,
)
rand_VA = self.genVAforPA(
PA=rand_PA,
Bank=theBank,
FlatMap=0,
Type=theType,
Size=theSize,
)
self.notice(
">>>>>> Requested Alignment: {:6d} Requested "
"Size: {:6d} PA target= {:16X} VA target= "
"{:16X}".format(theAlign, theSize, rand_PA, rand_VA)
)
# Bank argument must be 0 now as the 3rd argument.
# May not be required at some point.
page_info = self.getPageInfo(rand_VA, "VA", 0)
# This section displays the keys and values for the
# second and third level dictionaries.
if "Page" in page_info.keys():
self.notice(
">>>>>>>>>> VA Page info <<<<<<<<<<<<<<<<<<<<"
"<<<<<<<<<<<<<"
)
for k in page_info["Page"]:
if k != "DescriptorDetails":
if k == "MemoryType" or k == "MemoryAttr":
self.notice(
">>>>>>>>>> Key: {:15} Value: "
"{}".format(k, page_info["Page"][k])
)
else:
self.notice(
">>>>>>>>>> Key: {:15} Value: "
"0x{:x}".format(
k, page_info["Page"][k]
)
)
else:
for j in page_info["Page"][
k
]: # Descriptor details are in 3rd level
# dict in page_info object
self.notice(
">>>>>>>>>> DescriptorDetails: "
"Key: {:22} Value: {}".format(
j, page_info["Page"][k][j]
)
)
else:
self.error(
">>>>>>>>>> VA Page info: Nothing returned "
'from getPageInfo "VA"'
)
if "Table" in page_info.keys():
self.notice(
">>>>>>>>>> VA Table info <<<<<<<<<<<<<<<<<"
"<<<<<<<<<<<<<<<<"
)
for k in page_info["Table"]:
self.notice(
">>>>>>>>>> Key: {:12} Value: {}".format(
k, page_info["Table"][k]
)
)
else:
self.notice(
">>>>>>>>>> VA Table info: No Table info "
'returned from getPageInfo "VA"'
)
# Just making sure we can actually generate an
# instruction with the rand_VA determined above.
instr_id = self.genInstruction(
self.choice(ldstr_byte_ops), {"LSTarget": rand_VA}
)
# Iterate through Size and Align values. Force requires Align to be
        # a power of 2. This 2nd block tests larger sizes - 32KB to 64KB (alignments up to 128KB).
for theSize in [2 ** x for x in range(15, 17)]:
for theAlign in [2 ** x for x in range(15, 18)]:
if theAlign < theSize:
continue
for _ in range(loopCount):
rand_PA = self.genPA(
Size=theSize,
Align=theAlign,
Type=theType,
Bank=theBank,
CanAlias=theCanAlias,
)
rand_VA = self.genVAforPA(
PA=rand_PA,
Bank=theBank,
FlatMap=0,
CanAlias=0,
ForceNewAddress=1,
Type=theType,
Size=theSize,
)
self.notice(
">>>>>> Requested Alignment: {:6d} Requested "
"Size: {:6d} PA target= {:16X} VA target= "
"{:16X}".format(theAlign, theSize, rand_PA, rand_VA)
)
instr_id = self.genInstruction(
self.choice(ldstr_byte_ops), {"LSTarget": rand_VA}
)
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
|
deepscm/experiments/medical/ukbb/sem_vi/base_sem_experiment.py | biomedia-mira/deepscm | 183 | 12747525 | <reponame>biomedia-mira/deepscm<gh_stars>100-1000
import pyro
from typing import Mapping
from pyro.infer import SVI, TraceGraph_ELBO
from pyro.nn import pyro_method
from pyro.optim import Adam
from torch.distributions import Independent
import torch
from pyro.distributions.torch_transform import ComposeTransformModule
from pyro.distributions.transforms import (
ComposeTransform, AffineTransform, ExpTransform, Spline
)
from pyro.distributions import LowRankMultivariateNormal, MultivariateNormal, Normal, TransformedDistribution
from deepscm.arch.medical import Decoder, Encoder
from deepscm.distributions.transforms.reshape import ReshapeTransform
from deepscm.distributions.transforms.affine import LowerCholeskyAffine
from deepscm.distributions.deep import DeepMultivariateNormal, DeepIndepNormal, Conv2dIndepNormal, DeepLowRankMultivariateNormal
import numpy as np
from deepscm.experiments.medical.base_experiment import BaseCovariateExperiment, BaseSEM, EXPERIMENT_REGISTRY, MODEL_REGISTRY # noqa: F401
class CustomELBO(TraceGraph_ELBO):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.trace_storage = {'model': None, 'guide': None}
def _get_trace(self, model, guide, args, kwargs):
model_trace, guide_trace = super()._get_trace(model, guide, args, kwargs)
self.trace_storage['model'] = model_trace
self.trace_storage['guide'] = guide_trace
return model_trace, guide_trace
class Lambda(torch.nn.Module):
def __init__(self, func):
super().__init__()
self.func = func
def forward(self, x):
return self.func(x)
class BaseVISEM(BaseSEM):
context_dim = 0
def __init__(self, latent_dim: int, logstd_init: float = -5, enc_filters: str = '16,32,64,128', dec_filters: str = '128,64,32,16',
num_convolutions: int = 2, use_upconv: bool = False, decoder_type: str = 'fixed_var', decoder_cov_rank: int = 10, **kwargs):
super().__init__(**kwargs)
self.img_shape = (1, 192 // self.downsample, 192 // self.downsample) if self.downsample > 0 else (1, 192, 192)
self.latent_dim = latent_dim
self.logstd_init = logstd_init
self.enc_filters = tuple(int(f.strip()) for f in enc_filters.split(','))
self.dec_filters = tuple(int(f.strip()) for f in dec_filters.split(','))
self.num_convolutions = num_convolutions
self.use_upconv = use_upconv
self.decoder_type = decoder_type
self.decoder_cov_rank = decoder_cov_rank
# decoder parts
decoder = Decoder(
num_convolutions=self.num_convolutions, filters=self.dec_filters,
latent_dim=self.latent_dim + self.context_dim, upconv=self.use_upconv,
output_size=self.img_shape)
if self.decoder_type == 'fixed_var':
self.decoder = Conv2dIndepNormal(decoder, 1, 1)
torch.nn.init.zeros_(self.decoder.logvar_head.weight)
self.decoder.logvar_head.weight.requires_grad = False
torch.nn.init.constant_(self.decoder.logvar_head.bias, self.logstd_init)
self.decoder.logvar_head.bias.requires_grad = False
elif self.decoder_type == 'learned_var':
self.decoder = Conv2dIndepNormal(decoder, 1, 1)
torch.nn.init.zeros_(self.decoder.logvar_head.weight)
self.decoder.logvar_head.weight.requires_grad = False
torch.nn.init.constant_(self.decoder.logvar_head.bias, self.logstd_init)
self.decoder.logvar_head.bias.requires_grad = True
elif self.decoder_type == 'independent_gaussian':
self.decoder = Conv2dIndepNormal(decoder, 1, 1)
torch.nn.init.zeros_(self.decoder.logvar_head.weight)
self.decoder.logvar_head.weight.requires_grad = True
torch.nn.init.normal_(self.decoder.logvar_head.bias, self.logstd_init, 1e-1)
self.decoder.logvar_head.bias.requires_grad = True
elif self.decoder_type == 'multivariate_gaussian':
seq = torch.nn.Sequential(decoder, Lambda(lambda x: x.view(x.shape[0], -1)))
self.decoder = DeepMultivariateNormal(seq, np.prod(self.img_shape), np.prod(self.img_shape))
elif self.decoder_type == 'sharedvar_multivariate_gaussian':
seq = torch.nn.Sequential(decoder, Lambda(lambda x: x.view(x.shape[0], -1)))
self.decoder = DeepMultivariateNormal(seq, np.prod(self.img_shape), np.prod(self.img_shape))
torch.nn.init.zeros_(self.decoder.logdiag_head.weight)
self.decoder.logdiag_head.weight.requires_grad = False
torch.nn.init.zeros_(self.decoder.lower_head.weight)
self.decoder.lower_head.weight.requires_grad = False
torch.nn.init.normal_(self.decoder.logdiag_head.bias, self.logstd_init, 1e-1)
self.decoder.logdiag_head.bias.requires_grad = True
elif self.decoder_type == 'lowrank_multivariate_gaussian':
seq = torch.nn.Sequential(decoder, Lambda(lambda x: x.view(x.shape[0], -1)))
self.decoder = DeepLowRankMultivariateNormal(seq, np.prod(self.img_shape), np.prod(self.img_shape), decoder_cov_rank)
elif self.decoder_type == 'sharedvar_lowrank_multivariate_gaussian':
seq = torch.nn.Sequential(decoder, Lambda(lambda x: x.view(x.shape[0], -1)))
self.decoder = DeepLowRankMultivariateNormal(seq, np.prod(self.img_shape), np.prod(self.img_shape), decoder_cov_rank)
torch.nn.init.zeros_(self.decoder.logdiag_head.weight)
self.decoder.logdiag_head.weight.requires_grad = False
torch.nn.init.zeros_(self.decoder.factor_head.weight)
self.decoder.factor_head.weight.requires_grad = False
torch.nn.init.normal_(self.decoder.logdiag_head.bias, self.logstd_init, 1e-1)
self.decoder.logdiag_head.bias.requires_grad = True
else:
            raise ValueError(f'unknown decoder_type: {self.decoder_type}')
# encoder parts
self.encoder = Encoder(num_convolutions=self.num_convolutions, filters=self.enc_filters, latent_dim=self.latent_dim, input_size=self.img_shape)
latent_layers = torch.nn.Sequential(torch.nn.Linear(self.latent_dim + self.context_dim, self.latent_dim), torch.nn.ReLU())
self.latent_encoder = DeepIndepNormal(latent_layers, self.latent_dim, self.latent_dim)
# priors
self.register_buffer('age_base_loc', torch.zeros([1, ], requires_grad=False))
self.register_buffer('age_base_scale', torch.ones([1, ], requires_grad=False))
self.sex_logits = torch.nn.Parameter(torch.zeros([1, ]))
self.register_buffer('ventricle_volume_base_loc', torch.zeros([1, ], requires_grad=False))
self.register_buffer('ventricle_volume_base_scale', torch.ones([1, ], requires_grad=False))
self.register_buffer('brain_volume_base_loc', torch.zeros([1, ], requires_grad=False))
self.register_buffer('brain_volume_base_scale', torch.ones([1, ], requires_grad=False))
self.register_buffer('z_loc', torch.zeros([latent_dim, ], requires_grad=False))
self.register_buffer('z_scale', torch.ones([latent_dim, ], requires_grad=False))
self.register_buffer('x_base_loc', torch.zeros(self.img_shape, requires_grad=False))
self.register_buffer('x_base_scale', torch.ones(self.img_shape, requires_grad=False))
self.register_buffer('age_flow_lognorm_loc', torch.zeros([], requires_grad=False))
self.register_buffer('age_flow_lognorm_scale', torch.ones([], requires_grad=False))
self.register_buffer('ventricle_volume_flow_lognorm_loc', torch.zeros([], requires_grad=False))
self.register_buffer('ventricle_volume_flow_lognorm_scale', torch.ones([], requires_grad=False))
self.register_buffer('brain_volume_flow_lognorm_loc', torch.zeros([], requires_grad=False))
self.register_buffer('brain_volume_flow_lognorm_scale', torch.ones([], requires_grad=False))
# age flow
self.age_flow_components = ComposeTransformModule([Spline(1)])
self.age_flow_lognorm = AffineTransform(loc=self.age_flow_lognorm_loc.item(), scale=self.age_flow_lognorm_scale.item())
self.age_flow_constraint_transforms = ComposeTransform([self.age_flow_lognorm, ExpTransform()])
self.age_flow_transforms = ComposeTransform([self.age_flow_components, self.age_flow_constraint_transforms])
# other flows shared components
self.ventricle_volume_flow_lognorm = AffineTransform(loc=self.ventricle_volume_flow_lognorm_loc.item(), scale=self.ventricle_volume_flow_lognorm_scale.item()) # noqa: E501
self.ventricle_volume_flow_constraint_transforms = ComposeTransform([self.ventricle_volume_flow_lognorm, ExpTransform()])
self.brain_volume_flow_lognorm = AffineTransform(loc=self.brain_volume_flow_lognorm_loc.item(), scale=self.brain_volume_flow_lognorm_scale.item())
self.brain_volume_flow_constraint_transforms = ComposeTransform([self.brain_volume_flow_lognorm, ExpTransform()])
def __setattr__(self, name, value):
super().__setattr__(name, value)
if name == 'age_flow_lognorm_loc':
self.age_flow_lognorm.loc = self.age_flow_lognorm_loc.item()
elif name == 'age_flow_lognorm_scale':
self.age_flow_lognorm.scale = self.age_flow_lognorm_scale.item()
elif name == 'ventricle_volume_flow_lognorm_loc':
self.ventricle_volume_flow_lognorm.loc = self.ventricle_volume_flow_lognorm_loc.item()
elif name == 'ventricle_volume_flow_lognorm_scale':
self.ventricle_volume_flow_lognorm.scale = self.ventricle_volume_flow_lognorm_scale.item()
elif name == 'brain_volume_flow_lognorm_loc':
self.brain_volume_flow_lognorm.loc = self.brain_volume_flow_lognorm_loc.item()
elif name == 'brain_volume_flow_lognorm_scale':
self.brain_volume_flow_lognorm.scale = self.brain_volume_flow_lognorm_scale.item()
def _get_preprocess_transforms(self):
return super()._get_preprocess_transforms().inv
def _get_transformed_x_dist(self, latent):
x_pred_dist = self.decoder.predict(latent)
x_base_dist = Normal(self.x_base_loc, self.x_base_scale).to_event(3)
preprocess_transform = self._get_preprocess_transforms()
if isinstance(x_pred_dist, MultivariateNormal) or isinstance(x_pred_dist, LowRankMultivariateNormal):
chol_transform = LowerCholeskyAffine(x_pred_dist.loc, x_pred_dist.scale_tril)
reshape_transform = ReshapeTransform(self.img_shape, (np.prod(self.img_shape), ))
x_reparam_transform = ComposeTransform([reshape_transform, chol_transform, reshape_transform.inv])
elif isinstance(x_pred_dist, Independent):
x_pred_dist = x_pred_dist.base_dist
x_reparam_transform = AffineTransform(x_pred_dist.loc, x_pred_dist.scale, 3)
return TransformedDistribution(x_base_dist, ComposeTransform([x_reparam_transform, preprocess_transform]))
@pyro_method
def guide(self, x, age, sex, ventricle_volume, brain_volume):
raise NotImplementedError()
@pyro_method
def svi_guide(self, x, age, sex, ventricle_volume, brain_volume):
self.guide(x, age, sex, ventricle_volume, brain_volume)
@pyro_method
def svi_model(self, x, age, sex, ventricle_volume, brain_volume):
with pyro.plate('observations', x.shape[0]):
pyro.condition(self.model, data={'x': x, 'sex': sex, 'age': age, 'ventricle_volume': ventricle_volume, 'brain_volume': brain_volume})()
@pyro_method
def infer_z(self, *args, **kwargs):
return self.guide(*args, **kwargs)
@pyro_method
def infer(self, **obs):
_required_data = ('x', 'sex', 'age', 'ventricle_volume', 'brain_volume')
assert set(obs.keys()) == set(_required_data), 'got: {}'.format(tuple(obs.keys()))
z = self.infer_z(**obs)
exogeneous = self.infer_exogeneous(z=z, **obs)
exogeneous['z'] = z
return exogeneous
@pyro_method
def reconstruct(self, x, age, sex, ventricle_volume, brain_volume, num_particles: int = 1):
obs = {'x': x, 'sex': sex, 'age': age, 'ventricle_volume': ventricle_volume, 'brain_volume': brain_volume}
z_dist = pyro.poutine.trace(self.guide).get_trace(**obs).nodes['z']['fn']
recons = []
for _ in range(num_particles):
z = pyro.sample('z', z_dist)
recon, *_ = pyro.poutine.condition(
self.sample, data={'sex': sex, 'age': age, 'ventricle_volume': ventricle_volume, 'brain_volume': brain_volume, 'z': z})(x.shape[0])
recons += [recon]
return torch.stack(recons).mean(0)
@pyro_method
def counterfactual(self, obs: Mapping, condition: Mapping = None, num_particles: int = 1):
_required_data = ('x', 'sex', 'age', 'ventricle_volume', 'brain_volume')
assert set(obs.keys()) == set(_required_data), 'got: {}'.format(tuple(obs.keys()))
z_dist = pyro.poutine.trace(self.guide).get_trace(**obs).nodes['z']['fn']
counterfactuals = []
for _ in range(num_particles):
z = pyro.sample('z', z_dist)
exogeneous = self.infer_exogeneous(z=z, **obs)
exogeneous['z'] = z
# condition on sex if sex isn't included in 'do' as it's a root node and we don't have the exogeneous noise for it yet...
if 'sex' not in condition.keys():
exogeneous['sex'] = obs['sex']
counter = pyro.poutine.do(pyro.poutine.condition(self.sample_scm, data=exogeneous), data=condition)(obs['x'].shape[0])
counterfactuals += [counter]
return {k: v for k, v in zip(('x', 'z', 'sex', 'age', 'ventricle_volume', 'brain_volume'), (torch.stack(c).mean(0) for c in zip(*counterfactuals)))}
@classmethod
def add_arguments(cls, parser):
parser = super().add_arguments(parser)
parser.add_argument('--latent_dim', default=100, type=int, help="latent dimension of model (default: %(default)s)")
parser.add_argument('--logstd_init', default=-5, type=float, help="init of logstd (default: %(default)s)")
parser.add_argument('--enc_filters', default='16,24,32,64,128', type=str, help="number of filters to use (default: %(default)s)")
parser.add_argument('--dec_filters', default='128,64,32,24,16', type=str, help="number of filters to use (default: %(default)s)")
parser.add_argument('--num_convolutions', default=3, type=int, help="number of convolutions to build model (default: %(default)s)")
        parser.add_argument('--use_upconv', default=False, action='store_true', help="toggle upconv (default: %(default)s)")
parser.add_argument(
'--decoder_type', default='fixed_var', help="var type (default: %(default)s)",
choices=['fixed_var', 'learned_var', 'independent_gaussian', 'sharedvar_multivariate_gaussian', 'multivariate_gaussian',
'sharedvar_lowrank_multivariate_gaussian', 'lowrank_multivariate_gaussian'])
parser.add_argument('--decoder_cov_rank', default=10, type=int, help="rank for lowrank cov approximation (requires lowrank decoder) (default: %(default)s)") # noqa: E501
return parser
class SVIExperiment(BaseCovariateExperiment):
def __init__(self, hparams, pyro_model: BaseSEM):
super().__init__(hparams, pyro_model)
self.svi_loss = CustomELBO(num_particles=hparams.num_svi_particles)
self._build_svi()
def _build_svi(self, loss=None):
def per_param_callable(module_name, param_name):
params = {'eps': 1e-5, 'amsgrad': self.hparams.use_amsgrad, 'weight_decay': self.hparams.l2}
if 'flow_components' in module_name or 'sex_logits' in param_name:
params['lr'] = self.hparams.pgm_lr
else:
params['lr'] = self.hparams.lr
print(f'building opt for {module_name} - {param_name} with p: {params}')
return params
if loss is None:
loss = self.svi_loss
if self.hparams.use_cf_guide:
def guide(*args, **kwargs):
return self.pyro_model.counterfactual_guide(*args, **kwargs, counterfactual_type=self.hparams.cf_elbo_type)
self.svi = SVI(self.pyro_model.svi_model, guide, Adam(per_param_callable), loss)
else:
self.svi = SVI(self.pyro_model.svi_model, self.pyro_model.svi_guide, Adam(per_param_callable), loss)
self.svi.loss_class = loss
def backward(self, *args, **kwargs):
pass # No loss to backpropagate since we're using Pyro's optimisation machinery
def print_trace_updates(self, batch):
with torch.no_grad():
print('Traces:\n' + ('#' * 10))
guide_trace = pyro.poutine.trace(self.pyro_model.svi_guide).get_trace(**batch)
model_trace = pyro.poutine.trace(pyro.poutine.replay(self.pyro_model.svi_model, trace=guide_trace)).get_trace(**batch)
guide_trace = pyro.poutine.util.prune_subsample_sites(guide_trace)
model_trace = pyro.poutine.util.prune_subsample_sites(model_trace)
model_trace.compute_log_prob()
guide_trace.compute_score_parts()
print(f'model: {model_trace.nodes.keys()}')
for name, site in model_trace.nodes.items():
if site["type"] == "sample":
fn = site['fn']
if isinstance(fn, Independent):
fn = fn.base_dist
print(f'{name}: {fn} - {fn.support}')
log_prob_sum = site["log_prob_sum"]
is_obs = site["is_observed"]
print(f'model - log p({name}) = {log_prob_sum} | obs={is_obs}')
if torch.isnan(log_prob_sum):
value = site['value'][0]
conc0 = fn.concentration0
conc1 = fn.concentration1
print(f'got:\n{value}\n{conc0}\n{conc1}')
raise Exception()
print(f'guide: {guide_trace.nodes.keys()}')
for name, site in guide_trace.nodes.items():
if site["type"] == "sample":
fn = site['fn']
if isinstance(fn, Independent):
fn = fn.base_dist
print(f'{name}: {fn} - {fn.support}')
entropy = site["score_parts"].entropy_term.sum()
is_obs = site["is_observed"]
print(f'guide - log q({name}) = {entropy} | obs={is_obs}')
def get_trace_metrics(self, batch):
metrics = {}
model = self.svi.loss_class.trace_storage['model']
guide = self.svi.loss_class.trace_storage['guide']
metrics['log p(x)'] = model.nodes['x']['log_prob'].mean()
metrics['log p(age)'] = model.nodes['age']['log_prob'].mean()
metrics['log p(sex)'] = model.nodes['sex']['log_prob'].mean()
metrics['log p(ventricle_volume)'] = model.nodes['ventricle_volume']['log_prob'].mean()
metrics['log p(brain_volume)'] = model.nodes['brain_volume']['log_prob'].mean()
metrics['p(z)'] = model.nodes['z']['log_prob'].mean()
metrics['q(z)'] = guide.nodes['z']['log_prob'].mean()
metrics['log p(z) - log q(z)'] = metrics['p(z)'] - metrics['q(z)']
return metrics
def prep_batch(self, batch):
x = batch['image'] * 255.
age = batch['age'].unsqueeze(1).float()
sex = batch['sex'].unsqueeze(1).float()
ventricle_volume = batch['ventricle_volume'].unsqueeze(1).float()
brain_volume = batch['brain_volume'].unsqueeze(1).float()
x = x.float()
if self.training:
x += torch.rand_like(x)
return {'x': x, 'age': age, 'sex': sex, 'ventricle_volume': ventricle_volume, 'brain_volume': brain_volume}
def training_step(self, batch, batch_idx):
batch = self.prep_batch(batch)
if self.hparams.validate:
print('Validation:')
self.print_trace_updates(batch)
loss = self.svi.step(**batch)
metrics = self.get_trace_metrics(batch)
if np.isnan(loss):
            self.logger.experiment.add_text('nan', f'nan at {self.current_epoch}:\n{metrics}')
raise ValueError('loss went to nan with metrics:\n{}'.format(metrics))
tensorboard_logs = {('train/' + k): v for k, v in metrics.items()}
tensorboard_logs['train/loss'] = loss
self.log_dict(tensorboard_logs)
return torch.Tensor([loss])
def validation_step(self, batch, batch_idx):
batch = self.prep_batch(batch)
loss = self.svi.evaluate_loss(**batch)
metrics = self.get_trace_metrics(batch)
return {'loss': loss, **metrics}
def test_step(self, batch, batch_idx):
batch = self.prep_batch(batch)
loss = self.svi.evaluate_loss(**batch)
metrics = self.get_trace_metrics(batch)
samples = self.build_test_samples(batch)
return {'loss': loss, **metrics, 'samples': samples}
@classmethod
def add_arguments(cls, parser):
parser = super().add_arguments(parser)
parser.add_argument('--num_svi_particles', default=4, type=int, help="number of particles to use for ELBO (default: %(default)s)")
parser.add_argument('--num_sample_particles', default=32, type=int, help="number of particles to use for MC sampling (default: %(default)s)")
parser.add_argument('--use_cf_guide', default=False, action='store_true', help="whether to use counterfactual guide (default: %(default)s)")
parser.add_argument(
'--cf_elbo_type', default=-1, choices=[-1, 0, 1, 2],
help="-1: randomly select per batch, 0: shuffle thickness, 1: shuffle intensity, 2: shuffle both (default: %(default)s)")
return parser
EXPERIMENT_REGISTRY[SVIExperiment.__name__] = SVIExperiment
|
src/prefect/cli/kv_store.py | concreted/prefect | 8,633 | 12747635 | <reponame>concreted/prefect<filename>src/prefect/cli/kv_store.py
import sys
import click
from prefect import config
from prefect.backend import kv_store
from prefect.backend.kv_store import NON_CLOUD_BACKEND_ERROR_MESSAGE
from prefect.cli.build_register import (
handle_terminal_error,
TerminalError,
log_exception,
)
@click.group()
def kv():
"""
Interact with Prefect Cloud KV Store
\b
Usage:
$ prefect kv [COMMAND]
"""
if config.backend != "cloud":
click.secho(NON_CLOUD_BACKEND_ERROR_MESSAGE, fg="red")
sys.exit(1)
@kv.command(name="set")
@click.argument("key")
@click.argument("value")
@handle_terminal_error
def set_command(key, value):
"""
Set a key value pair, overriding existing values if key exists
\b
Arguments:
key TEXT Key to set
value TEXT Value associated with key to set
"""
try:
kv_store.set_key_value(key=key, value=value)
click.secho("Key value pair set successfully", fg="green")
except Exception as exc:
log_exception(exc)
raise TerminalError("An error occurred setting the key value pair")
@kv.command(name="get")
@click.argument("key")
@handle_terminal_error
def get_command(key):
"""
Get the value of a key
\b
Arguments:
key TEXT Key to get
"""
try:
result = kv_store.get_key_value(key=key)
click.secho(f"Key {key!r} has value {result!r}", fg="green")
except Exception as exc:
log_exception(exc)
raise TerminalError(f"Error retrieving value for key {key!r}")
@kv.command(name="delete")
@click.argument("key")
@handle_terminal_error
def delete_command(key):
"""
Delete a key value pair
\b
Arguments:
key TEXT Key to delete
"""
try:
kv_store.delete_key(key=key)
click.secho(f"Key {key!r} has been deleted", fg="green")
except Exception as exc:
log_exception(exc)
raise TerminalError("An error occurred deleting the key")
@kv.command(name="list")
@handle_terminal_error
def list_command():
"""
List all key value pairs
"""
try:
result = kv_store.list_keys()
if result:
click.secho("\n".join(result), fg="green")
else:
click.secho("No keys found", fg="yellow")
except Exception as exc:
log_exception(exc)
raise TerminalError("An error occurred when listing keys")
|
.modules/.CMSeeK/deepscans/joom/check_debug.py | termux-one/EasY_HaCk | 1,103 | 12747636 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# This is a part of CMSeeK, check the LICENSE file for more information
# Copyright (c) 2018 Tuhinshubhra
import cmseekdb.basic as cmseek
# I know there is no reason at all to create a separate module for this.. there's something that's going to be added here so.. trust me!
def start(source):
# print(source)
if 'Joomla! Debug Console' in source or 'xdebug.org/docs/all_settings' in source:
cmseek.success('Debug mode on!')
return '1'
else:
return '0'
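# Editorial usage note (not part of the original module): `start` expects the raw
# HTML source of the target page and returns '1' when a Joomla debug-console or
# xdebug fingerprint is present, '0' otherwise, e.g. start(page_source) == '1'.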
|
core/tests/test_polyaxonfile/test_patch_specification.py | admariner/polyaxon | 3,200 | 12747643 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from polyaxon import pkg, types
from polyaxon.config_reader.utils import deep_update
from polyaxon.containers.names import MAIN_JOB_CONTAINER
from polyaxon.polyaxonfile import OperationSpecification
from polyaxon.polyflow import V1Component, V1EventKind, V1Operation, V1RunKind
from polyaxon.schemas.patch_strategy import V1PatchStrategy
from polyaxon.utils.tz_utils import now
from tests.utils import BaseTestCase
@pytest.mark.polyaxonfile_mark
class TestPatchSpecifications(BaseTestCase):
DEFAULT_INT_VALUE = 2
DEFAULT_DT_VALUE = now().isoformat()
DEFAULT_STR_VALUE = "test"
PATCH_INT_VALUE = 13
PATCH_DT_VALUE = now().isoformat()
PATCH_STR_VALUE = "patch"
def get_empty_operation(self):
return OperationSpecification.read(
{"version": pkg.SCHEMA_VERSION, "hubRef": "test"}
)
def get_full_operation(self):
return OperationSpecification.read(
{
"version": pkg.SCHEMA_VERSION,
"name": self.DEFAULT_STR_VALUE,
"description": self.DEFAULT_STR_VALUE,
"tags": [
"{}1".format(self.DEFAULT_STR_VALUE),
"{}2".format(self.DEFAULT_STR_VALUE),
],
"presets": [self.DEFAULT_STR_VALUE],
"queue": "{}/{}".format(self.DEFAULT_STR_VALUE, self.DEFAULT_STR_VALUE),
"cache": {
"disable": False,
"ttl": self.DEFAULT_INT_VALUE,
},
"termination": {
"maxRetries": self.DEFAULT_INT_VALUE,
"ttl": self.DEFAULT_INT_VALUE,
"timeout": self.DEFAULT_INT_VALUE,
},
"plugins": {
"auth": False,
"shm": False,
"collectLogs": False,
"collectArtifacts": False,
"collectResources": False,
},
"build": {
"params": {
"patch-key1": {"value": "{}2".format(self.DEFAULT_STR_VALUE)},
"patch-key2": {"value": "{}1".format(self.DEFAULT_STR_VALUE)},
},
"runPatch": {
"init": [
{
"connection": self.DEFAULT_STR_VALUE,
"git": {"revision": self.DEFAULT_STR_VALUE},
}
],
},
"hubRef": self.DEFAULT_STR_VALUE,
},
"hooks": [
{
"hubRef": "{}1".format(self.DEFAULT_STR_VALUE),
"trigger": "succeeded",
"connection": "{}1".format(self.DEFAULT_STR_VALUE),
},
{
"connection": "{}2".format(self.DEFAULT_STR_VALUE),
"hubRef": "{}2".format(self.DEFAULT_STR_VALUE),
},
],
"params": {
"patch-key1": {"value": "{}2".format(self.DEFAULT_STR_VALUE)},
"patch-key2": {"value": "{}1".format(self.DEFAULT_STR_VALUE)},
},
"runPatch": {
"init": [
{
"connection": self.DEFAULT_STR_VALUE,
"git": {"revision": self.DEFAULT_STR_VALUE},
}
],
"connections": [
"{}1".format(self.DEFAULT_STR_VALUE),
"{}2".format(self.DEFAULT_STR_VALUE),
],
"container": {
"resources": {"requests": {"cpu": self.DEFAULT_INT_VALUE}}
},
"environment": {
"nodeSelector": {"polyaxon": "core"},
"serviceAccountName": self.DEFAULT_STR_VALUE,
"imagePullSecrets": [
"{}1".format(self.DEFAULT_STR_VALUE),
"{}2".format(self.DEFAULT_STR_VALUE),
],
},
},
"schedule": {
"kind": "cron",
"cron": "0 0 * * *",
"startAt": self.DEFAULT_DT_VALUE,
"endAt": self.DEFAULT_DT_VALUE,
},
"events": [
{
"kinds": [V1EventKind.RUN_STATUS_SCHEDULED],
"ref": "{}1".format(self.DEFAULT_STR_VALUE),
},
{
"kinds": [V1EventKind.RUN_STATUS_SCHEDULED],
"ref": "{}2".format(self.DEFAULT_STR_VALUE),
},
],
"joins": [
{
"query": "{}1".format(self.DEFAULT_STR_VALUE),
"sort": "{}1".format(self.DEFAULT_STR_VALUE),
"params": {
"u": {"value": "{}1".format(self.DEFAULT_STR_VALUE)},
},
},
{
"query": "{}2".format(self.DEFAULT_STR_VALUE),
"sort": "{}2".format(self.DEFAULT_STR_VALUE),
"params": {
"v": {
"value": "{}2".format(self.DEFAULT_STR_VALUE),
"contextOnly": True,
},
},
},
],
"matrix": {
"concurrency": self.DEFAULT_INT_VALUE,
"kind": "mapping",
"values": [
{"a": self.DEFAULT_INT_VALUE},
{"b": self.DEFAULT_INT_VALUE},
],
},
"dependencies": [
"{}1".format(self.DEFAULT_STR_VALUE),
"{}2".format(self.DEFAULT_STR_VALUE),
],
"trigger": "all_succeeded",
"conditions": self.DEFAULT_STR_VALUE,
"skipOnUpstreamSkip": True,
"hubRef": self.DEFAULT_STR_VALUE,
}
)
def get_full_operation_with_component(self):
operation = self.get_full_operation()
config_dict = {
"inputs": [{"name": "param1", "type": types.INT}],
"run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
}
operation.component = V1Component.from_dict(config_dict)
return operation
def get_full_preset(self):
return OperationSpecification.read(
{
"version": pkg.SCHEMA_VERSION,
"name": self.PATCH_STR_VALUE,
"isPreset": True,
"description": self.PATCH_STR_VALUE,
"tags": [
"{}1".format(self.PATCH_STR_VALUE),
"{}2".format(self.PATCH_STR_VALUE),
],
"presets": [self.PATCH_STR_VALUE],
"queue": "{}/{}".format(self.PATCH_STR_VALUE, self.PATCH_STR_VALUE),
"cache": {
"disable": True,
"ttl": self.PATCH_INT_VALUE,
},
"termination": {
"maxRetries": self.PATCH_INT_VALUE,
"ttl": self.PATCH_INT_VALUE,
"timeout": self.PATCH_INT_VALUE,
},
"plugins": {
"auth": True,
"shm": True,
"collectLogs": True,
"collectArtifacts": True,
"collectResources": True,
},
"build": {
"params": {
"patch-key1": {"value": "{}2".format(self.PATCH_STR_VALUE)},
"patch-key2": {"value": "{}1".format(self.PATCH_STR_VALUE)},
},
"runPatch": {
"init": [
{
"connection": self.PATCH_STR_VALUE,
"git": {"revision": self.PATCH_STR_VALUE},
}
],
},
"hubRef": self.PATCH_STR_VALUE,
},
"hooks": [
{
"hubRef": "{}1".format(self.PATCH_STR_VALUE),
"trigger": "succeeded",
"connection": "{}1".format(self.PATCH_STR_VALUE),
},
{
"connection": "{}1".format(self.PATCH_STR_VALUE),
"hubRef": "{}2".format(self.PATCH_STR_VALUE),
},
],
"params": {
"patch-key1": {"value": "{}2".format(self.PATCH_STR_VALUE)},
"patch-key2": {"value": "{}1".format(self.PATCH_STR_VALUE)},
},
"runPatch": {
"init": [
{"connection": self.PATCH_STR_VALUE, "git": {"revision": "dev"}}
],
"connections": [
"{}1".format(self.PATCH_STR_VALUE),
"{}2".format(self.PATCH_STR_VALUE),
],
"container": {
"resources": {
"requests": {
"cpu": self.PATCH_INT_VALUE,
"memory": self.PATCH_INT_VALUE,
}
}
},
"environment": {
"nodeSelector": {"polyaxon-patch": "core"},
"serviceAccountName": self.PATCH_STR_VALUE,
"imagePullSecrets": [
"{}1".format(self.PATCH_STR_VALUE),
"{}2".format(self.PATCH_STR_VALUE),
],
},
},
"schedule": {"kind": "datetime", "startAt": self.PATCH_DT_VALUE},
"events": [
{
"kinds": [V1EventKind.RUN_STATUS_DONE],
"ref": self.PATCH_STR_VALUE,
},
{
"kinds": [V1EventKind.RUN_STATUS_DONE],
"ref": self.PATCH_STR_VALUE,
},
],
"joins": [
{
"query": self.PATCH_STR_VALUE,
"sort": self.PATCH_STR_VALUE,
"params": {
"u": {"value": self.PATCH_STR_VALUE},
},
},
{
"query": self.PATCH_STR_VALUE,
"sort": self.PATCH_STR_VALUE,
"params": {
"x": {"value": self.PATCH_STR_VALUE, "contextOnly": True},
},
},
],
"matrix": {
"concurrency": self.PATCH_INT_VALUE,
"kind": "mapping",
"values": [
{"a": self.PATCH_INT_VALUE},
{"c": self.PATCH_INT_VALUE},
],
},
"dependencies": [
"{}1".format(self.PATCH_STR_VALUE),
"{}2".format(self.PATCH_STR_VALUE),
],
"trigger": "all_succeeded",
"conditions": "",
"skipOnUpstreamSkip": True,
}
)
def get_empty_preset(self):
return OperationSpecification.read(
{
"version": pkg.SCHEMA_VERSION,
"name": None,
"isPreset": True,
"description": "",
"tags": [],
"presets": [],
"queue": "",
"cache": {},
"termination": {},
"plugins": {},
"build": None,
"hooks": [],
"params": {},
"runPatch": {
"init": [],
"connections": [],
"container": {},
"environment": {
"nodeSelector": {},
"serviceAccountName": "",
"imagePullSecrets": [],
},
},
"schedule": None,
"events": [],
"joins": [],
"matrix": None,
"dependencies": [],
"trigger": None,
"conditions": None,
"skipOnUpstreamSkip": None,
}
)
def test_patch_replace_empty_values_with_empty_preset(self):
operation = self.get_empty_operation()
tmp_operation = self.get_empty_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.REPLACE
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_empty_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.REPLACE)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_replace_empty_values_with_full_preset(self):
operation = self.get_empty_operation()
tmp_operation = self.get_empty_operation()
preset = self.get_full_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.REPLACE)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_replace_full_values_with_empty_preset(self):
operation = self.get_full_operation()
tmp_operation = self.get_full_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.REPLACE
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_full_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.REPLACE)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
assert result_dict.pop("name") == operation.name
assert result_dict.pop("trigger") == operation.trigger
assert result_dict.pop("conditions") == operation.conditions
assert result_dict.pop("skipOnUpstreamSkip") == operation.skip_on_upstream_skip
assert result_dict.pop("schedule") == operation.schedule.to_dict()
assert result_dict.pop("conditions", None) is None
assert result_dict.pop("matrix") == operation.matrix.to_dict()
assert result_dict.pop("cache") == operation.cache.to_dict()
assert result_dict.pop("plugins") == operation.plugins.to_dict()
assert result_dict.pop("termination") == operation.termination.to_dict()
assert result_dict.pop("build", None) is not None
expected = preset.to_dict()
expected.pop("isPreset")
expected.pop("cache")
expected.pop("plugins")
expected.pop("termination")
assert result_dict == expected
def test_patch_replace_full_values_with_full_preset(self):
operation = self.get_full_operation()
tmp_operation = self.get_full_operation()
preset = self.get_full_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.REPLACE)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_isnull_empty_values_with_empty_preset(self):
operation = self.get_empty_operation()
tmp_operation = self.get_empty_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.ISNULL
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_empty_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.ISNULL)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_isnull_empty_values_with_full_preset(self):
operation = self.get_empty_operation()
tmp_operation = self.get_empty_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.ISNULL
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_empty_operation()
        preset = self.get_full_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.ISNULL)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_isnull_full_values_with_empty_preset(self):
operation = self.get_full_operation()
tmp_operation = self.get_full_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.ISNULL
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_full_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.ISNULL)
assert result.to_dict() == operation.to_dict()
def test_patch_isnull_full_values_with_full_preset(self):
operation = self.get_full_operation()
tmp_operation = self.get_full_operation()
preset = self.get_full_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.ISNULL)
assert result.to_dict() == operation.to_dict()
def test_patch_post_merge_empty_values_with_empty_preset(self):
operation = self.get_empty_operation()
tmp_operation = self.get_empty_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.POST_MERGE
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_empty_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.POST_MERGE)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_post_merge_empty_values_with_full_preset(self):
operation = self.get_empty_operation()
tmp_operation = self.get_empty_operation()
preset = self.get_full_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.POST_MERGE)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_post_merge_full_values_with_empty_preset(self):
operation = self.get_full_operation()
tmp_operation = self.get_full_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.POST_MERGE
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_full_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.POST_MERGE)
result_dict = result.to_dict()
assert result_dict["description"] == ""
result_dict["description"] = self.DEFAULT_STR_VALUE
assert result_dict["queue"] == ""
result_dict["queue"] = "{}/{}".format(
self.DEFAULT_STR_VALUE, self.DEFAULT_STR_VALUE
)
result_dict["presets"] = [self.DEFAULT_STR_VALUE]
# Since there's no component to validate the runPatch section it stays the same
assert result_dict == operation.to_dict()
operation = self.get_full_operation_with_component()
tmp_operation = self.get_full_operation_with_component()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.POST_MERGE)
result_dict = result.to_dict()
assert result_dict["description"] == ""
result_dict["description"] = self.DEFAULT_STR_VALUE
assert result_dict["queue"] == ""
result_dict["queue"] = "{}/{}".format(
self.DEFAULT_STR_VALUE, self.DEFAULT_STR_VALUE
)
# Run patch was validated and merged
assert result_dict["runPatch"]["environment"]["serviceAccountName"] == ""
result_dict["runPatch"]["environment"][
"serviceAccountName"
] = operation.run_patch["environment"]["serviceAccountName"]
assert result_dict["runPatch"]["container"].pop("name") == MAIN_JOB_CONTAINER
assert result_dict == operation.to_dict()
def test_patch_post_merge_full_values_with_full_preset(self):
operation = self.get_full_operation()
tmp_operation = self.get_full_operation()
preset = self.get_full_preset()
expected = preset.to_dict()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.POST_MERGE)
result_dict = result.to_dict()
expected.pop("isPreset")
expected["tags"] = operation.tags + expected["tags"]
expected["presets"] = operation.presets + expected["presets"]
expected["hooks"] = [i.to_dict() for i in operation.hooks] + expected["hooks"]
expected["dependencies"] = operation.dependencies + expected["dependencies"]
expected["events"] = [i.to_dict() for i in operation.events] + expected[
"events"
]
expected["joins"] = [i.to_dict() for i in operation.joins] + expected["joins"]
expected["matrix"]["values"] = (
operation.matrix.values + expected["matrix"]["values"]
)
# Since there's no component to validate the runPatch section it stays the same
expected["runPatch"] = operation.run_patch
assert result_dict.pop("hubRef") == operation.hub_ref
assert result_dict == expected
operation = self.get_full_operation_with_component()
tmp_operation = self.get_full_operation_with_component()
preset = self.get_full_preset()
expected = preset.to_dict()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.POST_MERGE)
result_dict = result.to_dict()
expected.pop("isPreset")
expected["tags"] = operation.tags + expected["tags"]
expected["presets"] = operation.presets + expected["presets"]
expected["hooks"] = [i.to_dict() for i in operation.hooks] + expected["hooks"]
expected["dependencies"] = operation.dependencies + expected["dependencies"]
expected["events"] = [i.to_dict() for i in operation.events] + expected[
"events"
]
expected["joins"] = [i.to_dict() for i in operation.joins] + expected["joins"]
expected["matrix"]["values"] = (
operation.matrix.values + expected["matrix"]["values"]
)
# Run patch was validated and merged
assert result_dict["runPatch"]["container"].pop("name") == MAIN_JOB_CONTAINER
assert (
result_dict["runPatch"]["connections"]
== operation.run_patch["connections"] + expected["runPatch"]["connections"]
)
result_dict["runPatch"]["connections"] = expected["runPatch"]["connections"]
assert (
result_dict["runPatch"]["init"]
== operation.run_patch["init"] + expected["runPatch"]["init"]
)
result_dict["runPatch"]["init"] = expected["runPatch"]["init"]
assert (
result_dict["runPatch"]["environment"]["imagePullSecrets"]
== operation.run_patch["environment"]["imagePullSecrets"]
+ expected["runPatch"]["environment"]["imagePullSecrets"]
)
result_dict["runPatch"]["environment"]["imagePullSecrets"] = expected[
"runPatch"
]["environment"]["imagePullSecrets"]
assert result_dict["runPatch"]["environment"]["nodeSelector"] == {
**operation.run_patch["environment"]["nodeSelector"],
**expected["runPatch"]["environment"]["nodeSelector"],
}
result_dict["runPatch"]["environment"]["nodeSelector"] = expected["runPatch"][
"environment"
]["nodeSelector"]
assert result_dict.pop("hubRef") == operation.hub_ref
assert result_dict.pop("component") == operation.component.to_dict()
expected["runPatch"]["container"].pop("name")
assert result_dict == expected
def test_patch_pre_merge_empty_values_with_empty_preset(self):
operation = self.get_empty_operation()
tmp_operation = self.get_empty_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.PRE_MERGE
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_empty_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.PRE_MERGE)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_pre_merge_empty_values_with_full_preset(self):
operation = self.get_empty_operation()
tmp_operation = self.get_empty_operation()
preset = self.get_full_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.PRE_MERGE)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_pre_merge_full_values_with_empty_preset(self):
operation = self.get_full_operation()
tmp_operation = self.get_full_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.PRE_MERGE
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_full_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.PRE_MERGE)
result_dict = result.to_dict()
# Since there's no component to validate the runPatch section it stays the same
assert result_dict == operation.to_dict()
operation = self.get_full_operation_with_component()
tmp_operation = self.get_full_operation_with_component()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.PRE_MERGE)
result_dict = result.to_dict()
assert result_dict["runPatch"]["container"].pop("name") == MAIN_JOB_CONTAINER
# Run patch was validated and merged
assert result_dict == operation.to_dict()
def test_patch_pre_merge_full_values_with_full_preset(self):
operation = self.get_full_operation()
tmp_operation = self.get_full_operation()
preset = self.get_full_preset()
preset_dict = preset.to_dict()
expected = operation.to_dict()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.PRE_MERGE)
result_dict = result.to_dict()
expected["tags"] = preset_dict["tags"] + operation.tags
expected["presets"] = preset_dict["presets"] + operation.presets
expected["hooks"] = preset_dict["hooks"] + [
i.to_dict() for i in operation.hooks
]
expected["dependencies"] = preset_dict["dependencies"] + operation.dependencies
expected["events"] = preset_dict["events"] + [
i.to_dict() for i in operation.events
]
expected["joins"] = preset_dict["joins"] + [
i.to_dict() for i in operation.joins
]
expected["matrix"]["values"] = (
preset_dict["matrix"]["values"] + operation.matrix.values
)
assert result_dict == expected
operation = self.get_full_operation_with_component()
tmp_operation = self.get_full_operation_with_component()
preset = self.get_full_preset()
preset_dict = preset.to_dict()
expected = operation.to_dict()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.PRE_MERGE)
result_dict = result.to_dict()
expected["tags"] = preset_dict["tags"] + operation.tags
expected["presets"] = preset_dict["presets"] + operation.presets
expected["hooks"] = preset_dict["hooks"] + [
i.to_dict() for i in operation.hooks
]
expected["dependencies"] = preset_dict["dependencies"] + operation.dependencies
expected["events"] = preset_dict["events"] + [
i.to_dict() for i in operation.events
]
expected["joins"] = preset_dict["joins"] + [
i.to_dict() for i in operation.joins
]
expected["matrix"]["values"] = (
preset_dict["matrix"]["values"] + operation.matrix.values
)
# Run patch was validated and merged
assert result_dict["runPatch"]["container"].pop("name") == MAIN_JOB_CONTAINER
assert result_dict["runPatch"]["container"].pop("resources") == deep_update(
preset_dict["runPatch"]["container"]["resources"],
expected["runPatch"]["container"]["resources"],
)
result_dict["runPatch"]["container"]["resources"] = expected["runPatch"][
"container"
]["resources"]
assert (
result_dict["runPatch"]["connections"]
== preset_dict["runPatch"]["connections"]
+ expected["runPatch"]["connections"]
)
result_dict["runPatch"]["connections"] = expected["runPatch"]["connections"]
assert (
result_dict["runPatch"]["init"]
== preset_dict["runPatch"]["init"] + expected["runPatch"]["init"]
)
result_dict["runPatch"]["init"] = expected["runPatch"]["init"]
assert (
result_dict["runPatch"]["environment"]["imagePullSecrets"]
== preset_dict["runPatch"]["environment"]["imagePullSecrets"]
+ expected["runPatch"]["environment"]["imagePullSecrets"]
)
result_dict["runPatch"]["environment"]["imagePullSecrets"] = expected[
"runPatch"
]["environment"]["imagePullSecrets"]
assert result_dict["runPatch"]["environment"]["nodeSelector"] == {
**preset_dict["runPatch"]["environment"]["nodeSelector"],
**expected["runPatch"]["environment"]["nodeSelector"],
}
result_dict["runPatch"]["environment"]["nodeSelector"] = expected["runPatch"][
"environment"
]["nodeSelector"]
assert result_dict == expected
class BaseTestApplyPreset(BaseTestCase):
def setUp(self):
super().setUp()
op_spec = OperationSpecification.read(
{
"version": 1.1,
"kind": "operation",
"name": "foo",
"description": "a description",
"tags": ["tag1", "tag2"],
"trigger": "all_succeeded",
"component": {
"name": "build-template",
"tags": ["tag1", "tag2"],
"run": {
"kind": V1RunKind.JOB,
"container": {"image": "test"},
"init": [{"connection": "foo", "git": {"revision": "dev"}}],
},
},
}
)
self.compiled_operation = OperationSpecification.compile_operation(op_spec)
self.preset = {"runPatch": {}, "patchStrategy": V1PatchStrategy.POST_MERGE}
@pytest.mark.polyaxonfile_mark
class TestApplyPresetEnvironment(BaseTestApplyPreset):
def assert_environment(self, environment1, environment2):
self.preset["runPatch"]["environment"] = environment1
assert self.compiled_operation.run.environment is None
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.run.environment is not None
env = self.compiled_operation.run.environment.to_dict()
assert env == environment1
# Updating the preset
self.preset["patchStrategy"] = V1PatchStrategy.REPLACE
self.preset["runPatch"]["environment"] = environment2
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.run.environment is not None
env = self.compiled_operation.run.environment.to_dict()
assert env == environment2
def test_compile_injects_labels(self):
environment1 = {"labels": {"label1": "value1"}}
environment2 = {"labels": {"label1": "value11"}}
self.assert_environment(environment1, environment2)
# Updating the preset
environment3 = {"labels": {"label2": "value2"}}
self.preset["runPatch"]["environment"] = environment3
env = self.compiled_operation.run.environment.to_dict()
assert env == environment2
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.run.environment is not None
env = self.compiled_operation.run.environment.to_dict()
assert env == {"labels": {"label2": "value2"}}
def test_compile_injects_annotations(self):
environment1 = {"annotations": {"anno1": "value1"}}
environment2 = {"annotations": {"anno1": "value11"}}
self.assert_environment(environment1, environment2)
# Updating the preset
environment3 = {"annotations": {"anno2": "value2"}}
self.preset["runPatch"]["environment"] = environment3
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.run.environment is not None
env = self.compiled_operation.run.environment.to_dict()
assert env == {"annotations": {"anno2": "value2"}}
def test_compile_injects_node_selector(self):
environment1 = {"nodeSelector": {"plx": "selector1"}}
environment2 = {"nodeSelector": {"plx": "selector2"}}
self.assert_environment(environment1, environment2)
def test_compile_injects_affinity(self):
environment1 = {"affinity": {"podAffinity": {}}}
environment2 = {"affinity": {"podAffinity": {"foo": "bar"}}}
self.assert_environment(environment1, environment2)
def test_compile_injects_tolerations(self):
environment1 = {"tolerations": [{"key": "key1", "operator": "Exists"}]}
environment2 = {"tolerations": [{"key": "key2", "operator": "NotExists"}]}
self.assert_environment(environment1, environment2)
def test_compile_injects_service_account_name(self):
environment1 = {"serviceAccountName": "sa1"}
environment2 = {"serviceAccountName": "sa2"}
self.assert_environment(environment1, environment2)
def test_compile_injects_image_pull_secrets(self):
environment1 = {"imagePullSecrets": ["ps1", "ps2"]}
environment2 = {"imagePullSecrets": ["ps3"]}
self.assert_environment(environment1, environment2)
def test_compile_injects_security_context(self):
environment1 = {"securityContext": {"runAsUser": 1000, "runAsGroup": 3000}}
environment2 = {"securityContext": {"runAsUser": 100, "runAsGroup": 300}}
self.assert_environment(environment1, environment2)
@pytest.mark.polyaxonfile_mark
class TestApplyPresetPlugins(BaseTestApplyPreset):
def assert_plugins(self, plugins1, plugins2):
self.preset["plugins"] = plugins1
assert self.compiled_operation.plugins is None
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.plugins is not None
env = self.compiled_operation.plugins.to_dict()
assert env == plugins1
# Updating the preset
self.preset["plugins"] = plugins2
plugins = self.compiled_operation.plugins.to_dict()
assert plugins == plugins1
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.plugins is not None
plugins = self.compiled_operation.plugins.to_dict()
assert plugins == plugins2
def test_compile_injects_log_level(self):
plugins = {"logLevel": "DEBUG"}
plugins2 = {"logLevel": "INFO"}
self.assert_plugins(plugins, plugins2)
def test_compile_injects_auth(self):
plugins = {"auth": True}
plugins2 = {"auth": False}
self.assert_plugins(plugins, plugins2)
def test_compile_injects_docker(self):
plugins = {"docker": True}
plugins2 = {"docker": False}
self.assert_plugins(plugins, plugins2)
def test_compile_injects_shm(self):
plugins = {"shm": True}
plugins2 = {"shm": False}
self.assert_plugins(plugins, plugins2)
@pytest.mark.polyaxonfile_mark
class TestApplyPresetTermination(BaseTestApplyPreset):
def assert_termination(self, termination1, termination2):
self.preset["termination"] = termination1
assert self.compiled_operation.termination is None
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.termination is not None
assert self.compiled_operation.termination.to_dict() == termination1
# Updating the preset
self.preset["termination"] = termination2
assert self.compiled_operation.termination.to_dict() == termination1
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.termination is not None
assert self.compiled_operation.termination.to_dict() == termination2
def test_compile_injects_max_retries(self):
termination1 = {"maxRetries": 10}
termination2 = {"maxRetries": 1}
self.assert_termination(termination1, termination2)
def test_compile_injects_timeout(self):
termination1 = {"timeout": 10}
termination2 = {"timeout": 1}
self.assert_termination(termination1, termination2)
def test_compile_injects_ttl(self):
termination1 = {"ttl": 10}
termination2 = {"ttl": 1}
self.assert_termination(termination1, termination2)
@pytest.mark.polyaxonfile_mark
class TestApplyPreset(BaseTestApplyPreset):
def test_patch_does_not_alter_with_no_preset(self):
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=None,
)
== self.compiled_operation
)
def test_patch_does_not_alter_with_preset_with_no_environment_or_contexts_or_termination(
self,
):
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
def test_patch_environment_and_termination(self):
termination1 = {"maxRetries": 1, "timeout": 1, "ttl": 1}
environment1 = {
"labels": {"label1": "value1"},
"annotations": {"anno1": "value1"},
"nodeSelector": {"plx": "selector1"},
"affinity": {"podAffinity": {}},
"tolerations": [{"key": "key1", "operator": "Exists"}],
"serviceAccountName": "sa1",
"imagePullSecrets": ["ps1", "ps2"],
"securityContext": {"runAsUser": 1000, "runAsGroup": 3000},
}
plugins1 = {
"logLevel": "DEBUG",
"auth": True,
"docker": True,
"shm": True,
}
self.preset["termination"] = termination1
self.preset["runPatch"]["environment"] = environment1
self.preset["plugins"] = plugins1
assert self.compiled_operation.termination is None
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.termination is not None
assert self.compiled_operation.termination.to_dict() == termination1
assert self.compiled_operation.run.environment is not None
env = self.compiled_operation.run.environment.to_dict()
assert env == environment1
assert self.compiled_operation.plugins is not None
assert self.compiled_operation.plugins.to_dict() == plugins1
termination2 = {"maxRetries": 10, "timeout": 10, "ttl": 10}
environment2 = {
"labels": {"label1": "value12"},
"annotations": {"anno1": "value12"},
"nodeSelector": {"plx": "selector12"},
"affinity": {"podAffinity": {"k": "v"}},
"tolerations": [{"key": "key11", "operator": "NotExists"}],
"serviceAccountName": "sa2",
"imagePullSecrets": ["ps2", "ps22"],
"securityContext": {"runAsUser": 100, "runAsGroup": 300},
}
plugins2 = {
"logLevel": "INFO",
"auth": False,
"docker": False,
"shm": False,
}
# Updating the preset
self.preset["termination"] = termination2
self.preset["runPatch"]["environment"] = environment2
self.preset["plugins"] = plugins2
self.preset["patchStrategy"] = V1PatchStrategy.REPLACE
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.termination is not None
assert self.compiled_operation.termination.to_dict() == termination2
        assert self.compiled_operation.run.environment is not None
env = self.compiled_operation.run.environment.to_dict()
assert env == environment2
assert self.compiled_operation.plugins is not None
assert self.compiled_operation.plugins.to_dict() == plugins2
termination3 = {"maxRetries": 15}
environment3 = {
"labels": {},
"annotations": {},
"nodeSelector": {},
"affinity": {"podAffinity": {"k": "v"}},
"tolerations": [],
"securityContext": {"runAsUser": 10, "runAsGroup": 30},
"serviceAccountName": "sa2",
"imagePullSecrets": ["ps2", "ps22"],
}
# Updating the preset
self.preset["termination"] = termination3
self.preset["runPatch"]["environment"] = environment3
self.preset["patchStrategy"] = V1PatchStrategy.REPLACE
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.termination is not None
assert self.compiled_operation.termination.to_dict() == {
"maxRetries": 15,
"timeout": 10,
"ttl": 10,
}
        assert self.compiled_operation.run.environment is not None
env = self.compiled_operation.run.environment.to_dict()
assert env == environment3
assert self.compiled_operation.plugins is not None
assert self.compiled_operation.plugins.to_dict() == plugins2
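A minimal sketch of the same apply_preset call outside the test harness, mirroring the fixtures above; the concrete preset values and the pre-built compiled_operation are illustrative assumptions, not part of the original file.
# Editorial sketch, not part of the test module; compiled_operation is assumed to exist
# (it mirrors self.compiled_operation from the setUp above).
preset = {
    "runPatch": {"environment": {"nodeSelector": {"polyaxon": "core"}}},
    "patchStrategy": V1PatchStrategy.POST_MERGE,
}
compiled_operation = OperationSpecification.apply_preset(
    config=compiled_operation, preset=preset
)
assert compiled_operation.run.environment.to_dict() == {"nodeSelector": {"polyaxon": "core"}}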
|
src/lib/trains/train_factory.py | EvelynYihuiYang/MCMOT | 306 | 12747665 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .mot import MotTrainer
train_factory = {
'mot': MotTrainer,
}
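A short usage sketch of the factory: callers look the trainer class up by task name and instantiate it. The opt, model and optimizer objects and the constructor signature are assumptions, not taken from this file.
# Illustrative lookup (assumed call signature; the real Trainer interface lives in .mot):
trainer_cls = train_factory[opt.task]         # e.g. opt.task == 'mot'
trainer = trainer_cls(opt, model, optimizer)  # hypothetical arguments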
|
lib/pose/hrnet/pose_estimation/gen_kpts.py | aibodygym/GAST-Net-3DPoseEstimation | 235 | 12747673 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import os.path as osp
import argparse
import time
import numpy as np
from tqdm import tqdm
import json
import torch
import torch.backends.cudnn as cudnn
import cv2
import _init_paths
from _init_paths import get_path
from utils.utilitys import plot_keypoint, PreProcess, write, load_json
from config import cfg, update_config
from utils.transforms import *
from utils.inference import get_final_preds
import models
sys.path.pop(0)
pre_dir, cur_dir, chk_root, data_root, lib_root, output_root = get_path(__file__)
cfg_dir = pre_dir + '/experiments/coco/hrnet/'
model_dir = chk_root + 'hrnet/pose_coco/'
# Loading human detector model
sys.path.insert(0, lib_root)
from detector import load_model as yolo_model
from detector import yolo_human_det as yolo_det
from track.sort import Sort
sys.path.pop(0)
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
# general
parser.add_argument('--cfg', type=str, default=cfg_dir + 'w48_384x288_adam_lr1e-3.yaml',
help='experiment configure file name')
parser.add_argument('opts', nargs=argparse.REMAINDER, default=None,
help="Modify config options using the command-line")
parser.add_argument('--modelDir', type=str, default=model_dir + 'pose_hrnet_w48_384x288.pth',
help='The model directory')
parser.add_argument('--det-dim', type=int, default=416,
help='The input dimension of the detected image')
parser.add_argument('--thred-score', type=float, default=0.70,
help='The threshold of object Confidence')
parser.add_argument('-a', '--animation', action='store_true',
help='output animation')
parser.add_argument('-np', '--num-person', type=int, default=1,
help='The maximum number of estimated poses')
parser.add_argument("-v", "--video", type=str, default='camera',
help="input video file name")
args = parser.parse_args()
return args
def reset_config(args):
update_config(cfg, args)
# cudnn related setting
cudnn.benchmark = cfg.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
# load model
def model_load(config):
print('Loading HRNet model ...')
# lib/models/pose_hrnet.py:get_pose_net
model = eval('models.' + config.MODEL.NAME + '.get_pose_net')(config, is_train=False)
if torch.cuda.is_available():
model = model.cuda()
state_dict = torch.load(config.OUTPUT_DIR)
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k # remove module.
# print(name,'\t')
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
model.eval()
print('HRNet network successfully loaded')
return model
def load_default_model():
args = parse_args()
reset_config(args)
print('Loading HRNet model ...')
# lib/models/pose_hrnet.py:get_pose_net
model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg, is_train=False)
if torch.cuda.is_available():
model = model.cuda()
state_dict = torch.load(cfg.OUTPUT_DIR)
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k # remove module.
# print(name,'\t')
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
model.eval()
print('HRNet network successfully loaded')
return model
def gen_img_kpts(image, human_model, pose_model, human_sort, det_dim=416, num_peroson=2):
"""
:param image: Input image matrix instead of image path
:param human_model: The YOLOv3 model
:param pose_model: The HRNet model
:param human_sort: Input initialized sort tracker
:param det_dim: The input dimension of YOLOv3. [160, 320, 416]
:param num_peroson: The number of tracked people
:return:
        kpts: (M, N, 2) keypoint coordinates for the tracked people
        scores: (M, N, 1) confidence score of each keypoint
        human_indexes: list of tracker IDs for the returned people
"""
args = parse_args()
reset_config(args)
thred_score = args.thred_score
bboxs, bbox_scores = yolo_det(image, human_model, reso=det_dim, confidence=thred_score)
if bboxs is None or not bboxs.any():
return None, None, None
# Using Sort to track people
# people_track: Num_bbox × [x1, y1, x2, y2, ID]
people_track = human_sort.update(bboxs)
    # Keep the most recent num_peroson tracks (the tracker ID stays in the last column)
if people_track.shape[0] == 1:
bboxs_track = people_track[-1].reshape(1, 5)
else:
people_track_ = people_track[-num_peroson:].reshape(num_peroson, 5)
bboxs_track = people_track_[::-1]
with torch.no_grad():
# bbox is coordinate location
inputs, origin_img, center, scale = PreProcess(image, bboxs_track, cfg, num_peroson)
inputs = inputs[:, [2, 1, 0]]
if torch.cuda.is_available():
inputs = inputs.cuda()
output = pose_model(inputs)
# compute coordinate
preds, maxvals = get_final_preds(cfg, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))
kpts = np.zeros((num_peroson, 17, 2), dtype=np.float32)
scores = np.zeros((num_peroson, 17, 1), dtype=np.float32)
for i, kpt in enumerate(preds):
kpts[i] = kpt
for i, score in enumerate(maxvals):
scores[i] = score
human_indexes = []
for i in range(len(bboxs_track)):
human_indexes.append(bboxs_track[i, -1])
return kpts, scores, human_indexes
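A hedged sketch of calling gen_img_kpts on a single frame with the loaders defined in this module; the image path is a placeholder assumption.
# Sketch: single-frame 2D pose estimation (editorial example, image path is hypothetical).
human_model = yolo_model(inp_dim=416)
pose_model = load_default_model()
human_sort = Sort()
frame = cv2.imread('demo.jpg')
kpts, scores, ids = gen_img_kpts(frame, human_model, pose_model, human_sort,
                                 det_dim=416, num_peroson=2)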
def gen_video_kpts(video, det_dim=416, num_peroson=1, gen_output=False):
# Updating configuration
args = parse_args()
reset_config(args)
cap = cv2.VideoCapture(video)
assert cap.isOpened(), 'Cannot capture source'
# Loading detector and pose model, initialize sort for track
human_model = yolo_model(inp_dim=det_dim)
pose_model = model_load(cfg)
people_sort = Sort()
video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# video_length = 1000
# collect keypoints coordinate
print('Generating 2D pose ...')
kpts_result = []
scores_result = []
for i in tqdm(range(video_length)):
ret, frame = cap.read()
if not ret:
continue
# start = time.time()
try:
bboxs, scores = yolo_det(frame, human_model, reso=det_dim, confidence=args.thred_score)
if bboxs is None or not bboxs.any():
print('No person detected!')
# print('FPS of the video is {:5.2f}'.format(1 / (time.time() - start)))
continue
# Using Sort to track people
people_track = people_sort.update(bboxs)
            # Keep the most recent num_peroson tracks and drop the ID column
if people_track.shape[0] == 1:
people_track_ = people_track[-1, :-1].reshape(1, 4)
elif people_track.shape[0] >= 2:
people_track_ = people_track[-num_peroson:, :-1].reshape(num_peroson, 4)
people_track_ = people_track_[::-1]
else:
continue
track_bboxs = []
for bbox in people_track_:
bbox = [round(i, 2) for i in list(bbox)]
track_bboxs.append(bbox)
        except Exception as e:
            print(e)
            exit(0)
with torch.no_grad():
# bbox is coordinate location
inputs, origin_img, center, scale = PreProcess(frame, track_bboxs, cfg, num_peroson)
inputs = inputs[:, [2, 1, 0]]
if torch.cuda.is_available():
inputs = inputs.cuda()
output = pose_model(inputs)
# compute coordinate
preds, maxvals = get_final_preds(cfg, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))
if gen_output:
kpts = np.zeros((num_peroson, 17, 2), dtype=np.float32)
scores = np.zeros((num_peroson, 17), dtype=np.float32)
for i, kpt in enumerate(preds):
kpts[i] = kpt
for i, score in enumerate(maxvals):
scores[i] = score.squeeze()
kpts_result.append(kpts)
scores_result.append(scores)
else:
index_bboxs = [bbox + [i] for i, bbox in enumerate(track_bboxs)]
list(map(lambda x: write(x, frame), index_bboxs))
plot_keypoint(frame, preds, maxvals, 0.3)
# print('FPS of the video is {:5.2f}'.format(1 / (time.time() - start)))
cv2.imshow('frame', frame)
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
if gen_output:
keypoints = np.array(kpts_result)
scores = np.array(scores_result)
keypoints = keypoints.transpose(1, 0, 2, 3) # (T, M, N, 2) --> (M, T, N, 2)
scores = scores.transpose(1, 0, 2) # (T, M, N) --> (M, T, N)
return keypoints, scores
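A hedged sketch of offline extraction for a whole clip using gen_video_kpts; the video path and output file name are assumptions.
# Sketch: extract and store 2D keypoints for a video (file names are hypothetical).
keypoints, scores = gen_video_kpts('demo.mp4', det_dim=416, num_peroson=1, gen_output=True)
np.savez_compressed('demo_2d_kpts.npz', keypoints=keypoints, scores=scores)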
def generate_ntu_kpts_json(video_path, kpts_file):
args = parse_args()
reset_config(args)
# Loading detector and pose model, initialize sort for track
human_model = yolo_model()
pose_model = model_load(cfg)
people_sort = Sort()
with torch.no_grad():
cap = cv2.VideoCapture(video_path)
video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# collect keypoints information
kpts_info = dict()
data = []
for i in tqdm(range(video_length)):
frame_info = {'frame_index': i + 1}
ret, frame = cap.read()
try:
bboxs, scores = yolo_det(frame, human_model, confidence=args.thred_score)
if bboxs is None or not bboxs.any():
print('No person detected!')
continue
# Using Sort to track people
people_track = people_sort.update(bboxs)
# Track the first two people in the video and remove the ID
if people_track.shape[0] == 1:
people_track_ = people_track[-1, :-1].reshape(1, 4)
elif people_track.shape[0] >= 2:
people_track_ = people_track[-2:, :-1].reshape(2, 4)
people_track_ = people_track_[::-1]
else:
skeleton = {'skeleton': [{'pose': [], 'score': [], 'bbox': []}]}
frame_info.update(skeleton)
data.append(frame_info)
continue
track_bboxs = []
for bbox in people_track_:
bbox = [round(i, 3) for i in list(bbox)]
track_bboxs.append(bbox)
except Exception as e:
print(e)
continue
# bbox is coordinate location
            inputs, origin_img, center, scale = PreProcess(frame, track_bboxs, cfg, args.num_person)
            inputs = inputs[:, [2, 1, 0]]
            if torch.cuda.is_available():
                inputs = inputs.cuda()
            output = pose_model(inputs)
# compute coordinate
preds, maxvals = get_final_preds(cfg, output.clone().cpu().numpy(), np.asarray(center),
np.asarray(scale))
skeleton = []
for num, bbox in enumerate(track_bboxs):
pose = preds[num].tolist()
score = maxvals[num].tolist()
pose = round_list(pose)
score = round_list(score)
one_skeleton = {'pose': pose,
'score': score,
'bbox': bbox}
skeleton.append(one_skeleton)
frame_info.update({'skeleton': skeleton})
data.append(frame_info)
kpts_info.update({'data': data})
with open(kpts_file, 'w') as fw:
json.dump(kpts_info, fw)
print('Finishing!')
def round_list(input_list, decimals=3):
dim = len(input_list)
for i in range(dim):
for j in range(len(input_list[i])):
input_list[i][j] = round(input_list[i][j], decimals)
    return input_list
|
octopus/tests/BTC/test_explorer.py | SillyTin/octopus | 212 | 12747714 | from octopus.platforms.BTC.explorer import BitcoinExplorerRPC
from octopus.platforms.BTC.explorer import RPC_USER, RPC_PASSWORD, RPC_HOST
import unittest
class BitcoinExplorerTestCase(unittest.TestCase):
explorer = BitcoinExplorerRPC(host=('%s:%s@%s' % (RPC_USER, RPC_PASSWORD, RPC_HOST)))
blockhash = '00000000000000000024fb37364cbf81fd49cc2d51c09c75c35433c3a1945d04'
txid = '1b5bfc2681d40c872126919ccb1752de4cca42dcfc594899f2ef11db4b05bb39'
tx_raw = '0200000001686b654b40737f0daa1532f64e525dc925e60d075403d38cfb12ac9097764015040000006a473044022009ec3f26984906a813faae05d968ec06bf1c68883e09a00b6333126ea87d96b302201cf1d2b9165442aa178fdf772a3909c3d2ba69e454eb8660fa35df8645e3bcb60121022f2caec3ad2f3b174d048a0d46f4f6e8ba4e9d02f6bdbba64ac6817f7ac6c131ffffffff02060d0700000000001976a91407c5acae3abc91735a1471e275e33abbffada89088ac00581300000000001976a91432f2e30111e1dc45f415430ef082cb64225c538a88ac00000000'
wallet_address = '15wDxrRCn7YiCXdvqjcih6G8svrmq5AQSS'
script_hex = "76a82096b3fe1f4ec8fd076379267f72443bed81cc49c18a2913f7e1f0727f6f9f4fbf88ac"
script_asm = 'OP_DUP OP_SHA256 96b3fe1f4ec8fd076379267f72443bed81cc49c18a2913f7e1f0727f6f9f4fbf OP_EQUALVERIFY OP_CHECKSIG'
def testRPCCommand(self):
#######################
# HIGHT-LEVEL METHODS #
#######################
self.assertEqual(self.explorer.get_transaction(self.txid, 0), self.tx_raw)
self.assertEqual(len(self.explorer.get_block_by_hash(self.blockhash)), 18)
self.assertEqual(len(self.explorer.get_block_by_number(500000)), 18)
####################
# JSON-RPC METHODS #
####################
self.assertEqual(self.explorer.decoderawtransaction(self.tx_raw)['txid'], self.txid)
self.assertEqual(self.explorer.decodescript(self.script_hex)['asm'], self.script_asm)
self.assertEqual(len(self.explorer.getbestblockhash()), len(self.blockhash))
self.assertEqual(len(self.explorer.getblock(self.blockhash)), 18)
self.assertEqual(len(self.explorer.getblockchaininfo()), 11)
self.assertEqual(type(self.explorer.getblockcount()), int)
self.assertEqual(self.explorer.getblockhash(500000), self.blockhash)
# self.assertEqual(len(self.explorer.getchaintips()), 2)
self.assertEqual(type(self.explorer.getconnectioncount()), int)
self.assertEqual(type(self.explorer.getdifficulty()), float)
self.assertEqual(len(self.explorer.getinfo()), 16)
self.assertEqual(len(self.explorer.getmempoolinfo()), 5)
self.assertEqual(len(self.explorer.getmininginfo()), 8)
self.assertEqual(len(self.explorer.getnettotals()), 4)
self.assertEqual(type(self.explorer.getnetworkhashps()), float)
self.assertEqual(len(self.explorer.getnetworkinfo()), 13)
self.assertEqual(len(self.explorer.getpeerinfo()), 8)
self.assertEqual(type(self.explorer.getrawmempool()), list)
self.assertEqual(self.explorer.getrawtransaction(self.txid), self.tx_raw)
self.assertEqual(type(self.explorer.getreceivedbyaccount('')), float)
self.assertEqual(type(self.explorer.getreceivedbyaddress(self.wallet_address)), float)
self.assertEqual(len(self.explorer.gettxout(self.txid, 0)), 5)
self.assertEqual(len(self.explorer.gettxoutproof([self.txid])), 818)
self.assertEqual(type(self.explorer.getunconfirmedbalance()), float)
self.assertEqual(len(self.explorer.getwalletinfo()), 9)
self.assertEqual(type(self.explorer.help()), str)
self.assertEqual(len(self.explorer.validateaddress(self.wallet_address)), 6)
self.assertEqual(self.explorer.verifytxoutproof(self.explorer.gettxoutproof([self.txid])), [self.txid])
# Not tested
'''
self.explorer.abandontransaction()
self.explorer.addmultisigaddress()
self.explorer.addnode()
self.explorer.createmultisig()
self.explorer.createrawtransaction()
self.explorer.dumpprivkey()
self.explorer.encryptwallet()
self.explorer.estimatefee()
self.explorer.estimatepriority()
self.explorer.getaccountaddress()
self.explorer.getaccount()
self.explorer.getaddednodeinfo()
self.explorer.getaddressesbyaccount()
self.explorer.getbalance()
self.explorer.gettransaction()
self.explorer.keypoolrefill()
self.explorer.listaccounts()
self.explorer.listaddressgroupings()
self.explorer.listlockunspent()
self.explorer.listreceivedbyaccount()
self.explorer.listreceivedbyaddress()
self.explorer.listtransactions()
self.explorer.listunspent()
self.explorer.lockunspent()
self.explorer.prioritisetransaction()
self.explorer.sendfrom()
self.explorer.sendmany()
self.explorer.sendrawtransaction()
self.explorer.sendtoaddress()
self.explorer.settxfee()
self.explorer.signmessage()
self.explorer.signrawtransaction()
self.explorer.submitblock()
self.explorer.verifymessage()
self.explorer.walletlock()
self.explorer.walletpassphrase()
self.explorer.walletpassphrasechange()
'''
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(BitcoinExplorerTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
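Outside the test suite, the explorer can point at any node by passing explicit credentials; the host string below is a placeholder, and only the constructor form and the getblockcount call are taken from the test above.
# Sketch: querying a custom node (credentials and endpoint are placeholders).
explorer = BitcoinExplorerRPC(host='rpcuser:rpcpassword@127.0.0.1:8332')
print(explorer.getblockcount())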
|
tests/test_parameters.py | compose-x/troposphere | 4573 | 12747725 | import unittest
from troposphere import Parameter, Ref
class TestInitArguments(unittest.TestCase):
def test_title_max_length(self):
title = "i" * 256
with self.assertRaises(ValueError):
Parameter(title, Type="String")
def test_ref_can_be_requested(self):
param = Parameter("title", Type="String")
reference = param.ref()
self.assertIsInstance(reference, Ref)
self.assertDictEqual(reference.data, {"Ref": "title"})
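In normal use a Parameter is attached to a Template rather than inspected directly; a minimal hedged sketch follows, with the parameter name chosen arbitrarily.
# Sketch: attaching a Parameter to a Template (parameter name is hypothetical).
from troposphere import Template

t = Template()
t.add_parameter(Parameter("BucketName", Type="String"))
print(t.to_json())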
if __name__ == "__main__":
unittest.main()
|
netket/graph/__init__.py | gpescia/MyNetKet | 352 | 12747783 | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .abstract_graph import AbstractGraph
from .graph import Graph, Edgeless, DoubledGraph, disjoint_union
from .lattice import Lattice
from .common_lattices import (
Grid,
Hypercube,
Cube,
Square,
Chain,
BCC,
FCC,
Diamond,
Pyrochlore,
Triangular,
Honeycomb,
Kagome,
)
from netket.utils import _hide_submodules
_hide_submodules(__name__)
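A usage sketch of the re-exported lattice constructors; the argument names shown are assumptions based on the common Lattice API and may differ across versions, so they are kept as comments.
# Sketch (illustrative arguments, not executed on import):
# chain = Chain(length=16, pbc=True)
# square = Square(length=4)
# hyper = Hypercube(length=4, n_dim=3)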
|