max_stars_repo_path (string, 4–245 chars) | max_stars_repo_name (string, 7–115 chars) | max_stars_count (int64, 101–368k) | id (string, 2–8 chars) | content (string, 6–1.03M chars)
---|---|---|---|---|
src/probnum/linalg/solvers/_probabilistic_linear_solver.py | alpiges/probnum | 226 | 12719405 | """Probabilistic linear solvers.
Iterative probabilistic numerical methods solving linear systems :math:`Ax = b`.
"""
class ProbabilisticLinearSolver:
r"""Compose a custom probabilistic linear solver.
Class implementing probabilistic linear solvers. Such (iterative) solvers infer
solutions to problems of the form
.. math:: Ax=b,
where :math:`A \in \mathbb{R}^{n \times n}` and :math:`b \in \mathbb{R}^{n}`.
They return a probability measure which quantifies uncertainty in the output arising
from finite computational resources or stochastic input. This class unifies and
generalizes probabilistic linear solvers as described in the literature. [1]_ [2]_
[3]_ [4]_
Parameters
----------
References
----------
.. [1] <NAME>., Probabilistic Interpretation of Linear Solvers, *SIAM Journal on
Optimization*, 2015, 25, 234-260
.. [2] <NAME>. et al., A Bayesian Conjugate Gradient Method, *Bayesian
Analysis*, 2019, 14, 937-1012
.. [3] <NAME>. et al., Probabilistic Linear Solvers: A Unifying View,
*Statistics and Computing*, 2019
.. [4] <NAME>. and <NAME>., Probabilistic Linear Solvers for Machine Learning,
*Advances in Neural Information Processing Systems (NeurIPS)*, 2020
See Also
--------
problinsolve : Solve linear systems in a Bayesian framework.
bayescg : Solve linear systems with prior information on the solution.
Examples
--------
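A minimal illustrative sketch (an assumption, not taken from this module: it
uses the high-level :func:`problinsolve` interface listed under "See Also"
rather than composing a solver directly; return values may differ by version).

>>> import numpy as np
>>> from probnum.linalg import problinsolve
>>> A = np.array([[7.5, 2.0], [2.0, 2.5]])  # symmetric positive definite
>>> b = np.array([1.0, -2.0])
>>> x, Ahat, Ainv, info = problinsolve(A, b)  # belief over the solution x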
"""
|
27. LeetCode Problems/mini-peaks.py | Ujjawalgupta42/Hacktoberfest2021-DSA | 225 | 12719443 | def miniPeaks(nums):
result = []
left = 0
right = 0
for i in range(1, len(nums) - 1):
left = nums[i - 1]
right = nums[i + 1]
if nums[i] > left and nums[i] > right:
result.append(nums[i])
return result
# Time Complexity : O(n)
# Space Complexity : O(m)
# n = number of elements
# m = number of peak elements
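# Quick illustrative check (added example, not part of the original solution):
# interior elements strictly greater than both neighbours are collected in order.
if __name__ == "__main__":
    print(miniPeaks([4, 9, 2, 7, 6, 8, 3]))  # expected: [9, 7, 8]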
|
srcs/python/kungfu/tensorflow/v1/benchmarks/__main__.py | Pandinosaurus/KungFu | 291 | 12719461 | """
Usage:
kungfu-run -q -np 4 python3 -m kungfu.tensorflow.v1.benchmarks --method CPU
kungfu-run -q -np 4 python3 -m kungfu.tensorflow.v1.benchmarks --method NCCL
kungfu-run -q -np 4 python3 -m kungfu.tensorflow.v1.benchmarks --method NCCL+CPU
mpirun -np 4 python3 -m kungfu.tensorflow.v1.benchmarks --method HOROVOD
"""
import argparse
import os
import sys
import numpy as np
import tensorflow as tf
from kungfu._utils import measure, one_based_range
from kungfu.python import _get_cuda_index
from kungfu.tensorflow.ops import (current_cluster_size, current_rank,
group_all_reduce, group_nccl_all_reduce)
from kungfu.tensorflow.ops.collective import group_hierarchical_nccl_all_reduce
from kungfu.tensorflow.v1.benchmarks import model_sizes
from kungfu.tensorflow.v1.helpers.utils import show_rate, show_size
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
def _tensor_size(t):
return t.shape.num_elements() * t.dtype.size
def hvd_init():
import horovod.tensorflow as hvd
hvd.init()
def hvd_group_all_reduce(ts):
import horovod.tensorflow as hvd
return [hvd.allreduce(t, average=False) for t in ts]
def get_cluster_size(method):
if method == 'HOROVOD':
import horovod.tensorflow as hvd
return hvd.size()
else:
return current_cluster_size()
def get_rank(method):
if method == 'HOROVOD':
import horovod.tensorflow as hvd
return hvd.rank()
else:
return current_rank()
_group_all_reduce_func = {
'CPU': group_all_reduce,
'NCCL': group_nccl_all_reduce,
'NCCL+CPU': group_hierarchical_nccl_all_reduce,
'HOROVOD': hvd_group_all_reduce,
}
_model_sizes = {
'ResNet50': model_sizes.resnet50_imagenet,
'VGG16': model_sizes.vgg16_imagenet,
'BERT': model_sizes.bert,
}
def _config(method):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
if method == 'HOROVOD':
import horovod.tensorflow as hvd
config.gpu_options.visible_device_list = str(hvd.local_rank())
else:
config.gpu_options.visible_device_list = str(_get_cuda_index())
return config
def _rank(method):
if method == 'HOROVOD':
import horovod.tensorflow as hvd
return hvd.rank()
else:
return current_rank()
def parse_args():
p = argparse.ArgumentParser(description='Perf Benchmarks.')
p.add_argument('--model',
type=str,
default='ResNet50',
help='ResNet50 | VGG16 | BERT')
p.add_argument('--method',
type=str,
default='CPU',
help='CPU | NCCL | HOROVOD')
p.add_argument('--fuse', action='store_true', default=False, help='')
p.add_argument('--max-count', type=int, default=0, help='max grad count')
p.add_argument('--steps',
type=int,
default=10,
help='number of steps to run')
p.add_argument('--warmup-steps',
type=int,
default=5,
help='number of warmup steps')
return p.parse_args()
def log_detailed_result(value, error, attrs):
import json
attr_str = json.dumps(attrs, separators=(',', ':'))
# grep -o RESULT.* *.log
unit = 'GiB/s'
print('RESULT: %f +-%f (%s) %s' % (value, error, unit, attr_str))
def log_final_result(values, args):
attrs = {
'method': args.method,
'np': get_cluster_size(args.method),
'model': args.model,
'fuse': args.fuse,
}
values = np.array(values)
if args.method != 'HOROVOD':
attrs['strategy'] = os.getenv('KUNGFU_ALLREDUCE_STRATEGY')
attrs['nvlink'] = os.getenv('KUNGFU_ALLOW_NVLINK')
log_detailed_result(values.mean(), 1.96 * values.std(), attrs)
def all_reduce_benchmark(sizes, dtype, args):
rank = _rank(args.method)
def log(msg):
if rank == 0:
print(msg)
xs = [tf.Variable(tf.ones([n], dtype)) for n in sizes]
tot_size = sum(_tensor_size(x) for x in xs)
np = get_cluster_size(args.method)
multiplier = 4 * (np - 1)
log('all reduce %d tensors of total size: %s among %d peers, using %s' %
(len(sizes), show_size(tot_size), np, args.method))
ys = _group_all_reduce_func[args.method](xs)
init = tf.global_variables_initializer()
values = []
with tf.Session(config=_config(args.method)) as sess:
duration, _ = measure(lambda: sess.run(init))
log('tensorflow init took %.fs' % (duration))
for step in one_based_range(args.warmup_steps):
duration, _ = measure(lambda: sess.run(ys))
log('warmup step %d, took %.2fs, equivalent data rate: %s' %
(step, duration, show_rate(tot_size * multiplier, duration)))
for step in one_based_range(args.steps):
duration, _ = measure(lambda: sess.run(ys))
gi = 1024 * 1024 * 1024
values.append(tot_size * multiplier / gi / duration)
log('step %d, took %.2fs, equivalent data rate: %s' %
(step, duration, show_rate(tot_size * multiplier, duration)))
if get_rank(args.method) == 0:
log_final_result(values, args)
def main(_):
args = parse_args()
if args.method == 'HOROVOD':
hvd_init()
dtype = tf.float32
sizes = _model_sizes[args.model]
if args.fuse:
sizes = [sum(sizes)]
if args.max_count > 0 and len(sizes) > args.max_count:
sizes = sizes[:args.max_count]
all_reduce_benchmark(sizes, dtype, args)
if __name__ == "__main__":
main(sys.argv)
|
netmiko/a10/a10_ssh.py | mtuska/netmiko | 2,833 | 12719565 | """A10 support."""
from netmiko.cisco_base_connection import CiscoSSHConnection
class A10SSH(CiscoSSHConnection):
"""A10 support."""
def session_preparation(self) -> None:
"""A10 requires to be enable mode to disable paging."""
self._test_channel_read(pattern=r"[>#]")
self.set_base_prompt()
self.enable()
# terminal width will not do anything without an A10-specific command
# self.set_terminal_width()
self.disable_paging(command="terminal length 0")
def save_config(
self, cmd: str = "", confirm: bool = False, confirm_response: str = ""
) -> str:
"""Not Implemented"""
raise NotImplementedError
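# Illustrative usage sketch (hedged): the host and credentials below are
# placeholders. Netmiko dispatches to this class via device_type="a10".
#
#   from netmiko import ConnectHandler
#   conn = ConnectHandler(device_type="a10", host="192.0.2.1",
#                         username="admin", password="***", secret="***")
#   print(conn.send_command("show version"))
#   conn.disconnect()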
|
aliyun-python-sdk-domain/aliyunsdkdomain/__init__.py | silent-beaters/aliyun-openapi-python-sdk | 1,001 | 12719570 | __version__ = '3.14.6' |
atcoder/_math.py | SotaroHattori/ac-library-python | 108 | 12719625 | <filename>atcoder/_math.py
import typing
def _is_prime(n: int) -> bool:
'''
Reference:
<NAME> and <NAME>,
Fast Primality Testing for Integers That Fit into a Machine Word
'''
if n <= 1:
return False
if n == 2 or n == 7 or n == 61:
return True
if n % 2 == 0:
return False
d = n - 1
while d % 2 == 0:
d //= 2
for a in (2, 7, 61):
t = d
y = pow(a, t, n)
while t != n - 1 and y != 1 and y != n - 1:
y = y * y % n
t <<= 1
if y != n - 1 and t % 2 == 0:
return False
return True
def _inv_gcd(a: int, b: int) -> typing.Tuple[int, int]:
a %= b
if a == 0:
return (b, 0)
# Contracts:
# [1] s - m0 * a = 0 (mod b)
# [2] t - m1 * a = 0 (mod b)
# [3] s * |m1| + t * |m0| <= b
s = b
t = a
m0 = 0
m1 = 1
while t:
u = s // t
s -= t * u
m0 -= m1 * u # |m1 * u| <= |m1| * s <= b
# [3]:
# (s - t * u) * |m1| + t * |m0 - m1 * u|
# <= s * |m1| - t * u * |m1| + t * (|m0| + |m1| * u)
# = s * |m1| + t * |m0| <= b
s, t = t, s
m0, m1 = m1, m0
# by [3]: |m0| <= b/g
# by g != b: |m0| < b/g
if m0 < 0:
m0 += b // s
return (s, m0)
def _primitive_root(m: int) -> int:
if m == 2:
return 1
if m == 167772161:
return 3
if m == 469762049:
return 3
if m == 754974721:
return 11
if m == 998244353:
return 3
divs = [2] + [0] * 19
cnt = 1
x = (m - 1) // 2
while x % 2 == 0:
x //= 2
i = 3
while i * i <= x:
if x % i == 0:
divs[cnt] = i
cnt += 1
while x % i == 0:
x //= i
i += 2
if x > 1:
divs[cnt] = x
cnt += 1
g = 2
while True:
for i in range(cnt):
if pow(g, (m - 1) // divs[i], m) == 1:
break
else:
return g
g += 1
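if __name__ == '__main__':
    # Quick sanity checks for the helpers above (illustrative, not part of the library).
    assert _is_prime(998244353)                 # a well-known NTT-friendly prime
    g, x = _inv_gcd(10, 7)                      # g = gcd(10, 7), 10 * x = g (mod 7)
    assert g == 1 and (10 * x) % 7 == 1
    assert _primitive_root(998244353) == 3      # hard-coded fast path above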
|
examples/run_random.py | Roryoung/realworldrl_suite | 284 | 12719652 | <filename>examples/run_random.py<gh_stars>100-1000
# coding=utf-8
# Copyright 2020 The Real-World RL Suite Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs a random policy on realworldrl."""
import os
from absl import app
from absl import flags
import numpy as np
import realworldrl_suite.environments as rwrl
flags.DEFINE_string('domain_name', 'cartpole', 'domain to solve')
flags.DEFINE_string('task_name', 'realworld_balance', 'task to solve')
flags.DEFINE_string('save_path', '/tmp/rwrl', 'where to save results')
flags.DEFINE_integer('total_episodes', 100, 'number of episodes')
FLAGS = flags.FLAGS
def random_policy(action_spec):
def _act(timestep):
del timestep
return np.random.uniform(
low=action_spec.minimum,
high=action_spec.maximum,
size=action_spec.shape)
return _act
def run():
"""Runs a random agent on a given environment."""
env = rwrl.load(
domain_name=FLAGS.domain_name,
task_name=FLAGS.task_name,
safety_spec=dict(enable=True),
delay_spec=dict(enable=True, actions=20),
log_output=os.path.join(FLAGS.save_path, 'log.npz'),
environment_kwargs=dict(
log_safety_vars=True, log_every=20, flat_observation=True))
policy = random_policy(action_spec=env.action_spec())
rewards = []
for _ in range(FLAGS.total_episodes):
timestep = env.reset()
total_reward = 0.
while not timestep.last():
action = policy(timestep)
timestep = env.step(action)
total_reward += timestep.reward
rewards.append(total_reward)
print('Random policy total reward per episode: {:.2f} +- {:.2f}'.format(
np.mean(rewards), np.std(rewards)))
def main(argv):
del argv # Unused.
run()
if __name__ == '__main__':
app.run(main)
|
bridge/npbackend/bohrium/reorganization.py | bh107/bohrium | 236 | 12719661 | <reponame>bh107/bohrium
"""
Reorganization of Array Elements Routines
=========================================
"""
import warnings
import numpy_force as numpy
from . import bhary
from bohrium_api import _info
from ._util import is_scalar
from .bhary import fix_biclass_wrapper
from . import array_create
from . import array_manipulation
from . import ufuncs
from . import numpy_backport
@fix_biclass_wrapper
def gather(ary, indexes):
"""
gather(ary, indexes)
Gather elements from 'ary' selected by 'indexes'.
The values of 'indexes' are absolute indexes into a flattened 'ary'.
The shape of the returned array equals indexes.shape.
Parameters
----------
ary : array_like
The array to gather elements from.
indexes : array_like, interpreted as integers
Array or list of indexes that will be gather from 'array'
Returns
-------
r : ndarray
The gathered array freshly-allocated.
"""
from . import _bh
ary = array_manipulation.flatten(array_create.array(ary))
# Convert a scalar index to a 1-element array
if is_scalar(indexes):
indexes = [indexes]
indexes = array_create.array(indexes, dtype=numpy.uint64, bohrium=True)
ret = array_create.empty(indexes.shape, dtype=ary.dtype, bohrium=True)
if ary.size == 0 or indexes.size == 0:
return array_create.array([])
_bh.ufunc(_info.op['gather']['id'], (ret, ary, indexes))
return ret
@fix_biclass_wrapper
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
This function does the same thing as "fancy" indexing (indexing arrays
using arrays); however, it can be easier to use if you need elements
along a given axis.
Parameters
----------
a : array_like
The source array.
indices : array_like, interpreted as integers
The indices of the values to extract.
.. versionadded:: 1.8.0
Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
subarray : ndarray
The returned array has the same type as `a`.
See Also
--------
compress : Take elements using a boolean mask
ndarray.take : equivalent method
Examples
--------
>>> a = [4, 3, 5, 7, 6, 8]
>>> indices = [0, 1, 4]
>>> np.take(a, indices)
array([4, 3, 6])
In this example if `a` is an ndarray, "fancy" indexing can be used.
>>> a = np.array(a)
>>> a[indices]
array([4, 3, 6])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, [[0, 1], [2, 3]])
array([[4, 3],
[5, 7]])
"""
if not bhary.check(a):
indices = array_create.array(indices, bohrium=False)
return numpy.take(a, indices, axis=axis, out=out, mode=mode)
if mode != "raise":
warnings.warn("Bohrium only supports the 'raise' mode not '%s', "
"it will be handled by the original NumPy." % mode, UserWarning, 2)
a = array_create.array(a, bohrium=False)
indices = array_create.array(indices, bohrium=False)
return numpy.take(a, indices, axis=axis, out=out, mode=mode)
if axis is not None and a.ndim > 1:
warnings.warn("Bohrium does not support the 'axis' argument, "
"it will be handled by the original NumPy.", UserWarning, 2)
a = array_create.array(a, bohrium=False)
indices = array_create.array(indices, bohrium=False)
return numpy.take(a, indices, axis=axis, out=out, mode=mode)
ret = gather(a, indices)
if out is not None:
out[...] = ret
return out
else:
return ret
@fix_biclass_wrapper
def take_using_index_tuple(a, index_tuple, out=None):
"""
Take elements from the array 'a' specified by 'index_tuple'
This function is very similar to take(), but takes a tuple of index arrays rather than a single index array
Parameters
----------
a : array_like
The source array.
index_tuple : tuple of array_like, interpreted as integers
Each array in the tuple specifies the indices of the values to extract for that axis.
The number of arrays in 'index_tuple' must equal the number of dimensions in 'a'.
out : ndarray, optional
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype.
Returns
-------
subarray : ndarray
The returned array has the same type as `a`.
"""
if not bhary.check(a):
ret = a[index_tuple]
if out is not None:
out[...] = ret
return out
else:
return ret
assert len(index_tuple) == a.ndim
if a.size == 0:
return array_create.array([], dtype=a.dtype)
if a.ndim == 1:
return take(a, index_tuple[0], out=out)
# Make sure that all index arrays are uint64 bohrium arrays
index_list = []
for index in index_tuple:
index_list.append(array_create.array(index, dtype=numpy.uint64, bohrium=True))
if index_list[-1].size == 0:
return array_create.empty((0,), dtype=a.dtype)
# And then broadcast them into the same shape
index_list = array_manipulation.broadcast_arrays(*index_list)[0]
# Let's find the absolute index
abs_index = index_list[-1].copy()
stride = a.shape[-1]
for i in range(len(index_list) - 2, -1, -1): # Iterate backwards from index_list[-2]
abs_index += index_list[i] * stride
stride *= a.shape[i]
# take() support absolute indices
ret = take(a, abs_index).reshape(index_list[0].shape)
if out is not None:
out[...] = ret
return out
else:
return ret
@fix_biclass_wrapper
def scatter(ary, indexes, values):
"""
scatter(ary, indexes, values)
Scatter 'values' into 'ary' selected by 'indexes'.
The values of 'indexes' are absolute indexes into a flattened 'ary'.
The shape of 'indexes' and 'values' must be equal.
Parameters
----------
ary : array_like
The target array to write the values to.
indexes : array_like, interpreted as integers
Array or list of indexes that will be written to in 'ary'
values : array_like
Values to write into 'ary'
"""
from . import _bh
indexes = array_manipulation.flatten(array_create.array(indexes, dtype=numpy.uint64), always_copy=False)
values = array_manipulation.flatten(array_create.array(values, dtype=ary.dtype), always_copy=False)
assert indexes.shape == values.shape
if ary.size == 0 or indexes.size == 0:
return
# In order to ensure a contiguous array, we do the scatter on a flatten copy
flat = array_manipulation.flatten(ary, always_copy=True)
_bh.ufunc(_info.op['scatter']['id'], (flat, values, indexes))
ary[...] = flat.reshape(ary.shape)
@fix_biclass_wrapper
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
See Also
--------
putmask, place, take
Examples
--------
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
"""
if ind.size == 0:
return # Nothing to insert!
if not bhary.check(a):
return numpy.put(a, ind.astype(numpy.int64), v, mode=mode)
if mode != "raise":
warnings.warn("Bohrium only supports the 'raise' mode not '%s', "
"it will be handled by the original NumPy." % mode, UserWarning, 2)
return numpy.put(a, ind, v, mode=mode)
indexes = array_manipulation.flatten(array_create.array(ind, dtype=numpy.uint64), always_copy=False)
values = array_manipulation.flatten(array_create.array(v, dtype=a.dtype), always_copy=False)
# Now let's make the shape of 'indexes' and 'values' match
if indexes.size > values.size:
if values.size == 1:
# When 'values' is a scalar, we can broadcast it to match 'indexes'
values = numpy_backport.as_strided(values, shape=indexes.shape, strides=(0,))
else: # else we repeat 'values' enough times to be larger than 'indexes'
values = numpy_backport.as_strided(values,
shape=(indexes.size // values.size + 2, values.size),
strides=(0, values.itemsize))
values = array_manipulation.flatten(values, always_copy=False)
# When 'values' is too large, we simply cut the end off
if values.size > indexes.size:
values = values[0:indexes.size]
# Now that 'indexes' and 'values' have the same shape, we can call 'scatter'
scatter(a, indexes, values)
@fix_biclass_wrapper
def put_using_index_tuple(a, index_tuple, v):
"""
Replaces specified elements of an array with given values.
This function is very similar to put(), but takes a tuple of index arrays rather than a single index array.
The indexing works like fancy indexing:
::
a[index_tuple] = v
Parameters
----------
a : array_like
The source array.
index_tuple : tuple of array_like, interpreted as integers
Each array in the tuple specifies the indices of the values to extract for that axis.
The number of arrays in 'index_tuple' must equal the number of dimensions in 'a'.
v : array_like
Values to place in `a`.
Returns
-------
subarray : ndarray
The returned array has the same type as `a`.
"""
if not bhary.check(a):
a[index_tuple] = array_create.array(v, bohrium=False)
return
v = array_create.array(v, bohrium=True)
assert len(index_tuple) == a.ndim
if a.size == 0:
return
if a.ndim == 1:
return put(a, index_tuple[0], v)
# Make sure that all index arrays are uint64 bohrium arrays
index_list = []
for index in index_tuple:
index_list.append(array_create.array(index, dtype=numpy.uint64, bohrium=True))
if index_list[-1].size == 0:
return array_create.empty((0,), dtype=a.dtype)
# And then broadcast them into the same shape
index_list = array_manipulation.broadcast_arrays(*index_list)[0]
# Let's find the absolute index
abs_index = index_list[-1].copy()
stride = a.shape[-1]
for i in range(len(index_list) - 2, -1, -1): # Iterate backwards from index_list[-2]
abs_index += index_list[i] * stride
stride *= a.shape[i]
# put() support absolute indices
put(a, abs_index, v)
@fix_biclass_wrapper
def cond_scatter(ary, indexes, values, mask):
"""
cond_scatter(ary, indexes, values, mask)
Scatter 'values' into 'ary' selected by 'indexes' where 'mask' is true.
The values of 'indexes' are absolute indexes into a flattened 'ary'.
The shape of 'indexes', 'values', and 'mask' must be equal.
Parameters
----------
ary : array_like
The target array to write the values to.
indexes : array_like, interpreted as integers
Array or list of indexes that will be written to in 'ary'
values : array_like
Values to write into 'ary'
mask : array_like, interpreted as booleans
A mask that specifies which indexes and values to include and exclude
"""
from . import _bh
indexes = array_manipulation.flatten(array_create.array(indexes, dtype=numpy.uint64), always_copy=False)
values = array_manipulation.flatten(array_create.array(values, dtype=ary.dtype), always_copy=False)
mask = array_manipulation.flatten(array_create.array(mask, dtype=numpy.bool), always_copy=False)
assert (indexes.shape == values.shape and values.shape == mask.shape)
if ary.size == 0 or indexes.size == 0:
return
# In order to ensure a contiguous array, we do the scatter on a flatten copy
flat = array_manipulation.flatten(ary, always_copy=True)
_bh.ufunc(_info.op['cond_scatter']['id'], (flat, values, indexes, mask))
ary[...] = flat.reshape(ary.shape)
@fix_biclass_wrapper
def pack(ary, mask):
"""
pack(ary, mask)
Pack the elements of 'ary' specified by 'mask' into a new contiguous array.
The selection is performed on a flattened view of 'ary'.
The shape of 'mask' and 'ary' must be equal.
Parameters
----------
ary : array_like, read flatten
The array to read from.
mask : array_like, interpreted as a flatten boolean array
A mask that specifies which indexes of 'ary' to read
"""
ary = array_manipulation.flatten(array_create.array(ary), always_copy=False)
mask = array_manipulation.flatten(array_create.array(mask, dtype=numpy.bool), always_copy=False)
assert (ary.shape == mask.shape)
if ary.size == 0 or mask.size == 0:
return
true_indexes = ufuncs.add.accumulate(mask)
true_count = int(true_indexes[-1])
if true_count == 0:
return array_create.empty((0,), dtype=ary.dtype)
else:
ret = array_create.empty((true_count,), dtype=ary.dtype)
cond_scatter(ret, true_indexes - 1, ary, mask)
return ret
@fix_biclass_wrapper
def flatnonzero(a):
"""
Return indices that are non-zero in the flattened version of a.
This is equivalent to a.ravel().nonzero()[0].
Parameters
----------
a : ndarray
Input array.
Returns
-------
res : ndarray
Output array, containing the indices of the elements of `a.ravel()`
that are non-zero.
See Also
--------
nonzero : Return the indices of the non-zero elements of the input array.
ravel : Return a 1-D array containing the elements of the input array.
Examples
--------
>>> x = np.arange(-2, 3)
>>> x
array([-2, -1, 0, 1, 2])
>>> np.flatnonzero(x)
array([0, 1, 3, 4])
Use the indices of the non-zero elements as an index array to extract
these elements:
>>> x.ravel()[np.flatnonzero(x)]
array([-2, -1, 1, 2])
"""
if a.dtype is not numpy.bool:
a = a != 0
new_indexes = array_create.arange(a.size, dtype=numpy.uint64)
return pack(new_indexes, a)
@fix_biclass_wrapper
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always tested and returned in
row-major, C-style order. The corresponding non-zero
values can be obtained with::
a[nonzero(a)]
To group the indices by element, rather than dimension, use::
transpose(nonzero(a))
The result of this is always a 2-D array, with a row for
each non-zero element.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> x = np.eye(3)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> np.nonzero(x)
(array([0, 1, 2]), array([0, 1, 2]))
>>> x[np.nonzero(x)]
array([ 1., 1., 1.])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
[2, 2]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = np.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the boolean array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
if a.ndim == 1:
return (flatnonzero(a),)
if not a.flags['C_CONTIGUOUS']:
a = a.copy(order='C')
nz = flatnonzero(a)
ret = []
for stride_in_bytes in a.strides:
stride = stride_in_bytes // a.itemsize
assert stride_in_bytes % a.itemsize == 0
tmp = nz // stride
ret.append(tmp)
nz -= tmp * stride
return tuple(ret)
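# Illustrative usage sketch (added comments, not part of the original module).
# Indices are absolute positions into the flattened array, as described above:
#
#   a = array_create.array([[0, 1, 2], [3, 4, 5]])
#   gather(a, [0, 4, 5])        # -> [0, 4, 5]
#   scatter(a, [0, 5], [9, 9])  # a becomes [[9, 1, 2], [3, 4, 9]]
#   pack(a, a > 3)              # -> [9, 4, 9] (elements where the mask is true)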
|
qt__pyqt__pyside__pyqode/move_window_on_center__QDesktopWidget.py | gil9red/SimplePyScripts | 117 | 12719681 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.QtWidgets import QApplication, QWidget
app = QApplication([])
mw = QWidget()
rect = mw.frameGeometry()
center = app.desktop().availableGeometry().center() # This is where QDesktopWidget is used
rect.moveCenter(center)
mw.move(rect.topLeft())
mw.show()
app.exec()
|
sparse_operation_kit/documents/tutorials/utility.py | x-y-z/HugeCTR | 130 | 12719682 | <reponame>x-y-z/HugeCTR
"""
Copyright (c) 2021, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"../../unit_test/test_scripts/tf2/")))
from utils import *
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"DenseDemo/")))
from models import SOKDenseDemo
def TFDataset(filename, batchsize, as_sparse_tensor, repeat=1):
samples, labels = restore_from_file(filename)
dataset = tf_dataset(keys=samples, labels=labels,
batchsize=batchsize,
to_sparse_tensor=as_sparse_tensor,
repeat=repeat)
del samples
del labels
return dataset
def get_dataset(global_batch_size,
read_batchsize,
iter_num=10,
vocabulary_size=1024,
slot_num=10,
max_nnz=5,
use_sparse_mask=False,
repeat=1):
random_samples, ramdom_labels = generate_random_samples(
num_of_samples=global_batch_size * iter_num,
vocabulary_size=vocabulary_size,
slot_num=slot_num,
max_nnz=max_nnz,
use_sparse_mask=use_sparse_mask)
dataset = tf_dataset(keys=random_samples,
labels=ramdom_labels,
batchsize=read_batchsize,
to_sparse_tensor=use_sparse_mask,
repeat=repeat)
return dataset |
tests/basics/fun_name.py | peterson79/pycom-micropython-sigfox | 692 | 12719684 | def Fun():
pass
class A:
def __init__(self):
pass
def Fun(self):
pass
try:
print(Fun.__name__)
print(A.__init__.__name__)
print(A.Fun.__name__)
print(A().Fun.__name__)
except AttributeError:
print('SKIP')
|
neuralnilm/monitor/monitor.py | ceavelasquezpi/neuralnilm | 135 | 12719767 | from __future__ import print_function, division
from time import sleep
import pymongo
from monary import Monary
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from neuralnilm.consts import DATA_FOLD_NAMES
from neuralnilm.utils import get_colors
from neuralnilm.config import config
class Monitor(object):
def __init__(self, experiment_id, output_path='.',
update_period=1, max_num_lines=1000,
mongo_db='neuralnilm',
mongo_host=None):
"""
Parameters
----------
max_num_lines : int
Number of pixels.
"""
self.experiment_id = experiment_id
self.output_path = output_path
self.update_period = update_period
self.max_num_lines = max_num_lines
self._last_iteration_processed = {'train': 0, 'validation': 0}
if mongo_host is None:
self.mongo_host = config.get("MongoDB", "address")
else:
self.mongo_host = mongo_host
self.mongo_client = pymongo.MongoClient(self.mongo_host)
self.db = self.mongo_client[mongo_db]
self.mongo_db = mongo_db
self._validation_metric_names = None
def start(self):
while True:
if self._new_scores_available('train'):
self._plot_train_scores()
if self._new_scores_available('validation'):
self._plot_validation_scores()
sleep(self.update_period)
def _new_scores_available(self, train_or_validation):
"""Returns True if new training costs are available from DB.
Parameters
----------
train_or_validation : str, {'train', 'validation'}
"""
collection = self.db[train_or_validation + '_scores']
document = collection.find_one(
filter={
'experiment_id': self.experiment_id,
'iteration': {
'$gt': self._last_iteration_processed[train_or_validation]}
}
)
return bool(document)
def _get_validation_mse(self):
monary = Monary(host=self.mongo_host)
def get_mse_for_fold(fold):
iterations, loss, source_id = monary.query(
db=self.mongo_db,
coll='validation_scores',
query={'experiment_id': self.experiment_id, 'fold': fold},
fields=['iteration', 'scores.regression.mean_squared_error',
'source_id'],
types=['int32', 'float32', 'int8']
)
scores_df = pd.DataFrame(
{'loss': loss, 'source_id': source_id}, index=iterations)
scores_df = scores_df.sort_index()
return scores_df
FOLDS = ['unseen_appliances', 'unseen_activations_of_seen_appliances']
scores = {}
for fold in FOLDS:
scores[fold] = get_mse_for_fold(fold)
return scores
def _get_train_costs(self):
# Get train scores
monary = Monary(host=self.mongo_host)
iterations, loss, source_id = monary.query(
db=self.mongo_db,
coll='train_scores',
query={'experiment_id': self.experiment_id},
fields=['iteration', 'loss', 'source_id'],
types=['int32', 'float32', 'int8']
)
scores_df = pd.DataFrame(
{'loss': loss, 'source_id': source_id}, index=iterations)
scores_df = scores_df.sort_index()
return scores_df
def _plot_train_scores(self):
train_scores_df = self._get_train_costs()
all_scores = self._get_validation_mse()
all_scores.update({'train': train_scores_df})
fig, ax = plt.subplots(1)
source_names = self.source_names
for fold, scores_df in all_scores.iteritems():
sources = scores_df['source_id'].unique()
for source_i in sources:
# Get losses for just this source
mask = scores_df['source_id'] == source_i
loss = scores_df[mask]['loss']
# Downsample if necessary
loss_for_source = self._downsample(loss)
# Plot
ax.plot(loss_for_source.index, loss_for_source.values,
label='{} : {}'.format(fold, source_names[source_i]))
ax.legend()
plt.title('Training costs')
ax.set_xlabel('Iteration')
ax.set_ylabel('Mean squared error')
plt.show()
try:
self._last_iteration_processed['train'] = train_scores_df.index[-1]
except IndexError:
# No data loaded
pass
@property
def validation_metric_names(self):
"""
Returns
-------
metric_names : list
e.g. ['regression.mean_squared_error',
'classification_2_state.f1_score']
"""
if self._validation_metric_names is None:
scores = self.db.validation_scores.find_one(
filter={'experiment_id': self.experiment_id})['scores']
self._validation_metric_names = []
for metric_type, metrics in scores.iteritems():
for metric_name in metrics:
self._validation_metric_names.append(
metric_type + '.' + metric_name)
return self._validation_metric_names
@property
def source_names(self):
"""
Returns
-------
source_names : dict
"""
metadata = self.db.experiments.find_one({'_id': self.experiment_id})
sources = metadata['data']['pipeline']['sources']
source_names = {int(i): sources[i]['name'] for i in sources}
return source_names
def _plot_validation_scores(self):
validation_sources = self.db.validation_scores.distinct(
key='source_id', filter={'experiment_id': self.experiment_id})
validation_sources.sort()
num_cols = len(validation_sources)
fig, axes = plt.subplots(
nrows=3, ncols=num_cols, sharex="col", sharey=True,
squeeze=False)
fig.patch.set_facecolor('white')
source_names = self.source_names
for col, source_id in enumerate(validation_sources):
for row, fold in enumerate(DATA_FOLD_NAMES):
ax = axes[row, col]
self._plot_validation_scores_for_source_and_fold(
ax=ax, source_id=source_id, fold=fold,
show_axes_labels=(row == 0),
show_scales=(col == num_cols-1))
if row == 0:
ax.set_title(source_names[source_id], position=(.5, 1.05))
elif row == 2:
ax.set_xlabel('Iteration', labelpad=10)
if col == 0:
ax.set_ylabel(fold.replace("_", " ").title(), labelpad=10)
ax.patch.set_facecolor((0.95, 0.95, 0.95))
plt.subplots_adjust(
top=0.91, bottom=0.05, left=0.03, right=0.7,
hspace=0.15, wspace=0.1)
plt.show()
def _plot_validation_scores_for_source_and_fold(self, ax, source_id, fold,
show_axes_labels,
show_scales):
fields = ['iteration'] + ['scores.' + metric_name for metric_name in
self.validation_metric_names]
monary = Monary(host=self.mongo_host)
result = monary.query(
db=self.mongo_db,
coll='validation_scores',
query={
'experiment_id': self.experiment_id,
'source_id': source_id,
'fold': fold
},
fields=fields,
types=['int32'] + ['float32'] * len(self.validation_metric_names)
)
index = result[0]
data = {metric_name: result[i+1] for i, metric_name in
enumerate(self.validation_metric_names)}
df = pd.DataFrame(data, index=index)
df = df.sort_index()
df = self._downsample(df)
# Create multiple independent axes. Adapted from <NAME>'s answer:
# http://stackoverflow.com/a/7734614
# Colours
n = len(self.validation_metric_names)
colors = get_colors(n)
# Twin the x-axis to make independent y-axes.
axes = [ax]
for metric_name in self.validation_metric_names[1:]:
axes.append(ax.twinx())
SEP = 0.2
if show_scales:
for i, axis in enumerate(axes):
axis.yaxis.tick_right()
if i != 0:
# To make the border of the right-most axis visible,
# we need to turn the frame on. This hides the other plots,
# however, so we need to turn its fill off.
axis.set_frame_on(True)
axis.patch.set_visible(False)
# Move the last y-axes spines over to the right.
axis.spines['right'].set_position(
('axes', 1 + (SEP * i)))
else:
for axis in axes:
axis.tick_params(labelright=False, labelleft=False)
axis.yaxis.set_ticks_position('none')
axis.spines['right'].set_visible(False)
for axis in axes:
for spine in ['top', 'left', 'bottom']:
axis.spines[spine].set_visible(False)
axis.xaxis.set_ticks_position('none')
lines = []
for i, (axis, metric_name, color) in enumerate(
zip(axes, self.validation_metric_names, colors)):
axis.tick_params(axis='y', colors=color, direction='out')
label = metric_name.replace("regression.", "")
label = label.replace("classification_", "")
label = label.replace("_", " ")
label = label.replace(".", " ")
label = label.replace(" ", "\n")
line, = axis.plot(
df.index, df[metric_name].values, color=color, label=label)
if show_axes_labels and show_scales:
axis.set_ylabel(
label, color=color, rotation=0, fontsize=8, va='bottom')
if i == 0:
coords = (1.05, 1.1)
else:
coords = (1.05 + (SEP * i), 1.1)
axis.yaxis.set_label_coords(*coords)
lines.append(line)
self._last_iteration_processed['validation'] = index[-1]
return lines
def _downsample(self, data):
"""Downsample `data` if necessary."""
if len(data) > self.max_num_lines:
divisor = int(np.ceil(len(data) / self.max_num_lines))
data = data.groupby(lambda x: x // divisor).mean()
data.index *= divisor
return data
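# Usage sketch (the experiment id and host are placeholders; see the
# constructor arguments above for the available options):
#
#   monitor = Monitor(experiment_id='e100', mongo_host='localhost')
#   monitor.start()  # polls MongoDB and re-plots train/validation scores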
|
scripts/visualize.py | nexxt-intelligence/simalign | 198 | 12719773 | <filename>scripts/visualize.py
import matplotlib.pyplot as plt
import numpy as np
from typing import List, Text, Tuple
def line2matrix(line: Text, n: int, m: int) -> Tuple[np.ndarray, np.ndarray]:
'''
converts an alignment given in the format "0-1 3p4 5-6" to alignment matrices
n, m: maximum lengths of the involved sentences (i.e., dimensions of the alignment matrices)
'''
def convert(i, j):
i, j = int(i), int(j)
if i >= n or j >= m:
raise ValueError("Error in Gold Standard?")
return i, j
possibles = np.zeros((n, m))
sures = np.zeros((n, m))
for elem in line.split(" "):
if "p" in elem:
i, j = convert(*elem.split("p"))
possibles[i, j] = 1
elif "-" in elem:
i, j = convert(*elem.split("-"))
possibles[i, j] = 1
sures[i, j] = 1
return sures, possibles
def plot_alignments(e: List[Text],
f: List[Text],
sures: np.ndarray,
possibles: np.ndarray,
alignment1: np.ndarray,
alignment2: np.ndarray = None,
title: Text = None,
filename: Text = None):
shorter = min(len(e), len(f))
scalefactor = min((4 / shorter), 1)
groundtruth = 0.75 * sures + 0.4 * possibles
fig, ax = plt.subplots()
im = ax.imshow(groundtruth, cmap="Greens", vmin=0, vmax=1.5)
# show all ticks...
ax.set_xticks(np.arange(len(f)))
ax.set_yticks(np.arange(len(e)))
# ... and label them
ax.set_xticklabels(f, fontsize=25 * scalefactor)
ax.set_yticklabels(e, fontsize=25 * scalefactor)
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.tick_params(top=True, bottom=False,
labeltop=True, labelbottom=False)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=30, ha="left",
rotation_mode="default")
plt.setp(ax.get_yticklabels(), rotation=0, ha="right",
rotation_mode="anchor")
ax.set_xticks(np.arange(groundtruth.shape[1] + 1) - .5, minor=True)
ax.set_yticks(np.arange(groundtruth.shape[0] + 1) - .5, minor=True)
# set grid
ax.grid(which="minor", color="black", linestyle='-', linewidth=1)
ax.tick_params(which="minor", bottom=False, left=False)
# Loop over data dimensions and create text annotations.
circle = dict(boxstyle="circle,pad=0.3", fc=(0, 0, 0, 0.0), ec="black", lw=3)
roundthing = dict(boxstyle="square,pad=0.3", fc="black", ec=(0, 0, 0, 0.0), lw=2)
# plot alignments
for i in range(len(e)):
for j in range(len(f)):
if alignment1[i, j] > 0:
t = ax.text(j, i, "x", ha="center", va="center",
size=25 * scalefactor,
bbox=circle, color=(0, 0, 0, 0.0))
if alignment2 is not None and alignment2[i, j] > 0:
t = ax.text(j, i, "x", ha="center", va="center",
size=12 * scalefactor,
bbox=roundthing, color=(0, 0, 0, 0.0))
if title:
ax.set_title(title)
fig.tight_layout()
if filename:
plt.savefig(filename)
else:
plt.show()
if __name__ == '__main__':
line2matrix("0-0 1p1 2-1", 3, 2)
plot_alignments(["Testing", "this", "."],
["Hier", "wird", "getestet", "."],
np.array([[0, 0, 1, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]]),
np.array([[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]]),
np.array([[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]]),
np.array([[0, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]]),
"Example")
|
pubnub/endpoints/objects_v2/uuid/get_uuid.py | natekspencer/pubnub-python | 146 | 12719794 | from pubnub.endpoints.objects_v2.objects_endpoint import ObjectsEndpoint, \
IncludeCustomEndpoint, UuidEndpoint
from pubnub.enums import PNOperationType
from pubnub.enums import HttpMethod
from pubnub.models.consumer.objects_v2.uuid import PNGetUUIDMetadataResult
class GetUuid(ObjectsEndpoint, UuidEndpoint, IncludeCustomEndpoint):
GET_UID_PATH = "/v2/objects/%s/uuids/%s"
def __init__(self, pubnub):
ObjectsEndpoint.__init__(self, pubnub)
UuidEndpoint.__init__(self)
IncludeCustomEndpoint.__init__(self)
def build_path(self):
return GetUuid.GET_UID_PATH % (self.pubnub.config.subscribe_key, self._effective_uuid())
def validate_specific_params(self):
self._validate_uuid()
def create_response(self, envelope):
return PNGetUUIDMetadataResult(envelope)
def operation_type(self):
return PNOperationType.PNGetUuidMetadataOperation
def name(self):
return "Get UUID"
def http_method(self):
return HttpMethod.GET
|
hlib/tests/test_math.py | pasqoc/heterocl | 236 | 12719799 | <reponame>pasqoc/heterocl
import heterocl as hcl
import heterocl.tvm as tvm
import numpy as np
import numpy.testing as tst
import hlib
dtype = hcl.Float(64)
_sum = hcl.reducer(0, lambda x, y: x + y, dtype)
_max = hcl.reducer(-100000, lambda x, y: tvm.make.Max(x, y), dtype)
_min = hcl.reducer(100000, lambda x, y: tvm.make.Min(x, y), dtype)
_prod = hcl.reducer(1, lambda x, y: x * y, dtype)
def test_exp():
def _test(in_shape):
hcl.init(hcl.Float())
data = hcl.placeholder(in_shape)
def math_func(data):
return hlib.op.math.exp(data)
s = hcl.create_schedule(data, math_func)
f = hcl.build(s)
_in = 10 * np.random.random(in_shape) - 5
out = hcl.asarray(np.zeros(in_shape).astype('float32'))
real_out = np.exp(_in)
f(hcl.asarray(_in), out)
tst.assert_almost_equal(out.asnumpy(), real_out, 4)
_test((1, 3))
_test((3, 3, 3))
_test((5, 5, 3, 2))
def test_log():
def _test(in_shape):
hcl.init(hcl.Float())
data = hcl.placeholder(in_shape)
def math_func(data):
return hlib.op.math.log(data)
s = hcl.create_schedule(data, math_func)
f = hcl.build(s)
_in = 10 * np.random.random(in_shape) + 1
out = hcl.asarray(np.zeros(in_shape).astype('float32'))
real_out = np.log(_in)
f(hcl.asarray(_in), out)
tst.assert_almost_equal(out.asnumpy(), real_out, 5)
_test((1, 3))
_test((3, 3, 3))
_test((5, 5, 3, 2))
def test_sigmoid():
def _test(in_shape):
data = hcl.placeholder(in_shape)
def math_func(data):
return hlib.op.math.sigmoid(data)
s = hcl.create_schedule(data, math_func)
f = hcl.build(s)
_in = 10 * np.random.random(in_shape) - 5
out = hcl.asarray(np.zeros(in_shape).astype('float32'))
def sigmoid(data):
return 1 / (1 + np.exp(-data))
real_out = sigmoid(_in)
f(hcl.asarray(_in), out)
tst.assert_almost_equal(out.asnumpy(), real_out, 5)
_test((1, 3))
_test((3, 3, 3))
_test((5, 5, 3, 2))
def test_sqrt():
def _test(in_shape):
data = hcl.placeholder(in_shape)
def math_func(data):
return hlib.op.math.sqrt(data)
s = hcl.create_schedule(data, math_func)
f = hcl.build(s)
_in = 100 * np.random.random(in_shape) + 1
out = hcl.asarray(np.zeros(in_shape).astype('float32'))
real_out = np.sqrt(_in)
f(hcl.asarray(_in), out)
tst.assert_almost_equal(out.asnumpy(), real_out, 5)
_test((1, 3))
_test((3, 3, 3))
_test((5, 5, 3, 2))
def tanh_test():
def _test(in_shape):
hcl.init(hcl.Float())
data = hcl.placeholder(in_shape)
def math_func(data):
return hlib.op.math.tanh(data)
s = hcl.create_schedule(data, math_func)
f = hcl.build(s)
_in = 100 * np.random.random(in_shape) - 50
out = hcl.asarray(np.zeros(in_shape).astype('float32'))
real_out = np.tanh(_in)
f(hcl.asarray(_in), out)
tst.assert_almost_equal(out.asnumpy(), real_out, 5)
_test((1, 3))
_test((3, 3, 3))
_test((5, 5, 3, 2))
def test_clip():
def _test(in_shape, x_min, x_max):
hcl.init(hcl.Float())
data = hcl.placeholder(in_shape)
def math_func(data, x_min=x_min, x_max=x_max):
return hlib.op.math.clip(data, a_min=x_min, a_max=x_max)
s = hcl.create_schedule(data, math_func)
f = hcl.build(s)
_in = 10 * np.random.random(in_shape) - 5
out = hcl.asarray(np.zeros(in_shape).astype('float32'))
real_out = np.clip(_in, x_min, x_max)
f(hcl.asarray(_in), out)
tst.assert_almost_equal(out.asnumpy(), real_out)
_test((1, 3), 0, 4)
_test((1, 3, 3), -4, 4)
_test((1, 3), 0, 4)
_test((3, 3), 0, 0.01)
def test_sum():
def _test(in_shape, axis=None, keepdims=False):
hcl.init()
new_shape = []
if axis is None:
for i in range(len(in_shape)):
new_shape.append(1)
else:
if isinstance(axis, int):
if axis < 0:
axis = len(in_shape) + axis
axis = [axis]
for i in range(len(in_shape)):
if i in axis:
new_shape.append(1)
else:
new_shape.append(in_shape[i])
data = hcl.placeholder(in_shape)
def math_func(data, axis=axis, keepdims=keepdims):
return hlib.op.math.sum(data, axis, keepdims)
s = hcl.create_schedule(data, math_func)
f = hcl.build(s)
_in = np.random.randint(10, size=in_shape)
if keepdims:
out = hcl.asarray(np.zeros(new_shape))
else:
out = hcl.asarray(np.squeeze(np.zeros(new_shape)))
f(hcl.asarray(_in), out)
real_out = np.sum(_in, axis=axis, keepdims=keepdims)
tst.assert_almost_equal(real_out, out.asnumpy())
_test((3, 3), axis=(0,))
_test((3, 3), axis=(0,), keepdims=True)
_test((2, 2, 2), axis=(0,))
_test((2, 2, 2), axis=(0,), keepdims=True)
_test((2, 2, 2), axis=(1,))
_test((2, 2, 2), axis=(1,), keepdims=True)
_test((2, 2, 2), axis=(2,))
_test((2, 2, 2), axis=(2,), keepdims=True)
_test((2, 2, 2, 3), axis=(0,))
_test((2, 2, 2, 3), axis=(0,), keepdims=True)
_test((2, 2, 2, 3), axis=(1,))
_test((2, 2, 2, 3), axis=(1,), keepdims=True)
_test((2, 2, 2, 3), axis=(2,))
_test((2, 2, 2, 3), axis=(2,), keepdims=True)
_test((2, 2, 2, 3), axis=(3,))
_test((2, 2, 2, 3), axis=(3,), keepdims=True)
_test((2, 2, 2, 3), axis=(0, 1))
_test((2, 2, 2, 3), axis=(0, 1), keepdims=True)
_test((2, 2, 2, 3), axis=(0, 2))
_test((2, 2, 2, 3), axis=(0, 2), keepdims=True)
_test((5, 2, 4, 3), axis=(3,))
_test((5, 2, 4, 3), axis=(3,), keepdims=True)
_test((5, 2, 4, 3), axis=(0, 1))
_test((5, 2, 4, 3), axis=(0, 1), keepdims=True)
_test((5, 2, 4, 3), axis=(0, 2))
_test((5, 2, 4, 3), axis=(0, 2), keepdims=True)
def test_max():
def _test(in_shape, axis=None, keepdims=True):
hcl.init()
new_shape = []
if axis is None:
for i in range(len(in_shape)):
new_shape.append(1)
else:
if isinstance(axis, int):
if axis < 0:
axis = len(in_shape) + axis
axis = [axis]
for i in range(len(in_shape)):
if i in axis:
new_shape.append(1)
else:
new_shape.append(in_shape[i])
data = hcl.placeholder(in_shape)
def math_func(data, axis=axis, keepdims=keepdims):
return hlib.op.math.max(data, axis, keepdims)
s = hcl.create_schedule(data, math_func)
f = hcl.build(s)
_in = np.random.randint(10, size=in_shape)
if keepdims:
out = hcl.asarray(np.zeros(new_shape))
else:
out = hcl.asarray(np.squeeze(np.zeros(new_shape)))
real_out = np.amax(_in, tuple(axis), keepdims=keepdims)
f(hcl.asarray(_in), out)
tst.assert_almost_equal(out.asnumpy(), real_out)
_test((3, 3), axis=(0,))
_test((3, 3), axis=(0,), keepdims=True)
_test((2, 2, 2), axis=(0,))
_test((2, 2, 2), axis=(0,), keepdims=True)
_test((2, 2, 2), axis=(1,))
_test((2, 2, 2), axis=(1,), keepdims=True)
_test((2, 2, 2), axis=(2,))
_test((2, 2, 2), axis=(2,), keepdims=True)
_test((2, 2, 2, 3), axis=(0,))
_test((2, 2, 2, 3), axis=(0,), keepdims=True)
_test((2, 2, 2, 3), axis=(1,))
_test((2, 2, 2, 3), axis=(1,), keepdims=True)
_test((2, 2, 2, 3), axis=(2,))
_test((2, 2, 2, 3), axis=(2,), keepdims=True)
_test((2, 2, 2, 3), axis=(3,))
_test((2, 2, 2, 3), axis=(3,), keepdims=True)
_test((2, 2, 2, 3), axis=(0, 1))
_test((2, 2, 2, 3), axis=(0, 1), keepdims=True)
_test((2, 2, 2, 3), axis=(0, 2))
_test((2, 2, 2, 3), axis=(0, 2), keepdims=True)
_test((5, 2, 4, 3), axis=(3,))
_test((5, 2, 4, 3), axis=(3,), keepdims=True)
_test((5, 2, 4, 3), axis=(0, 1))
_test((5, 2, 4, 3), axis=(0, 1), keepdims=True)
_test((5, 2, 4, 3), axis=(0, 2))
_test((5, 2, 4, 3), axis=(0, 2), keepdims=True)
def test_prod():
def _test(in_shape, axis=None, keepdims=True):
hcl.init(hcl.Float())
new_shape = []
if axis is None:
for i in range(len(in_shape)):
new_shape.append(1)
else:
if isinstance(axis, int):
if axis < 0:
axis = len(in_shape) + axis
axis = [axis]
for i in range(len(in_shape)):
if i in axis:
new_shape.append(1)
else:
new_shape.append(in_shape[i])
data = hcl.placeholder(in_shape)
def math_func(data, axis=axis, keepdims=keepdims):
return hlib.op.math.prod(data, axis, keepdims)
s = hcl.create_schedule(data, math_func)
f = hcl.build(s)
_in = np.random.random(size=in_shape)
if keepdims:
out = hcl.asarray(np.zeros(new_shape))
else:
out = hcl.asarray(np.squeeze(np.zeros(new_shape)))
real_out = np.prod(_in, tuple(axis), keepdims=keepdims)
f(hcl.asarray(_in), out)
tst.assert_almost_equal(out.asnumpy(), real_out)
_test((3, 3), axis=(0,))
_test((3, 3), axis=(0,), keepdims=True)
_test((2, 2, 2), axis=(0,))
_test((2, 2, 2), axis=(0,), keepdims=True)
_test((2, 2, 2), axis=(1,))
_test((2, 2, 2), axis=(1,), keepdims=True)
_test((2, 2, 2), axis=(2,))
_test((2, 2, 2), axis=(2,), keepdims=True)
_test((2, 2, 2, 3), axis=(0,))
_test((2, 2, 2, 3), axis=(0,), keepdims=True)
_test((2, 2, 2, 3), axis=(1,))
_test((2, 2, 2, 3), axis=(1,), keepdims=True)
_test((2, 2, 2, 3), axis=(2,))
_test((2, 2, 2, 3), axis=(2,), keepdims=True)
_test((2, 2, 2, 3), axis=(3,))
_test((2, 2, 2, 3), axis=(3,), keepdims=True)
_test((2, 2, 2, 3), axis=(0, 1))
_test((2, 2, 2, 3), axis=(0, 1), keepdims=True)
_test((2, 2, 2, 3), axis=(0, 2))
_test((2, 2, 2, 3), axis=(0, 2), keepdims=True)
_test((5, 2, 4, 3), axis=(3,))
_test((5, 2, 4, 3), axis=(3,), keepdims=True)
_test((5, 2, 4, 3), axis=(0, 1))
_test((5, 2, 4, 3), axis=(0, 1), keepdims=True)
_test((5, 2, 4, 3), axis=(0, 2))
_test((5, 2, 4, 3), axis=(0, 2), keepdims=True)
def test_min():
def _test(in_shape, axis=None, keepdims=True):
hcl.init()
new_shape = []
if axis is None:
for i in range(len(in_shape)):
new_shape.append(1)
else:
if isinstance(axis, int):
if axis < 0:
axis = len(in_shape) + axis
axis = [axis]
for i in range(len(in_shape)):
if i in axis:
new_shape.append(1)
else:
new_shape.append(in_shape[i])
data = hcl.placeholder(in_shape)
def math_func(data, axis=axis, keepdims=keepdims):
return hlib.op.math.min(data, axis, keepdims)
s = hcl.create_schedule(data, math_func)
f = hcl.build(s)
_in = np.random.randint(10, size=in_shape)
if keepdims:
out = hcl.asarray(np.zeros(new_shape))
else:
out = hcl.asarray(np.squeeze(np.zeros(new_shape)))
real_out = np.amin(_in, tuple(axis), keepdims=keepdims)
f(hcl.asarray(_in), out)
tst.assert_almost_equal(out.asnumpy(), real_out)
_test((3, 3), axis=(0,))
_test((3, 3), axis=(0,), keepdims=True)
_test((2, 2, 2), axis=(0,))
_test((2, 2, 2), axis=(0,), keepdims=True)
_test((2, 2, 2), axis=(1,))
_test((2, 2, 2), axis=(1,), keepdims=True)
_test((2, 2, 2), axis=(2,))
_test((2, 2, 2), axis=(2,), keepdims=True)
_test((2, 2, 2, 3), axis=(0,))
_test((2, 2, 2, 3), axis=(0,), keepdims=True)
_test((2, 2, 2, 3), axis=(1,))
_test((2, 2, 2, 3), axis=(1,), keepdims=True)
_test((2, 2, 2, 3), axis=(2,))
_test((2, 2, 2, 3), axis=(2,), keepdims=True)
_test((2, 2, 2, 3), axis=(3,))
_test((2, 2, 2, 3), axis=(3,), keepdims=True)
_test((2, 2, 2, 3), axis=(0, 1))
_test((2, 2, 2, 3), axis=(0, 1), keepdims=True)
_test((2, 2, 2, 3), axis=(0, 2))
_test((2, 2, 2, 3), axis=(0, 2), keepdims=True)
_test((5, 2, 4, 3), axis=(3,))
_test((5, 2, 4, 3), axis=(3,), keepdims=True)
_test((5, 2, 4, 3), axis=(0, 1))
_test((5, 2, 4, 3), axis=(0, 1), keepdims=True)
_test((5, 2, 4, 3), axis=(0, 2))
_test((5, 2, 4, 3), axis=(0, 2), keepdims=True)
|
viewer_examples/1_collection_viewer.py | ritamonteiroo/scikit | 453 | 12719814 | <gh_stars>100-1000
from skimage import data
from skimage.viewer import CollectionViewer
from skimage.transform import pyramid_gaussian
img = data.lena()
img_collection = tuple(pyramid_gaussian(img))
view = CollectionViewer(img_collection)
view.show()
|
app/tests/plugins/another_test_plugin/__init__.py | golem4300/quattran | 183 | 12719839 | from app.tests import test_kernel
@test_kernel.container.register('another_test_plugin', tags=['plugin'])
class AnotherTestPlugin(object):
pass
|
apps/molecular_generation/SD_VAE/train_zinc.py | agave233/PaddleHelix | 454 | 12719886 | <filename>apps/molecular_generation/SD_VAE/train_zinc.py
#!/usr/bin/python3
#-*-coding:utf-8-*-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
train zinc
"""
from __future__ import print_function
from past.builtins import range
import os
import os.path
import sys
import numpy as np
import math
import random
from paddle.io import Dataset
import paddle
import paddle.nn.functional as F
import paddle.nn as nn
sys.path.append('./mol_common')
from cmd_args import cmd_args
from pahelix.model_zoo.sd_vae_model import MolVAE
import h5py
import json
def load_zinc_SD_data():
"""
tbd
"""
h5f = h5py.File(cmd_args.smiles_file, 'r')
all_true_binary = h5f['x'][:]
all_rule_masks = h5f['masks'][:]
h5f.close()
return all_true_binary, all_rule_masks
class CreateDataset(Dataset):
"""
tbd
"""
def __init__(self, true_binary, rule_masks):
self.data_binary = true_binary
self.data_masks = rule_masks
def __len__(self):
"""
Computes the number of objects in the dataset
"""
return self.data_binary.shape[0]
def __getitem__(self, index):
true_binary = self.data_binary[index, :, :].astype(np.float32)
rule_masks = self.data_masks[index, :, :].astype(np.float32)
x_inputs = np.transpose(true_binary, [1, 0])
true_binary = paddle.to_tensor(true_binary)
rule_masks = paddle.to_tensor(rule_masks)
x_inputs = paddle.to_tensor(x_inputs)
return x_inputs, true_binary, rule_masks
def _train_epoch(model, data_loader, epoch, kl_weight, optimizer=None):
"""
tbd
"""
if optimizer is None:
model.eval()
else:
model.train()
kl_loss_values = []
perplexity_loss_values = []
loss_values = []
for batch_id, data in enumerate(data_loader()):
# read batch data
x_inputs_batch, true_binary_batch, rule_masks_batch = data
# transpose the axes of data
true_binary_batch = paddle.transpose(true_binary_batch, (1, 0, 2))
rule_masks_batch = paddle.transpose(rule_masks_batch, (1, 0, 2))
# forward
loss_list = model(x_inputs_batch, true_binary_batch, rule_masks_batch,)
if len(loss_list) == 1: # only perplexity
perplexity = loss_list[0]
kl_loss = paddle.to_tensor(0)
else:
perplexity= loss_list[0]
kl_loss = loss_list[1]
loss = kl_weight * kl_loss + perplexity
if optimizer is not None:
# backward
loss.backward()
# optimize
optimizer.step()
# clear gradients
optimizer.clear_grad()
# Log
kl_loss_values.append(kl_loss.numpy()[0])
perplexity_loss_values.append(perplexity.numpy()[0])
loss_values.append(loss.numpy()[0])
lr = (optimizer.get_lr()
if optimizer is not None
else 0)
if batch_id % 200 == 0 and batch_id > 0:
print('batch:%s, kl_loss:%f, perplexity_loss:%f' % (batch_id, float(np.mean(kl_loss_values)), \
float(np.mean(perplexity_loss_values))), flush=True)
postfix = {
'epoch': epoch,
'kl_weight': kl_weight,
'lr': lr,
'kl_loss': np.mean(kl_loss_values),
'perplexity_loss': np.mean(perplexity_loss_values),
'loss': np.mean(loss_values),
'mode': 'Eval' if optimizer is None else 'Train'}
return postfix
def _train(model, train_dataloader):
"""
tbd
"""
# train the model
n_epoch = cmd_args.num_epochs
clip_grad = nn.ClipGradByNorm(clip_norm=cmd_args.clip_grad)
optimizer = paddle.optimizer.Adam(parameters=model.parameters(),
learning_rate=cmd_args.learning_rate,
grad_clip=clip_grad)
# start to train
for epoch in range(n_epoch):
#kl_weight = kl_annealer(epoch)
kl_weight = cmd_args.kl_coeff
print('##########################################################################################', flush=True)
print('EPOCH:%d' % (epoch), flush=True)
postfix = _train_epoch(model, train_dataloader, epoch, kl_weight, optimizer=optimizer)
# save state_dict
paddle.save(model.state_dict(), cmd_args.save_dir + 'train_model_epoch' + str(epoch))
paddle.save(optimizer.state_dict(), cmd_args.save_dir + 'train_optimizer_epoch' + str(epoch))
print('epoch:%d loss:%f kl_loss:%f perplexity_loss:%f' % \
(epoch, postfix['loss'], postfix['kl_loss'], postfix['perplexity_loss']), flush=True)
print('##########################################################################################', flush=True)
# lr_annealer.step()
return model
def main():
"""
    Entry point: load the ZINC data, build the data loader and MolVAE model, then train.
"""
# get model config
model_config = json.load(open(cmd_args.model_config, 'r'))
# set gpu
paddle.set_device(cmd_args.mode)
all_true_binary, all_rule_masks = load_zinc_SD_data()
# load the data loader
train_dataset = CreateDataset(all_true_binary, all_rule_masks)
train_dataloader = paddle.io.DataLoader(train_dataset, batch_size=cmd_args.batch_size, shuffle=True)
# load model
model = MolVAE(model_config)
# train the model
_train(model, train_dataloader)
if __name__ == '__main__':
main()
|
dress/scripts/readability/getFKGL.py | XingxingZhang/dress | 157 | 12719893 |
#!/usr/bin/python
from readability import Readability
import sys
if __name__ == '__main__':
infile = sys.argv[1]
text = open(infile).read()
rd = Readability(text)
print(rd.FleschKincaidGradeLevel())
|
modules/dbnd/src/targets/marshalling/numpy.py | ipattarapong/dbnd | 224 | 12719921 | from __future__ import absolute_import
import numpy as np
from targets.marshalling.marshaller import Marshaller
from targets.target_config import FileFormat
class NumpyArrayMarshaller(Marshaller):
type = np.ndarray
file_format = FileFormat.numpy
def target_to_value(self, target, **kwargs):
"""
:param obj: object to pickle
:return:
"""
with target.open("rb") as fp:
return np.load(fp, **kwargs)
def value_to_target(self, value, target, **kwargs):
"""
:param obj: object to pickle
:return:
"""
target.mkdir_parent()
with target.open("wb") as fp:
np.save(fp, value, **kwargs)
class NumpyArrayPickleMarshaler(NumpyArrayMarshaller):
file_format = FileFormat.pickle
def target_to_value(self, target, **kwargs):
return np.load(target.path, allow_pickle=True, **kwargs)
def value_to_target(self, value, target, **kwargs):
np.save(target.path, value, allow_pickle=True, **kwargs)
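# Illustrative round-trip sketch (not part of the original module). `target`
# below stands for any dbnd file target exposing open()/mkdir_parent(), as the
# methods above assume; the variable itself is a placeholder:
#
#   marshaller = NumpyArrayMarshaller()
#   marshaller.value_to_target(np.arange(10), target)  # serialises to .npy bytes
#   arr = marshaller.target_to_value(target)           # loads the array back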
|
esphome/components/monochromatic/light.py | OttoWinter/esphomeyaml | 249 | 12719962 | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import light, output
from esphome.const import CONF_OUTPUT_ID, CONF_OUTPUT
monochromatic_ns = cg.esphome_ns.namespace("monochromatic")
MonochromaticLightOutput = monochromatic_ns.class_(
"MonochromaticLightOutput", light.LightOutput
)
CONFIG_SCHEMA = light.BRIGHTNESS_ONLY_LIGHT_SCHEMA.extend(
{
cv.GenerateID(CONF_OUTPUT_ID): cv.declare_id(MonochromaticLightOutput),
cv.Required(CONF_OUTPUT): cv.use_id(output.FloatOutput),
}
)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_OUTPUT_ID])
await light.register_light(var, config)
out = await cg.get_variable(config[CONF_OUTPUT])
cg.add(var.set_output(out))
|
aztk_cli/spark/endpoints/job/submit.py | Geims83/aztk | 161 | 12719977 | import argparse
import typing
import aztk.spark
from aztk_cli import config
from aztk_cli.config import JobConfig
def setup_parser(parser: argparse.ArgumentParser):
parser.add_argument(
"--id",
dest="job_id",
required=False,
help="The unique id of your Spark Job. Defaults to the id value in .aztk/job.yaml",
)
parser.add_argument(
"--configuration",
"-c",
dest="job_conf",
required=False,
help="Path to the job.yaml configuration file. Defaults to .aztk/job.yaml",
)
def execute(args: typing.NamedTuple):
spark_client = aztk.spark.Client(config.load_aztk_secrets())
job_conf = JobConfig()
job_conf.merge(args.job_id, args.job_conf)
# by default, load spark configuration files in .aztk/
spark_configuration = config.load_aztk_spark_config()
# overwrite with values in job_conf if they exist
if job_conf.spark_defaults_conf:
spark_configuration.spark_defaults_conf = job_conf.spark_defaults_conf
if job_conf.spark_env_sh:
spark_configuration.spark_env_sh = job_conf.spark_env_sh
if job_conf.core_site_xml:
spark_configuration.core_site_xml = job_conf.core_site_xml
job_configuration = aztk.spark.models.JobConfiguration(
id=job_conf.id,
applications=job_conf.applications,
spark_configuration=spark_configuration,
vm_size=job_conf.vm_size,
toolkit=job_conf.toolkit,
max_dedicated_nodes=job_conf.max_dedicated_nodes,
max_low_pri_nodes=job_conf.max_low_pri_nodes,
subnet_id=job_conf.subnet_id,
worker_on_master=job_conf.worker_on_master,
scheduling_target=job_conf.scheduling_target,
)
# TODO: utils.print_job_conf(job_configuration)
spark_client.job.submit(job_configuration)
|
migrations/20211109_01_xKblp-change-comments-on-black-jack-record.py | zw-g/Funny-Nation | 126 | 12719993 |
"""
change comments on black jack record
"""
from yoyo import step
__depends__ = {'20211103_04_Y0xbO-remove-uuid-s-primary-key-on-black-jack-record'}
steps = [
step("ALTER TABLE `blackJackGameRecord` CHANGE `status` `status` INT NOT NULL COMMENT '0 represent in progress; 1 represent lose; 2 represent win; 3 represent draw; 4 represent closed; ';")
]
|
geosnap/tests/test_incs.py | weikang9009/geosnap | 148 | 12720086 | from geosnap import analyze
linc = analyze.incs.linc
def test_linc():
labels_0 = [1, 1, 1, 1, 2, 2, 3, 3, 3, 4]
labels_1 = [1, 1, 1, 1, 1, 2, 3, 3, 3, 4]
res = linc([labels_0, labels_1])
assert res[4] == 1.0
assert res[7] == 0.0 == res[-1]
labels_2 = [1, 1, 1, 1, 1, 2, 3, 3, 3, 4]
res = linc([labels_1, labels_2])
assert res[0] == 0.0
res = linc([labels_0, labels_1, labels_2])
assert res[0] == 0.25
|
Chapter06/extract_contents.py | add54/ADMIN_SYS_PYTHON | 116 | 12720117 |
import tarfile
import os
os.mkdir('work')
with tarfile.open('work.tar', 'r') as t:
t.extractall('work')
print(os.listdir('work'))
|
lib/yahoo/oauth.py | goztrk/django-htk | 206 | 12720144 | # Python Standard Library Imports
import time
from rauth import OAuth1Service
from rauth.service import process_token_request
from rauth.utils import parse_utf8_qsl
YAHOO_OAUTH_REQUEST_TOKEN_URL = 'https://api.login.yahoo.com/oauth/v2/get_request_token'
YAHOO_OAUTH_ACCESS_TOKEN_URL = 'https://api.login.yahoo.com/oauth/v2/get_token'
YAHOO_OAUTH_AUTHORIZE_URL = 'https://api.login.yahoo.com/oauth/v2/request_auth'
def refresh_token_if_needed(func):
"""Decorator to make sure we refresh the token if needed before every query
"""
def keys_from_response(text):
return_array = []
response_array = text.split('&')
for e in response_array:
            pair = e.split('=', 2)
return_array.append(pair[0])
return return_array
def refresh(self, *args, **kwargs):
"""
`self` is an instance of YahooOAuthClient
"""
# Let's refresh 5 minutes before the expiration time
expires = self.user_social_auth.extra_data['expires']
expires_time = int(expires) - 300 if expires else 0
now = int(time.time())
# print('comparing n: {0} vs expire: {1}'.format(now, expires))
        if expires is None or expires_time < now:
#print('------ Refreshing Token ------')
r = self.oauth.get_raw_access_token(
request_token=self.access_token['oauth_token'],
request_token_secret=self.access_token['oauth_token_secret'],
params={'oauth_session_handle':self.access_token['oauth_session_handle']},
)
keys = keys_from_response(r.text)
access_token = process_token_request(r, parse_utf8_qsl, *keys)
for i,k in enumerate(keys):
self.access_token[k] = access_token[i]
# Save back to UserSocialAuth Model
self.user_social_auth.extra_data['access_token'] = self.access_token
current_time = int(time.time())
self.user_social_auth.extra_data['expires'] = current_time + int(self.access_token['oauth_expires_in'])
# print('current time: {0}, expiring oauth at {1}'.format(current_time, self.user_social_auth.extra_data['expires']))
self.user_social_auth.save()
token = (self.access_token['oauth_token'], self.access_token['oauth_token_secret'])
self.session = self.oauth.get_session(token)
return func(self, *args, **kwargs)
return refresh
class YahooOAuthClient(object):
def __init__(self, app_key, app_secret, user_social_auth):
"""Constructor for YahooOAuthClient
`app_key` - Yahoo App Key
`app_secret` - Yahoo App Secret
`user_social_auth` - UserSocialAuth model to store refreshed token
"""
# UserSocialAuth needed to access the access token
self.last_error = None
self.user_social_auth = user_social_auth
self.access_token = user_social_auth.extra_data.get('access_token')
self.oauth = OAuth1Service(
name='Yahoo',
consumer_key=app_key,
consumer_secret=app_secret,
request_token_url=YAHOO_OAUTH_REQUEST_TOKEN_URL,
access_token_url=YAHOO_OAUTH_ACCESS_TOKEN_URL,
authorize_url=YAHOO_OAUTH_AUTHORIZE_URL,
)
self.session = self.oauth.get_session((self.access_token['oauth_token'], self.access_token['oauth_token_secret']))
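class ExampleYahooClient(YahooOAuthClient):
    """Illustrative subclass (added as a usage sketch, not part of the original
    module): shows how `refresh_token_if_needed` is meant to wrap API calls so
    the OAuth1 session is refreshed before each request. The endpoint URL is a
    placeholder, not a documented Yahoo API route.
    """
    @refresh_token_if_needed
    def get_profile(self):
        response = self.session.get('https://example.invalid/v1/profile?format=json')
        return response.json()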
|
social_core/tests/backends/test_stackoverflow.py | sg4e/social-core | 745 | 12720154 |
import json
from urllib.parse import urlencode
from .oauth import OAuth2Test
class StackoverflowOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.stackoverflow.StackoverflowOAuth2'
user_data_url = 'https://api.stackexchange.com/2.1/me'
expected_username = 'foobar'
access_token_body = urlencode({
'access_token': 'foobar',
'token_type': 'bearer'
})
user_data_body = json.dumps({
'items': [{
'user_id': 101010,
'user_type': 'registered',
'creation_date': 1278525551,
'display_name': 'foobar',
'profile_image': 'http: //www.gravatar.com/avatar/'
'5280f15cedf540b544eecc30fcf3027c?'
'd=identicon&r=PG',
'reputation': 547,
'reputation_change_day': 0,
'reputation_change_week': 0,
'reputation_change_month': 0,
'reputation_change_quarter': 65,
'reputation_change_year': 65,
'age': 22,
'last_access_date': 1363544705,
'last_modified_date': 1354035327,
'is_employee': False,
'link': 'http: //stackoverflow.com/users/101010/foobar',
'location': 'Fooland',
'account_id': 101010,
'badge_counts': {
'gold': 0,
'silver': 3,
'bronze': 6
}
}],
'quota_remaining': 9997,
'quota_max': 10000,
'has_more': False
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
tests/runner.py | QIANDY2021/blender-retarget | 112 | 12720159 | import sys
import unittest
import coverage
cov = coverage.Coverage(
branch=True,
source=['animation_retarget'],
)
cov.start()
suite = unittest.defaultTestLoader.discover('.')
if not unittest.TextTestRunner().run(suite).wasSuccessful():
exit(1)
cov.stop()
cov.xml_report()
if '--save-html-report' in sys.argv:
cov.html_report()
|
contrib/pyln-client/pyln/client/gossmap.py | Bladez1753/lightning | 2,288 | 12720190 |
#! /usr/bin/python3
from pyln.spec.bolt7 import (channel_announcement, channel_update,
node_announcement)
from pyln.proto import ShortChannelId, PublicKey
from typing import Any, Dict, List, Optional, Union, cast
import io
import struct
# These duplicate constants in lightning/common/gossip_store.h
GOSSIP_STORE_VERSION = 9
GOSSIP_STORE_LEN_DELETED_BIT = 0x80000000
GOSSIP_STORE_LEN_PUSH_BIT = 0x40000000
GOSSIP_STORE_LEN_MASK = (~(GOSSIP_STORE_LEN_PUSH_BIT
| GOSSIP_STORE_LEN_DELETED_BIT))
# These duplicate constants in lightning/gossipd/gossip_store_wiregen.h
WIRE_GOSSIP_STORE_PRIVATE_CHANNEL = 4104
WIRE_GOSSIP_STORE_PRIVATE_UPDATE = 4102
WIRE_GOSSIP_STORE_DELETE_CHAN = 4103
WIRE_GOSSIP_STORE_ENDED = 4105
WIRE_GOSSIP_STORE_CHANNEL_AMOUNT = 4101
class GossipStoreHeader(object):
def __init__(self, buf: bytes):
length, self.crc, self.timestamp = struct.unpack('>III', buf)
self.deleted = (length & GOSSIP_STORE_LEN_DELETED_BIT) != 0
self.length = (length & GOSSIP_STORE_LEN_MASK)
class GossmapHalfchannel(object):
"""One direction of a GossmapChannel."""
def __init__(self, channel: 'GossmapChannel', direction: int,
timestamp: int, cltv_expiry_delta: int,
htlc_minimum_msat: int, htlc_maximum_msat: int,
fee_base_msat: int, fee_proportional_millionths: int):
self.channel = channel
self.direction = direction
self.source = channel.node1 if direction == 0 else channel.node2
self.destination = channel.node2 if direction == 0 else channel.node1
self.timestamp: int = timestamp
self.cltv_expiry_delta: int = cltv_expiry_delta
self.htlc_minimum_msat: int = htlc_minimum_msat
self.htlc_maximum_msat: Optional[int] = htlc_maximum_msat
self.fee_base_msat: int = fee_base_msat
self.fee_proportional_millionths: int = fee_proportional_millionths
def __repr__(self):
return "GossmapHalfchannel[{}x{}]".format(str(self.channel.scid), self.direction)
class GossmapNodeId(object):
def __init__(self, buf: Union[bytes, str]):
if isinstance(buf, str):
buf = bytes.fromhex(buf)
if len(buf) != 33 or (buf[0] != 2 and buf[0] != 3):
raise ValueError("{} is not a valid node_id".format(buf.hex()))
self.nodeid = buf
def to_pubkey(self) -> PublicKey:
return PublicKey(self.nodeid)
def __eq__(self, other):
if not isinstance(other, GossmapNodeId):
return False
return self.nodeid.__eq__(other.nodeid)
def __lt__(self, other):
if not isinstance(other, GossmapNodeId):
raise ValueError(f"Cannot compare GossmapNodeId with {type(other)}")
return self.nodeid.__lt__(other.nodeid) # yes, that works
def __hash__(self):
return self.nodeid.__hash__()
def __repr__(self):
return "GossmapNodeId[{}]".format(self.nodeid.hex())
@classmethod
def from_str(cls, s: str):
if s.startswith('0x'):
s = s[2:]
if len(s) != 66:
raise ValueError(f"{s} is not a valid hexstring of a node_id")
return cls(bytes.fromhex(s))
class GossmapChannel(object):
"""A channel: fields of channel_announcement are in .fields, optional updates are in .updates_fields, which can be None if there has been no channel update."""
def __init__(self,
fields: Dict[str, Any],
announce_offset: int,
scid,
node1: 'GossmapNode',
node2: 'GossmapNode',
is_private: bool):
self.fields = fields
self.announce_offset = announce_offset
self.is_private = is_private
self.scid = scid
self.node1 = node1
self.node2 = node2
self.updates_fields: List[Optional[Dict[str, Any]]] = [None, None]
self.updates_offset: List[Optional[int]] = [None, None]
self.satoshis = None
self.half_channels: List[Optional[GossmapHalfchannel]] = [None, None]
def _update_channel(self,
direction: int,
fields: Dict[str, Any],
off: int):
self.updates_fields[direction] = fields
self.updates_offset[direction] = off
half = GossmapHalfchannel(self, direction,
fields['timestamp'],
fields['cltv_expiry_delta'],
fields['htlc_minimum_msat'],
fields.get('htlc_maximum_msat', None),
fields['fee_base_msat'],
fields['fee_proportional_millionths'])
self.half_channels[direction] = half
def get_direction(self, direction: int):
""" returns the GossmapHalfchannel if known by channel_update """
if not 0 <= direction <= 1:
raise ValueError("direction can only be 0 or 1")
return self.half_channels[direction]
def __repr__(self):
return "GossmapChannel[{}]".format(str(self.scid))
class GossmapNode(object):
"""A node: fields of node_announcement are in .announce_fields, which can be None of there has been no node announcement.
.channels is a list of the GossmapChannels attached to this node.
"""
def __init__(self, node_id: Union[GossmapNodeId, bytes, str]):
if isinstance(node_id, bytes) or isinstance(node_id, str):
node_id = GossmapNodeId(node_id)
self.announce_fields: Optional[Dict[str, Any]] = None
self.announce_offset: Optional[int] = None
self.channels: List[GossmapChannel] = []
self.node_id = node_id
def __repr__(self):
return "GossmapNode[{}]".format(self.node_id.nodeid.hex())
def __eq__(self, other):
if not isinstance(other, GossmapNode):
return False
return self.node_id.__eq__(other.node_id)
def __lt__(self, other):
if not isinstance(other, GossmapNode):
raise ValueError(f"Cannot compare GossmapNode with {type(other)}")
return self.node_id.__lt__(other.node_id)
class Gossmap(object):
"""Class to represent the gossip map of the network"""
def __init__(self, store_filename: str = "gossip_store"):
self.store_filename = store_filename
self.store_file = open(store_filename, "rb")
self.store_buf = bytes()
self.nodes: Dict[GossmapNodeId, GossmapNode] = {}
self.channels: Dict[ShortChannelId, GossmapChannel] = {}
self._last_scid: Optional[str] = None
version = self.store_file.read(1)
if version[0] != GOSSIP_STORE_VERSION:
raise ValueError("Invalid gossip store version {}".format(int(version)))
self.bytes_read = 1
self.refresh()
def _new_channel(self,
fields: Dict[str, Any],
announce_offset: int,
scid: ShortChannelId,
node1: GossmapNode,
node2: GossmapNode,
is_private: bool):
c = GossmapChannel(fields, announce_offset,
scid, node1, node2,
is_private)
self._last_scid = scid
self.channels[scid] = c
node1.channels.append(c)
node2.channels.append(c)
def _del_channel(self, scid: ShortChannelId):
c = self.channels[scid]
del self.channels[scid]
c.node1.channels.remove(c)
c.node2.channels.remove(c)
# Beware self-channels n1-n1!
if len(c.node1.channels) == 0 and c.node1 != c.node2:
del self.nodes[c.node1.node_id]
if len(c.node2.channels) == 0:
del self.nodes[c.node2.node_id]
def _add_channel(self, rec: bytes, off: int, is_private: bool):
fields = channel_announcement.read(io.BytesIO(rec[2:]), {})
        # Add nodes on the fly
node1_id = GossmapNodeId(fields['node_id_1'])
node2_id = GossmapNodeId(fields['node_id_2'])
if node1_id not in self.nodes:
self.nodes[node1_id] = GossmapNode(node1_id)
if node2_id not in self.nodes:
self.nodes[node2_id] = GossmapNode(node2_id)
self._new_channel(fields, off,
ShortChannelId.from_int(fields['short_channel_id']),
self.get_node(node1_id), self.get_node(node2_id),
is_private)
def _set_channel_amount(self, rec: bytes):
""" Sets channel capacity of last added channel """
sats, = struct.unpack(">Q", rec[2:])
self.channels[self._last_scid].satoshis = sats
def get_channel(self, short_channel_id: ShortChannelId):
""" Resolves a channel by its short channel id """
if isinstance(short_channel_id, str):
short_channel_id = ShortChannelId.from_str(short_channel_id)
return self.channels.get(short_channel_id)
def get_node(self, node_id: Union[GossmapNodeId, str]):
""" Resolves a node by its public key node_id """
if isinstance(node_id, str):
node_id = GossmapNodeId.from_str(node_id)
return self.nodes.get(cast(GossmapNodeId, node_id))
def _update_channel(self, rec: bytes, off: int):
fields = channel_update.read(io.BytesIO(rec[2:]), {})
direction = fields['channel_flags'] & 1
c = self.channels[ShortChannelId.from_int(fields['short_channel_id'])]
c._update_channel(direction, fields, off)
def _add_node_announcement(self, rec: bytes, off: int):
fields = node_announcement.read(io.BytesIO(rec[2:]), {})
node_id = GossmapNodeId(fields['node_id'])
self.nodes[node_id].announce_fields = fields
self.nodes[node_id].announce_offset = off
def reopen_store(self):
"""FIXME: Implement!"""
assert False
def _remove_channel_by_deletemsg(self, rec: bytes):
scidint, = struct.unpack(">Q", rec[2:])
scid = ShortChannelId.from_int(scidint)
# It might have already been deleted when we skipped it.
if scid in self.channels:
self._del_channel(scid)
def _pull_bytes(self, length: int) -> bool:
"""Pull bytes from file into our internal buffer"""
if len(self.store_buf) < length:
self.store_buf += self.store_file.read(length
- len(self.store_buf))
return len(self.store_buf) >= length
def _read_record(self) -> Optional[bytes]:
"""If a whole record is not in the file, returns None.
If deleted, returns empty."""
if not self._pull_bytes(12):
return None
hdr = GossipStoreHeader(self.store_buf[:12])
if not self._pull_bytes(12 + hdr.length):
return None
self.bytes_read += len(self.store_buf)
ret = self.store_buf[12:]
self.store_buf = bytes()
if hdr.deleted:
ret = bytes()
return ret
def refresh(self):
"""Catch up with any changes to the gossip store"""
while True:
off = self.bytes_read
rec = self._read_record()
# EOF?
if rec is None:
break
# Deleted?
if len(rec) == 0:
continue
rectype, = struct.unpack(">H", rec[:2])
if rectype == channel_announcement.number:
self._add_channel(rec, off, False)
elif rectype == WIRE_GOSSIP_STORE_PRIVATE_CHANNEL:
self._add_channel(rec[2 + 8 + 2:], off + 2 + 8 + 2, True)
elif rectype == WIRE_GOSSIP_STORE_CHANNEL_AMOUNT:
self._set_channel_amount(rec)
elif rectype == channel_update.number:
self._update_channel(rec, off)
elif rectype == WIRE_GOSSIP_STORE_PRIVATE_UPDATE:
self._update_channel(rec[2 + 2:], off + 2 + 2)
elif rectype == WIRE_GOSSIP_STORE_DELETE_CHAN:
self._remove_channel_by_deletemsg(rec)
elif rectype == node_announcement.number:
self._add_node_announcement(rec, off)
elif rectype == WIRE_GOSSIP_STORE_ENDED:
self.reopen_store()
else:
continue
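if __name__ == "__main__":
    # Usage sketch (added for illustration; not part of the original module).
    # Assumes a gossip_store file written by a c-lightning node is available,
    # either at the path given on the command line or in the current directory.
    import sys
    path = sys.argv[1] if len(sys.argv) > 1 else "gossip_store"
    gossmap = Gossmap(path)
    print("channels: {}, nodes: {}".format(len(gossmap.channels), len(gossmap.nodes)))
    gossmap.refresh()  # catch up with any gossip appended since the initial load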
|
tests/protocols/mrp/test_mrp_interface.py | Jacobs4/pyatv | 532 | 12720211 | """Unit tests for interface implementations in pyatv.protocols.mrp."""
import math
import pytest
from pyatv import exceptions
from pyatv.protocols.mrp import MrpAudio, messages, protobuf
DEVICE_UID = "F2204E63-BCAB-4941-80A0-06C46CB71391"
# This mock is _extremely_ basic, so needs to be adjusted heavily when adding
# new tests
class MrpProtocolMock:
def __init__(self):
self._listeners = {}
self.sent_messages = []
def add_listener(self, listener, message_type, data=None):
self._listeners[message_type] = listener
async def send(self, message):
self.sent_messages.append(message)
async def inject(self, message):
await self._listeners[message.type](message, None)
async def volume_controls_changed(self, device_uid, controls_available):
message = messages.create(
protobuf.VOLUME_CONTROL_CAPABILITIES_DID_CHANGE_MESSAGE
)
message.inner().outputDeviceUID = device_uid
message.inner().capabilities.volumeControlAvailable = controls_available
await self.inject(message)
@pytest.fixture(name="protocol")
def protocol_fixture(event_loop):
yield MrpProtocolMock()
# MrpAudio
@pytest.fixture(name="audio")
def audio_fixture(protocol):
yield MrpAudio(protocol)
async def test_audio_volume_control_availability(protocol, audio):
assert not audio.is_available
await protocol.volume_controls_changed(DEVICE_UID, True)
assert audio.is_available
await protocol.volume_controls_changed(DEVICE_UID, False)
assert not audio.is_available
@pytest.mark.parametrize(
"device_uid,controls_available,controls_expected",
[
(DEVICE_UID, True, True),
],
)
async def test_audio_volume_control_capabilities_changed(
protocol, audio, device_uid, controls_available, controls_expected
):
assert not audio.is_available
await protocol.volume_controls_changed(device_uid, controls_available)
assert audio.is_available == controls_expected
@pytest.mark.parametrize(
"device_uid,volume,expected_volume",
[
("foo", 0.2, 0.0), # deviceUID mismatch => no update
(DEVICE_UID, 0.2, 20.0), # deviceUID matches => update
],
)
async def test_audio_volume_did_change(
protocol, audio, device_uid, volume, expected_volume
):
await protocol.volume_controls_changed(DEVICE_UID, True)
assert math.isclose(audio.volume, 0.0)
message = messages.create(protobuf.VOLUME_DID_CHANGE_MESSAGE)
message.inner().outputDeviceUID = device_uid
message.inner().volume = volume
await protocol.inject(message)
assert math.isclose(audio.volume, expected_volume)
async def test_audio_set_volume(protocol, audio):
await protocol.volume_controls_changed(DEVICE_UID, True)
await audio.set_volume(0.0)
assert len(protocol.sent_messages) == 1
message = protocol.sent_messages.pop()
assert message.type == protobuf.SET_VOLUME_MESSAGE
assert message.inner().outputDeviceUID == DEVICE_UID
assert math.isclose(message.inner().volume, 0.0, rel_tol=1e-02)
async def test_audio_set_volume_no_output_device(audio):
with pytest.raises(exceptions.ProtocolError):
await audio.set_volume(10)
|
constants.py | AlexRogalskiy/smart-social-distancing | 113 | 12720214 |
PROCESSOR_VERSION = "0.7.0"
# Entities
AREAS = "areas"
CAMERAS = "cameras"
ALL_AREAS = "ALL"
# Metrics
OCCUPANCY = "occupancy"
SOCIAL_DISTANCING = "social-distancing"
FACEMASK_USAGE = "facemask-usage"
IN_OUT = "in-out"
DWELL_TIME = "dwell-time"
|
lib/django-1.4/django/contrib/formtools/tests/wizard/sessionstorage.py | MiCHiLU/google_appengine_sdk | 790 | 12720217 |
from django.test import TestCase
from django.contrib.formtools.tests.wizard.storage import TestStorage
from django.contrib.formtools.wizard.storage.session import SessionStorage
class TestSessionStorage(TestStorage, TestCase):
def get_storage(self):
return SessionStorage
|
mayan/apps/checkouts/events.py | eshbeata/open-paperless | 2,743 | 12720223 |
from __future__ import absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy as _
from events.classes import Event
event_document_auto_check_in = Event(
name='checkouts_document_auto_check_in',
label=_('Document automatically checked in')
)
event_document_check_in = Event(
name='checkouts_document_check_in', label=_('Document checked in')
)
event_document_check_out = Event(
name='checkouts_document_check_out', label=_('Document checked out')
)
event_document_forceful_check_in = Event(
name='checkouts_document_forceful_check_in',
label=_('Document forcefully checked in')
)
|
tools/test_eval.py | yoxu515/CFBI | 312 | 12720227 | import sys
sys.path.append('.')
sys.path.append('..')
from networks.engine.eval_manager import Evaluator
import importlib
def main():
import argparse
parser = argparse.ArgumentParser(description="Test CFBI")
parser.add_argument('--gpu_id', type=int, default=7)
parser.add_argument('--config', type=str, default='configs.resnet101_cfbi')
parser.add_argument('--ckpt_path', type=str, default='test')
args = parser.parse_args()
config = importlib.import_module(args.config)
cfg = config.cfg
cfg.TEST_GPU_ID = args.gpu_id
cfg.TEST_DATASET = 'test'
cfg.TEST_CKPT_PATH = args.ckpt_path
cfg.TEST_MULTISCALE = [0.5, 1]
cfg.TEST_FLIP = True
evaluator = Evaluator(cfg=cfg)
evaluator.evaluating()
if __name__ == '__main__':
main()
|
python/lib/qfloatslider.py | Azrod29150/Systematic-LEDs | 101 | 12720237 |
from PyQt5 import QtCore, QtGui, QtWidgets
__all__ = ['QFloatSlider']
class QFloatSlider(QtWidgets.QSlider):
"""
Subclass of QtWidgets.QSlider
Horizontal slider giving floating point values.
Usage: QFloatSlider(min, max, step, default)
where min = minimum value of slider
max = maximum value of slider
step = interval between values. Must be a factor of (max-min)
default = default (starting) value of slider
"""
def __init__(self, min_value, max_value, step, default):
super().__init__(QtCore.Qt.Horizontal)
self.precision = 0.0001
self.min_value = min_value
self.max_value = max_value
self.step = step
self.default = default
self.quotient, self.remainder = self._float_divmod(\
self.max_value-self.min_value, self.step)
if self.remainder:
raise ValueError("{} does not fit evenly between {} and {}"\
.format(step, min_value, max_value))
super().setMinimum(0)
super().setMaximum(self.quotient)
super().setSingleStep(1)
super().setValue(self._float_to_int(self.default))
        super().valueChanged.connect(self._value_handler)
        self._value_handler()  # initialise slider_value to match the default
def setValue(self, value):
super().setValue(self._float_to_int(value))
    # Floating-point-safe helper; plain modulo is unreliable for floats.
    def _float_divmod(self, a, b):
        """
        Divmod for floats, tolerant of floating-point rounding error
        (e.g. 0.3 % 0.1 does not evaluate to exactly 0).
        Returns the quotient and the remainder.
"""
a = abs(a)
b = abs(b)
n = 1
while True:
c = a - b
c = abs(c)
if c < self.precision:
return (n, 0)
elif c > a:
return (n-1, a)
a = c
n += 1
def _float_to_int(self, a):
return int(round(a/self.step))
def _int_to_float(self, a):
return self.min_value+a*self.step
def _value_handler(self):
self.slider_value = self._int_to_float(super().value())
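if __name__ == "__main__":
    # Usage sketch (added for illustration; not part of the original module):
    # a slider running from 0.0 to 1.0 in steps of 0.05, starting at 0.5.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    slider = QFloatSlider(0.0, 1.0, 0.05, 0.5)
    slider.valueChanged.connect(lambda _: print(slider.slider_value))
    slider.show()
    sys.exit(app.exec_())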
|
TicTacToe-GUI/TicTacToe.py | avinashkranjan/PraticalPythonProjects | 930 | 12720245 | # -*- coding: utf-8 -*-
"""
Tic Tac Toe using pygame, numpy and sys, with a graphical user interface.
"""
import pygame
import sys
from pygame.locals import *
import numpy as np
# ------
# constants
# -------
width = 800
height = 800
# rows and columns
board_rows = 3
board_columns = 3
cross_width = 25
square_size = width//board_columns
# colors in RGB format
line_Width = 15
red = (255, 0, 0)
bg_color = (28, 170, 156)
line_color = (23, 145, 135)
circle_color = (239, 231, 200)
cross_color = (66, 66, 66)
space = square_size//4
# circle
circle_radius = square_size//3
circle_width = 14
pygame.init()
screen = pygame.display.set_mode((height, width))
pygame.display.set_caption('Tic Tac Toe!')
screen.fill(bg_color)
# color to display restart
white = (255, 255, 255)
green = (0, 255, 0)
blue = (0, 0, 128)
font = pygame.font.Font('freesansbold.ttf', 25)
# create a text surface object,
# on which text is drawn on it.
text = font.render('Press R to restart', True, green, blue)
Won = font.render(" Won", True, blue, green)
leave = font.render("Press X to Exit", True, white, red)
# create a rectangular object for the
# text surface object
leaveRect = text.get_rect()
textRect = text.get_rect()
winRect = Won.get_rect()
winRect.center = (100, 30)
textRect.center = (width-400, 30)
leaveRect.center = (width-120, 30)
board = np.zeros((board_rows, board_columns))
# print(board)
#pygame.draw.line( screen ,red ,(10,10),(300,300),10)
def draw_figures():
for row in range(board_rows):
for col in range(board_columns):
if board[row][col] == 1:
pygame.draw.circle(screen, circle_color, (int(col*square_size + square_size//2), int(
row*square_size + square_size//2)), circle_radius, circle_width)
elif board[row][col] == 2:
pygame.draw.line(screen, cross_color, (col*square_size + space, row*square_size + square_size -
space), (col*square_size+square_size - space, row*square_size + space), cross_width)
pygame.draw.line(screen, cross_color, (col*square_size + space, row*square_size + space),
(col*square_size + square_size - space, row*square_size + square_size - space), cross_width)
def draw_lines():
pygame.draw.line(screen, line_color, (0, square_size),
(width, square_size), line_Width)
# 2nd horizontal line
pygame.draw.line(screen, line_color, (0, 2*square_size),
(width, 2*square_size), line_Width)
# 1st verticle
pygame.draw.line(screen, line_color, (square_size, 0),
(square_size, height), line_Width)
# 2nd verticle
pygame.draw.line(screen, line_color, (2*square_size, 0),
(2*square_size, height), line_Width)
# To mark which square player has chosen
def mark_square(row, col, player):
board[row][col] = player
# To check the availability of a square
def available_square(row, col):
return board[row][col] == 0
# check board full or not
def is_board_full():
    # Return True only when every square on the board is occupied.
    for row in range(board_rows):
        for col in range(board_columns):
            if board[row][col] == 0:
                return False
    return True
def check_win(player):
# check verticle win
for col in range(board_columns):
if board[0][col] == player and board[1][col] == player and board[2][col] == player:
draw_vertical_winning_line(col, player)
return True
# check Horizontal win
for row in range(board_rows):
if board[row][0] == player and board[row][1] == player and board[row][2] == player:
draw_horizontal_winning_line(row, player)
return True
# check for asc win
if board[2][0] == player and board[1][1] == player and board[0][2] == player:
draw_asc_diagonal(player)
return True
if board[0][0] == player and board[1][1] == player and board[2][2] == player:
draw_des_diagonal(player)
return True
def draw_horizontal_winning_line(row, player):
posY = row*square_size + square_size//2
if(player == 1):
color = circle_color
else:
color = cross_color
pygame.draw.line(screen, color, (15, posY), (width-15, posY), 15)
def draw_vertical_winning_line(col, player):
posX = col*square_size + square_size//2
if(player == 1):
color = circle_color
else:
color = cross_color
pygame.draw.line(screen, color, (posX, 15), (posX, width-15), 15)
def draw_asc_diagonal(player):
if(player == 1):
color = circle_color
else:
color = cross_color
pygame.draw.line(screen, color, (15, height-15), (width-15, 15), 15)
def draw_des_diagonal(player):
if(player == 1):
color = circle_color
else:
color = cross_color
pygame.draw.line(screen, color, (15, 15), (width-15, height-15), 15)
def restart():
    global player
    screen.fill(bg_color)
    draw_lines()
    player = 1
for row in range(board_rows):
for col in range(board_columns):
board[row][col] = 0
draw_lines()
# player
player = 1
game_over = False
while True: # main game loop
for event in pygame.event.get(): # constantly looks for the event
if event.type == pygame.QUIT: # if user clicks exit pygame.QUIT and sys exits
pygame.quit()
sys.exit()
board_full = is_board_full()
if board_full and not game_over:
Won = font.render(" It's a Tie ", True, blue, green)
screen.blit(Won, winRect)
screen.blit(text, textRect)
screen.blit(leave, leaveRect)
if event.type == pygame.MOUSEBUTTONDOWN and not game_over:
mouseX = event.pos[0] # x
mouseY = event.pos[1] # y
clicked_row = int(mouseY // square_size)
clicked_column = int(mouseX // square_size)
if available_square(clicked_row, clicked_column):
mark_square(clicked_row, clicked_column, player)
if(check_win(player)):
game_over = True
Won = font.render("Player"+str(player) +
" Won ", True, blue, green)
screen.blit(Won, winRect)
screen.blit(text, textRect)
screen.blit(leave, leaveRect)
player = player % 2 + 1
if not game_over and not board_full:
Won = font.render("Player"+str(player) +
" Turn ", True, blue, green)
screen.blit(Won, winRect)
draw_figures()
# to restart the game
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_r:
restart()
game_over = False
elif event.key == pygame.K_x:
pygame.quit()
sys.exit()
# print(board)
pygame.display.update()
|
pypeit/deprecated/waveimage.py | ykwang1/PypeIt | 107 | 12720261 | """
Module for guiding construction of the Wavelength Image
.. include common links, assuming primary doc root is up one directory
.. include:: ../links.rst
"""
import inspect
import numpy as np
import os
from pypeit import msgs
from pypeit import utils
from pypeit import datamodel
from IPython import embed
class WaveImage(datamodel.DataContainer):
version = '1.0.0'
# I/O
output_to_disk = None #('WVTILTS_IMAGE', 'WVTILTS_FULLMASK', 'WVTILTS_DETECTOR_CONTAINER')
hdu_prefix = None
# Master fun
master_type = 'Wave'
master_file_format = 'fits'
datamodel = {
'image': dict(otype=np.ndarray, atype=np.floating, desc='2D Wavelength image'),
'PYP_SPEC': dict(otype=str, desc='PypeIt spectrograph name'),
}
def __init__(self, image, PYP_SPEC=None):
# Parse
args, _, _, values = inspect.getargvalues(inspect.currentframe())
d = dict([(k,values[k]) for k in args[1:]])
# Setup the DataContainer
datamodel.DataContainer.__init__(self, d=d)
class BuildWaveImage(object):
"""
Class to generate the Wavelength Image
Args:
slits (:class:`pypeit.edgetrace.SlitTraceSet`):
Object holding the slit edge locations
tilts (np.ndarray or None):
Tilt image
wv_calib (dict or None): wavelength solution dictionary
Parameters are read from wv_calib['par']
spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
The `Spectrograph` instance that sets the
instrument used to take the observations. Used to set
:attr:`spectrograph`.
det (int or None):
Attributes:
image (np.ndarray): Wavelength image
steps (list): List of the processing steps performed
"""
master_type = 'Wave'
# @classmethod
# def from_master_file(cls, master_file):
# """
#
# Args:
# master_file (str):
#
# Returns:
# waveimage.WaveImage:
#
# """
# # Spectrograph
# spectrograph, extras = masterframe.items_from_master_file(master_file)
# head0 = extras[0]
# # Master info
# master_dir = head0['MSTRDIR']
# master_key = head0['MSTRKEY']
# # Instantiate
# slf = cls(None, None, None, spectrograph, None, master_dir=master_dir,
# master_key=master_key, reuse_masters=True)
# slf.image = slf.load(ifile=master_file)
# # Return
# return slf
# TODO: Is maskslits ever anything besides slits.mask? (e.g., see calibrations.py call)
def __init__(self, slits, tilts, wv_calib, spectrograph, det):
# MasterFrame
#masterframe.MasterFrame.__init__(self, self.master_type, master_dir=master_dir,
# master_key=master_key, reuse_masters=reuse_masters)
# Required parameters
self.spectrograph = spectrograph
self.det = det
# TODO: Do we need to assign slits to self?
self.slits = slits
self.tilts = tilts
self.wv_calib = wv_calib
if self.slits is None:
self.slitmask = None
self.slit_spat_pos = None
else:
# NOTE: This uses the pad defined by EdgeTraceSetPar
self.slitmask = self.slits.slit_img()
# This selects the coordinates for the tweaked edges if
# they exist, original otherwise.
self.slit_spat_pos = self.slits.spatial_coordinates()
# For echelle order, primarily
# TODO: only echelle is ever used. Do we need to keep the whole
# thing?
self.par = wv_calib['par'] if wv_calib is not None else None
# Main output
self.image = None
self.steps = []
def build_wave(self):
"""
Main algorithm to build the wavelength image
Returns:
`numpy.ndarray`_: The wavelength image.
"""
# Loop on slits
ok_slits = np.where(np.invert(self.slits.mask))[0]
self.image = np.zeros_like(self.tilts)
nspec = self.slitmask.shape[0]
# Error checking on the wv_calib
#if (nspec-1) != int(self.wv_calib[str(0)]['fmax']):
# msgs.error('Your wavelength fits used inconsistent normalization. Something is wrong!')
# If this is echelle print out a status message and do some error checking
if self.par['echelle']:
msgs.info('Evaluating 2-d wavelength solution for echelle....')
if len(self.wv_calib['fit2d']['orders']) != len(ok_slits):
msgs.error('wv_calib and ok_slits do not line up. Something is very wrong!')
# Unpack some 2-d fit parameters if this is echelle
for slit in ok_slits:
thismask = (self.slitmask == slit)
if self.par['echelle']:
# TODO: Put this in `SlitTraceSet`?
order, indx = self.spectrograph.slit2order(self.slit_spat_pos[slit])
# evaluate solution
self.image[thismask] = utils.func_val(self.wv_calib['fit2d']['coeffs'],
self.tilts[thismask],
self.wv_calib['fit2d']['func2d'],
x2=np.ones_like(self.tilts[thismask])*order,
minx=self.wv_calib['fit2d']['min_spec'],
maxx=self.wv_calib['fit2d']['max_spec'],
minx2=self.wv_calib['fit2d']['min_order'],
maxx2=self.wv_calib['fit2d']['max_order'])
self.image[thismask] /= order
else:
iwv_calib = self.wv_calib[str(slit)]
self.image[thismask] = utils.func_val(iwv_calib['fitc'], self.tilts[thismask],
iwv_calib['function'],
minx=iwv_calib['fmin'],
maxx=iwv_calib['fmax'])
# Return
self.steps.append(inspect.stack()[0][3])
return WaveImage(self.image, PYP_SPEC=self.spectrograph.spectrograph)
def __repr__(self):
        # Generate string representation
txt = '<{:s}: >'.format(self.__class__.__name__)
return txt
|
src/poliastro/earth/atmosphere/__init__.py | bryanwweber/poliastro | 634 | 12720282 |
from poliastro.earth.atmosphere.coesa62 import COESA62
from poliastro.earth.atmosphere.coesa76 import COESA76
__all__ = ["COESA62", "COESA76"]
|
hippy/module/standard/misc/funcs.py | jweinraub/hippyvm | 289 | 12720292 |
from rpython.rlib.rstring import StringBuilder
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rlib.rarithmetic import intmask
from rpython.rlib.rrandom import Random
from hippy.builtin import wrap, Optional, BoolArg, StringArg
from hippy.objects.base import W_Root
from hippy.module.date import timelib
from hippy.module.phpstruct import _unpack
from hippy.phpcompiler import compile_php
_random = Random()
def connection_aborted():
""" Check whether client disconnected"""
return NotImplementedError()
def connection_status():
""" Returns connection status bitfield"""
return NotImplementedError()
def connection_timeout():
""" Check if the script timed out"""
return NotImplementedError()
def _lookup_constant(interp, constname):
i = constname.find(':')
if i < 0:
return interp.locate_constant(constname, False)
elif i + 1 < len(constname) and constname[i + 1] == ':':
clsname = constname[:i]
realname = constname[i + 2:]
klass = interp.lookup_class_or_intf(clsname)
if klass is not None:
return klass.constants_w.get(realname, None)
@wrap(['interp', str])
def constant(interp, constname):
""" Returns the value of a constant"""
w_obj = _lookup_constant(interp, constname)
if w_obj is None:
interp.warn("constant(): Couldn't find constant %s" % constname)
return interp.space.w_Null
return w_obj
@wrap(['interp', str, W_Root, Optional(bool)])
def define(interp, name, w_obj, case_insensitive=False):
""" Defines a named constant"""
if interp.locate_constant(name, False) is not None:
interp.notice("Constant %s already defined" % name)
return interp.space.w_False
interp.declare_new_constant(name, w_obj)
return interp.space.w_True
@wrap(['interp', str])
def defined(interp, name):
""" Checks whether a given named constant exists"""
return interp.space.newbool(_lookup_constant(interp, name) is not None)
def die():
""" Equivalent to exit"""
return NotImplementedError()
def exit():
""" Output a message and terminate the current script"""
return NotImplementedError()
def get_browser():
""" Tells what the user's browser is capable of"""
return NotImplementedError()
def __halt_compiler():
""" Halts the compiler execution"""
return NotImplementedError()
def highlight_file():
""" Syntax highlighting of a file"""
return NotImplementedError()
def highlight_string():
""" Syntax highlighting of a string"""
return NotImplementedError()
def ignore_user_abort():
""" Set whether a client disconnect should abort script execution"""
return NotImplementedError()
def pack():
""" Pack data into binary string"""
return NotImplementedError()
def php_check_syntax():
""" Check the PHP syntax of (and execute) the specified file"""
return NotImplementedError()
def php_strip_whitespace():
""" Return source with stripped comments and whitespace"""
return NotImplementedError()
def show_source():
""" Alias of highlight_file"""
return NotImplementedError()
@wrap(['interp', int], error=False)
def sleep(interp, seconds):
""" Delay execution"""
if seconds < 0:
interp.warn("sleep(): Number of seconds must be greater than or equal to 0")
return interp.space.w_False
import time
time.sleep(seconds)
# TODO: when a signal is received and a handler is defined, the remaining
# number of seconds as float should be returned.
return interp.space.newint(0)
def sys_getloadavg():
""" Gets system load average"""
return NotImplementedError()
def time_nanosleep():
""" Delay for a number of seconds and nanoseconds"""
return NotImplementedError()
def time_sleep_until():
""" Make the script sleep until the specified time"""
return NotImplementedError()
def _zero_pad(s, c):
l = len(s)
if l > c:
return s
return "0" * (c - l) + s
@wrap(['space', Optional(str), Optional(BoolArg(None))])
def uniqid(space, prefix='', more_entropy=False):
""" Generate a unique ID"""
timeval = lltype.malloc(timelib.timeval, flavor='raw')
void = lltype.nullptr(rffi.VOIDP.TO)
timelib.c_gettimeofday(timeval, void)
sec = intmask(timeval.c_tv_sec)
usec = intmask(timeval.c_tv_usec)
builder = StringBuilder()
if prefix:
builder.append(prefix)
builder.append(_zero_pad(hex(sec)[2:], 8))
builder.append(_zero_pad(hex(usec)[2:], 5))
if more_entropy:
builder.append(".")
builder.append(str(_random.random())[2:11])
return space.newstr(builder.build())
@wrap(['space', StringArg(None), StringArg(None)])
def unpack(space, formats, string):
""" Unpack data from binary string"""
return _unpack(space, formats, string)
@wrap(['interp', int])
def usleep(interp, microseconds):
""" Delay execution in microseconds"""
if microseconds < 0:
interp.warn("usleep(): Number of microseconds must be greater than or equal to 0")
return interp.space.w_False
import time
time.sleep(microseconds / 1000000.0)
|
google/colab/html/_html.py | figufema/TesteClone | 1,521 | 12720322 | #!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HTML renderable element in notebooks."""
import base64
import json
import string
import uuid
import IPython
import six
from google.colab import output
from google.colab.html import _provide
from google.colab.html import _resources
_MSG_CHUNK_SIZE = 1 * 1024 * 1024
def _to_html_str(obj):
"""Renders an object as html string on a best effort basis.
IPython allows for registering of formatters. This
tries to format the object using that registered text/html
  formatter method. If it cannot and it is a string it returns
it unchanged, otherwise it tries to serialize to json.
The result should be something that can be html output
for the notebook outputcell.
Args:
obj: An object to try to convert into HTML.
Returns:
An html string representation of the object.
"""
ip = IPython.get_ipython()
formatter = ip.display_formatter.formatters['text/html']
try:
render = formatter.lookup(obj)
return render(obj)
except KeyError: # No html formatter exists
pass
if hasattr(obj, '_repr_html_'):
html = obj._repr_html_() # pylint: disable=protected-access
if html:
return html
elif isinstance(obj, six.string_types):
return obj
else:
try:
return json.dumps(obj)
except TypeError: # Not json serializable
pass
return str(obj)
def _call_js_function(js_function, *args):
"""Evaluates a javascript string with arguments and returns its value."""
serialized = json.dumps(args)
if len(serialized) < _MSG_CHUNK_SIZE:
return output.eval_js('({})(...{})'.format(js_function, serialized))
name = str(uuid.uuid4())
for i in range(0, len(serialized), _MSG_CHUNK_SIZE):
chunk = serialized[i:i + _MSG_CHUNK_SIZE]
output.eval_js(
"""window["{name}"] = (window["{name}"] || "") + atob("{b64_chunk}");
""".format(
name=name, b64_chunk=base64.b64encode(chunk.encode()).decode('ascii')),
ignore_result=True)
return output.eval_js("""
(function() {{
const msg = JSON.parse(window["{name}"]);
delete window["{name}"];
return ({js_function})(...msg);
}})();
""".format(name=name, js_function=js_function))
def _proxy(guid, msg):
"""Makes a proxy call on an element."""
template = _resources.get_data(__name__, 'js/_proxy.js')
if six.PY3:
# pkgutil.get_data returns bytes, but we want a str.
template = template.decode('utf8')
return _call_js_function(template, guid, msg)
def _exists(guid):
"""Checks if an element with the given guid exists."""
template = _resources.get_data(__name__, 'js/_proxy.js')
if six.PY3:
# pkgutil.get_data returns bytes, but we want a str.
template = template.decode('utf8')
return _call_js_function(template, guid, {'method': 'exists'}, False)
_utils_ref = None
def _utils_url():
"""Return the url to the utils script."""
global _utils_ref
if not _utils_ref:
src = _resources.get_data(__name__, 'js/_html.js')
if six.PY3:
# pkgutil.get_data returns bytes, but we want a str.
src = src.decode('utf8')
_utils_ref = _provide.create(content=src, extension='js')
return _utils_ref.url
_element_template = string.Template("""
$deps
<$tag id="$guid">
$children
</$tag>
<script>
(function() {
async function init() {
const name = '_google_colab_output_html';
let script = document.getElementById(name);
if (!script) {
script = document.createElement('script');
script.id = name;
script._is_loaded = new Promise((resolve, reject) => {
script.onload = resolve;
script.onerror = reject;
});
script.src = '$utils';
document.body.appendChild(script);
}
await script._is_loaded;
await window.google.colab.html._createElement($config);
}
window.google.colab.output.pauseOutputUntil(init());
})();
</script>
""")
class Element(object):
"""Create an object which will render as an html element in output cell."""
def __init__(self, tag, attributes=None, properties=None, src=None):
"""Initialize the element.
Args:
tag: Custom element tag name.
attributes: Initial attributes to set.
properties: Initial properties to set.
src: Entry point url of source for element. Should be a dict
containing one of the following keys script, html, module.
For example: {"script": "data:application/javascript;,"}
Raises:
ValueError: If invalid deps, attributes, or properites.
"""
if src:
if not ('script' in src or 'module' in src or 'html' in src):
raise ValueError('Must provide a valid src.')
self._src = src
if attributes and not isinstance(attributes, dict):
raise ValueError('attributes must be a dict.')
if properties and not isinstance(properties, dict):
raise ValueError('properties must be a dict.')
self._tag = tag
self._guid = str(uuid.uuid4())
self._attributes = attributes or {}
self._properties = properties or {}
self._children = []
self._js_listeners = {}
self._py_listeners = {}
self._parent = None
self._could_exist = False
def _exists(self):
if not self._could_exist:
return False
return _exists(self._guid)
def get_attribute(self, name):
if not self._exists():
return self._attributes.get(name)
return _proxy(self._guid, {'method': 'getAttribute', 'name': name})
def set_attribute(self, name, value):
if not isinstance(value, six.string_types):
raise ValueError('Attribute value must be a string')
if not self._exists():
self._attributes[name] = value
else:
_proxy(self._guid, {
'method': 'setAttribute',
'value': value,
'name': name
})
def get_property(self, name):
if not self._exists():
return self._properties.get(name)
return _proxy(self._guid, {'method': 'getProperty', 'name': name})
def set_property(self, name, value):
if not self._exists():
self._properties[name] = value
else:
_proxy(self._guid, {
'method': 'setProperty',
'value': value,
'name': name
})
def call(self, method, *args):
if not self._exists():
raise ValueError('Cannot call method on undisplayed element.')
return _proxy(self._guid, {'method': 'call', 'value': args, 'name': method})
def add_event_listener(self, name, callback):
"""Adds an event listener to the element.
Args:
name: Name of the event.
callback: The python function or js string to evaluate when event occurs.
Raises:
ValueError: If callback is not valid.
"""
msg = {'name': name}
if isinstance(callback, six.string_types):
callbacks = self._js_listeners.get(name, {})
if callback in callbacks:
raise ValueError('Callback is already added.')
callbacks[callback] = callback
self._js_listeners[name] = callbacks
msg['method'] = 'addJsEventListener'
msg['value'] = callback
elif callable(callback):
callbacks = self._py_listeners.get(name, {})
if callback in callbacks:
raise ValueError('Callback is already added.')
callback_name = str(uuid.uuid4())
output.register_callback(callback_name, callback)
callbacks[callback] = callback_name
self._py_listeners[name] = callbacks
msg['method'] = 'addPythonEventListener'
msg['value'] = callback_name
else:
raise ValueError('callback must be a js string or callable python')
if self._exists():
_proxy(self._guid, msg)
def remove_event_listener(self, name, callback):
"""Removes an event listener from the element.
Args:
name: String of the event.
callback: The callback passed into add_event_listener previously.
Raises:
ValueError: If the callback was not added previously.
"""
if isinstance(callback, six.string_types):
listener_map = self._js_listeners
else:
listener_map = self._py_listeners
if name not in listener_map:
raise ValueError('listener does not exist')
callbacks = listener_map[name]
if callback not in callbacks:
raise ValueError('listener does not exist')
callback_name = callbacks[callback]
del callbacks[callback]
if not callbacks:
del listener_map[name]
if self._exists():
_proxy(self._guid, {
'method': 'removeEventListener',
'name': name,
'value': callback_name
})
def append_child(self, child):
"""Append child to Element."""
# Child could be anything that can be converted to html.
if isinstance(child, Element):
child.remove()
child._parent = self # pylint: disable=protected-access
self._children.append(child)
def remove_child(self, child):
"""Remove child from Element."""
if isinstance(child, Element):
if child._parent != self: # pylint: disable=protected-access
raise ValueError('Child parent must match.')
child._parent = None # pylint: disable=protected-access
self._children = [c for c in self._children if c is not child]
def remove(self):
parent = self._parent
if not parent:
return
parent.remove_child(self)
def _repr_html_(self):
"""Converts element to HTML string."""
self._could_exist = True
deps = ''
if self._src:
if 'script' in self._src:
deps = '<script src="{}"></script>'.format(self._src['script'])
elif 'module' in self._src:
deps = '<script type="module">import "{}";</script>'.format(
self._src['module'])
elif 'html' in self._src:
deps = '<link rel="import" href="{}" />'.format(self._src['html'])
return _element_template.safe_substitute({
'tag':
self._tag,
'guid':
self._guid,
'deps':
deps,
'utils':
_utils_url(),
'children':
'\n'.join([_to_html_str(c) for c in self._children]),
'config':
json.dumps({
'tag': self._tag,
'guid': self._guid,
'attributes': self._attributes,
'properties': self._properties,
'js_listeners': {
k: list(v.values()) for k, v in self._js_listeners.items()
},
'py_listeners': {
k: list(v.values()) for k, v in self._py_listeners.items()
},
}),
})
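if __name__ == '__main__':
  # Usage sketch (added for illustration; rendering only works inside a Colab
  # notebook runtime, where displaying the element draws it in the output cell).
  el = Element('div', attributes={'title': 'demo'})
  el.append_child('Hello from Python')
  el.add_event_listener('click', 'e => console.log("element clicked", e)')
  # In a notebook cell, evaluating `el` as the last expression renders it.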
|
WBT/PRE/testing.py | Hornbydd/WhiteboxTools-ArcGIS | 157 | 12720395 | import whitebox
import ast
import json
import os
import sys
wbt = whitebox.WhiteboxTools()
# wbt.set_verbose_mode(True)
# print(wbt.version())
# print(wbt.help())
# tools = wbt.list_tools(['dem'])
# for index, tool in enumerate(tools):
# print("{}. {}: {}".format(index, tool, tools[tool]))
# def get_tool_params(tool_name):
# out_str = wbt.tool_parameters(tool_name)
# start_index = out_str.index('[') + 1
# end_index = len(out_str.strip()) - 2
# params = out_str[start_index : end_index]
# print(params)
# sub_params = params.split('{"name"')
# param_list = []
# for param in sub_params:
# param = param.strip()
# if len(param) > 0:
# item = '"name"' + param
# item = item[ : item.rfind("}")].strip()
# param_list.append(item)
# params_dict = {}
# for item in param_list:
# print("{}\n".format(item))
# param_dict = {}
# index_name = item.find("name")
# index_flags = item.find("flags")
# index_description = item.find("description")
# index_parameter_type = item.find("parameter_type")
# index_default_value = item.find("default_value")
# index_optional = item.find("optional")
# name = item[index_name - 1 : index_flags - 2].replace('"name":', '')
# name = name.replace('"', '')
# param_dict['name'] = name
# flags = item[index_flags - 1 : index_description -2].replace('"flags":', '')
# if "--" in flags:
# flags = flags.split('--')[1][: -2]
# else:
# flags = flags.split('-')[1][: -2]
# param_dict['flags'] = flags
# desc = item[index_description - 1 : index_parameter_type - 2].replace('"description":', '')
# desc = desc.replace('"', '')
# param_dict['description'] = desc
# param_type = item[index_parameter_type - 1 : index_default_value - 2].replace('"parameter_type":', '')
# param_type = ast.literal_eval(param_type)
# param_dict['parameter_type'] = param_type
# default_value = item[index_default_value - 1 : index_optional - 2].replace('"default_value":', '')
# param_dict['default_value'] = default_value
# optional = item[index_optional - 1 :].replace('"optional":', '')
# param_dict['optional'] = optional
# params_dict[flags] = param_dict
# return params_dict
# tool_name = "BreachDepressions"
# print(wbt.tool_parameters(tool_name))
# params = get_tool_params(tool_name)
# print(params)
# print(params.keys())
# print(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
# lines = wbt.list_tools()
# print(lines)
# # for line in lines:
# # print(line)
# print(len(lines))
# parameter_types = []
# for param in params:
# param_type = params[param]['parameter_type']
# if param_type not in parameter_types:
# parameter_types.append(param_type)
# print(parameter_types)
# thisset = {"apple", "banana", "cherry"}
# thisset.add("orange")
# print(thisset)
# tools = wbt.list_tools()
# for index, tool in enumerate(sorted(tools)):
# print("{}: {}".format(index, tool))
# dem = "/media/hdd/Dropbox/git/WhiteboxTools-ArcGIS/testdata/DEM.tif"
# output = "/media/hdd/Dropbox/git/WhiteboxTools-ArcGIS/testdata/output.tif"
# wbt.run_tool("BreachDepressions", '--dem=dem --output=output')
# exe_path = "/home/qiusheng/Downloads/WBT/whitebox_tools"
# cmd = exe_path + ' --run=BreachDepressions --dem="/media/hdd/Dropbox/git/WhiteboxTools-ArcGIS/testdata/DEM.tif" --output="/media/hdd/Dropbox/git/WhiteboxTools-ArcGIS/testdata/output.tif" -v'
# print(os.popen(cmd).read().rstrip())
# ret = wbt.breach_depressions(dem, output)
# print(ret)
# print(type(ret))
# def redirect_to_file(text):
# original = sys.stdout
# sys.stdout = open('/media/hdd/Dropbox/git/WhiteboxTools-ArcGIS/WBT/PRE/redirect.txt', 'w')
# # print('This is your redirected text:')
# # print(text)
# wbt.breach_depressions(dem, output)
# sys.stdout = original
# print('This string goes to stdout, NOT the file!')
# redirect_to_file('Python rocks!')
# https://goo.gl/bFo2tD
# import sys
# if sys.version_info < (3, 0):
# from StringIO import StringIO
# else:
# from io import StringIO
# old_stdout = sys.stdout
# result = StringIO()
# sys.stdout = result
# # wbt.breach_depressions(dem, output)
# # print("test string")
# sys.stdout = old_stdout
# result_string = result.getvalue()
# print(result_string)
# print('--dem="/path/to/DEM.tif" --output="/path/to/output.tif"') |
sdk/cwl/tests/federation/arvboxcwl/setup_user.py | rpatil524/arvados | 222 | 12720400 |
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
import arvados
import arvados.errors
import time
import json
while True:
try:
api = arvados.api()
break
except arvados.errors.ApiError:
time.sleep(2)
existing = api.users().list(filters=[["email", "=", "<EMAIL>"],
["is_active", "=", True]], limit=1).execute()
if existing["items"]:
u = existing["items"][0]
else:
u = api.users().create(body={
'first_name': 'Test',
'last_name': 'User',
'email': '<EMAIL>',
'is_admin': False
}).execute()
api.users().activate(uuid=u["uuid"]).execute()
tok = api.api_client_authorizations().create(body={
"api_client_authorization": {
"owner_uuid": u["uuid"]
}
}).execute()
with open("cwl.output.json", "w") as f:
json.dump({
"test_user_uuid": u["uuid"],
"test_user_token": "v2/%s/%s" % (tok["uuid"], tok["api_token"])
}, f)
|
tools/report-converter/codechecker_report_converter/report.py | ryankurte/codechecker | 1,601 | 12720419 |
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Parsers for the analyzer output formats (plist ...) should create this
Report which will be stored.
Multiple bug identification hashes can be generated.
All hash generation algorithms should be documented and implemented here.
"""
import logging
LOG = logging.getLogger('ReportConverter')
def get_line(file_name, line_no, errors='ignore'):
""" Return the given line from the file.
    If line_no is larger than the number of lines in the file, an empty
    string is returned. If the file can't be opened for reading, the function
    also returns an empty string.
    Every file is read as utf-8 so that the line content does not depend on
    the platform settings; by default locale.getpreferredencoding() would be
    used, which is platform dependent.
Changing the encoding error handling can influence the hash content!
"""
try:
with open(file_name, mode='r',
encoding='utf-8',
errors=errors) as source_file:
for line in source_file:
line_no -= 1
if line_no == 0:
return line
return ''
except IOError:
LOG.error("Failed to open file %s", file_name)
return ''
def remove_whitespace(line_content, old_col):
""" Removes white spaces from the given line content.
This function removes white spaces from the line content parameter and
calculates the new line location.
Returns the line content without white spaces and the new column number.
E.g.:
line_content = " int foo = 17; sizeof(43); "
^
|- bug_col = 18
content_begin = " int foo = 17; "
content_begin_strip = "intfoo=17;"
line_strip_len = 18 - 10 => 8
''.join(line_content.split()) => "intfoo=17;sizeof(43);"
^
|- until_col - line_strip_len
18 - 8
= 10
"""
content_begin = line_content[:old_col]
content_begin_strip = ''.join(content_begin.split())
line_strip_len = len(content_begin) - len(content_begin_strip)
return ''.join(line_content.split()), \
old_col - line_strip_len
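# Illustrative usage sketch (the file name, line number and column below are
# made-up values, not from the original project): normalise a source line
# before it is fed into a hash calculation.
#
#   line = get_line('main.cpp', 42)
#   stripped_line, new_col = remove_whitespace(line, 18)
#   # 'stripped_line' has no whitespace and 'new_col' points at the same token.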
|
examples/pyplot/plot_hexcells.py | kclamar/vedo | 836 | 12720431 | """3D Bar plot of a TOF camera with hexagonal pixels"""
from vedo import *
import numpy as np
settings.defaultFont = "Glasgo"
settings.useParallelProjection = True
vals = np.abs(np.random.randn(4*6)) # pixel heights
cols = colorMap(vals, "summer")
k = 0
items = [__doc__]
for i in range(4):
for j in range(6):
val, col= vals[k], cols[k]
x, y, z = [i+j%2/2, j/1.155, val+0.01]
zbar= Polygon([x,y,0], nsides=6, r=0.55, c=col).extrude(val)
line= Polygon([x,y,z], nsides=6, r=0.55, c='k').wireframe().lw(2)
txt = Text3D(f"{i}/{j}", [x,y,z], s=.15, c='k', justify='center')
items += [zbar, line, txt]
k += 1
show(items, axes=7)
|
pybotters/models/bitflyer.py | maruuuui/pybotters | 176 | 12720440 |
from __future__ import annotations
import asyncio
import logging
import operator
from decimal import Decimal
from typing import Awaitable
import aiohttp
from ..store import DataStore, DataStoreManager
from ..typedefs import Item
from ..ws import ClientWebSocketResponse
logger = logging.getLogger(__name__)
class bitFlyerDataStore(DataStoreManager):
def _init(self) -> None:
self.create('board', datastore_class=Board)
self.create('ticker', datastore_class=Ticker)
self.create('executions', datastore_class=Executions)
self.create('childorderevents', datastore_class=ChildOrderEvents)
self.create('childorders', datastore_class=ChildOrders)
self.create('parentorderevents', datastore_class=ParentOrderEvents)
self.create('parentorders', datastore_class=ParentOrders)
self.create('positions', datastore_class=Positions)
self._snapshots = set()
async def initialize(self, *aws: Awaitable[aiohttp.ClientResponse]) -> None:
for f in asyncio.as_completed(aws):
resp = await f
data = await resp.json()
if resp.url.path == '/v1/me/getchildorders':
self.childorders._onresponse(data)
elif resp.url.path == '/v1/me/getparentorders':
self.parentorders._onresponse(data)
elif resp.url.path == '/v1/me/getpositions':
self.positions._onresponse(data)
def _onmessage(self, msg: Item, ws: ClientWebSocketResponse) -> None:
if 'error' in msg:
logger.warning(msg)
if 'params' in msg:
channel: str = msg['params']['channel']
message = msg['params']['message']
if channel.startswith('lightning_board_'):
if channel.startswith('lightning_board_snapshot_'):
asyncio.create_task(
ws.send_json(
{
'method': 'unsubscribe',
'params': {'channel': channel},
}
)
)
product_code = channel.replace('lightning_board_snapshot_', '')
self.board._delete(self.board.find({'product_code': product_code}))
self._snapshots.add(product_code)
else:
product_code = channel.replace('lightning_board_', '')
if product_code in self._snapshots:
self.board._onmessage(product_code, message)
elif channel.startswith('lightning_ticker_'):
self.ticker._onmessage(message)
elif channel.startswith('lightning_executions_'):
product_code = channel.replace('lightning_executions_', '')
self.executions._onmessage(product_code, message)
elif channel == 'child_order_events':
self.childorderevents._onmessage(message)
self.childorders._onmessage(message)
self.positions._onmessage(message)
elif channel == 'parent_order_events':
self.parentorderevents._onmessage(message)
self.parentorders._onmessage(message)
@property
def board(self) -> 'Board':
return self.get('board', Board)
@property
def ticker(self) -> 'Ticker':
return self.get('ticker', Ticker)
@property
def executions(self) -> 'Executions':
return self.get('executions', Executions)
@property
def childorderevents(self) -> 'ChildOrderEvents':
return self.get('childorderevents', ChildOrderEvents)
@property
def childorders(self) -> 'ChildOrders':
return self.get('childorders', ChildOrders)
@property
def parentorderevents(self) -> 'ParentOrderEvents':
return self.get('parentorderevents', ParentOrderEvents)
@property
def parentorders(self) -> 'ParentOrders':
return self.get('parentorders', ParentOrders)
@property
def positions(self) -> 'Positions':
return self.get('positions', Positions)
class Board(DataStore):
_KEYS = ['product_code', 'side', 'price']
def _init(self) -> None:
self.mid_price: dict[str, float] = {}
def sorted(self, query: Item = None) -> dict[str, list[Item]]:
if query is None:
query = {}
result = {'SELL': [], 'BUY': []}
for item in self:
if all(k in item and query[k] == item[k] for k in query):
result[item['side']].append(item)
result['SELL'].sort(key=lambda x: x['price'])
result['BUY'].sort(key=lambda x: x['price'], reverse=True)
return result
def _onmessage(self, product_code: str, message: Item) -> None:
self.mid_price[product_code] = message['mid_price']
for key, side in (('bids', 'BUY'), ('asks', 'SELL')):
for item in message[key]:
if item['size']:
self._insert([{'product_code': product_code, 'side': side, **item}])
else:
self._delete([{'product_code': product_code, 'side': side, **item}])
board = self.sorted({'product_code': product_code})
targets = []
for side, ope in (('BUY', operator.le), ('SELL', operator.gt)):
for item in board[side]:
if ope(item['price'], message['mid_price']):
break
else:
targets.append(item)
self._delete(targets)
class Ticker(DataStore):
_KEYS = ['product_code']
def _onmessage(self, message: Item) -> None:
self._update([message])
class Executions(DataStore):
_MAXLEN = 99999
def _onmessage(self, product_code: str, message: list[Item]) -> None:
for item in message:
self._insert([{'product_code': product_code, **item}])
class ChildOrderEvents(DataStore):
def _onmessage(self, message: list[Item]) -> None:
self._insert(message)
class ParentOrderEvents(DataStore):
def _onmessage(self, message: list[Item]) -> None:
self._insert(message)
class ChildOrders(DataStore):
_KEYS = ['child_order_acceptance_id']
def _onresponse(self, data: list[Item]) -> None:
if data:
self._delete(self.find({'product_code': data[0]['product_code']}))
for item in data:
if item['child_order_state'] == 'ACTIVE':
self._insert([item])
def _onmessage(self, message: list[Item]) -> None:
for item in message:
if item['event_type'] == 'ORDER':
self._insert([item])
elif item['event_type'] in ('CANCEL', 'EXPIRE'):
self._delete([item])
elif item['event_type'] == 'EXECUTION':
if item['outstanding_size']:
childorder = self.get(item)
if childorder:
if isinstance(childorder['size'], int) and isinstance(
item['size'], int
):
childorder['size'] -= item['size']
else:
childorder['size'] = float(
Decimal(str(childorder['size']))
- Decimal(str(item['size']))
)
else:
self._delete([item])
class ParentOrders(DataStore):
_KEYS = ['parent_order_acceptance_id']
def _onresponse(self, data: list[Item]) -> None:
if data:
self._delete(self.find({'product_code': data[0]['product_code']}))
for item in data:
if item['parent_order_state'] == 'ACTIVE':
self._insert([item])
def _onmessage(self, message: list[Item]) -> None:
for item in message:
if item['event_type'] == 'ORDER':
self._insert([item])
elif item['event_type'] in ('CANCEL', 'EXPIRE'):
self._delete([item])
elif item['event_type'] == 'COMPLETE':
parentorder = self.get(item)
if parentorder:
if parentorder['parent_order_type'] in ('IFD', 'IFDOCO'):
if item['parameter_index'] >= 2:
self._delete([item])
else:
self._delete([item])
class Positions(DataStore):
_COMMON_KEYS = [
'product_code',
'side',
'price',
'size',
'commission',
'sfd',
]
def _common_keys(self, item: Item) -> Item:
return {key: item[key] for key in self._COMMON_KEYS}
def _onresponse(self, data: list[Item]) -> None:
if data:
self._delete(self.find({'product_code': data[0]['product_code']}))
for item in data:
self._insert([self._common_keys(item)])
def _onmessage(self, message: list[Item]) -> None:
for item in message:
if item['event_type'] == 'EXECUTION':
positions = self._find_with_uuid({'product_code': item['product_code']})
if positions:
if positions[next(iter(positions))]['side'] == item['side']:
self._insert([self._common_keys(item)])
else:
for uid, pos in positions.items():
if pos['size'] > item['size']:
if isinstance(pos['size'], int) and isinstance(
item['size'], int
):
pos['size'] -= item['size']
else:
pos['size'] = float(
Decimal(str(pos['size']))
- Decimal(str(item['size']))
)
break
else:
if isinstance(pos['size'], int) and isinstance(
item['size'], int
):
item['size'] -= pos['size']
else:
item['size'] = float(
Decimal(str(item['size']))
- Decimal(str(pos['size']))
)
self._remove([uid])
if not pos['size']:
break
else:
try:
self._insert([self._common_keys(item)])
except KeyError:
pass
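# Minimal usage sketch (hedged): the wiring below assumes pybotters'
# DataStoreManager exposes an `onmessage` hook for websocket messages, and the
# product code is only an example value.
#
#   store = bitFlyerDataStore()
#   # ... pass store.onmessage as the websocket message handler ...
#   book = store.board.sorted({'product_code': 'FX_BTC_JPY'})
#   best_ask, best_bid = book['SELL'][0], book['BUY'][0]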
|
research/active_learning/experiments/visualize_feature_maps.py | dnarqq/WildHack | 402 | 12720449 |
import argparse, random, sys, time
import PIL
import torch
import torchvision.models as models
import matplotlib.pyplot as plt
from torchvision.transforms import *
sys.path.append("..")
from DL.utils import *
from DL.networks import *
from Database.DB_models import *
from DL.sqlite_data_loader import SQLDataLoader
# class SaveFeatures():
# def __init__(self, module):
# self.hook = module.register_forward_hook(self.hook_fn)
# def hook_fn(self, module, input, output):
# self.features = torch.tensor(output, requires_grad=True).cuda()
# def close(self):
# self.hook.remove()
outputs = []
def hook(module, input, output):
outputs.append(output)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--db_name', default='missouricameratraps', type=str, help='Name of the training (target) data Postgres DB.')
parser.add_argument('--db_user', default='user', type=str, help='Name of the user accessing the Postgres DB.')
parser.add_argument('--db_password', default='password', type=str, help='Password of the user accessing the Postgres DB.')
parser.add_argument('--num', default=1000, type=int, help='Number of samples to draw from dataset to get embedding features.')
parser.add_argument('--crop_dir', type=str, help='Path to directory with cropped images to get embedding features for.')
parser.add_argument('--base_model', type=str, help='Path to latest embedding model checkpoint.')
parser.add_argument('--random_seed', default=1234, type=int, help='Random seed to get same samples from database.')
args = parser.parse_args()
random.seed(args.random_seed)
np.random.seed(args.random_seed)
BASE_MODEL = args.base_model
# Load the saved embedding model from the checkpoint
checkpoint = load_checkpoint(BASE_MODEL)
if checkpoint['loss_type'].lower() == 'center' or checkpoint['loss_type'].lower() == 'softmax':
embedding_net = SoftmaxNet(checkpoint['arch'], checkpoint['feat_dim'], checkpoint['num_classes'], False)
else:
embedding_net = NormalizedEmbeddingNet(checkpoint['arch'], checkpoint['feat_dim'], False)
model = torch.nn.DataParallel(embedding_net).cuda()
model.load_state_dict(checkpoint['state_dict'])
model.eval()
# Get a sample from the database, with eval transforms applied, etc.
DB_NAME = args.db_name
USER = args.db_user
    PASSWORD = args.db_password
# Connect to database and sample a dataset
target_db = PostgresqlDatabase(DB_NAME, user=USER, password=PASSWORD, host='localhost')
target_db.connect(reuse_if_open=True)
db_proxy.initialize(target_db)
dataset_query = Detection.select(Detection.image_id, Oracle.label, Detection.kind).join(Oracle).limit(args.num)
dataset = SQLDataLoader(args.crop_dir, query=dataset_query, is_training=False, kind=DetectionKind.ModelDetection.value, num_workers=8, limit=args.num)
imagepaths = dataset.getallpaths()
sample_image_path = '0ca68a6f-6348-4456-8fb5-c067e2cbfe14'#'0a170ee9-166d-45df-8f45-b14550fc124e'#'43e3d2a6-38ea-4d17-a712-1b0feab92d58'#'0ef0f79f-7b58-473d-abbf-75bba59e834d'
dataset.image_mode()
sample_image = dataset.loader(sample_image_path)
sample_image.save('sample_image_for_activations.png')
print(sample_image.size)
sample_image = dataset.eval_transform(sample_image)
print(sample_image.shape)
# output = model.forward(sample_image.unsqueeze(0))
# print(output)
model_inner_resnet = list(model.children())[0].inner_model
model_inner_resnet.eval()
model_inner_resnet.layer1[0].conv2.register_forward_hook(hook)
output = model.forward(sample_image.unsqueeze(0))
intermediate_output = outputs[0].cpu().detach().numpy()
print(intermediate_output.shape)
for i in range(intermediate_output.shape[1]):
plt.subplot(8,8,i+1)
plt.imshow(intermediate_output[0,i,:,:], cmap='viridis')
plt.axis('off')
plt.suptitle('ResNet Layer1 Conv2 Activations')
plt.savefig('temp.png')
# with torch.no_grad():
# sample_image_input = sample_image.cuda(non_blocking=True)
# _, output = model(sample_image_input) # compute output
# print(output)
# sample_image = PILImage.open(sample_image_path).convert('RGB')
# sample_image = transforms.Compose([Resize([256, 256]), CenterCrop(([[224,224]])), ToTensor(), Normalize([0.369875, 0.388726, 0.347536], [0.136821, 0.143952, 0.145229])])(sample_image)
# print(list(model_inner_resnet.children()))
# print(model_inner_resnet.fc)
# print(model_inner_resnet.fc0)
# # print(model_inner_resnet.layer4[0].conv2)
# # print(type(model))
# # print(len(list(model_inner_resnet.children())))
# # print(list(model.children()))
# # print(list(list(model.children())[0].children()))
# img = np.uint8(np.random.uniform(150, 180, (56, 56, 3)))/255
# img_tensor = torch.unsqueeze(torch.from_numpy(img), 0)
# full_out = model_inner_resnet.forward(img_tensor)
# print(full_out)
# model(img_tensor)
# activations = SaveFeatures(model_inner_resnet.layer4[0].conv2)
# print(activations.features)
# print(type(activations.features))
# activations.close()
if __name__=='__main__':
main() |
src/vbLib/ExecuteCMDSync.py | mehrdad-shokri/macro_pack | 1,550 | 12720452 |
VBA = \
r"""
Function ExecuteCmdSync(targetPath As String)
'Run a shell command, returning the output as a string'
' Using a hidden window, pipe the output of the command to the CLIP.EXE utility...
' Necessary because normal usage with oShell.Exec("cmd.exe /C " & sCmd) always pops a window
Dim instruction As String
instruction = "cmd.exe /c " & targetPath & " | clip"
On Error Resume Next
Err.Clear
CreateObject("WScript.Shell").Run instruction, 0, True
On Error Goto 0
' Read the clipboard text using htmlfile object
ExecuteCmdSync = CreateObject("htmlfile").ParentWindow.ClipboardData.GetData("text")
End Function
"""
|
tf_siren/siren_mlp.py | jackietung1219/tf_SIREN | 134 | 12720464 | import tensorflow as tf
from tf_siren import siren
class SIRENModel(tf.keras.Model):
def __init__(self, units: int, final_units: int,
final_activation: str = "linear",
num_layers: int = 1,
w0: float = 30.0,
w0_initial: float = 30.0,
initial_layer_init: str = 'siren_first_uniform',
use_bias: bool = True, **kwargs):
"""
SIREN model from the paper [Implicit Neural Representations with Periodic Activation Functions](https://arxiv.org/abs/2006.09661).
Used to create a multi-layer MLP using SinusodialRepresentationDense layers.
Args:
units: Number of hidden units in the intermediate layers.
final_units: Number of hidden units in the final layer.
final_activation: Activation function of the final layer.
num_layers: Number of layers in the network.
w0: w0 in the activation step `act(x; w0) = sin(w0 * x)`.
w0_initial: By default, scales `w0` of first layer to 30 (as used in the paper).
initial_layer_init: Initialization for the first SIREN layer.
Can be any valid keras initialization object or string.
For SIREN, use `siren_uniform` for the general initialization,
or `siren_first_uniform` which is specific for first layer.
use_bias: Boolean whether to use bias or not.
# References:
- [Implicit Neural Representations with Periodic Activation Functions](https://arxiv.org/abs/2006.09661)
"""
super(SIRENModel, self).__init__(**kwargs)
siren_layers = [siren.SinusodialRepresentationDense(units, w0=w0_initial, use_bias=use_bias,
kernel_initializer=initial_layer_init,
**kwargs)]
for _ in range(num_layers - 1):
siren_layers.append(siren.SinusodialRepresentationDense(units, w0=w0, use_bias=use_bias, **kwargs))
self.siren_layers = tf.keras.Sequential(siren_layers)
self.final_dense = siren.SinusodialRepresentationDense(final_units, activation=final_activation,
use_bias=use_bias, **kwargs)
def call(self, inputs, training=None, mask=None):
features = self.siren_layers(inputs)
output = self.final_dense(features)
return output
class ScaledSIRENModel(tf.keras.Model):
def __init__(self, units: int, final_units: int,
final_activation: str = "linear",
num_layers: int = 1,
w0: float = 30.0,
w0_initial: float = 30.0,
scale: float = 1.0,
scale_initial: float = None,
initial_layer_init: str = 'siren_first_uniform',
use_bias: bool = True, **kwargs):
"""
Scaled SIREN model from the paper [Implicit Neural Representations with Periodic Activation Functions](https://arxiv.org/abs/2006.09661).
Used to create a multi-layer MLP using ScaledSinusodialRepresentationDense layers.
Args:
units: Number of hidden units in the intermediate layers.
final_units: Number of hidden units in the final layer.
final_activation: Activation function of the final layer.
num_layers: Number of layers in the network.
w0: w0 in the activation step `act(x; w0) = sin(w0 * x)`.
w0_initial: By default, scales `w0` of first layer to 30 (as used in the paper).
scale: Scale of the kernel matrix prior to matmul.
scale_initial: Scale of the kernel matrix prior to matmul, for the first layer.
By default, uses the `w0_initial` value if not passed a value.
initial_layer_init: Initialization for the first SIREN layer.
Can be any valid keras initialization object or string.
For SIREN, use `siren_uniform` for the general initialization,
or `siren_first_uniform` which is specific for first layer.
use_bias: Boolean whether to use bias or not.
# References:
- [Implicit Neural Representations with Periodic Activation Functions](https://arxiv.org/abs/2006.09661)
"""
super(ScaledSIRENModel, self).__init__(**kwargs)
if scale_initial is None:
scale_initial = w0_initial
siren_layers = [siren.ScaledSinusodialRepresentationDense(units, scale=scale_initial, w0=w0_initial,
use_bias=use_bias,
kernel_initializer=initial_layer_init,
**kwargs)]
for _ in range(num_layers - 1):
siren_layers.append(siren.ScaledSinusodialRepresentationDense(units, scale=scale, w0=w0, use_bias=use_bias,
**kwargs))
self.siren_layers = tf.keras.Sequential(siren_layers)
self.final_dense = siren.ScaledSinusodialRepresentationDense(final_units, scale=scale,
activation=final_activation,
use_bias=use_bias, **kwargs)
def call(self, inputs, training=None, mask=None):
features = self.siren_layers(inputs)
output = self.final_dense(features)
return output
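# Minimal usage sketch (hyperparameters are illustrative, not taken from the
# paper or this repository): a SIREN MLP that maps 2-D pixel coordinates to
# RGB values.
#
#   model = SIRENModel(units=256, final_units=3, final_activation='sigmoid',
#                      num_layers=5, w0=30.0, w0_initial=30.0)
#   coords = tf.random.uniform((1024, 2))   # normalised (x, y) positions
#   rgb = model(coords)                     # shape: (1024, 3)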
|
alg/asac/Predictor_G.py | loramf/mlforhealthlabpub | 171 | 12720471 | '''
ASAC (Active Sensing using Actor-Critic Model) (12/18/2018)
Prediction Function only with Selected Samples
'''
#%% Necessary packages
import tensorflow as tf
#%% Prediction Function
'''
Inputs:
- trainX, train Y (training set)
- testX: testing features
- trainG: mask vector for selected training samples
- trainG: mask vector for selected testing samples
Outputs:
- Prediction results on testing set
'''
def Predictor_G (trainX, testX, trainY, trainG, testG, iterations=5001):
# Initialization on the Graph
tf.reset_default_graph()
#%% Preprocessing
Train_No = len(trainY)
    Test_No = len(testX)
New_trainX = list()
for i in range(Train_No):
Temp = trainX[i]
Temp = Temp * trainG[i]
New_trainX.append(Temp)
New_testX = list()
for i in range(Test_No):
Temp = testX[i]
Temp = Temp * testG[i]
New_testX.append(Temp)
#%% Network Parameters
seq_length = len(New_trainX[0][:,0])
data_dim = len(New_trainX[0][0,:])
hidden_dim = 5
output_dim = 1
learning_rate = 0.01
#%% Network Build
# input place holders
X = tf.placeholder(tf.float32, [None, seq_length, data_dim])
Y = tf.placeholder(tf.float32, [None, seq_length])
# build a LSTM network
cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
    Y_pred = tf.contrib.layers.fully_connected(outputs, output_dim, activation_fn=None) # dense layer applied to every timestep's output
# cost/loss
loss = tf.reduce_sum(tf.square(tf.reshape(Y_pred, [-1,seq_length]) - Y)) # sum of the squares
# optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
train = optimizer.minimize(loss)
#%% Sessions
sess = tf.Session()
# Initialization
sess.run(tf.global_variables_initializer())
#%% Training step
for i in range(iterations):
_, step_loss = sess.run([train, loss], feed_dict={X: New_trainX, Y: trainY})
if i % 100 == 0:
print("[step: {}] loss: {}".format(i, step_loss))
# Test step
test_predict = sess.run(Y_pred, feed_dict={X: New_testX})
#%% Output
Final = list()
for i in range(len(testX)):
Final.append(test_predict[i,:,0])
return Final
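# Illustrative call sketch (shapes are assumptions): every X entry is a
# [seq_length, data_dim] array, every Y entry a [seq_length] array, and the
# G masks have the same shape as the matching X entries.
#
#   predictions = Predictor_G(trainX, testX, trainY, trainG, testG,
#                             iterations=1001)
#   # predictions[i] is the per-timestep estimate for testX[i].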
|
utils/gap_configs/python/ips/cluster/l1_interleaver.py | 00-01/gap_sdk | 118 | 12720474 | #
# Copyright (C) 2020 GreenWaves Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gsystree as st
class L1_interleaver(st.Component):
def __init__(self, parent, slave, nb_slaves=0, nb_masters=0, stage_bits=0, interleaving_bits=2):
super(L1_interleaver, self).__init__(parent, slave)
self.add_properties({
'vp_component': 'pulp.cluster.l1_interleaver_impl',
'nb_slaves': nb_slaves,
'nb_masters': nb_masters,
'stage_bits': stage_bits,
'interleaving_bits': interleaving_bits
})
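# Instantiation sketch (the parent component and sizing are assumptions,
# inferred only from the constructor signature above):
#
#   interleaver = L1_interleaver(cluster, 'l1_interleaver',
#                                nb_slaves=16, nb_masters=9,
#                                stage_bits=0, interleaving_bits=2)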
|
tests/test_provider_hashicorp_time.py | mjuenema/python-terrascript | 507 | 12720505 |
# tests/test_provider_hashicorp_time.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:28:43 UTC)
def test_provider_import():
import terrascript.provider.hashicorp.time
def test_resource_import():
from terrascript.resource.hashicorp.time import time_offset
from terrascript.resource.hashicorp.time import time_rotating
from terrascript.resource.hashicorp.time import time_sleep
from terrascript.resource.hashicorp.time import time_static
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.hashicorp.time
#
# t = terrascript.provider.hashicorp.time.time()
# s = str(t)
#
# assert 'https://github.com/hashicorp/terraform-provider-time' in s
# assert '0.7.2' in s
|
tests/python/correctness/line_properties.py | sid17/weaver | 163 | 12720516 |
#! /usr/bin/env python
#
# ===============================================================
# Description: Basic test for node/edge properties API.
#
# Created: 2013-12-17 14:50:18
#
# Author: <NAME>, <EMAIL>
#
# Copyright (C) 2013, Cornell University, see the LICENSE file
# for licensing agreement
# ===============================================================
#
import sys
import time
try:
import weaver.client as client
except ImportError:
import client
config_file=''
if len(sys.argv) > 1:
config_file = sys.argv[1]
# creating line graph
nodes = []
edges = []
num_nodes = 400
coord_id = 0
c = client.Client('127.0.0.1', 2002, config_file)
def line_requests(eprops, exp_reach):
num_reach = 0
rp = client.ReachParams(dest=nodes[num_nodes-1], edge_props=eprops)
timer = time.time()
for i in range(num_nodes-1):
prog_args = [(nodes[i], rp)]
response = c.run_reach_program(prog_args)
#print 'done line req'
if response.reachable:
num_reach += 1
if exp_reach == 0:
print 'bad reach'
timer = time.time() - timer
assert (num_reach == exp_reach), 'expected reachable ' + str(exp_reach) + ', actually reachable ' + str(num_reach)
c.begin_tx()
for i in range(num_nodes):
nodes.append(c.create_node())
c.end_tx()
c.begin_tx()
for i in range(num_nodes-1):
edges.append(c.create_edge(nodes[i], nodes[i+1]))
c.end_tx()
node_count = c.get_node_count()
print 'Node count:'
for cnt in node_count:
print str(cnt)
#dummy = raw_input('press a key when shard is killed ')
print 'Now testing without edge props'
line_requests([], num_nodes-1)
print 'Done testing without edge props'
print 'Now testing with edge props'
line_requests([('color','blue')], 0)
print 'Done testing with edge props'
# adding edge props
c.begin_tx()
for i in range(num_nodes-1):
c.set_edge_property(edges[i], 'color', 'blue', nodes[i])
c.end_tx()
print 'Now testing without edge props'
line_requests([], num_nodes-1)
print 'Now testing with edge props'
line_requests([('color','blue')], num_nodes-1)
print 'Now testing with WRONG edge props'
line_requests([('color','abcd')], 0)
print 'Now testing with WRONG edge props'
line_requests([('color','abc')], 0)
print 'Pass line_properties.'
|
src/mceditlib/operations/analyze.py | elcarrion06/mcedit2 | 673 | 12720518 |
"""
analyze.py
Get counts of blocks, entities, and tile entities in a selection.
"""
from __future__ import absolute_import
import logging
import numpy
from collections import defaultdict
from mceditlib.operations import Operation
log = logging.getLogger(__name__)
class AnalyzeOperation(Operation):
def __init__(self, dimension, selection):
"""
Analyze all blocks in a selection and return counts of block types, entity IDs and tile entity IDs.
Counts are returned in `self.blocks`, `self.entityCounts` and `self.tileEntityCounts`
:type dimension: WorldEditorDimension
:type selection: `~.BoundingBox`
"""
super(AnalyzeOperation, self).__init__(dimension, selection)
self.createSections = False
self.blocks = numpy.zeros(65536, dtype='intp')
self.selection = selection
self.entityCounts = defaultdict(int)
self.tileEntityCounts = defaultdict(int)
self.skipped = 0
self.sections = 0
log.info("Analyzing %s blocks", selection.volume)
def done(self):
log.info(u"Analyze: Skipped {0}/{1} sections".format(self.skipped, self.sections))
def operateOnChunk(self, chunk):
cx, cz = chunk.cx, chunk.cz
for cy in chunk.bounds.sectionPositions(cx, cz):
section = chunk.getSection(cy, create=False)
if section is None:
continue
self.sections += 1
sectionMask = self.selection.section_mask(cx, cy, cz)
if sectionMask is None:
self.skipped += 1
continue
maskSize = sectionMask.sum()
if maskSize == 0:
self.skipped += 1
continue
blocks = numpy.array(section.Blocks[sectionMask], dtype='uint16')
blocks |= (numpy.array(section.Data[sectionMask], dtype='uint16') << 12)
b = numpy.bincount(blocks.ravel())
self.blocks[:b.shape[0]] += b
for ref in chunk.Entities:
if ref.Position in self.selection:
self.entityCounts[ref.id] += 1
for ref in chunk.TileEntities:
if ref.Position in self.selection:
self.tileEntityCounts[ref.id] += 1
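# Usage sketch (hedged): how chunks are enumerated depends on the caller, so
# `chunks_overlapping` below is an assumed helper; the loop only illustrates
# the per-chunk contract of this operation.
#
#   op = AnalyzeOperation(dimension, selection)
#   for chunk in chunks_overlapping(selection):
#       op.operateOnChunk(chunk)
#   op.done()
#   non_air = op.blocks[1:].sum()   # rough count of non-air block entries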
|
pyvips/vtargetcustom.py | mrpal39/pyvips | 142 | 12720524 |
from __future__ import division
import logging
import pyvips
from pyvips import ffi, vips_lib
logger = logging.getLogger(__name__)
class TargetCustom(pyvips.Target):
"""An output target you can connect action signals to to implement
behaviour.
"""
def __init__(self):
"""Make a new target you can customise.
You can pass this target to (for example) :meth:`write_to_target`.
"""
target = ffi.cast('VipsTarget*', vips_lib.vips_target_custom_new())
super(TargetCustom, self).__init__(target)
def on_write(self, handler):
"""Attach a write handler.
The interface is exactly as io.write(). The handler is given a
bytes-like object to write, and should return the number of bytes
written.
"""
def interface_handler(buf):
bytes_written = handler(buf)
# py2 will often return None for bytes_written ... replace with
# the length of the string
if bytes_written is None:
bytes_written = len(buf)
return bytes_written
self.signal_connect("write", interface_handler)
def on_finish(self, handler):
"""Attach a finish handler.
This optional handler is called at the end of write. It should do any
cleaning up necessary.
"""
self.signal_connect("finish", handler)
__all__ = ['TargetCustom']
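# Usage sketch (hedged; the image object and save format are assumptions):
# stream a pyvips image into an in-memory buffer through a custom target.
#
#   buf = bytearray()
#   target = TargetCustom()
#   target.on_write(lambda chunk: buf.extend(chunk) or len(chunk))
#   target.on_finish(lambda: None)
#   image.write_to_target(target, '.png')   # `image` is an existing pyvips.Image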
|
test/programytest/storage/stores/nosql/mongo/store/test_oobs.py | cdoebler1/AIML2 | 345 | 12720542 | import unittest
from unittest.mock import patch
import programytest.storage.engines as Engines
from programy.storage.stores.nosql.mongo.config import MongoStorageConfiguration
from programy.storage.stores.nosql.mongo.engine import MongoStorageEngine
from programy.storage.stores.nosql.mongo.store.oobs import MongoOOBStore
from programytest.storage.asserts.store.assert_oobs import OOBsStoreAsserts
class MongoOOBStoreTests(OOBsStoreAsserts):
@unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
def test_initialise(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoOOBStore(engine)
self.assertEqual(store.storage_engine, engine)
@unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
def test_load_oobs(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoOOBStore(engine)
self.assert_load(store)
@staticmethod
def patch_instantiate_class(class_string):
raise Exception("Mock Exception")
@unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
@patch("programy.utils.classes.loader.ClassLoader.instantiate_class", patch_instantiate_class)
def test_load_oobs_exception(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoOOBStore(engine)
self.assert_load_exception(store)
@unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
def test_upload_from_file(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoOOBStore(engine)
self.assert_upload_from_file(store, verbose=False)
@unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
def test_upload_from_file_verbose(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoOOBStore(engine)
self.assert_upload_from_file(store, verbose=True)
def patch_load_oobs_from_file(self, filename, verbose):
raise Exception("Mock Exception")
@unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
@patch("programy.storage.stores.nosql.mongo.store.oobs.MongoOOBStore._load_oobs_from_file", patch_load_oobs_from_file)
def test_upload_from_file_exception(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoOOBStore(engine)
self.assert_upload_from_file_exception(store)
|
evennia/help/models.py | Jaykingamez/evennia | 1,544 | 12720555 | """
Models for the help system.
The database-tied help system is only half of Evennia's help
functionality, the other one being the auto-generated command help
that is created on the fly from each command's `__doc__` string. The
persistent database system defined here is intended for all other
forms of help that do not concern commands, like information about the
game world, policy info, rules and similar.
"""
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.urls import reverse
from django.utils.text import slugify
from evennia.utils.idmapper.models import SharedMemoryModel
from evennia.help.manager import HelpEntryManager
from evennia.typeclasses.models import Tag, TagHandler, AliasHandler
from evennia.locks.lockhandler import LockHandler
from evennia.utils.utils import lazy_property
__all__ = ("HelpEntry",)
# ------------------------------------------------------------
#
# HelpEntry
#
# ------------------------------------------------------------
class HelpEntry(SharedMemoryModel):
"""
A generic help entry.
An HelpEntry object has the following properties defined:
key - main name of entry
help_category - which category entry belongs to (defaults to General)
entrytext - the actual help text
permissions - perm strings
Method:
access
"""
#
# HelpEntry Database Model setup
#
#
# These database fields are all set using their corresponding properties,
# named same as the field, but withtout the db_* prefix.
# title of the help entry
db_key = models.CharField(
"help key", max_length=255, unique=True, help_text="key to search for"
)
# help category
db_help_category = models.CharField(
"help category",
max_length=255,
default="General",
help_text="organizes help entries in lists",
)
# the actual help entry text, in any formatting.
db_entrytext = models.TextField(
"help entry", blank=True, help_text="the main body of help text"
)
# lock string storage
db_lock_storage = models.TextField("locks", blank=True, help_text="normally view:all().")
# tags are primarily used for permissions
db_tags = models.ManyToManyField(
Tag,
blank=True,
help_text="tags on this object. Tags are simple string markers to identify, group and alias objects.",
)
# (deprecated, only here to allow MUX helpfile load (don't use otherwise)).
# TODO: remove this when not needed anymore.
db_staff_only = models.BooleanField(default=False)
# Database manager
objects = HelpEntryManager()
_is_deleted = False
# lazy-loaded handlers
@lazy_property
def locks(self):
return LockHandler(self)
@lazy_property
def tags(self):
return TagHandler(self)
@lazy_property
def aliases(self):
return AliasHandler(self)
class Meta(object):
"Define Django meta options"
verbose_name = "Help Entry"
verbose_name_plural = "Help Entries"
#
#
# HelpEntry main class methods
#
#
def __str__(self):
return self.key
def __repr__(self):
return "%s" % self.key
def access(self, accessing_obj, access_type="read", default=False):
"""
Determines if another object has permission to access.
accessing_obj - object trying to access this one
access_type - type of access sought
default - what to return if no lock of access_type was found
"""
return self.locks.check(accessing_obj, access_type=access_type, default=default)
#
# Web/Django methods
#
def web_get_admin_url(self):
"""
Returns the URI path for the Django Admin page for this object.
ex. Account#1 = '/admin/accounts/accountdb/1/change/'
Returns:
path (str): URI path to Django Admin page for object.
"""
content_type = ContentType.objects.get_for_model(self.__class__)
return reverse(
"admin:%s_%s_change" % (content_type.app_label, content_type.model), args=(self.id,)
)
@classmethod
def web_get_create_url(cls):
"""
Returns the URI path for a View that allows users to create new
instances of this object.
ex. Chargen = '/characters/create/'
For this to work, the developer must have defined a named view somewhere
in urls.py that follows the format 'modelname-action', so in this case
a named view of 'character-create' would be referenced by this method.
ex.
url(r'characters/create/', ChargenView.as_view(), name='character-create')
If no View has been created and defined in urls.py, returns an
HTML anchor.
This method is naive and simply returns a path. Securing access to
the actual view and limiting who can create new objects is the
developer's responsibility.
Returns:
path (str): URI path to object creation page, if defined.
"""
try:
return reverse("%s-create" % slugify(cls._meta.verbose_name))
except:
return "#"
def web_get_detail_url(self):
"""
Returns the URI path for a View that allows users to view details for
this object.
ex. Oscar (Character) = '/characters/oscar/1/'
For this to work, the developer must have defined a named view somewhere
in urls.py that follows the format 'modelname-action', so in this case
a named view of 'character-detail' would be referenced by this method.
ex.
url(r'characters/(?P<slug>[\w\d\-]+)/(?P<pk>[0-9]+)/$',
CharDetailView.as_view(), name='character-detail')
If no View has been created and defined in urls.py, returns an
HTML anchor.
This method is naive and simply returns a path. Securing access to
the actual view and limiting who can view this object is the developer's
responsibility.
Returns:
path (str): URI path to object detail page, if defined.
"""
try:
return reverse(
"%s-detail" % slugify(self._meta.verbose_name),
kwargs={"category": slugify(self.db_help_category), "topic": slugify(self.db_key)},
)
except Exception as e:
print(e)
return "#"
def web_get_update_url(self):
"""
Returns the URI path for a View that allows users to update this
object.
ex. Oscar (Character) = '/characters/oscar/1/change/'
For this to work, the developer must have defined a named view somewhere
in urls.py that follows the format 'modelname-action', so in this case
a named view of 'character-update' would be referenced by this method.
ex.
url(r'characters/(?P<slug>[\w\d\-]+)/(?P<pk>[0-9]+)/change/$',
CharUpdateView.as_view(), name='character-update')
If no View has been created and defined in urls.py, returns an
HTML anchor.
This method is naive and simply returns a path. Securing access to
the actual view and limiting who can modify objects is the developer's
responsibility.
Returns:
path (str): URI path to object update page, if defined.
"""
try:
return reverse(
"%s-update" % slugify(self._meta.verbose_name),
kwargs={"category": slugify(self.db_help_category), "topic": slugify(self.db_key)},
)
except:
return "#"
def web_get_delete_url(self):
"""
Returns the URI path for a View that allows users to delete this object.
ex. Oscar (Character) = '/characters/oscar/1/delete/'
For this to work, the developer must have defined a named view somewhere
in urls.py that follows the format 'modelname-action', so in this case
a named view of 'character-detail' would be referenced by this method.
ex.
url(r'characters/(?P<slug>[\w\d\-]+)/(?P<pk>[0-9]+)/delete/$',
CharDeleteView.as_view(), name='character-delete')
If no View has been created and defined in urls.py, returns an
HTML anchor.
This method is naive and simply returns a path. Securing access to
the actual view and limiting who can delete this object is the developer's
responsibility.
Returns:
path (str): URI path to object deletion page, if defined.
"""
try:
return reverse(
"%s-delete" % slugify(self._meta.verbose_name),
kwargs={"category": slugify(self.db_help_category), "topic": slugify(self.db_key)},
)
except:
return "#"
# Used by Django Sites/Admin
get_absolute_url = web_get_detail_url
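# Usage sketch (hedged; field values and the `account` accessor are
# illustrative): create an entry and check whether an account may read it.
#
#   entry = HelpEntry.objects.create(db_key="rules",
#                                    db_help_category="General",
#                                    db_entrytext="Be excellent to each other.")
#   readable = entry.access(account, "read")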
|
api_tests/requests/views/test_request_action_list.py | gaybro8777/osf.io | 628 | 12720576 | import pytest
from api.base.settings.defaults import API_BASE
from api_tests.requests.mixins import PreprintRequestTestMixin
@pytest.mark.enable_quickfiles_creation
@pytest.mark.django_db
class TestPreprintRequestActionList(PreprintRequestTestMixin):
def url(self, request):
return '/{}requests/{}/actions/'.format(API_BASE, request._id)
def test_nonmod_nonadmin_nonrequester_cannot_view(self, app, noncontrib, write_contrib, pre_request, post_request, none_request):
for request in [pre_request, post_request, none_request]:
for user in [noncontrib, write_contrib]:
res = app.get(self.url(request), auth=user.auth, expect_errors=True)
assert res.status_code == 403
def test_mod_can_view(self, app, moderator, pre_request, post_request, auto_approved_pre_request):
for request in [pre_request, post_request]:
res = app.get(self.url(request), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['attributes']['auto'] is False
res = app.get(self.url(auto_approved_pre_request), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 2
assert res.json['data'][0]['attributes']['auto'] is True
def test_admin_can_view(self, app, admin, pre_request, post_request, none_request, auto_approved_pre_request):
for request in [pre_request, post_request, none_request]:
res = app.get(self.url(request), auth=admin.auth)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['attributes']['auto'] is False
res = app.get(self.url(auto_approved_pre_request), auth=admin.auth)
assert res.status_code == 200
assert len(res.json['data']) == 2
assert res.json['data'][0]['attributes']['auto'] is True
def test_nonadmin_requester_can_view(self, app, requester, nonadmin_pre_request, nonadmin_post_request, nonadmin_none_request, nonadmin_auto_approved_pre_request):
for request in [nonadmin_pre_request, nonadmin_post_request, nonadmin_none_request]:
res = app.get(self.url(request), auth=requester.auth)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['attributes']['auto'] is False
res = app.get(self.url(nonadmin_auto_approved_pre_request), auth=requester.auth)
assert res.status_code == 200
assert len(res.json['data']) == 2
assert res.json['data'][0]['attributes']['auto'] is True
|
packages/core/minos-microservice-common/tests/test_common/test_database/test_locks.py | minos-framework/minos-python | 247 | 12720591 | import unittest
from minos.common import (
DatabaseLock,
Lock,
)
from minos.common.testing import (
MockedDatabaseClient,
)
class TestDatabaseLock(unittest.IsolatedAsyncioTestCase):
def test_base(self):
self.assertTrue(issubclass(DatabaseLock, Lock))
async def test_client(self):
client = MockedDatabaseClient()
lock = DatabaseLock(client, "foo")
self.assertEqual(client, lock.client)
async def test_key(self):
client = MockedDatabaseClient()
lock = DatabaseLock(client, "foo")
self.assertEqual("foo", lock.key)
async def test_key_raises(self):
client = MockedDatabaseClient()
with self.assertRaises(ValueError):
DatabaseLock(client, [])
async def test_hashed_key(self):
client = MockedDatabaseClient()
lock = DatabaseLock(client, "foo")
self.assertEqual(hash("foo"), lock.hashed_key)
if __name__ == "__main__":
unittest.main()
|
tests/test_coder.py | ohduran/ring | 450 | 12720595 | import sys
import ring
from ring.coder import (
Registry, Coder, JsonCoder, coderize, registry as default_registry)
import pytest
def test_coder_registry():
registry = Registry()
error_coder = None, None
registry.register('_error', error_coder)
assert registry.get('_error') == (None, None)
tuple_coder = lambda x: x, lambda x: x # noqa
registry.register('tuple', tuple_coder)
class NewStaticCoder(Coder):
@staticmethod
def encode(d):
return d
@staticmethod
def decode(d):
return d
registry.register('new_static', NewStaticCoder)
registry.register('new_static_obj', NewStaticCoder())
class NewCoder(Coder):
def encode(self, x):
return x
def decode(self, x):
return x
registry.register('new_obj', NewCoder())
def test_coder_json():
coder = default_registry.get('json')
assert b'1' == coder.encode(1)
assert 1 == coder.decode(b'1')
assert b'{"x": 1}' == coder.encode({'x': 1})
assert {'x': 1} == coder.decode(b'{"x": 1}')
def test_coder_pickle():
import memcache
import datetime
coder = default_registry.get('pickle')
mc = memcache.Client(['127.0.0.1:11211'])
@ring.memcache(mc, coder='pickle')
def now():
return datetime.datetime.now()
now.delete()
dt_now = now()
direct_data = mc.get(now.key())
assert direct_data
encoded_data = coder.encode(dt_now)
assert encoded_data == direct_data
decoded_data = coder.decode(encoded_data)
assert decoded_data == dt_now
def test_ring_bare_coder():
@ring.dict({}, coder=JsonCoder)
def f():
return 10
assert f() == 10
if sys.version_info >= (3, 7):
from tests._test_module_py37 import DataClass
def test_dataclass_coder():
coder = default_registry.get('dataclass')
dataclass = DataClass('name', 1, {'test': 1})
encoded_dataclass = coder.encode(dataclass)
assert b'["DataClass", {"name": "name", "my_int": 1, "my_dict": {"test": 1}}]' == encoded_dataclass
decoded_dataclass = coder.decode(encoded_dataclass)
assert 'DataClass' == type(decoded_dataclass).__name__
assert decoded_dataclass.name == 'name'
assert decoded_dataclass.my_int == 1
assert decoded_dataclass.my_dict == {'test': 1}
def test_unexisting_coder():
cache = {}
with pytest.raises(TypeError):
@ring.dict(cache, coder='messed-up')
def f():
pass
@pytest.mark.parametrize('raw_coder', [
JsonCoder,
])
def test_coderize(raw_coder):
assert raw_coder
assert isinstance(coderize(raw_coder), Coder)
def test_invalid_coderize():
with pytest.raises(TypeError):
coderize(1)
|
src/orders/migrations/0011_NotificationToGiverIsSent.py | denkasyanov/education-backend | 151 | 12720604 | # Generated by Django 3.1.4 on 2020-12-31 21:27
from django.db import migrations, models
def mark_giver_notification_as_sent_for_all_previous_orders(apps, schema_editor):
apps.get_model('orders.Order').objects.filter(giver__isnull=False) \
.update(notification_to_giver_is_sent=True)
class Migration(migrations.Migration):
dependencies = [
('orders', '0010_OrderI18n'),
]
operations = [
migrations.AddField(
model_name='order',
name='notification_to_giver_is_sent',
field=models.BooleanField(default=False),
),
migrations.RunPython(mark_giver_notification_as_sent_for_all_previous_orders),
]
|
linter.py | phpdude/api-blueprint-sublime-plugin | 130 | 12720617 |
#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by <NAME>
# Copyright (c) 2013 <NAME>
# https://github.com/WMeldon/SublimeLinter-apiblueprint
#
# Modified by <NAME>
#
# License: MIT
"""This module exports the Apiblueprint plugin class."""
def ApiBlueprintFactory():
""" Define API Blueprint Linter Class"""
class ApiBlueprint(Linter):
"""Provides an interface to apiblueprint."""
syntax = 'apiblueprint'
cmd = 'drafter --validate'
executable = 'drafter'
executable = None
regex = (
r'(?:(?P<warning>warning)|(?P<error>error)):\s*\((?P<code>\d+)\)'
r'(?P<message>.+?)(?::|$)'
r'(?P<line>\d+):(?P<col>\d+)(?:.*)'
)
multiline = False
line_col_base = (0, 0)
tempfile_suffix = None
error_stream = util.STREAM_BOTH
selectors = {}
word_re = None
defaults = {}
inline_settings = None
inline_overrides = None
comment_re = None
def split_match(self, match):
"""
Run default match. If match is found, convert line variable to line number
and adjust col.
"""
match, line, col, error, warning, message, near = super().split_match(match)
if line is not None:
line, col = self.view.rowcol((int(line)))
line = int(line) - self.line_col_base[0]
return match, line, col, error, warning, message, near
try:
"""Attempt to import SublimeLinter3"""
from SublimeLinter.lint import Linter, util
ApiBlueprintFactory()
except ImportError:
print("No SublimeLinter3 installed - Install SublimeLinter3 to lint your API blueprints (ST3 Only)")
|
django_linter/transformers/__init__.py | enefeq/django_linter | 101 | 12720624 | from __future__ import (absolute_import, division,
print_function, unicode_literals)
from astroid import MANAGER, Class, CallFunc
from .models import transform_model_class
from .testing import transform_test_response
from .factories import transform_factory_return
def register(linter):
MANAGER.register_transform(Class, transform_model_class)
MANAGER.register_transform(Class, transform_test_response)
MANAGER.register_transform(CallFunc, transform_factory_return)
|
tests/test_api.py | Sunkist-Cherry/Spam-Filter | 433 | 12720635 | import unittest
from unittest import mock
import cherry
class ApiTest(unittest.TestCase):
def setUp(self):
self.model = 'foo'
self.text = 'random string'
@mock.patch('cherry.api.Classify')
def test_classify_api(self, mock_classify):
cherry.classify(model=self.model, text=self.text)
mock_classify.assert_called_once_with(model=self.model, text=self.text)
@mock.patch('cherry.api.Trainer')
def test_train_api(self, mock_train):
cherry.train(model=self.model)
mock_train.assert_called_once_with(
self.model, categories=None, clf=None, clf_method='MNB',
encoding='utf-8', language='English', preprocessing=None, vectorizer=None,
vectorizer_method='Count', x_data=None, y_data=None)
@mock.patch('cherry.api.Trainer')
def test_api_call_model_clf_vectorizer(self, mock_trainer):
cherry.train('foo', clf='clf', vectorizer='vectorizer')
mock_trainer.assert_called_with(
'foo', preprocessing=None, categories=None, encoding='utf-8', clf='clf', clf_method='MNB', language='English',
vectorizer='vectorizer', vectorizer_method='Count', x_data=None, y_data=None)
@mock.patch('cherry.api.Performance')
def test_performance_api(self, mock_performance):
cherry.performance(model=self.model)
mock_performance.assert_called_once_with(
self.model, categories=None, clf=None, clf_method='MNB', encoding='utf-8',
language='English', n_splits=10, output='Stdout', preprocessing=None,
vectorizer=None, vectorizer_method='Count', x_data=None, y_data=None)
@mock.patch('cherry.api.Performance')
def test_performance_api_model_clf_vectorizer(self, mock_performance):
cherry.performance('foo', clf='clf', vectorizer='vectorizer')
mock_performance.assert_called_with(
'foo', categories=None, clf='clf', clf_method='MNB',
encoding='utf-8', language='English', n_splits=10,
output='Stdout', preprocessing=None, vectorizer='vectorizer',
vectorizer_method='Count', x_data=None, y_data=None)
@mock.patch('cherry.api.Search')
def test_api_call(self, mock_search):
cherry.search('foo', {'foo': 'bar'})
mock_search.assert_called_with(
'foo', {'foo': 'bar'}, categories=None, clf=None, clf_method='MNB', cv=3,
encoding='utf-8', language='English', method='RandomizedSearchCV', n_jobs=-1,
preprocessing=None, vectorizer=None, vectorizer_method='Count', x_data=None, y_data=None)
@mock.patch('cherry.api.Display')
def test_display_api(self, mock_display):
cherry.display(model=self.model)
mock_display.assert_called_once_with(
self.model, categories=None, clf=None, clf_method='MNB',
encoding='utf-8', language='English', preprocessing=None,
vectorizer=None, vectorizer_method='Count', x_data=None, y_data=None)
|
sdk/python/ekuiper/runtime/sink.py | MonicaisHer/ekuiper | 250 | 12720640 | # Copyright 2021 EMQ Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
from . import reg
from .connection import SinkChannel
from .symbol import SymbolRuntime, parse_context
from ..sink import Sink
class SinkRuntime(SymbolRuntime):
def __init__(self, ctrl: dict, s: Sink):
ctx = parse_context(ctrl)
config = {}
if 'config' in ctrl:
config = ctrl['config']
s.configure(config)
ch = SinkChannel(ctrl['meta'])
self.s = s
self.ctx = ctx
self.ch = ch
self.running = False
self.key = f"{ctrl['meta']['ruleId']}_{ctrl['meta']['opId']}" \
f"_{ctrl['meta']['instanceId']}_{ctrl['symbolName']}"
def run(self):
logging.info('start running sink')
# noinspection PyBroadException
try:
self.s.open(self.ctx)
self.running = True
reg.setr(self.key, self)
while True:
msg = self.ch.recv()
self.s.collect(self.ctx, msg)
except Exception:
"""two occasions: normal stop will close socket to raise an error
OR stopped by unexpected error"""
if self.running:
logging.error(traceback.format_exc())
finally:
if self.running:
self.stop()
def stop(self):
self.running = False
# noinspection PyBroadException
try:
self.s.close(self.ctx)
self.ch.close()
reg.delete(self.key)
except Exception:
logging.error(traceback.format_exc())
def is_running(self) -> bool:
return self.running
|
cryptodetector/method.py | nondejus/crypto-detector | 109 | 12720649 |
"""
Copyright (c) 2017 Wind River Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
OR CONDITIONS OF ANY KIND, either express or implied.
"""
from abc import ABCMeta, abstractmethod
from cryptodetector.exceptions import InvalidMethodException
class MethodFactory(ABCMeta):
"""Meta class creating a method class. Keeps track of all child classes that inherit from Method
for later reference.
"""
def __new__(mcs, clsname, bases, dct):
if not hasattr(MethodFactory, "method_classes"):
MethodFactory.method_classes = []
method_class = super(MethodFactory, mcs).__new__(mcs, clsname, bases, dct)
if bases:
if not hasattr(method_class, "method_id"):
raise InvalidMethodException("Method " + clsname + " requires " \
+ "'method_id' attribute.")
if method_class.method_id in [mc.method_id for mc in MethodFactory.method_classes]:
raise InvalidMethodException("Method " + clsname + " has duplicate method_id '" \
+ method_class.method_id \
+ "'. method_id must be unique across all available methods.")
MethodFactory.method_classes.append(method_class)
return method_class
class Method(metaclass=MethodFactory):
"""Abstract base class providing the interface for a method
"""
# list of evidence types that all methods should ignore
ignore_evidence_types = []
@abstractmethod
def supports_scanning_file(self, language):
"""Indicates whether this method supports scanning a
file in the given language
Args:
            language: language of the content (see languages.py)
Returns:
(bool) whether it supports scanning a file in the given language
"""
pass
@abstractmethod
def search(self, content, language):
"""Search and find all matches in the content
Args:
content: the content to be scanned. Its type is string for text files and raw
byte array for binary files.
            language: language of the content (see languages.py)
Returns:
(list) list of matches. A match is a dict object containing all the output fields.
"""
pass
@abstractmethod
def quick_search(self, content, language):
"""Quick search the content in the given language
Args:
content: the content to be scanned. Its type is string for text files and raw
byte array for binary files.
            language: language of the content (see languages.py)
Returns:
(bool) whether it found any matches in the content
"""
pass
|
tapiriik/services/BeginnerTriathlete/__init__.py | prohfesor/tapiriik | 1,445 | 12720655 | from .beginnertriathlete import *
|
utils/accumulators.py | iam-sr13/attention-cnn | 974 | 12720661 | <filename>utils/accumulators.py
import torch
from copy import deepcopy
class Mean:
"""
Running average of the values that are 'add'ed
"""
def __init__(self, update_weight=1):
"""
:param update_weight: 1 for normal, 2 for t-average
"""
self.average = None
self.counter = 0
self.update_weight = update_weight
def add(self, value, weight=1):
"""Add a value to the accumulator"""
self.counter += weight
if self.average is None:
self.average = deepcopy(value)
else:
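            # incremental running-mean update: with update_weight == 1 this is the
            # standard online average; update_weight == 2 weights recent values more
            # strongly (the "t-average" mentioned in __init__)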
delta = value - self.average
self.average += delta * self.update_weight * weight / (self.counter + self.update_weight - 1)
if isinstance(self.average, torch.Tensor):
                self.average = self.average.detach()  # detach() is not in-place; rebind to drop autograd history
def value(self):
"""Access the current running average"""
return self.average
class Max:
"""
Keeps track of the max of all the values that are 'add'ed
"""
def __init__(self):
self.max = None
def add(self, value):
"""
Add a value to the accumulator.
:return: `true` if the provided value became the new max
"""
if self.max is None or value > self.max:
self.max = deepcopy(value)
return True
else:
return False
def value(self):
"""Access the current running average"""
return self.max
|
examples/list_documents.py | MrTeferi/photoshop-python-api | 270 | 12720668 | """List current photoshop all documents."""
# Import local modules
import photoshop.api as ps
app = ps.Application()
doc = app.documents[0]
print(doc.name)
for doc in app.documents:
print(doc.name)
|
sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/__init__.py | rsdoherty/azure-sdk-for-python | 2,728 | 12720690 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from ._mesh_secret_operations import MeshSecretOperations
from ._mesh_secret_value_operations import MeshSecretValueOperations
from ._mesh_volume_operations import MeshVolumeOperations
from ._mesh_network_operations import MeshNetworkOperations
from ._mesh_application_operations import MeshApplicationOperations
from ._mesh_service_operations import MeshServiceOperations
from ._mesh_code_package_operations import MeshCodePackageOperations
from ._mesh_service_replica_operations import MeshServiceReplicaOperations
from ._mesh_gateway_operations import MeshGatewayOperations
from ._service_fabric_client_ap_is_operations import ServiceFabricClientAPIsOperationsMixin
__all__ = [
'MeshSecretOperations',
'MeshSecretValueOperations',
'MeshVolumeOperations',
'MeshNetworkOperations',
'MeshApplicationOperations',
'MeshServiceOperations',
'MeshCodePackageOperations',
'MeshServiceReplicaOperations',
'MeshGatewayOperations',
'ServiceFabricClientAPIsOperationsMixin',
]
|
examples/xml/soap12-mtom/soap12_mtom.py | edustaff/spyne | 786 | 12720765 | <gh_stars>100-1000
#!/usr/bin/env python
import logging
logger = logging.getLogger(__name__)
import os
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('spyne.protocol.xml').setLevel(logging.DEBUG)
from spyne.application import Application
from spyne.decorator import rpc
from spyne.service import ServiceBase
from spyne.model.complex import ComplexModel
from spyne.model.binary import ByteArray
from spyne.model.primitive import Unicode
from spyne.server.wsgi import WsgiApplication
from spyne.protocol.soap import Soap12
tns = 'http://gib.gov.tr/vedop3/eFatura'
class documentResponse(ComplexModel):
msg = Unicode
hash = ByteArray
class GIBSoapService(ServiceBase):
@rpc(Unicode(sub_name="fileName"), ByteArray(sub_name='binaryData'),
ByteArray(sub_name="hash"), _returns=documentResponse)
def documentRequest(ctx, file_name, file_data, data_hash):
incoming_invoice_dir = os.getcwd()
logger.info("file_name %r" % file_name)
logger.info("file_hash: %r" % data_hash)
path = os.path.join(incoming_invoice_dir, file_name)
f = open(path, 'wb')
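        # spyne delivers ByteArray values as an iterable of byte chunks (the MTOM
        # attachment parts), so the file is written chunk by chunk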
for data in file_data:
f.write(data)
logger.info("File written: %r" % file_name)
f.close()
resp = documentResponse()
resp.msg = "Document was written successfully"
resp.hash = data_hash
return resp
application = Application([GIBSoapService], tns=tns,
in_protocol=Soap12(),
out_protocol=Soap12())
gib_application = WsgiApplication(application)
from wsgiref.simple_server import make_server
server = make_server('0.0.0.0', 8000, gib_application)
server.serve_forever()
|
recipes/Python/576977_Context_manager_restoring/recipe-576977.py | tdiprima/code | 2,023 | 12720772 | <gh_stars>1000+
from __future__ import with_statement
from contextlib import contextmanager
import sys
__docformat__ = "restructuredtext"
@contextmanager
def restoring(expr, clone=None):
'''A context manager that evaluates an expression when entering the runtime
context and restores its value when exiting.
This context manager makes
.. python::
with restoring(expr, clone) as value:
BODY
a shortcut for
.. python::
value = EXPR
__cloned = clone(value) if clone is not None else value
try:
BODY
finally:
EXPR = __cloned
del __cloned
where ``__cloned`` is a temporary hidden name and ``EXPR`` is ``expr``
substituted textually in the code snippet. Therefore ``expr`` can only be an
assignable expression, i.e. an expression that is allowed on the left hand
side of '=' (e.g. identifier, subscription, attribute reference, etc.).
:param expr: The expression whose value is to be evaluated and restored.
:type expr: str
:param clone: A callable that takes the object ``expr`` evaluates to and
returns an appropriate copy to be used for restoring. If None, the
original object is used.
:type clone: callable or None
'''
f = sys._getframe(2) # bypass the contextmanager frame
# evaluate the expression and make a clone of the value to be restored
value = eval(expr, f.f_globals, f.f_locals)
restored_value = clone(value) if clone is not None else value
try:
yield value
finally:
if expr in f.f_locals: # local or nonlocal name
_update_locals(f, {expr:restored_value})
elif expr in f.f_globals: # global name
f.f_globals[expr] = restored_value
else:
# make a copy of f_locals and bind restored_value to a new name
tmp_locals = dict(f.f_locals)
tmp_name = '__' + min(tmp_locals)
tmp_locals[tmp_name] = restored_value
exec '%s = %s' % (expr, tmp_name) in f.f_globals, tmp_locals
def _update_locals(frame, new_locals, clear=False):
# XXX: obscure, most likely implementation-dependent fact:
# f_locals can be modified (only?) from within a trace function
f_trace = frame.f_trace
try:
sys_trace = sys.gettrace()
except AttributeError: # sys.gettrace() not available before 2.6
sys_trace = None
def update_tracer(frm, event, arg):
# Update the frame's locals and restore both the local and the system's
#trace function
assert frm is frame
if clear:
frm.f_locals.clear()
frm.f_locals.update(new_locals)
frm.f_trace = f_trace
sys.settrace(sys_trace)
# Force tracing on with setting the global tracing function and set
# the frame's local trace function
sys.settrace(lambda frame, event, arg: None)
frame.f_trace = update_tracer
def test_restoring_immutable():
x = 'b'
foo = {'a':3, 'b':4}
with restoring('foo[x]') as y:
assert y == foo[x] == 4
foo[x] = y = None
assert y == foo[x] == None
assert foo[x] == 4 and y == None
assert sorted(locals()) == ['foo', 'x', 'y']
def test_restoring_mutable():
orig_path = sys.path[:]
with restoring('sys.path', clone=list) as path:
assert path is sys.path
path += ['foo']
assert path == orig_path + ['foo']
assert sys.path == orig_path
assert path == orig_path + ['foo']
assert sorted(locals()) == ['orig_path', 'path']
x = 1
def test_restoring_global():
global y; y = 2
global x
with restoring('x'):
x = None
with restoring('y'):
y += 3
assert x == None and y == 5
assert y == 2
assert x == 1
assert not locals()
def test_restoring_local():
x = 5
with restoring('x'):
x = None
assert x == 5
assert sorted(locals()) == ['x']
def test_restoring_nonlocal():
a = []
def nested():
with restoring('a', list):
a.append(1)
assert a == [1]
assert a == []
nested()
assert a == []
if __name__ == '__main__':
test_restoring_immutable()
test_restoring_mutable()
test_restoring_global()
test_restoring_local()
test_restoring_nonlocal()
|
okta/exceptions/__init__.py | corylevine/okta-sdk-python | 145 | 12720821 | <filename>okta/exceptions/__init__.py
from . exceptions import HTTPException, OktaAPIException # noqa
|
tmtoolkit/__main__.py | samir-joshi/tmtoolkit | 167 | 12720842 | """
tmtoolkit – Text Mining and Topic Modeling Toolkit for Python
CLI module
<NAME> <<EMAIL>>
"""
if __name__ == '__main__':
import sys
import subprocess
import json
from tmtoolkit.preprocess import DEFAULT_LANGUAGE_MODELS
def _setup(args):
try:
import spacy
from spacy.cli.download import download
except ImportError:
print('error: required package "spacy" is not installed', file=sys.stderr)
exit(1)
if not args:
print('error: you must pass a list of two-letter ISO 639-1 language codes to install the respective '
'language models or the string "all" to install all available language models', file=sys.stderr)
exit(2)
else:
try:
args.pop(args.index('--no-update'))
no_update = True
except ValueError:
no_update = False
if args == ['all']:
install_languages = list(DEFAULT_LANGUAGE_MODELS.keys())
else:
install_languages = []
for arg in args:
install_languages.extend([l for l in map(str.strip, arg.split(',')) if l])
print('checking if required spaCy data packages are installed...')
try:
piplist_str = subprocess.check_output([sys.executable, '-m', 'pip', 'list',
'--disable-pip-version-check',
'--format', 'json'])
except subprocess.CalledProcessError as exc:
print('error: calling pip failed with the following error message\n' + str(exc), file=sys.stderr)
exit(3)
piplist = json.loads(piplist_str)
installed_pkgs = set(item['name'] for item in piplist)
model_pkgs = dict(zip(DEFAULT_LANGUAGE_MODELS.keys(),
map(lambda x: x.replace('_', '-') + '-sm', DEFAULT_LANGUAGE_MODELS.values())))
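            # pip package names use dashes and the '-sm' (small model) suffix,
            # e.g. 'en_core_web' -> 'en-core-web-sm'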
for lang in install_languages:
if lang not in DEFAULT_LANGUAGE_MODELS.keys():
print('error: no language model for language code "%s"' % lang, file=sys.stderr)
exit(4)
lang_model_pkg = model_pkgs[lang]
if no_update and lang_model_pkg in installed_pkgs:
print('language model package "%s" for language code "%s" is already installed -- skipping'
% (lang_model_pkg, lang))
continue
lang_model = DEFAULT_LANGUAGE_MODELS[lang] + '_sm'
print('installing language model "%s" for language code "%s"...' % (lang_model, lang))
download(lang_model)
print('done.')
commands = {
'setup': _setup
}
if len(sys.argv) <= 1:
print('available commands: ' + ', '.join(commands.keys()))
exit(1)
cmd = sys.argv[1]
if cmd in commands.keys():
commands[cmd](sys.argv[2:])
else:
print('command not supported:', cmd, file=sys.stderr)
exit(2)
|
tests/test_data_table.py | figufema/TesteClone | 1,521 | 12720845 | # Copyright 2019 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for google.colab.data_table."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import IPython
import pandas as pd
from google.colab import data_table
# pylint:disable=g-import-not-at-top
try:
from unittest import mock # pylint:disable=g-importing-member
except ImportError:
import mock
# pylint:enable=g-import-not-at-top
class DataTableTest(unittest.TestCase):
def setUp(self):
super(DataTableTest, self).setUp()
self.ip_patcher = mock.patch.object(IPython, 'get_ipython', autospec=True)
get_ipython = self.ip_patcher.start()
get_ipython.return_value = IPython.InteractiveShell()
def tearDown(self):
self.ip_patcher.stop()
super(DataTableTest, self).tearDown()
def testDataTable(self):
df = pd.DataFrame({
'x': [12345, 23456, 34567],
'y': ['abcde', 'bcdef', 'cdefg']
})
dt = data_table.DataTable(df)
html = dt._repr_html_()
for col in df.columns:
for val in df[col]:
self.assertIn('{}'.format(val), html)
def testFormatterEnableDisable(self):
def get_formatter():
key = data_table._JAVASCRIPT_MODULE_MIME_TYPE
formatters = IPython.get_ipython().display_formatter.formatters
if key in formatters:
return formatters[key].for_type_by_name('pandas.core.frame',
'DataFrame')
else:
return None
# default formatter is None.
self.assertIsNone(get_formatter())
# enabling changes the formatter.
data_table.enable_dataframe_formatter()
# classmethod identity is not preserved; compare reprs:
self.assertEqual(
repr(get_formatter()), repr(data_table.DataTable.formatter))
# disabling restores the default.
data_table.disable_dataframe_formatter()
self.assertIsNone(get_formatter())
|
keystone/credential/schema.py | ferag/keystone | 615 | 12720860 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
_credential_properties = {
'blob': {
'type': 'string'
},
'project_id': {
'type': 'string'
},
'type': {
'type': 'string'
},
'user_id': {
'type': 'string'
}
}
credential_create = {
'type': 'object',
'properties': _credential_properties,
'additionalProperties': True,
'oneOf': [
{
'title': 'ec2 credential requires project_id',
'required': ['blob', 'type', 'user_id', 'project_id'],
'properties': {
'type': {
'enum': ['ec2']
}
}
},
{
'title': 'non-ec2 credential does not require project_id',
'required': ['blob', 'type', 'user_id'],
'properties': {
'type': {
'not': {
'enum': ['ec2']
}
}
}
}
]
}
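# Illustrative payload accepted by credential_create (example only, not from the source):
# {"blob": "{\"access\": \"...\", \"secret\": \"...\"}", "type": "ec2",
#  "user_id": "<user uuid>", "project_id": "<project uuid>"}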
credential_update = {
'type': 'object',
'properties': _credential_properties,
'minProperties': 1,
'additionalProperties': True
}
|
rman_operators/rman_operators_editors/rman_operators_editors_vol_aggregates.py | prman-pixar/RenderManForBlender | 432 | 12720865 | <gh_stars>100-1000
from bpy.props import (StringProperty, BoolProperty, EnumProperty, IntProperty)
from ...rman_ui.rman_ui_base import CollectionPanel
from ...rfb_logger import rfb_log
from ...rman_operators.rman_operators_collections import return_empty_list
from ...rman_config import __RFB_CONFIG_DICT__ as rfb_config
from ...rfb_utils import object_utils
from ...rfb_utils import scene_utils
import bpy
import re
class RENDERMAN_UL_Volume_Aggregates_List(bpy.types.UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
custom_icon = 'OBJECT_DATAMODE'
layout.context_pointer_set("selected_obj", item.ob_pointer)
op = layout.operator('renderman.remove_from_vol_aggregate', text='', icon='REMOVE')
label = item.ob_pointer.name
layout.label(text=label, icon=custom_icon)
class PRMAN_OT_Renderman_Open_Volume_Aggregates_Editor(CollectionPanel, bpy.types.Operator):
bl_idname = "scene.rman_open_vol_aggregates_editor"
bl_label = "RenderMan Volume Aggregates Editor"
bl_description = "Volume Aggregates Editor"
def updated_object_selected_name(self, context):
ob = context.scene.objects.get(self.selected_obj_name, None)
if not ob:
return
if context.view_layer.objects.active:
context.view_layer.objects.active.select_set(False)
ob.select_set(True)
context.view_layer.objects.active = ob
def obj_list_items(self, context):
pattern = re.compile(self.object_search_filter)
scene = context.scene
rm = scene.renderman
if self.do_object_filter and self.object_search_filter == '':
return return_empty_list(label='No Objects Found')
group = rm.vol_aggregates[rm.vol_aggregates_index]
objs_in_group = []
for member in group.members:
objs_in_group.append(member.ob_pointer.name)
items = []
for ob in scene_utils.get_all_volume_objects(scene):
ob_name = ob.name
if ob_name not in objs_in_group:
if self.do_object_filter and not re.match(pattern, ob_name):
continue
items.append((ob_name, ob_name, ''))
if not items:
return return_empty_list(label='No Objects Found')
elif self.do_object_filter:
items.insert(0, ('0', 'Results (%d)' % len(items), '', '', 0))
else:
items.insert(0, ('0', 'Select Object', '', '', 0))
return items
def update_do_object_filter(self, context):
self.selected_obj_name = '0'
do_object_filter: BoolProperty(name="Object Filter",
description="Search and add multiple objects",
default=False,
update=update_do_object_filter)
object_search_filter: StringProperty(name="Object Filter Search", default="")
selected_obj_name: EnumProperty(name="", items=obj_list_items, update=updated_object_selected_name)
def execute(self, context):
return{'FINISHED'}
def draw(self, context):
layout = self.layout
scene = context.scene
rm = scene.renderman
layout.separator()
self._draw_collection(context, layout, rm, "Volume Aggregates",
"renderman.add_remove_volume_aggregates",
"scene.renderman",
"vol_aggregates", "vol_aggregates_index",
                              default_name='VolumeAggregate_%d' % len(rm.vol_aggregates))
def draw_objects_item(self, layout, context, item):
row = layout.row()
scene = context.scene
rm = scene.renderman
vol_aggregate = rm.vol_aggregates[rm.vol_aggregates_index]
row = layout.row()
row.separator()
row.prop(self, 'do_object_filter', text='', icon='FILTER', icon_only=True)
if not self.do_object_filter:
row.prop(self, 'selected_obj_name', text='')
col = row.column()
if self.selected_obj_name == '0' or self.selected_obj_name == '':
col.enabled = False
op = col.operator("renderman.add_to_vol_aggregate", text='', icon='ADD')
op.open_editor = False
else:
col.context_pointer_set('op_ptr', self)
col.context_pointer_set('selected_obj', scene.objects[self.selected_obj_name])
op = col.operator("renderman.add_to_vol_aggregate", text='', icon='ADD')
op.vol_aggregates_index = rm.vol_aggregates_index
op.do_scene_selected = False
op.open_editor = False
else:
row.prop(self, 'object_search_filter', text='', icon='VIEWZOOM')
row = layout.row()
row.prop(self, 'selected_obj_name')
col = row.column()
if self.selected_obj_name == '0' or self.selected_obj_name == '':
col.enabled = False
op = col.operator("renderman.add_to_vol_aggregate", text='', icon='ADD')
op.open_editor = False
else:
col.context_pointer_set('op_ptr', self)
col.context_pointer_set('selected_obj', scene.objects[self.selected_obj_name])
op = col.operator("renderman.add_to_vol_aggregate", text='', icon='ADD')
op.vol_aggregates_index = rm.vol_aggregates_index
op.do_scene_selected = False
op.open_editor = False
row = layout.row()
row.template_list('RENDERMAN_UL_Volume_Aggregates_List', "",
vol_aggregate, "members", vol_aggregate, 'members_index', rows=6)
def draw_item(self, layout, context, item):
self.draw_objects_item(layout, context, item)
def cancel(self, context):
if self.event and self.event.type == 'LEFTMOUSE':
bpy.ops.scene.rman_open_vol_aggregates_editor('INVOKE_DEFAULT')
def __init__(self):
self.event = None
def invoke(self, context, event):
wm = context.window_manager
width = rfb_config['editor_preferences']['vol_aggregates_editor']['width']
self.event = event
return wm.invoke_props_dialog(self, width=width)
classes = [
PRMAN_OT_Renderman_Open_Volume_Aggregates_Editor,
RENDERMAN_UL_Volume_Aggregates_List
]
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
try:
bpy.utils.unregister_class(cls)
except RuntimeError:
rfb_log().debug('Could not unregister class: %s' % str(cls))
            pass
|
envs/real_net_env.py | zongzefang/deeprl_signal_control | 194 | 12720876 | """
Particular class of real traffic network
@author: <NAME>
"""
import configparser
import logging
import numpy as np
import matplotlib.pyplot as plt
import os
import seaborn as sns
import time
from envs.env import PhaseMap, PhaseSet, TrafficSimulator
from real_net.data.build_file import gen_rou_file
sns.set_color_codes()
STATE_NAMES = ['wave']
# node: (phase key, neighbor list)
NODES = {'10026': ('6.0', ['9431', '9561', 'cluster_9563_9597', '9531']),
'8794': ('4.0', ['cluster_8985_9609', '9837', '9058', 'cluster_9563_9597']),
'8940': ('2.1', ['9007', '9429']),
'8996': ('2.2', ['cluster_9389_9689', '9713']),
'9007': ('2.3', ['9309', '8940']),
'9058': ('4.0', ['cluster_8985_9609', '8794', 'joinedS_0']),
'9153': ('2.0', ['9643']),
'9309': ('4.0', ['9466', '9007', 'cluster_9043_9052']),
'9413': ('2.3', ['9721', '9837']),
'9429': ('5.0', ['cluster_9043_9052', 'joinedS_1', '8940']),
'9431': ('2.4', ['9721', '9884', '9561', '10026']),
'9433': ('2.5', ['joinedS_1']),
'9466': ('4.0', ['9309', 'joinedS_0', 'cluster_9043_9052']),
'9480': ('2.3', ['8996', '9713']),
'9531': ('2.6', ['joinedS_1', '10026']),
'9561': ('4.0', ['cluster_9389_9689', '10026', '9431', '9884']),
'9643': ('2.3', ['9153']),
'9713': ('3.0', ['9721', '9884', '8996']),
'9721': ('6.0', ['9431', '9713', '9413']),
'9837': ('3.1', ['9413', '8794', 'cluster_8985_9609']),
'9884': ('2.7', ['9713', '9431', 'cluster_9389_9689', '9561']),
'cluster_8751_9630': ('4.0', ['cluster_9389_9689']),
'cluster_8985_9609': ('4.0', ['9837', '8794', '9058']),
'cluster_9043_9052': ('4.1', ['cluster_9563_9597', '9466', '9309', '10026', 'joinedS_1']),
'cluster_9389_9689': ('4.0', ['9884', '9561', 'cluster_8751_9630', '8996']),
'cluster_9563_9597': ('4.2', ['10026', '8794', 'joinedS_0', 'cluster_9043_9052']),
'joinedS_0': ('6.1', ['9058', 'cluster_9563_9597', '9466']),
'joinedS_1': ('3.2', ['9531', '9429'])}
PHASES = {'4.0': ['GGgrrrGGgrrr', 'rrrGGgrrrGGg', 'rrGrrrrrGrrr', 'rrrrrGrrrrrG'],
'4.1': ['GGgrrGGGrrr', 'rrGrrrrrrrr', 'rrrGgrrrGGg', 'rrrrGrrrrrG'],
'4.2': ['GGGGrrrrrrrr', 'GGggrrGGggrr', 'rrrGGGGrrrrr', 'grrGGggrrGGg'],
'2.0': ['GGrrr', 'ggGGG'],
'2.1': ['GGGrrr', 'rrGGGg'],
'2.2': ['Grr', 'gGG'],
'2.3': ['GGGgrr', 'GrrrGG'],
'2.4': ['GGGGrr', 'rrrrGG'],
'2.5': ['Gg', 'rG'],
'2.6': ['GGGg', 'rrrG'],
'2.7': ['GGg', 'rrG'],
'3.0': ['GGgrrrGGg', 'rrGrrrrrG', 'rrrGGGGrr'],
'3.1': ['GgrrGG', 'rGrrrr', 'rrGGGr'],
'3.2': ['GGGGrrrGG', 'rrrrGGGGr', 'GGGGrrGGr'],
'5.0': ['GGGGgrrrrGGGggrrrr', 'grrrGrrrrgrrGGrrrr', 'GGGGGrrrrrrrrrrrrr',
'rrrrrrrrrGGGGGrrrr', 'rrrrrGGggrrrrrggGg'],
'6.0': ['GGGgrrrGGGgrrr', 'rrrGrrrrrrGrrr', 'GGGGrrrrrrrrrr', 'rrrrrrrrrrGGGG',
'rrrrGGgrrrrGGg', 'rrrrrrGrrrrrrG'],
'6.1': ['GGgrrGGGrrrGGGgrrrGGGg', 'rrGrrrrrrrrrrrGrrrrrrG', 'GGGrrrrrGGgrrrrGGgrrrr',
'GGGrrrrrrrGrrrrrrGrrrr', 'rrrGGGrrrrrrrrrrrrGGGG', 'rrrGGGrrrrrGGGgrrrGGGg']}
class RealNetPhase(PhaseMap):
def __init__(self):
self.phases = {}
for key, val in PHASES.items():
self.phases[key] = PhaseSet(val)
class RealNetController:
def __init__(self, node_names, nodes):
self.name = 'greedy'
self.node_names = node_names
self.nodes = nodes
def forward(self, obs):
actions = []
for ob, node_name in zip(obs, self.node_names):
actions.append(self.greedy(ob, node_name))
return actions
def greedy(self, ob, node_name):
# get the action space
phases = PHASES[NODES[node_name][0]]
flows = []
node = self.nodes[node_name]
# get the green waves
for phase in phases:
wave = 0
visited_ilds = set()
for i, signal in enumerate(phase):
if signal == 'G':
# find controlled lane
lane = node.lanes_in[i]
# ild = 'ild:' + lane
ild = lane
# if it has not been counted, add the wave
if ild not in visited_ilds:
j = node.ilds_in.index(ild)
wave += ob[j]
visited_ilds.add(ild)
flows.append(wave)
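        # pick the phase whose green movements currently serve the largest total wave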
return np.argmax(np.array(flows))
class RealNetEnv(TrafficSimulator):
def __init__(self, config, port=0, output_path='', is_record=False, record_stat=False):
self.flow_rate = config.getint('flow_rate')
super().__init__(config, output_path, is_record, record_stat, port=port)
def _get_node_phase_id(self, node_name):
return self.phase_node_map[node_name]
def _init_neighbor_map(self):
return dict([(key, val[1]) for key, val in NODES.items()])
def _init_map(self):
self.neighbor_map = self._init_neighbor_map()
self.phase_map = RealNetPhase()
self.phase_node_map = dict([(key, val[0]) for key, val in NODES.items()])
self.state_names = STATE_NAMES
def _init_sim_config(self, seed):
# comment out to call build_file.py
return gen_rou_file(self.data_path,
self.flow_rate,
seed=seed,
thread=self.sim_thread)
def plot_stat(self, rewards):
self.state_stat['reward'] = rewards
for name, data in self.state_stat.items():
fig = plt.figure(figsize=(8, 6))
plot_cdf(data)
plt.ylabel(name)
fig.savefig(self.output_path + self.name + '_' + name + '.png')
def plot_cdf(X, c='b', label=None):
sorted_data = np.sort(X)
yvals = np.arange(len(sorted_data))/float(len(sorted_data)-1)
plt.plot(sorted_data, yvals, color=c, label=label)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
level=logging.INFO)
config = configparser.ConfigParser()
config.read('./config/config_test_real.ini')
base_dir = './output_result/'
if not os.path.exists(base_dir):
os.mkdir(base_dir)
env = RealNetEnv(config['ENV_CONFIG'], 2, base_dir, is_record=True, record_stat=True)
env.train_mode = False
time.sleep(1)
# ob = env.reset(gui=True)
controller = RealNetController(env.node_names, env.nodes)
env.init_test_seeds(list(range(10000, 100001, 10000)))
rewards = []
for i in range(10):
ob = env.reset(test_ind=i)
global_rewards = []
cur_step = 0
while True:
next_ob, reward, done, global_reward = env.step(controller.forward(ob))
# for node_name, node_ob in zip(env.node_names, next_ob):
# logging.info('%d, %s:%r\n' % (cur_step, node_name, node_ob))
global_rewards.append(global_reward)
rewards += list(reward)
cur_step += 1
if done:
break
ob = next_ob
env.terminate()
logging.info('step: %d, avg reward: %.2f' % (cur_step, np.mean(global_rewards)))
time.sleep(1)
env.plot_stat(np.array(rewards))
env.terminate()
time.sleep(2)
env.collect_tripinfo()
env.output_data()
|
software/glasgow/support/logging.py | electroniceel/Glasgow | 1,014 | 12720879 | <gh_stars>1000+
import operator
from .lazy import *
from .bits import bits
__all__ = ["dump_hex", "dump_bin", "dump_seq", "dump_mapseq"]
def dump_hex(data):
def to_hex(data):
try:
data = memoryview(data)
except TypeError:
data = memoryview(bytes(data))
if dump_hex.limit is None or len(data) < dump_hex.limit:
return data.hex()
else:
return "{}... ({} bytes total)".format(
data[:dump_hex.limit].hex(), len(data))
return lazy(lambda: to_hex(data))
dump_hex.limit = 64
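# Illustrative use (not from the original source): log.debug("write: %s", dump_hex(buf))
# -- the hex string is only built if the record is actually formatted, thanks to lazy().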
def dump_bin(data):
def to_bin(data):
data = bits(data)
if dump_bin.limit is None or len(data) < dump_bin.limit:
return str(data)[::-1]
else:
return "{}... ({} bits total)".format(
str(data[:dump_bin.limit])[::-1], len(data))
return lazy(lambda: to_bin(data))
dump_bin.limit = 64
def dump_seq(joiner, data):
def to_seq(data):
try:
data_length = len(data)
except TypeError:
try:
data_length = data.__length_hint__()
except AttributeError:
data_length = None
if dump_seq.limit is None or (data_length is not None and
data_length < dump_seq.limit):
return joiner.join(data)
else:
return "{}... ({} elements total)".format(
joiner.join(elem for elem, _ in zip(data, range(dump_seq.limit))),
data_length or "?")
return lazy(lambda: to_seq(data))
dump_seq.limit = 16
def dump_mapseq(joiner, mapper, data):
def to_mapseq(data):
try:
data_length = len(data)
except TypeError:
try:
data_length = data.__length_hint__()
except AttributeError:
data_length = None
if dump_mapseq.limit is None or (data_length is not None and
data_length < dump_mapseq.limit):
return joiner.join(map(mapper, data))
else:
return "{}... ({} elements total)".format(
joiner.join(mapper(elem) for elem, _ in zip(data, range(dump_mapseq.limit))),
data_length or "?")
return lazy(lambda: to_mapseq(data))
dump_mapseq.limit = 16
|
migrations/versions/1fe582999fec_.py | muellermartin/moa | 238 | 12720891 | """empty message
Revision ID: 1fe582999fec
Revises: <PASSWORD>
Create Date: 2019-11-19 11:12:38.940614
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1fe582999fec'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('settings', 'conditional_posting_old')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('settings', sa.Column('conditional_posting_old', sa.BOOLEAN(), nullable=False))
# ### end Alembic commands ###
|
jcvi/utils/console.py | paradoxcell/jcvi | 517 | 12720920 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# console.py
# utils
#
# Created by <NAME> on 01/09/21
# Copyright © 2021 <NAME>. All rights reserved.
#
"""
We create a singleton console instance at the module level or as an attribute
of your top-level object.
"""
from rich.console import Console
console = Console()
printf = console.print
|
django_cassandra_engine/base/validation.py | Saurabh-Singh-00/django-cassandra-engine | 334 | 12720953 | <filename>django_cassandra_engine/base/validation.py<gh_stars>100-1000
from django.db.backends.base.validation import BaseDatabaseValidation
class CassandraDatabaseValidation(BaseDatabaseValidation):
pass
|
test/cprofile_rust_cython_complex.py | utapyngo/simplification | 113 | 12720960 | <gh_stars>100-1000
# this tests numpy array simplification using RDP
# 216804 --> 3061 points (98.5% reduction)
# 50ms per VW operation on MBA Core i7
# Note that for the equivalent VW output we should reduce tolerance by an order of magnitude, then divide by 2
# e.g. 0.01 -> 0.0005
from simplification.cutil import simplify_coords
import json
import numpy as np
with open("test/coords_complex.json", "r") as f:
coords = np.array(json.load(f))
for x in range(50):
simplify_coords(coords, 0.01)
|
samples/vsphere/logforwarding/log_forwarding.py | restapicoding/VMware-SDK | 589 | 12720962 | #!/usr/bin/env python
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2017, 2018. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
__vcenter_version__ = '6.7+'
from samples.vsphere.common import sample_cli
from samples.vsphere.common import sample_util
from samples.vsphere.common import vapiconnect
from com.vmware.appliance.logging_client import Forwarding
class LogForwarding(object):
"""
Demonstrates log forwarding API operations
Prerequisites:
- vCenter
- Log host listening to syslog packets over any of the supported
protocols UDP/TCP/TLS
"""
def __init__(self):
self.loghost = None
self.protocol = None
self.port = None
self.stub_config = None
self.log_forwarding_client = None
def setup(self):
parser = sample_cli.build_arg_parser()
parser.add_argument(
'--loghost', required=True, action='store', help='The log host')
parser.add_argument(
'--port',
required=True,
action='store',
help='The log host port number')
parser.add_argument(
'--protocol',
required=True,
action='store',
help='The log host protocol (TCP/UDP/TLS)')
args = sample_util.process_cli_args(parser.parse_args())
self.loghost = args.loghost
self.protocol = args.protocol
self.port = int(args.port)
# Connect to vAPI services
self.stub_config = vapiconnect.connect(
host=args.server,
user=args.username,
            pwd=args.password,
skip_verification=args.skipverification)
self.log_forwarding_client = Forwarding(self.stub_config)
def run(self):
# Set log forwarding configuration
self.set_log_forwarding()
# Get log forwarding configuration
self.get_log_forwarding()
# Test log forwarding configuration
self.test_log_forwarding()
# Update log forwarding configuration
self.update_log_forwarding()
def set_log_forwarding(self):
log_forwarding_config = [
Forwarding.Config(
hostname=self.loghost, port=self.port, protocol=self.protocol)
]
self.log_forwarding_client.set(log_forwarding_config)
def get_log_forwarding(self):
configs = self.log_forwarding_client.get()
for cfg in configs:
print('Loghost: {}, Port: {}, Protocol: {}'.format(
cfg.hostname, cfg.port, cfg.protocol))
def test_log_forwarding(self):
test_response = self.log_forwarding_client.test(True)
print("\nLog forwarding test response:")
for resp in test_response:
print('Loghost: {}, State: {}, Message: {}'.format(
resp.hostname, resp.state,
resp.message.default_message if resp.message else None))
def update_log_forwarding(self):
# Read log forwarding configuration
log_forwarding_config = self.log_forwarding_client.get()
# Delete the newly added configuration
log_forwarding_config = list(
filter(lambda cfg: cfg.hostname != self.loghost,
log_forwarding_config))
# Apply the modified log forwarding configuration
self.log_forwarding_client.set(log_forwarding_config)
def main():
log_forwarding = LogForwarding()
log_forwarding.setup()
log_forwarding.run()
if __name__ == '__main__':
main()
|
bootstrap.py/txtmark.py | onesty-dev/txtmark | 361 | 12720978 | <gh_stars>100-1000
#!/usr/bin/env python
# vim:fileencoding=utf-8 :et:ts=4:sts=4
#
# Copyright 2015 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import errno
import urllib2
import sys
import subprocess
group_id = "com.github.rjeschke"
artifact = "txtmark"
jar_dest = os.path.join(os.path.expanduser("~"), ".txtmark_jar", "txtmark.jar")
oss_snapshots = "https://oss.sonatype.org/content/repositories/snapshots"
maven_repo1 = "https://repo1.maven.org/maven2"
snap_url = oss_snapshots + "/" + group_id.replace(".", "/") + "/" + artifact
rel_url = maven_repo1 + "/" + group_id.replace(".", "/") + "/" + artifact
def mkdirs(path):
"""mkdir -p"""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
def read_mvn_infos(url):
response = urllib2.urlopen(url + "/maven-metadata.xml")
latest = None
version = None
last_modified = 0
for line in response.read().split("\n"):
line = line.strip()
if re.match("^<latest>.+</latest>$", line):
latest = line[8:-9]
elif re.match("^<lastUpdated>.+</lastUpdated>$", line):
last_modified = long(line[13:-14])
elif not latest and re.match("^<version>.+</version>$", line):
version = line[9:-10]
if latest:
return [latest, last_modified]
return [version, last_modified]
def get_snapshot_version(url, version):
response = urllib2.urlopen(url + "/maven-metadata.xml")
timestamp = None
build_number = 0
for line in response.read().split("\n"):
line = line.strip()
if not timestamp and re.match("^<timestamp>.*</timestamp>$", line):
timestamp = line[11:-12]
elif build_number == 0 and re.match("^<buildNumber>.*</buildNumber>$", line):
build_number = int(line[13:-14])
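    # Maven snapshot jars are published as <artifact>-<baseVersion>-<timestamp>-<buildNumber>.jar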
return url + "/" + artifact + "-" + version[:version.find("-")] + "-" + timestamp + "-" + str(build_number) + ".jar"
def download(is_snap, version):
u = None
if is_snap:
u = get_snapshot_version(snap_url + "/" + version, version)
else:
u = rel_url + "/" + version + "/" + artifact + "-" + version + ".jar"
response = urllib2.urlopen(u)
with open(jar_dest, "wb") as fd:
fd.write(response.read())
def fetch_artifact(force_update):
if force_update or not os.path.exists(jar_dest):
mkdirs(os.path.dirname(jar_dest))
rel = read_mvn_infos(rel_url)
snp = read_mvn_infos(snap_url)
if snp[1] > rel[1]:
download(True, snp[0])
else:
download(False, rel[0])
if __name__ == "__main__":
force_update = False
if len(sys.argv) > 1:
force_update = sys.argv[1] == "-u" or sys.argv[1] == "--update"
fetch_artifact(force_update)
cmd = ["java", "-cp", jar_dest, "com.github.rjeschke.txtmark.cmd.Run"]
cmd.extend(sys.argv[2 if force_update else 1:])
exit(subprocess.call(cmd))
|
examples/interfaces/multiple.py | dmytrostriletskyi/design-kit | 107 | 12720989 | <reponame>dmytrostriletskyi/design-kit
from accessify import implements
class HumanSoulInterface:
def love(self, who, *args, **kwargs):
pass
class HumanBasicsInterface:
@staticmethod
def eat(food, *args, allergy=None, **kwargs):
pass
if __name__ == '__main__':
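    # Human implements love() but not eat(), so accessify's interface check should
    # flag the missing HumanBasicsInterface member at class-creation time.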
@implements(HumanSoulInterface, HumanBasicsInterface)
class Human:
def love(self, who, *args, **kwargs):
pass
|
scripts/coverage.py | lishoujun/pysimdjson | 531 | 12720991 | """
This is a backport and modernization of Cython's coverage.py support from the
3.* branch to the stable 0.29.* branch.
Most importantly for us, it includes support for coverage.py's `exclude_lines`
configuration option, allowing us to filter things like MemoryError from the
coverage reports.
It is standalone, and does not require Cython itself.
The complete license for this file can be found at:
https://github.com/cython/cython/blob/0.29.22/LICENSE.txt
Changelog
---------
1.0.0
^^^^^
- Support for excluded_lines
- Fixed inconsistent quotations, PEP8'd.
"""
import io
import re
import os.path
import sys
from collections import defaultdict
from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
from coverage.files import canonical_filename
C_FILE_EXTENSIONS = {'.c', '.cpp', '.cc', '.cxx'}
MODULE_FILE_EXTENSIONS = {'.py', '.pyx', '.pxd'} | C_FILE_EXTENSIONS
def is_package_dir(dir_path):
for filename in ('__init__.py',
'__init__.pyc',
'__init__.pyx',
'__init__.pxd'):
path = os.path.join(dir_path, filename)
if path_exists(path):
return 1
_match_file_encoding = re.compile(br'(\w*coding)[:=]\s*([-\w.]+)').search
def detect_opened_file_encoding(f):
# PEPs 263 and 3120
# Most of the time the first two lines fall in the first couple of hundred
# chars, and this bulk read/split is much faster.
lines = ()
start = b''
while len(lines) < 3:
data = f.read(500)
start += data
lines = start.split(b'\n')
if not data:
break
m = _match_file_encoding(lines[0])
if m and m.group(1) != b'c_string_encoding':
return m.group(2).decode('iso8859-1')
elif len(lines) > 1:
m = _match_file_encoding(lines[1])
if m:
return m.group(2).decode('iso8859-1')
return 'UTF-8'
def path_exists(path):
# try on the filesystem first
if os.path.exists(path):
return True
# figure out if a PEP 302 loader is around
try:
loader = __loader__
# XXX the code below assumes a 'zipimport.zipimporter' instance
# XXX should be easy to generalize, but too lazy right now to write it
archive_path = getattr(loader, 'archive', None)
if archive_path:
normpath = os.path.normpath(path)
if normpath.startswith(archive_path):
arcname = normpath[len(archive_path)+1:]
try:
loader.get_data(arcname)
return True
except IOError:
return False
except NameError:
pass
return False
def find_root_package_dir(file_path):
dir = os.path.dirname(file_path)
if file_path == dir:
return dir
elif is_package_dir(dir):
return find_root_package_dir(dir)
else:
return dir
def open_source_file(source_filename, encoding=None, error_handling=None):
stream = None
try:
if encoding is None:
# Most of the time the encoding is not specified, so try hard to
# open the file only once.
f = io.open(source_filename, 'rb')
encoding = detect_opened_file_encoding(f)
f.seek(0)
stream = io.TextIOWrapper(
f,
encoding=encoding,
errors=error_handling
)
else:
stream = io.open(
source_filename,
encoding=encoding,
errors=error_handling
)
except OSError:
if os.path.exists(source_filename):
raise # File is there, but something went wrong reading from it.
# Allow source files to be in zip files etc.
try:
loader = __loader__
if source_filename.startswith(loader.archive):
stream = open_source_from_loader(
loader, source_filename,
encoding, error_handling)
except (NameError, AttributeError):
pass
if stream is None:
raise FileNotFoundError(source_filename)
if stream.read(1) != u'\uFEFF':
stream.seek(0)
return stream
def _find_c_source(base_path):
file_exists = os.path.exists
for ext in C_FILE_EXTENSIONS:
file_name = base_path + ext
if file_exists(file_name):
return file_name
return None
def _find_dep_file_path(main_file, file_path, relative_path_search=False):
abs_path = os.path.abspath(file_path)
if not os.path.exists(abs_path) and (file_path.endswith('.pxi') or
relative_path_search):
# files are looked up relative to the main source file
rel_file_path = os.path.join(os.path.dirname(main_file), file_path)
if os.path.exists(rel_file_path):
abs_path = os.path.abspath(rel_file_path)
# search sys.path for external locations if a valid file hasn't been found
if not os.path.exists(abs_path):
for sys_path in sys.path:
test_path = os.path.realpath(os.path.join(sys_path, file_path))
if os.path.exists(test_path):
return canonical_filename(test_path)
return canonical_filename(abs_path)
class Plugin(CoveragePlugin):
# map from traced file paths to absolute file paths
_file_path_map = None
# map from traced file paths to corresponding C files
_c_files_map = None
# map from parsed C files to their content
_parsed_c_files = None
# map from traced files to lines that are excluded from coverage
_excluded_lines_map = None
# list of regex patterns for lines to exclude
_excluded_line_patterns = ()
def sys_info(self):
return []
def configure(self, config):
# Entry point for coverage "configurer".
# Read the regular expressions from the coverage config that match
# lines to be excluded from coverage.
self._excluded_line_patterns = config.get_option(
'report:exclude_lines'
)
def file_tracer(self, filename):
"""
Try to find a C source file for a file path found by the tracer.
"""
if filename.startswith('<') or filename.startswith('memory:'):
return None
c_file = py_file = None
filename = canonical_filename(os.path.abspath(filename))
if self._c_files_map and filename in self._c_files_map:
c_file = self._c_files_map[filename][0]
if c_file is None:
c_file, py_file = self._find_source_files(filename)
if not c_file:
return None # unknown file
# parse all source file paths and lines from C file
# to learn about all relevant source files right away (pyx/pxi/pxd)
# FIXME: this might already be too late if the first executed line
# is not from the main .pyx file but a file with a different
# name than the .c file (which prevents us from finding the
# .c file)
_, code = self._read_source_lines(c_file, filename)
if code is None:
return None # no source found
if self._file_path_map is None:
self._file_path_map = {}
return CythonModuleTracer(
filename,
py_file,
c_file,
self._c_files_map,
self._file_path_map
)
def file_reporter(self, filename):
# TODO: let coverage.py handle .py files itself
# ext = os.path.splitext(filename)[1].lower()
# if ext == '.py':
# from coverage.python import PythonFileReporter
# return PythonFileReporter(filename)
filename = canonical_filename(os.path.abspath(filename))
if self._c_files_map and filename in self._c_files_map:
c_file, rel_file_path, code = self._c_files_map[filename]
else:
c_file, _ = self._find_source_files(filename)
if not c_file:
return None # unknown file
rel_file_path, code = self._read_source_lines(c_file, filename)
if code is None:
return None # no source found
return CythonModuleReporter(
c_file,
filename,
rel_file_path,
code,
self._excluded_lines_map.get(rel_file_path, frozenset())
)
def _find_source_files(self, filename):
basename, ext = os.path.splitext(filename)
ext = ext.lower()
if ext in MODULE_FILE_EXTENSIONS:
pass
elif ext == '.pyd':
# Windows extension module
platform_suffix = re.search(
r'[.]cp[0-9]+-win[_a-z0-9]*$',
basename,
re.I
)
if platform_suffix:
basename = basename[:platform_suffix.start()]
elif ext == '.so':
# Linux/Unix/Mac extension module
platform_suffix = re.search(
r'[.](?:cpython|pypy)-[0-9]+[-_a-z0-9]*$',
basename,
re.I
)
if platform_suffix:
basename = basename[:platform_suffix.start()]
elif ext == '.pxi':
# if we get here, it means that the first traced line of a Cython
# module was not in the main module but in an include file, so try
# a little harder to find the main source file
self._find_c_source_files(os.path.dirname(filename), filename)
if filename in self._c_files_map:
return self._c_files_map[filename][0], None
else:
# none of our business
return None, None
if ext in C_FILE_EXTENSIONS:
c_file = filename
else:
c_file = _find_c_source(basename)
if c_file is None:
# a module "pkg/mod.so" can have a source file "pkg/pkg.mod.c"
package_root = find_root_package_dir(filename)
package_path = os.path.relpath(
basename,
package_root
).split(os.path.sep)
if len(package_path) > 1:
test_basepath = os.path.join(
os.path.dirname(filename),
'.'.join(package_path)
)
c_file = _find_c_source(test_basepath)
py_source_file = None
if c_file:
py_source_file = os.path.splitext(c_file)[0] + '.py'
if not os.path.exists(py_source_file):
py_source_file = None
try:
with open(c_file, 'rb') as f:
if b'/* Generated by Cython ' not in f.read(30):
return None, None # not a Cython file
except (IOError, OSError):
c_file = None
return c_file, py_source_file
def _find_c_source_files(self, dir_path, source_file):
"""
Desperately parse all C files in the directory or its package parents
(not re-descending) to find the (included) source file in one of them.
"""
if not os.path.isdir(dir_path):
return
splitext = os.path.splitext
for filename in os.listdir(dir_path):
ext = splitext(filename)[1].lower()
if ext in C_FILE_EXTENSIONS:
self._read_source_lines(
os.path.join(dir_path, filename),
source_file
)
if source_file in self._c_files_map:
return
# not found? then try one package up
if is_package_dir(dir_path):
self._find_c_source_files(os.path.dirname(dir_path), source_file)
def _read_source_lines(self, c_file, sourcefile):
"""
Parse a Cython generated C/C++ source file and find the executable
lines. Each executable line starts with a comment header that states
source file and line number, as well as the surrounding range of source
code lines.
"""
if self._parsed_c_files is None:
self._parsed_c_files = {}
if c_file in self._parsed_c_files:
code_lines = self._parsed_c_files[c_file]
else:
code_lines = self._parse_cfile_lines(c_file)
self._parsed_c_files[c_file] = code_lines
if self._c_files_map is None:
self._c_files_map = {}
for filename, code in code_lines.items():
abs_path = _find_dep_file_path(c_file, filename,
relative_path_search=True)
self._c_files_map[abs_path] = (c_file, filename, code)
if sourcefile not in self._c_files_map:
return (None,) * 2 # e.g. shared library file
return self._c_files_map[sourcefile][1:]
def _parse_cfile_lines(self, c_file):
"""
Parse a C file and extract all source file lines that generated
executable code.
"""
match_source_path_line = re.compile(r' */[*] +"(.*)":([0-9]+)$').match
match_current_code_line = re.compile(r' *[*] (.*) # <<<<<<+$').match
match_comment_end = re.compile(r' *[*]/$').match
match_trace_line = re.compile(r' *__Pyx_TraceLine\(([0-9]+),').match
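        # These patterns walk the comment blocks Cython emits into the generated C:
        #   /* "pkg/mod.pyx":123
        #    * original_source_line()             # <<<<<<<<<<<<<<
        #    */
        # plus the __Pyx_TraceLine(123, ...) calls marking traceable lines.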
not_executable = re.compile(
r'\s*c(?:type)?def\s+'
r'(?:(?:public|external)\s+)?'
r'(?:struct|union|enum|class)'
r'(\s+[^:]+|)\s*:'
).match
line_is_excluded = None
if self._excluded_line_patterns:
line_is_excluded = re.compile(
'|'.join([
'(?:{0})'.format(regex)
for regex in self._excluded_line_patterns
])
).search
code_lines = defaultdict(dict)
executable_lines = defaultdict(set)
current_filename = None
if self._excluded_lines_map is None:
self._excluded_lines_map = defaultdict(set)
with open(c_file) as lines:
lines = iter(lines)
for line in lines:
match = match_source_path_line(line)
if not match:
if ('__Pyx_TraceLine(' in line and
current_filename is not None):
trace_line = match_trace_line(line)
if trace_line:
executable_lines[current_filename].add(
int(trace_line.group(1))
)
continue
filename, lineno = match.groups()
current_filename = filename
lineno = int(lineno)
for comment_line in lines:
match = match_current_code_line(comment_line)
if match:
code_line = match.group(1).rstrip()
if not_executable(code_line):
break
if (line_is_excluded is not None and
line_is_excluded(code_line)):
self._excluded_lines_map[filename].add(lineno)
break
code_lines[filename][lineno] = code_line
break
elif match_comment_end(comment_line):
# unexpected comment format - false positive?
break
# Remove lines that generated code but are not traceable.
for filename, lines in code_lines.items():
dead_lines = set(lines).difference(
executable_lines.get(filename, ())
)
for lineno in dead_lines:
del lines[lineno]
return code_lines
class CythonModuleTracer(FileTracer):
"""
Find the Python/Cython source file for a Cython module.
"""
def __init__(self, module_file, py_file, c_file, c_files_map,
file_path_map):
super(CythonModuleTracer, self).__init__()
self.module_file = module_file
self.py_file = py_file
self.c_file = c_file
self._c_files_map = c_files_map
self._file_path_map = file_path_map
def has_dynamic_source_filename(self):
return True
def dynamic_source_filename(self, filename, frame):
"""
Determine source file path. Called by the function call tracer.
"""
source_file = frame.f_code.co_filename
try:
return self._file_path_map[source_file]
except KeyError:
pass
abs_path = _find_dep_file_path(filename, source_file)
if self.py_file and source_file[-3:].lower() == '.py':
# always let coverage.py handle this case itself
self._file_path_map[source_file] = self.py_file
return self.py_file
assert self._c_files_map is not None
if abs_path not in self._c_files_map:
self._c_files_map[abs_path] = (self.c_file, source_file, None)
self._file_path_map[source_file] = abs_path
return abs_path
class CythonModuleReporter(FileReporter):
"""
Provide detailed trace information for one source file to coverage.py.
"""
def __init__(self, c_file, source_file, rel_file_path, code,
excluded_lines):
super(CythonModuleReporter, self).__init__(source_file)
self.name = rel_file_path
self.c_file = c_file
self._code = code
self._excluded_lines = excluded_lines
def lines(self):
"""
Return set of line numbers that are possibly executable.
"""
return set(self._code)
def excluded_lines(self):
"""
Return set of line numbers that are excluded from coverage.
"""
return self._excluded_lines
def _iter_source_tokens(self):
current_line = 1
for line_no, code_line in sorted(self._code.items()):
while line_no > current_line:
yield []
current_line += 1
yield [('txt', code_line)]
current_line += 1
def source(self):
"""
Return the source code of the file as a string.
"""
if os.path.exists(self.filename):
with open_source_file(self.filename) as f:
return f.read()
else:
return '\n'.join(
(tokens[0][1] if tokens else '')
for tokens in self._iter_source_tokens())
def source_token_lines(self):
"""
Iterate over the source code tokens.
"""
if os.path.exists(self.filename):
with open_source_file(self.filename) as f:
for line in f:
yield [('txt', line.rstrip('\n'))]
else:
for line in self._iter_source_tokens():
yield [('txt', line)]
def coverage_init(reg, options):
plugin = Plugin()
reg.add_configurer(plugin)
reg.add_file_tracer(plugin)
|
tests/test_schema.py | center-for-threat-informed-defense/attack-flow | 165 | 12721009 | <reponame>center-for-threat-informed-defense/attack-flow
import json
from pathlib import Path
from tempfile import NamedTemporaryFile
from textwrap import dedent
import pytest
from attack_flow.schema import (
anchor,
generate_html,
get_properties,
html_name,
insert_html,
InvalidRelationshipsError,
SchemaProperty,
validate_docs,
validate_rules,
)
PROJECT_ROOT = Path(__file__).resolve().parent.parent
SCHEMA_PATH = PROJECT_ROOT / "schema" / "attack-flow-2022-01-05-draft.json"
def test_validate_docs():
doc1_json = {
"flow": {
"type": "attack-flow",
"id": "https://flow-v1/doc1",
"name": "Test Attack Flow",
"created": "2021-12-17T08:31:22.320133-05:00"
},
"actions": [],
"assets": [],
"relationships": [],
"object_properties": [],
"data_properties": [],
}
doc2_json = {
# Missing required name field:
"flow": {
"type": "attack-flow",
"id": "https://flow-v1/doc1",
"created": "bogus date",
},
"actions": [],
"assets": [],
"relationships": [],
"object_properties": [],
"data_properties": [],
}
with SCHEMA_PATH.open() as schema_file, \
NamedTemporaryFile('w+') as doc1_file, \
NamedTemporaryFile('w+') as doc2_file:
json.dump(doc1_json, doc1_file)
json.dump(doc2_json, doc2_file)
schema_file.seek(0)
doc1_file.seek(0)
doc2_file.seek(0)
results_one_file = validate_docs(schema_file.name, doc1_file.name)
results_two_files = validate_docs(schema_file.name,
[doc1_file.name, doc2_file.name])
assert results_one_file[0] is None
assert results_two_files[0] is None
assert isinstance(results_two_files[1], Exception)
def test_schema_property_string():
sp = SchemaProperty('test-prop', False, {
'description': 'My description :>',
'type': 'string',
})
assert sp.name == 'test-prop'
assert sp.type == 'string'
assert not sp.required
assert sp.html_type == 'string'
assert sp.html_description == 'My description :>'
def test_schema_property_uuid():
sp = SchemaProperty('test-uuid', True, {
'description': 'My description :>',
'type': 'string',
'format': 'uuid',
})
assert sp.name == 'test-uuid'
assert sp.type == 'string'
assert sp.required
assert sp.html_type == 'uuid'
assert sp.html_description == 'My description :>'
def test_schema_property_datetime():
sp = SchemaProperty('test-datetime', True, {
'description': 'My description',
'type': 'string',
'format': 'date-time',
})
assert sp.name == 'test-datetime'
assert sp.type == 'string'
assert sp.required
assert sp.html_type == 'date-time'
assert sp.html_description == \
'My description (RFC-3339 format, e.g. YYYY-MM-DDThh:mm:ssZ)'
def test_schema_property_array_of_string():
sp = SchemaProperty('test-array', True, {
'description': 'My description',
'type': 'array',
'items': {'type': 'string'}
})
assert sp.name == 'test-array'
assert sp.type == 'array'
assert sp.subtype == 'string'
assert sp.required
assert sp.html_type == 'array of string'
assert sp.html_description == 'My description'
def test_schema_property_array_of_object():
sp = SchemaProperty('test-array2', True, {
'description': 'My description',
'type': 'array',
'items': {'type': 'object'}
})
assert sp.name == 'test-array2'
assert sp.type == 'array'
assert sp.subtype == 'object'
assert sp.required
assert sp.html_type == 'array of <a href="#testarray2">test-array2</a>'
assert sp.html_description == 'My description'
def test_schema_property_object():
sp = SchemaProperty('test-object', True, {
'description': 'My description',
'type': 'object',
'properties': {'foo': 'string'}
})
assert sp.name == 'test-object'
assert sp.type == 'object'
assert sp.subtype == ''
assert sp.required
assert sp.html_type == '<a href="#testobject">test-object</a> object'
assert sp.html_description == 'My description'
def test_schema_property_enum():
sp = SchemaProperty('test-enum', True, {
'description': 'My description',
'type': 'string',
'enum': ['foo', 'bar']
})
assert sp.name == 'test-enum'
assert sp.type == 'string'
assert sp.required
assert sp.html_type == 'enum'
assert sp.html_description == 'My description (Enum values: "foo", "bar")'
def test_get_properties():
schema = {
'type': 'object',
'properties': {
'name': {
'description': 'My name',
'type': 'string'
},
'hobbies': {
'description': 'My hobbies',
'type': 'array',
'items': {'type': 'string'}
},
'cars': {
'description': 'My cars',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'make': {
'description': 'The auto manufacturer',
'type': 'string',
},
'model': {
'description': 'The model name',
'type': 'string',
},
}
}
},
'address': {
'description': 'My address',
'type': 'object',
'properties': {
'city': {
'description': 'My city',
'type': 'string'
},
'state': {
'description': 'My state',
'type': 'string'
}
}
}
}
}
props = get_properties(schema, node='root')
assert 'root' in props
root = props['root']
assert root['name'].type == 'string'
assert 'address' in props
address = props['address']
assert address['city'].type == 'string'
def test_generate_html():
actual_html = generate_html({
'__root__': {
'prop1': SchemaProperty('prop1', False, {
'description': 'prop1 description',
'type': 'string',
}),
'prop2': SchemaProperty('prop2', True, {
'description': 'prop2 description',
'type': 'string',
})
},
'subtype': {
'prop3': SchemaProperty('prop3', True, {
'description': 'prop3 description',
'type': 'string'
})
}
})
expected_html = [
'<h3 id="TopLevel">Top Level Fields</h3>',
'<table>',
' <tr>',
' <th>Name</th>',
' <th>Type</th>',
' <th>Required</th>',
' <th>Description</th>',
' </tr>',
' <tr>',
' <td>prop1</td>',
' <td>string</td>',
' <td>No</td>',
' <td>prop1 description</td>',
' </tr>',
' <tr>',
' <td>prop2</td>',
' <td>string</td>',
' <td>Yes</td>',
' <td>prop2 description</td>',
' </tr>',
'</table>',
'',
'<h3 id="subtype">Subtype Fields</h3>',
'<table>',
' <tr>',
' <th>Name</th>',
' <th>Type</th>',
' <th>Required</th>',
' <th>Description</th>',
' </tr>',
' <tr>',
' <td>prop3</td>',
' <td>string</td>',
' <td>Yes</td>',
' <td>prop3 description</td>',
' </tr>',
'</table>',
'',
]
assert actual_html == expected_html
def test_anchor():
assert anchor('? ASDF; 123 ') == 'ASDF123'
def test_insert_html():
old_doc = iter([
'old text 1',
'old text 2',
'<!--JSON_SCHEMA-->',
'old html 1',
'old html 2',
'<!--/JSON_SCHEMA-->',
'old text 3',
'old text 4',
])
html = [
'new html 1',
'new html 2',
]
actual = iter(insert_html(old_doc, html).splitlines())
assert next(actual) == 'old text 1'
assert next(actual) == 'old text 2'
assert next(actual).startswith('<!--JSON_SCHEMA')
assert next(actual) == 'new html 1'
assert next(actual) == 'new html 2'
assert next(actual) == '<!--/JSON_SCHEMA-->'
assert next(actual) == 'old text 3'
assert next(actual) == 'old text 4'
def test_insert_html_no_start_tag():
old_doc = iter([
'old text 1',
'old text 2',
'<!--/JSON_SCHEMA-->',
'old text 3',
'old text 4',
])
with pytest.raises(Exception):
insert_html(old_doc, []).splitlines()
def test_insert_html_no_end_tag():
old_doc = iter([
'old text 1',
'old text 2',
'<!--JSON_SCHEMA-->',
'old text 3',
'old text 4',
])
with pytest.raises(Exception):
insert_html(old_doc, []).splitlines()
def test_validate_rules():
flow = {
"flow": {
"type": "attack-flow",
"id": "https://flow-v1",
"name": "Test Attack Flow",
"created": "2021-12-17T08:31:22.320133-05:00"
},
"actions": [
{
"id": "action1",
"name": "action-one",
},
],
"assets": [
{"id": "asset1"},
],
"relationships": [
{
"source": "action1",
"target": "asset1",
},
{
"source": "asset1",
"target": "action2",
},
{
"source": "action2",
"target": "asset2",
},
],
}
with pytest.raises(InvalidRelationshipsError) as exc_info:
validate_rules(flow)
exc = exc_info.value
assert str(exc) == dedent("""\
- Relationship target ID "action2" does not exist.
- Relationship source ID "action2" does not exist.
- Relationship target ID "asset2" does not exist.""")
def test_html_name():
assert html_name("foo") == "Foo"
assert html_name("foo_bar") == "Foo Bar"
|
networkx/classes/tests/test_graph_historical.py | jebogaert/networkx | 10,024 | 12721033 | """Original NetworkX graph tests"""
import networkx
import networkx as nx
from .historical_tests import HistoricalTests
class TestGraphHistorical(HistoricalTests):
@classmethod
def setup_class(cls):
HistoricalTests.setup_class()
cls.G = nx.Graph
|
turbo/register.py | wecatch/app-turbo | 157 | 12721051 | from __future__ import absolute_import, division, print_function, with_statement
import os
from turbo.conf import app_config
from turbo.util import get_base_dir, import_object
def _install_app(package_space):
for app in getattr(import_object('apps.settings', package_space), 'INSTALLED_APPS'):
import_object('.'.join(['apps', app]), package_space)
def register_app(app_name, app_setting, web_application_setting, mainfile, package_space):
"""insert current project root path into sys path
"""
from turbo import log
app_config.app_name = app_name
app_config.app_setting = app_setting
app_config.project_name = os.path.basename(get_base_dir(mainfile, 2))
app_config.web_application_setting.update(web_application_setting)
if app_setting.get('session_config'):
app_config.session_config.update(app_setting['session_config'])
log.getLogger(**app_setting.log)
_install_app(package_space)
def register_url(url, handler, name=None, kwargs=None):
"""insert url into tornado application handlers group
:arg str url: url
:handler object handler: url mapping handler
:name reverse url name
:kwargs dict tornado handler initlize args
"""
if name is None and kwargs is None:
app_config.urls.append((url, handler))
return
if name is None:
app_config.urls.append((url, handler, kwargs))
return
app_config.urls.append((url, handler, kwargs, name))
def register_group_urls(prefix, urls):
for item in urls:
url, handler = item[0:2]
register_url(prefix + url, handler, *item[2:])
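# Illustrative usage sketch (the handler classes below are hypothetical, not
# part of this module):
#
#     register_url('/', HomeHandler)
#     register_url('/user', UserHandler, name='user', kwargs={'key': 'value'})
#     register_group_urls('/api', [
#         ('/ping', PingHandler),
#         ('/status', StatusHandler, 'status'),   # third element is the reverse-url name
#     ])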
|
test/run/t143.py | timmartin/skulpt | 2,671 | 12721059 | print repr((1,2,3))
print repr([1,2,3])
print repr({1:'ok', 2:'stuff'})
print repr("weewaa")
|
tests/links_tests/connection_tests/test_graph_mlp.py | pfnet/chainerchem | 184 | 12721087 | from chainer import cuda
from chainer import gradient_check
import numpy
import pytest
from chainer_chemistry.links.connection.graph_mlp import GraphMLP # NOQA
in_size = 3
atom_size = 5
out_size = 4
channels = [16, out_size]
batch_size = 2
@pytest.fixture
def model():
l = GraphMLP(channels, in_channels=in_size)
l.cleargrads()
return l
@pytest.fixture
def data():
x_data = numpy.random.uniform(
-1, 1, (batch_size, atom_size, in_size)).astype(numpy.float32)
y_grad = numpy.random.uniform(
-1, 1, (batch_size, atom_size, out_size)).astype(numpy.float32)
return x_data, y_grad
def test_forward_cpu(model, data):
# only testing shape for now...
x_data = data[0]
y_actual = model(x_data)
assert y_actual.shape == (batch_size, atom_size, out_size)
assert len(model.layers) == len(channels)
@pytest.mark.gpu
def test_forward_gpu(model, data):
x_data = cuda.to_gpu(data[0])
model.to_gpu()
y_actual = model(x_data)
assert y_actual.shape == (batch_size, atom_size, out_size)
assert len(model.layers) == len(channels)
def test_backward_cpu(model, data):
x_data, y_grad = data
gradient_check.check_backward(model, x_data, y_grad, list(model.params()),
atol=1e-3, rtol=1e-3)
@pytest.mark.gpu
def test_backward_gpu(model, data):
x_data, y_grad = [cuda.to_gpu(d) for d in data]
model.to_gpu()
gradient_check.check_backward(model, x_data, y_grad, list(model.params()),
atol=1e-3, rtol=1e-3)
if __name__ == '__main__':
pytest.main([__file__, '-v', '-s'])
|
scripts/mvmc_driver.py | dumpmemory/ners | 156 | 12721090 | """
Script for running over all instances of MVMC.
Since running the script can take a long time, it is possible to parallelize across
different machines using the --index and --skip arguments.
Examples:
python scripts/mvmc_driver.py --mvmc_path data/mvmc --index 0 --skip 1
"""
import argparse
import os
import subprocess
from tqdm.auto import tqdm
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--mvmc_path", type=str, default="data/mvmc")
parser.add_argument(
"--index", default=0, type=int, help="Initial index to start at."
)
parser.add_argument(
"--skip", default=1, type=int, help="Number of instances to skip at a time."
)
parser.add_argument(
"--force", action="store_true", help="Re-run even if output exists."
)
return parser
def main(args):
instance_ids = sorted(os.listdir(args.mvmc_path))
base_cmd = [
"python",
"main.py",
"--mvmc",
"--symmetrize",
"--export-mesh",
"--predict-illumination",
]
if args.force:
base_cmd.append("--force")
for instance_id in tqdm(instance_ids[args.index :: args.skip]):
cmd = base_cmd + [
"--instance-dir",
os.path.join(args.mvmc_path, instance_id),
]
print("Running:", " ".join(cmd))
subprocess.call(cmd)
if __name__ == "__main__":
args = get_parser().parse_args()
main(args)
|
sdk/python/pulumi_gcp/logging/billing_account_exclusion.py | sisisin/pulumi-gcp | 121 | 12721135 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['BillingAccountExclusionArgs', 'BillingAccountExclusion']
@pulumi.input_type
class BillingAccountExclusionArgs:
def __init__(__self__, *,
billing_account: pulumi.Input[str],
filter: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a BillingAccountExclusion resource.
:param pulumi.Input[str] billing_account: The billing account to create the exclusion for.
:param pulumi.Input[str] filter: The filter to apply when excluding logs. Only log entries that match the filter are excluded.
See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced-filters) for information on how to
write a filter.
:param pulumi.Input[str] description: A human-readable description.
:param pulumi.Input[bool] disabled: Whether this exclusion rule should be disabled or not. This defaults to
false.
:param pulumi.Input[str] name: The name of the logging exclusion.
"""
pulumi.set(__self__, "billing_account", billing_account)
pulumi.set(__self__, "filter", filter)
if description is not None:
pulumi.set(__self__, "description", description)
if disabled is not None:
pulumi.set(__self__, "disabled", disabled)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="billingAccount")
def billing_account(self) -> pulumi.Input[str]:
"""
The billing account to create the exclusion for.
"""
return pulumi.get(self, "billing_account")
@billing_account.setter
def billing_account(self, value: pulumi.Input[str]):
pulumi.set(self, "billing_account", value)
@property
@pulumi.getter
def filter(self) -> pulumi.Input[str]:
"""
The filter to apply when excluding logs. Only log entries that match the filter are excluded.
See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced-filters) for information on how to
write a filter.
"""
return pulumi.get(self, "filter")
@filter.setter
def filter(self, value: pulumi.Input[str]):
pulumi.set(self, "filter", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A human-readable description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def disabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this exclusion rule should be disabled or not. This defaults to
false.
"""
return pulumi.get(self, "disabled")
@disabled.setter
def disabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disabled", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the logging exclusion.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _BillingAccountExclusionState:
def __init__(__self__, *,
billing_account: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
filter: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering BillingAccountExclusion resources.
:param pulumi.Input[str] billing_account: The billing account to create the exclusion for.
:param pulumi.Input[str] description: A human-readable description.
:param pulumi.Input[bool] disabled: Whether this exclusion rule should be disabled or not. This defaults to
false.
:param pulumi.Input[str] filter: The filter to apply when excluding logs. Only log entries that match the filter are excluded.
See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced-filters) for information on how to
write a filter.
:param pulumi.Input[str] name: The name of the logging exclusion.
"""
if billing_account is not None:
pulumi.set(__self__, "billing_account", billing_account)
if description is not None:
pulumi.set(__self__, "description", description)
if disabled is not None:
pulumi.set(__self__, "disabled", disabled)
if filter is not None:
pulumi.set(__self__, "filter", filter)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="billingAccount")
def billing_account(self) -> Optional[pulumi.Input[str]]:
"""
The billing account to create the exclusion for.
"""
return pulumi.get(self, "billing_account")
@billing_account.setter
def billing_account(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "billing_account", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A human-readable description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def disabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this exclusion rule should be disabled or not. This defaults to
false.
"""
return pulumi.get(self, "disabled")
@disabled.setter
def disabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disabled", value)
@property
@pulumi.getter
def filter(self) -> Optional[pulumi.Input[str]]:
"""
The filter to apply when excluding logs. Only log entries that match the filter are excluded.
See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced-filters) for information on how to
write a filter.
"""
return pulumi.get(self, "filter")
@filter.setter
def filter(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "filter", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the logging exclusion.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
class BillingAccountExclusion(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
billing_account: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
filter: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Import
Billing account logging exclusions can be imported using their URI, e.g.
```sh
$ pulumi import gcp:logging/billingAccountExclusion:BillingAccountExclusion my_exclusion billingAccounts/my-billing_account/exclusions/my-exclusion
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] billing_account: The billing account to create the exclusion for.
:param pulumi.Input[str] description: A human-readable description.
:param pulumi.Input[bool] disabled: Whether this exclusion rule should be disabled or not. This defaults to
false.
:param pulumi.Input[str] filter: The filter to apply when excluding logs. Only log entries that match the filter are excluded.
See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced-filters) for information on how to
write a filter.
:param pulumi.Input[str] name: The name of the logging exclusion.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: BillingAccountExclusionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
Billing account logging exclusions can be imported using their URI, e.g.
```sh
$ pulumi import gcp:logging/billingAccountExclusion:BillingAccountExclusion my_exclusion billingAccounts/my-billing_account/exclusions/my-exclusion
```
:param str resource_name: The name of the resource.
:param BillingAccountExclusionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BillingAccountExclusionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
billing_account: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
filter: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BillingAccountExclusionArgs.__new__(BillingAccountExclusionArgs)
if billing_account is None and not opts.urn:
raise TypeError("Missing required property 'billing_account'")
__props__.__dict__["billing_account"] = billing_account
__props__.__dict__["description"] = description
__props__.__dict__["disabled"] = disabled
if filter is None and not opts.urn:
raise TypeError("Missing required property 'filter'")
__props__.__dict__["filter"] = filter
__props__.__dict__["name"] = name
super(BillingAccountExclusion, __self__).__init__(
'gcp:logging/billingAccountExclusion:BillingAccountExclusion',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
billing_account: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
filter: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None) -> 'BillingAccountExclusion':
"""
Get an existing BillingAccountExclusion resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] billing_account: The billing account to create the exclusion for.
:param pulumi.Input[str] description: A human-readable description.
:param pulumi.Input[bool] disabled: Whether this exclusion rule should be disabled or not. This defaults to
false.
:param pulumi.Input[str] filter: The filter to apply when excluding logs. Only log entries that match the filter are excluded.
See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced-filters) for information on how to
write a filter.
:param pulumi.Input[str] name: The name of the logging exclusion.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _BillingAccountExclusionState.__new__(_BillingAccountExclusionState)
__props__.__dict__["billing_account"] = billing_account
__props__.__dict__["description"] = description
__props__.__dict__["disabled"] = disabled
__props__.__dict__["filter"] = filter
__props__.__dict__["name"] = name
return BillingAccountExclusion(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="billingAccount")
def billing_account(self) -> pulumi.Output[str]:
"""
The billing account to create the exclusion for.
"""
return pulumi.get(self, "billing_account")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A human-readable description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def disabled(self) -> pulumi.Output[Optional[bool]]:
"""
Whether this exclusion rule should be disabled or not. This defaults to
false.
"""
return pulumi.get(self, "disabled")
@property
@pulumi.getter
def filter(self) -> pulumi.Output[str]:
"""
The filter to apply when excluding logs. Only log entries that match the filter are excluded.
See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced-filters) for information on how to
write a filter.
"""
return pulumi.get(self, "filter")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the logging exclusion.
"""
return pulumi.get(self, "name")
|
src/genie/libs/parser/iosxe/tests/ShowEnvTemperature/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 12721233 | expected_output = {
'switch': {
"1": {
'system_temperature_state': 'ok',
}
}
}
|
tests/test_status.py | johnashu/dataplicity-lomond | 225 | 12721234 | from lomond.status import Status
def test_constants():
expected_constants = {
'BAD_DATA', 'DATA_NOT_UNDERSTOOD', 'EXTENSION_FAILED', 'GOING_AWAY',
'MESSAGE_TOO_LARGE', 'NORMAL', 'POLICY_VIOLATION', 'PROTOCOL_ERROR',
'UNEXPECTED_CONDITION'
}
assert expected_constants == set(filter(
lambda constant: constant.isupper(),
dir(Status)
))
|
plenum/test/view_change/test_master_primary_different_from_previous.py | jandayanan/indy-plenum | 148 | 12721362 | import types
import pytest
from plenum.test.helper import checkViewNoForNodes, \
sdk_send_random_and_check, countDiscarded
from plenum.test.malicious_behaviors_node import slow_primary
from plenum.test.test_node import getPrimaryReplica, ensureElectionsDone
from plenum.test.view_change.helper import provoke_and_wait_for_view_change, ensure_view_change
from stp_core.common.log import getlogger
logger = getlogger()
def test_master_primary_different_from_previous(txnPoolNodeSet, looper,
sdk_pool_handle, sdk_wallet_client):
"""
After a view change, primary must be different from previous primary for
master instance, it does not matter for other instance. The primary is
benign and does not vote for itself.
"""
pr = slow_primary(txnPoolNodeSet, 0, delay=10)
old_pr_node_name = pr.node.name
# View change happens
ensure_view_change(looper, txnPoolNodeSet)
logger.debug("VIEW HAS BEEN CHANGED!")
# Elections done
ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
# New primary is not same as old primary
assert getPrimaryReplica(txnPoolNodeSet, 0).node.name != old_pr_node_name
pr.outBoxTestStasher.resetDelays()
# The new primary can still process requests
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5)
|
moto/polly/resources.py | jonnangle/moto-1 | 5,460 | 12721427 | # -*- coding: utf-8 -*-
VOICE_DATA = [
{
"Id": "Joanna",
"LanguageCode": "en-US",
"LanguageName": "US English",
"Gender": "Female",
"Name": "Joanna",
},
{
"Id": "Mizuki",
"LanguageCode": "ja-JP",
"LanguageName": "Japanese",
"Gender": "Female",
"Name": "Mizuki",
},
{
"Id": "Filiz",
"LanguageCode": "tr-TR",
"LanguageName": "Turkish",
"Gender": "Female",
"Name": "Filiz",
},
{
"Id": "Astrid",
"LanguageCode": "sv-SE",
"LanguageName": "Swedish",
"Gender": "Female",
"Name": "Astrid",
},
{
"Id": "Tatyana",
"LanguageCode": "ru-RU",
"LanguageName": "Russian",
"Gender": "Female",
"Name": "Tatyana",
},
{
"Id": "Maxim",
"LanguageCode": "ru-RU",
"LanguageName": "Russian",
"Gender": "Male",
"Name": "Maxim",
},
{
"Id": "Carmen",
"LanguageCode": "ro-RO",
"LanguageName": "Romanian",
"Gender": "Female",
"Name": "Carmen",
},
{
"Id": "Ines",
"LanguageCode": "pt-PT",
"LanguageName": "Portuguese",
"Gender": "Female",
"Name": "Inês",
},
{
"Id": "Cristiano",
"LanguageCode": "pt-PT",
"LanguageName": "Portuguese",
"Gender": "Male",
"Name": "Cristiano",
},
{
"Id": "Vitoria",
"LanguageCode": "pt-BR",
"LanguageName": "Brazilian Portuguese",
"Gender": "Female",
"Name": "Vitória",
},
{
"Id": "Ricardo",
"LanguageCode": "pt-BR",
"LanguageName": "Brazilian Portuguese",
"Gender": "Male",
"Name": "Ricardo",
},
{
"Id": "Maja",
"LanguageCode": "pl-PL",
"LanguageName": "Polish",
"Gender": "Female",
"Name": "Maja",
},
{
"Id": "Jan",
"LanguageCode": "pl-PL",
"LanguageName": "Polish",
"Gender": "Male",
"Name": "Jan",
},
{
"Id": "Ewa",
"LanguageCode": "pl-PL",
"LanguageName": "Polish",
"Gender": "Female",
"Name": "Ewa",
},
{
"Id": "Ruben",
"LanguageCode": "nl-NL",
"LanguageName": "Dutch",
"Gender": "Male",
"Name": "Ruben",
},
{
"Id": "Lotte",
"LanguageCode": "nl-NL",
"LanguageName": "Dutch",
"Gender": "Female",
"Name": "Lotte",
},
{
"Id": "Liv",
"LanguageCode": "nb-NO",
"LanguageName": "Norwegian",
"Gender": "Female",
"Name": "Liv",
},
{
"Id": "Giorgio",
"LanguageCode": "it-IT",
"LanguageName": "Italian",
"Gender": "Male",
"Name": "Giorgio",
},
{
"Id": "Carla",
"LanguageCode": "it-IT",
"LanguageName": "Italian",
"Gender": "Female",
"Name": "Carla",
},
{
"Id": "Karl",
"LanguageCode": "is-IS",
"LanguageName": "Icelandic",
"Gender": "Male",
"Name": "Karl",
},
{
"Id": "Dora",
"LanguageCode": "is-IS",
"LanguageName": "Icelandic",
"Gender": "Female",
"Name": "Dóra",
},
{
"Id": "Mathieu",
"LanguageCode": "fr-FR",
"LanguageName": "French",
"Gender": "Male",
"Name": "Mathieu",
},
{
"Id": "Celine",
"LanguageCode": "fr-FR",
"LanguageName": "French",
"Gender": "Female",
"Name": "Céline",
},
{
"Id": "Chantal",
"LanguageCode": "fr-CA",
"LanguageName": "Canadian French",
"Gender": "Female",
"Name": "Chantal",
},
{
"Id": "Penelope",
"LanguageCode": "es-US",
"LanguageName": "US Spanish",
"Gender": "Female",
"Name": "Penélope",
},
{
"Id": "Miguel",
"LanguageCode": "es-US",
"LanguageName": "US Spanish",
"Gender": "Male",
"Name": "Miguel",
},
{
"Id": "Enrique",
"LanguageCode": "es-ES",
"LanguageName": "Castilian Spanish",
"Gender": "Male",
"Name": "Enrique",
},
{
"Id": "Conchita",
"LanguageCode": "es-ES",
"LanguageName": "Castilian Spanish",
"Gender": "Female",
"Name": "Conchita",
},
{
"Id": "Geraint",
"LanguageCode": "en-GB-WLS",
"LanguageName": "Welsh English",
"Gender": "Male",
"Name": "Geraint",
},
{
"Id": "Salli",
"LanguageCode": "en-US",
"LanguageName": "US English",
"Gender": "Female",
"Name": "Salli",
},
{
"Id": "Kimberly",
"LanguageCode": "en-US",
"LanguageName": "US English",
"Gender": "Female",
"Name": "Kimberly",
},
{
"Id": "Kendra",
"LanguageCode": "en-US",
"LanguageName": "US English",
"Gender": "Female",
"Name": "Kendra",
},
{
"Id": "Justin",
"LanguageCode": "en-US",
"LanguageName": "US English",
"Gender": "Male",
"Name": "Justin",
},
{
"Id": "Joey",
"LanguageCode": "en-US",
"LanguageName": "US English",
"Gender": "Male",
"Name": "Joey",
},
{
"Id": "Ivy",
"LanguageCode": "en-US",
"LanguageName": "US English",
"Gender": "Female",
"Name": "Ivy",
},
{
"Id": "Raveena",
"LanguageCode": "en-IN",
"LanguageName": "Indian English",
"Gender": "Female",
"Name": "Raveena",
},
{
"Id": "Emma",
"LanguageCode": "en-GB",
"LanguageName": "British English",
"Gender": "Female",
"Name": "Emma",
},
{
"Id": "Brian",
"LanguageCode": "en-GB",
"LanguageName": "British English",
"Gender": "Male",
"Name": "Brian",
},
{
"Id": "Amy",
"LanguageCode": "en-GB",
"LanguageName": "British English",
"Gender": "Female",
"Name": "Amy",
},
{
"Id": "Russell",
"LanguageCode": "en-AU",
"LanguageName": "Australian English",
"Gender": "Male",
"Name": "Russell",
},
{
"Id": "Nicole",
"LanguageCode": "en-AU",
"LanguageName": "Australian English",
"Gender": "Female",
"Name": "Nicole",
},
{
"Id": "Vicki",
"LanguageCode": "de-DE",
"LanguageName": "German",
"Gender": "Female",
"Name": "Vicki",
},
{
"Id": "Marlene",
"LanguageCode": "de-DE",
"LanguageName": "German",
"Gender": "Female",
"Name": "Marlene",
},
{
"Id": "Hans",
"LanguageCode": "de-DE",
"LanguageName": "German",
"Gender": "Male",
"Name": "Hans",
},
{
"Id": "Naja",
"LanguageCode": "da-DK",
"LanguageName": "Danish",
"Gender": "Female",
"Name": "Naja",
},
{
"Id": "Mads",
"LanguageCode": "da-DK",
"LanguageName": "Danish",
"Gender": "Male",
"Name": "Mads",
},
{
"Id": "Gwyneth",
"LanguageCode": "cy-GB",
"LanguageName": "Welsh",
"Gender": "Female",
"Name": "Gwyneth",
},
{
"Id": "Jacek",
"LanguageCode": "pl-PL",
"LanguageName": "Polish",
"Gender": "Male",
"Name": "Jacek",
},
]
# {...} is also shorthand set syntax
LANGUAGE_CODES = {
"cy-GB",
"da-DK",
"de-DE",
"en-AU",
"en-GB",
"en-GB-WLS",
"en-IN",
"en-US",
"es-ES",
"es-US",
"fr-CA",
"fr-FR",
"is-IS",
"it-IT",
"ja-JP",
"nb-NO",
"nl-NL",
"pl-PL",
"pt-BR",
"pt-PT",
"ro-RO",
"ru-RU",
"sv-SE",
"tr-TR",
}
VOICE_IDS = {
"Geraint",
"Gwyneth",
"Mads",
"Naja",
"Hans",
"Marlene",
"Nicole",
"Russell",
"Amy",
"Brian",
"Emma",
"Raveena",
"Ivy",
"Joanna",
"Joey",
"Justin",
"Kendra",
"Kimberly",
"Salli",
"Conchita",
"Enrique",
"Miguel",
"Penelope",
"Chantal",
"Celine",
"Mathieu",
"Dora",
"Karl",
"Carla",
"Giorgio",
"Mizuki",
"Liv",
"Lotte",
"Ruben",
"Ewa",
"Jacek",
"Jan",
"Maja",
"Ricardo",
"Vitoria",
"Cristiano",
"Ines",
"Carmen",
"Maxim",
"Tatyana",
"Astrid",
"Filiz",
}
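# Quick sanity-check sketch (not part of moto itself): list any entries in
# VOICE_DATA that are missing from the lookup sets above.
#
#     print({voice["Id"] for voice in VOICE_DATA} - VOICE_IDS)
#     print({voice["LanguageCode"] for voice in VOICE_DATA} - LANGUAGE_CODES)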
|
grin-py/services/NEED_dosWatcher.py | hunternsk/grin-pool | 130 | 12721458 | #!/usr/bin/python
# Copyright 2018 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Watches both the pool and grin logs and tries to identify cases of denial of service,
# abuse, or badly misbehaving workers. Add them to mysql banned_workers table.
# Maybe include a code for the reason so bans can be lifted after appropriate time?
#
# Examples: Too many rapid connect/disconnects without shares submitted
# Idle disconnect too long too many times
# Submitting too many bad shares
# Etc....
import sys
import subprocess
import re
import glob
# XXX: THIS IS NOT WRITTEN YET
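# Rough sketch of one possible shape for this watcher (illustrative only; the
# log format, thresholds, and banned_workers schema are assumptions, not
# project code):
#
#     BAD_SHARE_RE = re.compile(r"Worker (?P<worker>\S+) submitted invalid share")
#
#     def count_matches(logpath, pattern):
#         counts = {}
#         with open(logpath) as logfile:
#             for line in logfile:
#                 match = pattern.search(line)
#                 if match:
#                     worker = match.group("worker")
#                     counts[worker] = counts.get(worker, 0) + 1
#         return counts
#
#     # Workers exceeding a threshold would then be inserted into the
#     # banned_workers table together with a reason code.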
|
corporate/migrations/0014_customerplan_end_date.py | dumpmemory/zulip | 17,004 | 12721480 | # Generated by Django 3.2.6 on 2021-09-17 10:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("corporate", "0013_alter_zulipsponsorshiprequest_org_website"),
]
operations = [
migrations.AddField(
model_name="customerplan",
name="end_date",
field=models.DateTimeField(null=True),
),
]
|
src/zapv2/ajaxSpider.py | thc202/zap-api-python | 146 | 12721491 | # Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2017 the ZAP development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file was automatically generated.
"""
import six
class ajaxSpider(object):
def __init__(self, zap):
self.zap = zap
@property
def allowed_resources(self):
"""
        Gets the allowed resources. The allowed resources are always fetched even if out of scope, allowing necessary resources (e.g. scripts) from 3rd-parties to be included.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/allowedResources/')))
@property
def status(self):
"""
Gets the current status of the crawler. Actual values are Stopped and Running.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/status/')))
def results(self, start=None, count=None):
"""
Gets the current results of the crawler.
This component is optional and therefore the API will only work if it is installed
"""
params = {}
if start is not None:
params['start'] = start
if count is not None:
params['count'] = count
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/results/', params)))
@property
def number_of_results(self):
"""
Gets the number of resources found.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/numberOfResults/')))
@property
def full_results(self):
"""
Gets the full crawled content detected by the AJAX Spider. Returns a set of values based on 'inScope' URLs, 'outOfScope' URLs, and 'errors' encountered during the last/current run of the AJAX Spider.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/fullResults/')))
@property
def option_browser_id(self):
"""
Gets the configured browser to use for crawling.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionBrowserId/')))
@property
def option_event_wait(self):
"""
Gets the time to wait after an event (in milliseconds). For example: the wait delay after the cursor hovers over an element, in order for a menu to display, etc.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionEventWait/')))
@property
def option_max_crawl_depth(self):
"""
Gets the configured value for the max crawl depth.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionMaxCrawlDepth/')))
@property
def option_max_crawl_states(self):
"""
Gets the configured value for the maximum crawl states allowed.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionMaxCrawlStates/')))
@property
def option_max_duration(self):
"""
Gets the configured max duration of the crawl, the value is in minutes.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionMaxDuration/')))
@property
def option_number_of_browsers(self):
"""
Gets the configured number of browsers to be used.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionNumberOfBrowsers/')))
@property
def option_reload_wait(self):
"""
Gets the configured time to wait after reloading the page, this value is in milliseconds.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionReloadWait/')))
@property
def option_click_default_elems(self):
"""
Gets the configured value for 'Click Default Elements Only', HTML elements such as 'a', 'button', 'input', all associated with some action or links on the page.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionClickDefaultElems/')))
@property
def option_click_elems_once(self):
"""
Gets the value configured for the AJAX Spider to know if it should click on the elements only once.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionClickElemsOnce/')))
@property
def option_random_inputs(self):
"""
        Gets whether the AJAX Spider will use random values in form fields when crawling.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionRandomInputs/')))
def scan(self, url=None, inscope=None, contextname=None, subtreeonly=None, apikey=''):
"""
Runs the AJAX Spider against a given target.
This component is optional and therefore the API will only work if it is installed
"""
params = {'apikey': apikey}
if url is not None:
params['url'] = url
if inscope is not None:
params['inScope'] = inscope
if contextname is not None:
params['contextName'] = contextname
if subtreeonly is not None:
params['subtreeOnly'] = subtreeonly
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/scan/', params)))
def scan_as_user(self, contextname, username, url=None, subtreeonly=None, apikey=''):
"""
Runs the AJAX Spider from the perspective of a User of the web application.
This component is optional and therefore the API will only work if it is installed
"""
params = {'contextName': contextname, 'userName': username, 'apikey': apikey}
if url is not None:
params['url'] = url
if subtreeonly is not None:
params['subtreeOnly'] = subtreeonly
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/scanAsUser/', params)))
def stop(self, apikey=''):
"""
Stops the AJAX Spider.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/stop/', {'apikey': apikey})))
def add_allowed_resource(self, regex, enabled=None, apikey=''):
"""
Adds an allowed resource.
This component is optional and therefore the API will only work if it is installed
"""
params = {'regex': regex, 'apikey': apikey}
if enabled is not None:
params['enabled'] = enabled
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/addAllowedResource/', params)))
def remove_allowed_resource(self, regex, apikey=''):
"""
Removes an allowed resource.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/removeAllowedResource/', {'regex': regex, 'apikey': apikey})))
def set_enabled_allowed_resource(self, regex, enabled, apikey=''):
"""
Sets whether or not an allowed resource is enabled.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setEnabledAllowedResource/', {'regex': regex, 'enabled': enabled, 'apikey': apikey})))
def set_option_browser_id(self, string, apikey=''):
"""
Sets the configuration of the AJAX Spider to use one of the supported browsers.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionBrowserId/', {'String': string, 'apikey': apikey})))
def set_option_click_default_elems(self, boolean, apikey=''):
"""
        Sets whether or not the AJAX Spider will only click on the default HTML elements.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionClickDefaultElems/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_click_elems_once(self, boolean, apikey=''):
"""
When enabled, the crawler attempts to interact with each element (e.g., by clicking) only once.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionClickElemsOnce/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_event_wait(self, integer, apikey=''):
"""
Sets the time to wait after an event (in milliseconds). For example: the wait delay after the cursor hovers over an element, in order for a menu to display, etc.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionEventWait/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_crawl_depth(self, integer, apikey=''):
"""
Sets the maximum depth that the crawler can reach.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionMaxCrawlDepth/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_crawl_states(self, integer, apikey=''):
"""
Sets the maximum number of states that the crawler should crawl.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionMaxCrawlStates/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_duration(self, integer, apikey=''):
"""
The maximum time that the crawler is allowed to run.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionMaxDuration/', {'Integer': integer, 'apikey': apikey})))
def set_option_number_of_browsers(self, integer, apikey=''):
"""
Sets the number of windows to be used by AJAX Spider.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionNumberOfBrowsers/', {'Integer': integer, 'apikey': apikey})))
def set_option_random_inputs(self, boolean, apikey=''):
"""
When enabled, inserts random values into form fields.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionRandomInputs/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_reload_wait(self, integer, apikey=''):
"""
Sets the time to wait after the page is loaded before interacting with it.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionReloadWait/', {'Integer': integer, 'apikey': apikey})))
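# Illustrative usage sketch (the target URL and API key are placeholders;
# assumes the ZAPv2 client from this package, which exposes this class as
# `zap.ajaxSpider`):
#
#     import time
#     from zapv2 import ZAPv2
#
#     zap = ZAPv2(apikey='changeme')
#     zap.ajaxSpider.scan(url='http://example.com', inscope=True)
#     while zap.ajaxSpider.status.lower() == 'running':
#         time.sleep(2)
#     print(zap.ajaxSpider.number_of_results)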
|