id (string, length 1-265) | text (string, length 6-5.19M) | dataset_id (string, 7 classes) |
---|---|---|
95329 | <reponame>Melimet/DAP2020
#!/usr/bin/python3
import unittest
from tmc import points
from tmc.utils import load, get_out
coefficient_of_determination = load('src.coefficient_of_determination', 'coefficient_of_determination')
class CoefficientOfDetermination(unittest.TestCase):
@points('p05-12.1')
def test_all_features(self):
scores = coefficient_of_determination()
self.assertAlmostEqual(scores[0], 1.0, msg="Incorrect coefficient of determination!")
@points('p05-12.2')
def test_individual_features(self):
scores = coefficient_of_determination()
sums=[0.0258828579115,0.0968186306153,0.0881564161891,0.868276772892]
for i in range(1,5):
self.assertAlmostEqual(sums[i-1], sum(scores[i:i+2]),
msg="Incorrect individual coefficients of determination!")
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3265141 | <reponame>ASMlover/study
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2020 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import random
import socket
import time
def on_boring(conn:socket.socket, msg:str) -> None:
for _ in range(5):
conn.send(msg.encode())
time.sleep(random.random())
conn.send(b"I'm boring, I'm quit!")
def main():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as acceptor:
acceptor.bind(("0.0.0.0", 5555))
acceptor.listen()
while True:
conn, _ = acceptor.accept()
with conn:
on_boring(conn, "boring!")
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print("boring server finished")
except Exception as e:
print(e)
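# A minimal way to exercise this server from another process (illustrative sketch, not
# part of the original snippet):
#     import socket
#     with socket.create_connection(("127.0.0.1", 5555)) as c:
#         for _ in range(6):
#             print(c.recv(1024).decode())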
| StarcoderdataPython |
124429 | from sports.nba.nba_team import NBA_Team
class PhoenixSuns(NBA_Team):
"""
    NBA Phoenix Suns Static Information
"""
full_name = "<NAME>"
name = "Suns"
team_id = 1610612756
def __init__(self):
"""
"""
super().__init__()
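# Illustrative usage (assuming NBA_Team's constructor takes no required arguments, as the
# bare super().__init__() call above suggests):
#     PhoenixSuns().team_id   # -> 1610612756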
| StarcoderdataPython |
3255579 | class OperatorUtils():
@classmethod
def apply_assignment_op(cls, left, op, right):
if op == "=":
return right
elif op == "*=":
return left * right
elif op == "/=":
return left / right
elif op == "%=":
return left % right
elif op == "+=":
return left + right
elif op == "-=":
return left - right
elif op == "<<=":
return left << right
elif op == ">>=":
return left >> right
elif op == "^=":
return left ^ right
elif op == "|=":
return left | right
elif op == "&=":
return left & right
| StarcoderdataPython |
1610338 | """Rational quadratic kernel."""
from typing import Optional
import numpy as np
import probnum.utils as _utils
from probnum.typing import IntArgType, ScalarArgType
from ._kernel import IsotropicMixin, Kernel
class RatQuad(Kernel, IsotropicMixin):
r"""Rational quadratic kernel.
Covariance function defined by
.. math::
:nowrap:
\begin{equation}
k(x_0, x_1)
= \left(
1 + \frac{\lVert x_0 - x_1 \rVert_2^2}{2 \alpha l^2}
\right)^{-\alpha},
\end{equation}
where :math:`\alpha > 0`. For :math:`\alpha \rightarrow \infty` the rational
quadratic kernel converges to the :class:`~probnum.kernels.ExpQuad` kernel.
Parameters
----------
input_dim :
Input dimension of the kernel.
lengthscale :
Lengthscale :math:`l` of the kernel. Describes the input scale on which the
process varies.
alpha :
Scale mixture :math:`\alpha`. Positive constant determining the weighting
between different lengthscales.
See Also
--------
ExpQuad : Exponentiated Quadratic / RBF kernel.
Examples
--------
>>> import numpy as np
>>> from probnum.kernels import RatQuad
>>> K = RatQuad(input_dim=1, lengthscale=0.1, alpha=3)
>>> xs = np.linspace(0, 1, 3)[:, None]
>>> K(xs[:, None, :], xs[None, :, :])
array([[1.00000000e+00, 7.25051190e-03, 1.81357765e-04],
[7.25051190e-03, 1.00000000e+00, 7.25051190e-03],
[1.81357765e-04, 7.25051190e-03, 1.00000000e+00]])
"""
def __init__(
self,
input_dim: IntArgType,
lengthscale: ScalarArgType = 1.0,
alpha: ScalarArgType = 1.0,
):
self.lengthscale = _utils.as_numpy_scalar(lengthscale)
self.alpha = _utils.as_numpy_scalar(alpha)
if not self.alpha > 0:
raise ValueError(f"Scale mixture alpha={self.alpha} must be positive.")
super().__init__(input_dim=input_dim)
def _evaluate(self, x0: np.ndarray, x1: Optional[np.ndarray] = None) -> np.ndarray:
if x1 is None:
return np.ones_like(x0[..., 0])
return (
1.0
+ (
self._squared_euclidean_distances(x0, x1)
/ (2.0 * self.alpha * self.lengthscale ** 2)
)
) ** -self.alpha
| StarcoderdataPython |
75341 | <filename>conekt/flask_blast/__init__.py
from .blast import BlastThread | StarcoderdataPython |
3354636 | # Generated by Django 3.0.4 on 2021-03-07 01:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0013_auto_20210307_0125'),
]
operations = [
migrations.AddField(
model_name='post',
name='class_in',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='api.Class'),
preserve_default=False,
),
migrations.AddField(
model_name='post',
name='viewed',
field=models.ManyToManyField(blank=True, related_name='posts_visited', to=settings.AUTH_USER_MODEL),
),
]
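# This migration would normally be applied with Django's standard management command
# (illustrative, not part of the generated file):
#     python manage.py migrate api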
| StarcoderdataPython |
23190 | <filename>src/python_module_setup.py
from distutils.core import setup, Extension
import os
#os.environ['USE_CUDA'] = '1'
#os.environ['USE_BLAS'] = '1'
#os.environ['USE_OPENMP'] = '1'
cuda_obj = []
cuda_extra = []
cuda_include = []
cuda_macro = [(None, None)]
blas_obj = []
blas_extra = []
blas_include = []
blas_macro = [(None, None)]
open_mp_extra = []
if(os.environ.get('USE_CUDA') != None):
print("USE_CUDA")
cuda_obj = ['cuda/cuda_main.o', 'cuda/cuda_conv_layer.o', 'cuda/cuda_dense_layer.o', 'cuda/cuda_pool_layer.o', 'cuda/cuda_activ_functions.o']
cuda_include = ['/usr/local/cuda-11.3/include']
cuda_extra = ['-L/usr/local/cuda-11.3/lib64', '-lcudart', '-lcublas']
cuda_macro = [('CUDA','1'), ('CUDA_THREADS_PER_BLOCKS', '256')]
if(os.environ.get('USE_BLAS') != None):
print("USE_BLAS")
blas_obj = ['blas/blas_dense_layer.o', 'blas/blas_conv_layer.o']
blas_include = ['/opt/OpenBLAS/include']
blas_extra = ['-lopenblas', '-L/opt/OpenBLAS/lib']
blas_macro = [('BLAS', '1')]
if(os.environ.get('USE_OPENMP') != None):
print("USE_OPENMP")
open_mp_extra = ['-fopenmp']
#Re-add naiv: 'naiv/naiv_dense_layer.o', 'naiv/naiv_conv_layer.o', 'naiv/naiv_pool_layer.o'
setup(name = 'CIANNA',
version = '0.9',
ext_modules = [Extension('CIANNA', ['python_module.c'],
extra_objects=['conv_layer.o', 'dense_layer.o', 'pool_layer.o', 'activ_functions.o', 'initializers.o', 'vars.o', 'auxil.o', 'naiv/naiv_dense_layer.o', 'naiv/naiv_conv_layer.o', 'naiv/naiv_pool_layer.o'] + cuda_obj + blas_obj,
include_dirs= cuda_include + blas_include,
extra_link_args=['-O3 -std=c99'] + cuda_extra + blas_extra + open_mp_extra,
define_macros=[('MAX_LAYERS_NB', '100'), ('MAX_NETWORKS_NB','10')] + cuda_macro + blas_macro)])
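# The extension is built through the usual distutils workflow, with the optional CUDA/BLAS/OpenMP
# backends toggled by the environment variables read above (illustrative invocation, not part of
# the original file):
#     USE_CUDA=1 USE_BLAS=1 python python_module_setup.py build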
| StarcoderdataPython |
1725759 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Functions in Omicron that are closely tied to performance are collected in this module so that
they can be accelerated later.
TODO: some of these functions were previously accelerated with numba, but that was dropped due to
compatibility issues between numba and the OS. This needs to be tracked going forward.
"""
import logging
import numpy as np
from deprecated import deprecated
logger = logging.getLogger(__name__)
@deprecated(version="1.1")
def index(arr, item): # pragma: no cover
for idx, val in np.ndenumerate(arr):
if val == item:
return idx
    # If no item was found, return -1; returning None (or other types) could be a problem
    # due to numba's type inference.
return -1
@deprecated(version="1.1")
def index_sorted(arr, item): # pragma: no cover
pos = np.searchsorted(arr, item)
if arr[pos] == item:
return pos
else:
return -1
@deprecated(
category=PendingDeprecationWarning,
version="1.1",
reason="this will be moved into omicron.core.numpy_extensions module",
)
def count_between(arr, start, end):
"""计算数组中,`start`元素与`end`元素之间共有多少个元素
要求arr必须是已排序。计算结果会包含区间边界点。
Examples:
>>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111]
>>> count_between(arr, 20050104, 20050111)
6
>>> count_between(arr, 20050104, 20050109)
4
"""
pos_start = np.searchsorted(arr, start, side="right")
pos_end = np.searchsorted(arr, end, side="right")
counter = pos_end - pos_start + 1
if start < arr[0]:
counter -= 1
if end > arr[-1]:
counter -= 1
return counter
@deprecated(
category=PendingDeprecationWarning,
version="1.1",
reason="this will be moved to omicron.core.numpy_extensions module",
)
def shift(arr, start, offset):
"""在numpy数组arr中,找到start(或者最接近的一个),取offset对应的元素。
要求`arr`已排序。`offset`为正,表明向后移位;`offset`为负,表明向前移位
Examples:
>>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111]
>>> shift(arr, 20050104, 1)
20050105
>>> shift(arr, 20050105, -1)
20050104
        >>> # start is already beyond the right boundary; shifting right returns start unchanged
>>> shift(arr, 20050120, 1)
20050120
Args:
        arr : a sorted array
        start : any value of a type that numpy can compare against the elements of `arr`
        offset (int): number of positions to shift; positive shifts toward later elements,
            negative toward earlier ones
    Returns:
        the element value obtained after shifting
"""
pos = np.searchsorted(arr, start, side="right")
if pos + offset - 1 >= len(arr):
return start
else:
return arr[pos + offset - 1]
@deprecated(
category=PendingDeprecationWarning,
version="1.1",
reason="this will be moved to omicron.core.timeframe module",
)
def minute_frames_floor(ticks, moment):
"""
    For minute-level frames, return the frame aligned downward to the frame ticks, together with a
    date carry. If the alignment has to fall back to the previous trading day, the carry is -1;
    otherwise it is 0.
Examples:
>>> ticks = [600, 630, 660, 690, 810, 840, 870, 900]
>>> minute_frames_floor(ticks, 545)
(900, -1)
>>> minute_frames_floor(ticks, 600)
(600, 0)
>>> minute_frames_floor(ticks, 605)
(600, 0)
>>> minute_frames_floor(ticks, 899)
(870, 0)
>>> minute_frames_floor(ticks, 900)
(900, 0)
>>> minute_frames_floor(ticks, 905)
(900, 0)
Args:
        ticks (np.array or list): the frame tick values
        moment (int): minutes expressed as an integer, e.g. 900 means 15:00
    Returns:
        tuple, where the first item is the new moment and the second is the date carry
"""
if moment < ticks[0]:
return ticks[-1], -1
    # 'right' is equivalent to ticks <= m
index = np.searchsorted(ticks, moment, side="right")
return ticks[index - 1], 0
@deprecated(
category=PendingDeprecationWarning,
version="1.1",
reason="this will be moved to omicron.core.numpy_extensions module",
)
def floor(arr, item):
"""
    In the array `arr`, find the value that is less than or equal to `item`. If `item` is smaller
    than every element of `arr`, return arr[0]; if it is larger than every element, return arr[-1].
    Unlike `minute_frames_floor`, this function does not wrap around or carry over.
Examples:
>>> a = [3, 6, 9]
>>> floor(a, -1)
3
>>> floor(a, 9)
9
>>> floor(a, 10)
9
>>> floor(a, 4)
3
>>> floor(a,10)
9
Args:
        arr: a sorted array
        item: the value whose floor within `arr` is wanted
    Returns:
        the largest element of `arr` that is less than or equal to `item`, subject to the
        boundary behavior described above
"""
if item < arr[0]:
return arr[0]
index = np.searchsorted(arr, item, side="right")
return arr[index - 1]
@deprecated(
category=PendingDeprecationWarning,
version="1.1",
reason="this will be moved to omicron.core.numpy_extensions module",
)
def join_by_left(key, r1, r2, mask=True):
"""左连接 `r1`, `r2` by `key`
如果`r1`中存在`r2`中没有的行,则该行对应的`r2`中的那些字段的取值将使用`fill`来填充。如果
same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows
r1 have duplicat keys
[Reference: stackoverflow](https://stackoverflow.com/a/53261882/13395693)
Examples:
>>> # to join the following
>>> # [[ 1, 2],
>>> # [ 1, 3], x [[1, 5],
>>> # [ 2, 3]] [4, 7]]
>>> # only first two rows in left will be joined
>>> r1 = np.array([(1, 2), (1,3), (2,3)], dtype=[('seq', 'i4'), ('score', 'i4')])
>>> r2 = np.array([(1, 5), (4,7)], dtype=[('seq', 'i4'), ('age', 'i4')])
>>> joined = join_by_left('seq', r1, r2)
>>> print(joined)
[(1, 2, 5) (1, 3, 5) (2, 3, --)]
>>> print(joined.dtype)
(numpy.record, [('seq', '<i4'), ('score', '<i4'), ('age', '<i4')])
>>> joined[2][2]
masked
>>> joined.tolist()[2][2] == None
True
Args:
        key : the field name to join on
        r1 : dataset 1
        r2 : dataset 2
        fill : value used to fill cells that could not be matched
Returns:
a numpy array
"""
# figure out the dtype of the result array
descr1 = r1.dtype.descr
descr2 = [d for d in r2.dtype.descr if d[0] not in r1.dtype.names]
descrm = descr1 + descr2
# figure out the fields we'll need from each array
f1 = [d[0] for d in descr1]
f2 = [d[0] for d in descr2]
# cache the number of columns in f1
ncol1 = len(f1)
# get a dict of the rows of r2 grouped by key
rows2 = {}
for row2 in r2:
rows2.setdefault(row2[key], []).append(row2)
# figure out how many rows will be in the result
nrowm = 0
for k1 in r1[key]:
if k1 in rows2:
nrowm += len(rows2[k1])
else:
nrowm += 1
# allocate the return array
# ret = np.full((nrowm, ), fill, dtype=descrm)
_ret = np.recarray(nrowm, dtype=descrm)
if mask:
ret = np.ma.array(_ret, mask=True)
else:
ret = _ret
# merge the data into the return array
i = 0
for row1 in r1:
if row1[key] in rows2:
for row2 in rows2[row1[key]]:
ret[i] = tuple(row1[f1]) + tuple(row2[f2])
i += 1
else:
for j in range(ncol1):
ret[i][j] = row1[j]
i += 1
return ret
@deprecated(
category=PendingDeprecationWarning,
version="1.1",
reason="this will be moved to omicron.core.numpy_extensions module",
)
def numpy_append_fields(base, names, data, dtypes):
"""给现有的数组`base`增加新的字段
实现了`numpy.lib.recfunctions.rec_append_fields`的功能。因为`rec_append_fields`不能处
理`data`元素的类型为Object的情况
Example:
        >>> # add a single new field
>>> import numpy
>>> old = np.array([i for i in range(3)], dtype=[('col1', '<f4')])
>>> new_list = [2 * i for i in range(3)]
>>> res = numpy_append_fields(old, 'new_col', new_list, [('new_col', '<f4')])
>>> print(res)
... # doctest: +NORMALIZE_WHITESPACE
[(0., 0.) (1., 2.) (2., 4.)]
        >>> # add multiple new fields
>>> data = [res['col1'].tolist(), res['new_col'].tolist()]
>>> print(numpy_append_fields(old, ('col3', 'col4'), data, [('col3', '<f4'), ('col4', '<f4')]))
... # doctest: +NORMALIZE_WHITESPACE
[(0., 0., 0.) (1., 1., 2.) (2., 2., 4.)]
Args:
        base ([numpy.array]): the base array
        names ([type]): name(s) of the new field(s); either a single string or a list of strings
        data (list): data for the added field(s), as a list
        dtypes ([type]): dtypes of the new fields
"""
if isinstance(names, str):
names = [
names,
]
data = [
data,
]
result = np.empty(base.shape, dtype=base.dtype.descr + dtypes)
for col in base.dtype.names:
result[col] = base[col]
for i in range(len(names)):
result[names[i]] = data[i]
return result
| StarcoderdataPython |
123905 | <gh_stars>10-100
"""Algorithms for STRIPS learning that start from the most general operators,
then specialize them based on the data."""
import itertools
from typing import Dict, List, Set
from predicators.src import utils
from predicators.src.nsrt_learning.strips_learning import BaseSTRIPSLearner
from predicators.src.settings import CFG
from predicators.src.structs import ParameterizedOption, \
PartialNSRTAndDatastore, Segment, STRIPSOperator
class GeneralToSpecificSTRIPSLearner(BaseSTRIPSLearner):
"""Base class for a general-to-specific STRIPS learner."""
def _initialize_general_pnad_for_option(
self, parameterized_option: ParameterizedOption
) -> PartialNSRTAndDatastore:
"""Create the most general PNAD for the given option."""
# Create the parameters, which are determined solely from the option
# types, since the most general operator has no add/delete effects.
parameters = utils.create_new_variables(parameterized_option.types)
option_spec = (parameterized_option, parameters)
# In the most general operator, the side predicates contain ALL
# predicates.
side_predicates = self._predicates.copy()
# There are no add effects or delete effects. The preconditions
# are initialized to be trivial. They will be recomputed next.
op = STRIPSOperator(parameterized_option.name, parameters, set(),
set(), set(), side_predicates)
pnad = PartialNSRTAndDatastore(op, [], option_spec)
# Recompute datastore. This simply clusters by option, since the
# side predicates contain all predicates, and effects are trivial.
self._recompute_datastores_from_segments([pnad])
# Determine the initial preconditions via a lifted intersection.
preconditions = self._induce_preconditions_via_intersection(pnad)
pnad.op = pnad.op.copy_with(preconditions=preconditions)
return pnad
class BackchainingSTRIPSLearner(GeneralToSpecificSTRIPSLearner):
"""Learn STRIPS operators by backchaining."""
def _learn(self) -> List[PartialNSRTAndDatastore]:
# Initialize the most general PNADs by merging self._initial_pnads.
# As a result, we will have one very general PNAD per option.
param_opt_to_general_pnad = {}
param_opt_to_nec_pnads: Dict[ParameterizedOption,
List[PartialNSRTAndDatastore]] = {}
# Extract all parameterized options from the data.
parameterized_options = set()
for ll_traj, seg_traj in zip(self._trajectories,
self._segmented_trajs):
if not ll_traj.is_demo:
continue
for segment in seg_traj:
parameterized_options.add(segment.get_option().parent)
# Set up the param_opt_to_general_pnad and param_opt_to_nec_pnads
# dictionaries.
for param_opt in parameterized_options:
pnad = self._initialize_general_pnad_for_option(param_opt)
param_opt_to_general_pnad[param_opt] = pnad
param_opt_to_nec_pnads[param_opt] = []
self._assert_all_data_in_exactly_one_datastore(
list(param_opt_to_general_pnad.values()))
prev_itr_ops: Set[STRIPSOperator] = set()
# We loop until the harmless PNADs induced by our procedure
# converge to a fixed point (i.e, they don't change after two
# subsequent iterations).
while True:
# Run multiple passes of backchaining over the data until
# convergence to a fixed point. Note that this process creates
# operators with only parameters, preconditions, and add effects.
self._backchain_multipass(param_opt_to_nec_pnads,
param_opt_to_general_pnad)
# Induce delete effects, side predicates and potentially
# keep effects.
self._induce_delete_side_keep(param_opt_to_nec_pnads)
# Harmlessness should now hold, but it's slow to check.
if CFG.backchaining_check_intermediate_harmlessness:
assert self._check_harmlessness(
self._get_uniquely_named_nec_pnads(param_opt_to_nec_pnads))
# Recompute datastores and preconditions for all PNADs.
# Filter out PNADs that don't have datastores.
cur_itr_pnads_unfiltered = [
pnad for pnads in param_opt_to_nec_pnads.values()
for pnad in pnads
]
self._recompute_datastores_from_segments(cur_itr_pnads_unfiltered)
cur_itr_pnads_filtered = []
for pnad in cur_itr_pnads_unfiltered:
if len(pnad.datastore) > 0:
new_pre = self._induce_preconditions_via_intersection(pnad)
# NOTE: this implicitly changes param_opt_to_nec_pnads
# as well, since we're directly modifying the PNAD objects.
pnad.op = pnad.op.copy_with(preconditions=new_pre)
cur_itr_pnads_filtered.append(pnad)
else:
param_opt_to_nec_pnads[pnad.option_spec[0]].remove(pnad)
del cur_itr_pnads_unfiltered # should be unused after this
# Check if the PNAD set has converged. If so, break.
if {pnad.op for pnad in cur_itr_pnads_filtered} == prev_itr_ops:
break
prev_itr_ops = {pnad.op for pnad in cur_itr_pnads_filtered}
# Assign a unique name to each PNAD.
final_pnads = self._get_uniquely_named_nec_pnads(
param_opt_to_nec_pnads)
# Assert data has been correctly partitioned amongst PNADs.
self._assert_all_data_in_exactly_one_datastore(final_pnads)
return final_pnads
def _backchain_multipass(
self, param_opt_to_nec_pnads: Dict[ParameterizedOption,
List[PartialNSRTAndDatastore]],
param_opt_to_general_pnad: Dict[ParameterizedOption,
PartialNSRTAndDatastore]
) -> None:
"""Take multiple passes through the demonstrations, running
self._backchain_one_pass() each time.
Keep going until the PNADs reach a fixed point. Note that this
process creates operators with only parameters, preconditions,
and add effects.
"""
while True:
# Before each pass, clear the poss_keep_effects
# of all the PNADs. We do this because we only want the
# poss_keep_effects of the final pass, where the PNADs did
# not change. However, we cannot simply clear the
# pnad.seg_to_keep_effects_sub because some of these
# substitutions might be necessary if this happens to be
# a PNAD that already has keep effects. Thus, we call a
# method that handles this correctly.
for pnads in param_opt_to_nec_pnads.values():
for pnad in pnads:
pnad.poss_keep_effects.clear()
self._clear_unnecessary_keep_effs_sub(pnad)
# Run one pass of backchaining.
nec_pnad_set_changed = self._backchain_one_pass(
param_opt_to_nec_pnads, param_opt_to_general_pnad)
if not nec_pnad_set_changed:
break
def _backchain_one_pass(
self, param_opt_to_nec_pnads: Dict[ParameterizedOption,
List[PartialNSRTAndDatastore]],
param_opt_to_general_pnad: Dict[ParameterizedOption,
PartialNSRTAndDatastore]
) -> bool:
"""Take one pass through the demonstrations in the given order.
Go through each one from the end back to the start, making the
PNADs more specific whenever needed. Return whether any PNAD was
changed.
"""
# Reset all segments' necessary_add_effects so that they aren't
# accidentally used from a previous iteration of backchaining.
self._reset_all_segment_add_effs()
nec_pnad_set_changed = False
for ll_traj, seg_traj in zip(self._trajectories,
self._segmented_trajs):
if not ll_traj.is_demo:
continue
traj_goal = self._train_tasks[ll_traj.train_task_idx].goal
atoms_seq = utils.segment_trajectory_to_atoms_sequence(seg_traj)
assert traj_goal.issubset(atoms_seq[-1])
# This variable, necessary_image, gets updated as we
# backchain. It always holds the set of ground atoms that
# are necessary for the remainder of the plan to reach the
# goal. At the start, necessary_image is simply the goal.
necessary_image = set(traj_goal)
for t in range(len(atoms_seq) - 2, -1, -1):
segment = seg_traj[t]
option = segment.get_option()
# Find the necessary PNADs associated with this option. If
# there are none, then use the general PNAD associated with
# this option. (But make sure to use a copy of it, because we
# don't want the general PNAD to get mutated when we mutate
# necessary PNADs!)
if len(param_opt_to_nec_pnads[option.parent]) == 0:
general_pnad = param_opt_to_general_pnad[option.parent]
pnads_for_option = [
PartialNSRTAndDatastore(general_pnad.op,
list(general_pnad.datastore),
general_pnad.option_spec)
]
else:
pnads_for_option = param_opt_to_nec_pnads[option.parent]
# Compute the ground atoms that must be added on this timestep.
# They must be a subset of the current PNAD's add effects.
necessary_add_effects = necessary_image - atoms_seq[t]
assert necessary_add_effects.issubset(segment.add_effects)
# Update the segment's necessary_add_effects.
segment.necessary_add_effects = necessary_add_effects
# We start by checking if any of the PNADs associated with the
# demonstrated option are able to match this transition.
objects = set(segment.states[0])
pnad, var_to_obj = self._find_best_matching_pnad_and_sub(
segment, objects, pnads_for_option)
if pnad is not None:
assert var_to_obj is not None
obj_to_var = {v: k for k, v in var_to_obj.items()}
assert len(var_to_obj) == len(obj_to_var)
ground_op = pnad.op.ground(
tuple(var_to_obj[var] for var in pnad.op.parameters))
if len(param_opt_to_nec_pnads[option.parent]) == 0:
param_opt_to_nec_pnads[option.parent].append(pnad)
# If we weren't able to find a substitution (i.e, the above
# _find_best_matching call didn't yield a PNAD), we need to
# spawn a new PNAD from the most general PNAD to cover
# these necessary add effects.
else:
nec_pnad_set_changed = True
pnad = self._spawn_new_pnad(
param_opt_to_general_pnad[option.parent], segment)
param_opt_to_nec_pnads[option.parent].append(pnad)
# Recompute datastores for ALL PNADs associated with this
# option. We need to do this because the new PNAD may now
# be a better match for some transition that we previously
# matched to another PNAD.
self._recompute_datastores_from_segments(
param_opt_to_nec_pnads[option.parent])
# Recompute all preconditions, now that we have recomputed
# the datastores. While doing this, keep track of any
# PNADs that get empty datastores.
pnads_to_remove = []
for nec_pnad in param_opt_to_nec_pnads[option.parent]:
if len(nec_pnad.datastore) > 0:
pre = self._induce_preconditions_via_intersection(
nec_pnad)
nec_pnad.op = nec_pnad.op.copy_with(
preconditions=pre)
else:
pnads_to_remove.append(nec_pnad)
# Remove PNADs that are no longer necessary because they
# have no data in their datastores.
for rem_pnad in pnads_to_remove:
param_opt_to_nec_pnads[option.parent].remove(rem_pnad)
# After all this, the unification call that failed earlier
# (leading us into the current else statement) should work.
best_score_pnad, var_to_obj = \
self._find_best_matching_pnad_and_sub(
segment, objects,
param_opt_to_nec_pnads[option.parent])
assert var_to_obj is not None
assert best_score_pnad == pnad
obj_to_var = {v: k for k, v in var_to_obj.items()}
assert len(var_to_obj) == len(obj_to_var)
ground_op = pnad.op.ground(
tuple(var_to_obj[var] for var in pnad.op.parameters))
# Every atom in the necessary_image that wasn't in the
# ground_op's add effects is a possible keep effect. This
# may add new variables, whose mappings for this segment
# we keep track of in the seg_to_keep_effects_sub dict.
for atom in necessary_image - ground_op.add_effects:
keep_eff_sub = {}
for obj in atom.objects:
if obj in obj_to_var:
continue
new_var = utils.create_new_variables(
[obj.type], obj_to_var.values())[0]
obj_to_var[obj] = new_var
keep_eff_sub[new_var] = obj
pnad.poss_keep_effects.add(atom.lift(obj_to_var))
if segment not in pnad.seg_to_keep_effects_sub:
pnad.seg_to_keep_effects_sub[segment] = {}
pnad.seg_to_keep_effects_sub[segment].update(keep_eff_sub)
# Update necessary_image for this timestep. It no longer
# needs to include the ground add effects of this PNAD, but
# must now include its ground preconditions.
necessary_image -= {
a.ground(var_to_obj)
for a in pnad.op.add_effects
}
necessary_image |= {
a.ground(var_to_obj)
for a in pnad.op.preconditions
}
return nec_pnad_set_changed
def _induce_delete_side_keep(
self, param_opt_to_nec_pnads: Dict[ParameterizedOption,
List[PartialNSRTAndDatastore]]
) -> None:
"""Given the current PNADs where add effects and preconditions are
correct, learn the remaining components: delete effects, side
predicates, and keep_effects.
Note that this may require spawning new PNADs with keep effects.
"""
for option, nec_pnad_list in sorted(param_opt_to_nec_pnads.items(),
key=str):
pnads_with_keep_effects = set()
for pnad in nec_pnad_list:
self._compute_pnad_delete_effects(pnad)
self._compute_pnad_side_predicates(pnad)
pnads_with_keep_effects |= self._get_pnads_with_keep_effects(
pnad)
param_opt_to_nec_pnads[option].extend(
list(pnads_with_keep_effects))
def _reset_all_segment_add_effs(self) -> None:
"""Reset all segments' necessary_add_effects to None."""
for ll_traj, seg_traj in zip(self._trajectories,
self._segmented_trajs):
if not ll_traj.is_demo:
continue
for segment in seg_traj:
segment.necessary_add_effects = None
@staticmethod
def _clear_unnecessary_keep_effs_sub(
pnad: PartialNSRTAndDatastore) -> None:
"""Clear unnecessary substitution values from the PNAD's
seg_to_keep_effects_sub_dict.
A substitution is unnecessary if it concerns a variable that
isn't in the PNAD's op parameters.
"""
for segment, keep_eff_sub in pnad.seg_to_keep_effects_sub.items():
new_keep_eff_sub_dict = {}
for var, obj in keep_eff_sub.items():
if var in pnad.op.parameters:
new_keep_eff_sub_dict[var] = obj
pnad.seg_to_keep_effects_sub[segment] = new_keep_eff_sub_dict
@staticmethod
def _get_uniquely_named_nec_pnads(
param_opt_to_nec_pnads: Dict[ParameterizedOption,
List[PartialNSRTAndDatastore]]
) -> List[PartialNSRTAndDatastore]:
"""Given the param_opt_to_nec_pnads dict, return a list of PNADs that
have unique names and can be used for planning."""
uniquely_named_nec_pnads: List[PartialNSRTAndDatastore] = []
for pnad_list in sorted(param_opt_to_nec_pnads.values(), key=str):
for i, pnad in enumerate(pnad_list):
new_op = pnad.op.copy_with(name=(pnad.op.name + str(i)))
new_pnad = PartialNSRTAndDatastore(new_op,
list(pnad.datastore),
pnad.option_spec)
uniquely_named_nec_pnads.append(new_pnad)
return uniquely_named_nec_pnads
@classmethod
def get_name(cls) -> str:
return "backchaining"
def _spawn_new_pnad(self, pnad: PartialNSRTAndDatastore,
segment: Segment) -> PartialNSRTAndDatastore:
"""Given a general PNAD and some segment with necessary add effects
that the PNAD must achieve, create a new PNAD ("spawn" from the most
general one) so that it has the necessary add effects contained in the
given segment.
Returns the newly constructed PNAD, without modifying the
original.
"""
# Assert that the segment contains necessary_add_effects.
necessary_add_effects = segment.necessary_add_effects
assert necessary_add_effects is not None
# Assert that this really is a general PNAD.
assert len(pnad.op.add_effects) == 0, \
"Can't spawn from non-general PNAD"
# Get an arbitrary grounding of the PNAD's operator whose
# preconditions hold in segment.init_atoms.
objects = set(segment.states[0])
_, var_to_obj = self._find_best_matching_pnad_and_sub(
segment, objects, [pnad], check_only_preconditions=True)
# Assert that such a grounding exists - this must be the case
# since we only ever call this method with the most general
# PNAD for the option.
assert var_to_obj is not None
obj_to_var = {v: k for k, v in var_to_obj.items()}
assert len(var_to_obj) == len(obj_to_var)
# Before we can lift the necessary_add_effects, we need to add new
# entries to obj_to_var, since necessary_add_effects may
# contain objects that were not in the ground operator's
# parameters.
all_objs = {o for eff in necessary_add_effects for o in eff.objects}
missing_objs = sorted(all_objs - set(obj_to_var))
new_vars = utils.create_new_variables([o.type for o in missing_objs],
existing_vars=pnad.op.parameters)
obj_to_var.update(dict(zip(missing_objs, new_vars)))
# Finally, we can lift necessary_add_effects.
updated_params = sorted(obj_to_var.values())
updated_add_effects = {
a.lift(obj_to_var)
for a in necessary_add_effects
}
# Create a new PNAD with the given parameters and add effects. Set
# the preconditions to be trivial. They will be recomputed later.
new_pnad_op = pnad.op.copy_with(parameters=updated_params,
preconditions=set(),
add_effects=updated_add_effects)
new_pnad = PartialNSRTAndDatastore(new_pnad_op, [], pnad.option_spec)
# Note: we don't need to copy anything related to keep effects into
# new_pnad here, because we only care about keep effects on the final
# iteration of backchaining, where this function is never called.
return new_pnad
@staticmethod
def _compute_pnad_delete_effects(pnad: PartialNSRTAndDatastore) -> None:
"""Update the given PNAD to change the delete effects to ones obtained
by unioning all lifted images in the datastore.
IMPORTANT NOTE: We want to do a union here because the most
general delete effects are the ones that capture _any possible_
deletion that occurred in a training transition. (This is
        in contrast to preconditions, where we want to take an intersection
over our training transitions.) However, we do not allow
creating new variables when we create these delete effects.
Instead, we filter out delete effects that include new
variables. Therefore, even though it may seem on the surface
like this procedure will cause all delete effects in the data to
be modeled accurately, this is not actually true.
"""
delete_effects = set()
for segment, var_to_obj in pnad.datastore:
obj_to_var = {o: v for v, o in var_to_obj.items()}
atoms = {
atom
for atom in segment.delete_effects
if all(o in obj_to_var for o in atom.objects)
}
lifted_atoms = {atom.lift(obj_to_var) for atom in atoms}
delete_effects |= lifted_atoms
pnad.op = pnad.op.copy_with(delete_effects=delete_effects)
@staticmethod
def _compute_pnad_side_predicates(pnad: PartialNSRTAndDatastore) -> None:
"""Update the given PNAD to change the side predicates to ones that
include every unmodeled add or delete effect seen in the data."""
# First, strip out any existing side predicates so that the call
# to apply_operator() cannot use them, which would defeat the purpose.
pnad.op = pnad.op.copy_with(side_predicates=set())
side_predicates = set()
for (segment, var_to_obj) in pnad.datastore:
objs = tuple(var_to_obj[param] for param in pnad.op.parameters)
ground_op = pnad.op.ground(objs)
next_atoms = utils.apply_operator(ground_op, segment.init_atoms)
for atom in segment.final_atoms - next_atoms:
side_predicates.add(atom.predicate)
for atom in next_atoms - segment.final_atoms:
side_predicates.add(atom.predicate)
pnad.op = pnad.op.copy_with(side_predicates=side_predicates)
@staticmethod
def _get_pnads_with_keep_effects(
pnad: PartialNSRTAndDatastore) -> Set[PartialNSRTAndDatastore]:
"""Return a new set of PNADs that include keep effects into the given
PNAD."""
# The keep effects that we want are the subset of possible keep
# effects which are not already in the PNAD's add effects, and
# whose predicates were determined to be side predicates.
keep_effects = {
eff
for eff in pnad.poss_keep_effects if eff not in pnad.op.add_effects
and eff.predicate in pnad.op.side_predicates
}
new_pnads_with_keep_effects = set()
# Given these keep effects, we need to create a combinatorial number of
# PNADs, one for each unique combination of keep effects. Moreover, we
# need to ensure that they are named differently from each other. Some
# of these PNADs will be filtered out later if they are not useful to
# cover any datapoints.
for r in range(1, len(keep_effects) + 1):
for keep_effects_subset in itertools.combinations(keep_effects, r):
# These keep effects (keep_effects_subset) could involve new
# variables, which we need to add to the PNAD parameters.
params_set = set(pnad.op.parameters)
for eff in keep_effects_subset:
for var in eff.variables:
params_set.add(var)
parameters = sorted(params_set)
# The keep effects go into both the PNAD preconditions and the
# PNAD add effects.
preconditions = pnad.op.preconditions | set(
keep_effects_subset)
add_effects = pnad.op.add_effects | set(keep_effects_subset)
# Create the new PNAD.
new_pnad_op = pnad.op.copy_with(parameters=parameters,
preconditions=preconditions,
add_effects=add_effects)
new_pnad = PartialNSRTAndDatastore(new_pnad_op, [],
pnad.option_spec)
# Remember to copy seg_to_keep_effects_sub into the new_pnad!
new_pnad.seg_to_keep_effects_sub = pnad.seg_to_keep_effects_sub
new_pnads_with_keep_effects.add(new_pnad)
return new_pnads_with_keep_effects
def _assert_all_data_in_exactly_one_datastore(
self, pnads: List[PartialNSRTAndDatastore]) -> None:
"""Assert that every demo datapoint appears in exactly one datastore
among the given PNADs' datastores."""
all_segs_in_data_lst = [
seg for pnad in pnads for seg, _ in pnad.datastore
]
all_segs_in_data = set(all_segs_in_data_lst)
assert len(all_segs_in_data_lst) == len(all_segs_in_data)
for ll_traj, seg_traj in zip(self._trajectories,
self._segmented_trajs):
if not ll_traj.is_demo: # ignore non-demo data
continue
for segment in seg_traj:
assert segment in all_segs_in_data
| StarcoderdataPython |
3320448 | from PIL import Image
from PIL import ImageFilter
import utils.logger as logger
#from utils.rotate import rotate
from config import *
from typing import Tuple, List
import sys
i = 0
def crop_image(image, area:Tuple) -> object:
''' Uses PIL to crop an image, given its area.
Input:
    image - path to an image file (opened with PIL inside this function)
    area - coordinates as a tuple in (xmin, ymax, xmax, ymin) format '''
img1 = Image.open(image)
img = img1.crop(area)
basewidth = 200
wpercent = (basewidth/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
cropped_image = img.resize((basewidth,hsize), Image.ANTIALIAS)
global i
if i>3:
i=0
cropped_image.save("croppedimage/r" + str(i) + ".jpg", "JPEG",dpi=(300,300))
i += 1
return cropped_image
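# Note on crop_image above: the global counter cycles through croppedimage/r0.jpg to r3.jpg,
# so at most four crops are kept on disk at any time and older files are overwritten.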
def locate_asset(self, image, classifier, lines="") -> List:
''' Determines where an asset is in the picture, returning
a set of coordinates, for the top left, top right, bottom
left, and bottom right of the tag
Returns:
[(area, image)]
Area is the coordinates of the bounding box
Image is the image, opened by PIL.'''
cropped_images = []
#print(lines)
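    # The `lines` argument appears to be raw detector console output (an assumption, not stated
    # in the original): detections labelled "sign" or "photo" are skipped, and every remaining
    # line carrying bounding-box info ("left_x") is parsed and cropped out below.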
for line in str(lines).split('\n'):
if "sign" in line:
continue
if "photo" in line:
continue
#print(line)
if "left_x" in line:
#if 'photo' or 'sign' in line:
# Extract the nameplate info
#print(line)
area = classifier.extract_info(line)
# Open image
cropped_images.append((area, crop_image(image, area)))
if cropped_images == []:
logger.bad("No label found in image.")
else:
logger.good("Found " + str(len(cropped_images)) + " label(s) in image.")
return cropped_images
| StarcoderdataPython |
# Python Exercise 012: Write an algorithm that reads the price of a product and shows its new price with a 5% discount.
price = float(input('Enter product price: '))
discount = price * 0.05
new_price = price - discount
print('New product price is: %.2f' %new_price)
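# Worked example (illustrative): for an input price of 200.00 the discount is
# 200.00 * 0.05 = 10.00, so the script prints "New product price is: 190.00".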
| StarcoderdataPython |
3265156 | <filename>src/fnc/sequences.py<gh_stars>100-1000
"""
Functions that operate on sequences.
Most of these functions return generators so that they will be more efficient at processing large
datasets. All generator functions will have a ``Yields`` section in their docstring to easily
identify them as generators. Otherwise, functions that return concrete values will have a
``Returns`` section instead.
"""
from collections import Counter, deque
from functools import partial
import itertools
from operator import not_
import fnc
from .helpers import Container, iscollection, isgenerator
_filter = filter
_map = map
def chunk(size, seq):
"""
Split elements of `seq` into chunks with length `size` and yield each chunk.
Examples:
>>> list(chunk(2, [1, 2, 3, 4, 5]))
[[1, 2], [3, 4], [5]]
Args:
seq (Iterable): Iterable to chunk.
        size (int): Chunk size; must be a positive integer.
Yields:
list: Chunked groups.
"""
if not isinstance(size, int) or size <= 0: # pragma: no cover
raise ValueError("size must be an integer greater than zero")
group = []
for item in seq:
if len(group) >= size:
yield group
group = []
group.append(item)
if group:
yield group
def compact(seq):
"""
Exclude elements from `seq` that are falsey.
Examples:
>>> list(compact(['', 1, 0, True, False, None]))
[1, True]
Args:
seq (Iterable): Iterable to compact.
Yields:
Elements that are truthy.
"""
for item in seq:
if item:
yield item
def concat(*seqs):
"""
Concatenates zero or more iterables into a single iterable.
Examples:
>>> list(concat([1, 2], [3, 4], [[5], [6]]))
[1, 2, 3, 4, [5], [6]]
Args:
*seqs (Iterable): Iterables to concatenate.
Yields:
Each element from all iterables.
"""
return itertools.chain.from_iterable(seqs)
def countby(iteratee, seq):
"""
Return a ``dict`` composed of keys generated from the results of running each element of `seq`
through the `iteratee`.
Examples:
>>> result = countby(None, [1, 2, 1, 2, 3, 4])
>>> result == {1: 2, 2: 2, 3: 1, 4: 1}
True
>>> result = countby(lambda x: x.lower(), ['a', 'A', 'B', 'b'])
>>> result == {'a': 2, 'b': 2}
True
>>> result = countby('a', [{'a': 'x'}, {'a': 'x'}, {'a': 'y'}])
>>> result == {'x': 2, 'y': 1}
True
Args:
iteratee (object): Iteratee applied per iteration.
seq (Iterable): Iterable to iterate over.
Returns:
dict
"""
return dict(Counter(map(iteratee, seq)))
def difference(seq, *seqs):
"""
Yields elements from `seq` that are not in `seqs`.
Note:
This function is like ``set.difference()`` except it works with both hashable
and unhashable values and preserves the ordering of the original iterables.
Examples:
>>> list(difference([1, 2, 3], [1], [2]))
[3]
>>> list(difference([1, 4, 2, 3, 5, 0], [1], [2, 0]))
[4, 3, 5]
>>> list(difference([1, 3, 4, 1, 2, 4], [1, 4]))
[3, 2]
Args:
seq (Iterable): Iterable to compute difference against.
*seqs (Iterable): Other iterables to compare with.
Yields:
Each element in `seq` that doesn't appear in `seqs`.
"""
yield from differenceby(None, seq, *seqs)
def differenceby(iteratee, seq, *seqs):
"""
Like :func:`difference` except that an `iteratee` is used to modify each element in the
sequences. The modified values are then used for comparison.
Note:
This function is like ``set.difference()`` except it works with both hashable
and unhashable values and preserves the ordering of the original iterables.
Examples:
>>> list(differenceby('a', [{'a': 1}, {'a': 2}, {'a': 3}], [{'a': 1}], [{'a': 2}]))
[{'a': 3}]
>>> list(differenceby(lambda x: x % 4, [1, 4, 2, 3, 5, 0], [1], [2, 0]))
[3]
Args:
iteratee (object): Iteratee applied per iteration.
seq (Iterable): Iterable to compute difference against.
*seqs (Iterable): Other iterables to compare with.
Yields:
Each element in `seq` that doesn't appear in `seqs`.
"""
if not seqs:
yield from unionby(iteratee, seq)
return
if iteratee is not None:
iteratee = fnc.iteratee(iteratee)
yielded = Container()
# Concat sequences into a single sequence and map iteratee to each item so that the
# computed value only needs to be done once for each item since that is what we'll
    # compare to below. We'll store these values into an iterable in case any of the
# sequences are a generator/iterator that would get exhausted if we tried to iterate
# over it more than once.
others = Container(map(iteratee, concat(*seqs)))
for item in seq:
if iteratee is not None:
value = iteratee(item)
else:
value = item
if value in yielded or value in others:
continue
yield item
yielded.add(value)
def duplicates(seq, *seqs):
"""
Yields unique elements from sequences that are repeated one or more times.
Note:
The order of yielded elements depends on when the second duplicated
element is found and not when the element first appeared.
Examples:
>>> list(duplicates([0, 1, 3, 2, 3, 1]))
[3, 1]
>>> list(duplicates([0, 1], [3, 2], [3, 1]))
[3, 1]
Args:
seq (Iterable): Iterable to check for duplicates.
*seqs (Iterable): Other iterables to compare with.
Yields:
Duplicated elements.
"""
yield from duplicatesby(None, seq, *seqs)
def duplicatesby(iteratee, seq, *seqs):
"""
Like :func:`duplicates` except that an `iteratee` is used to modify each element in the
sequences. The modified values are then used for comparison.
Examples:
>>> list(duplicatesby('a', [{'a':1}, {'a':3}, {'a':2}, {'a':3}, {'a':1}]))
[{'a': 3}, {'a': 1}]
Args:
iteratee (object): Iteratee applied per iteration.
seq (Iterable): Iterable to check for duplicates
*seqs (Iterable): Other iterables to compare with.
Yields:
Each element in `seq` that doesn't appear in `seqs`.
"""
if iteratee is not None:
iteratee = fnc.iteratee(iteratee)
seen = Container()
yielded = Container()
for item in itertools.chain(seq, *seqs):
if iteratee is not None:
value = iteratee(item)
else:
value = item
if value not in seen:
seen.add(value)
continue
if value not in yielded:
yield item
yielded.add(value)
def filter(iteratee, seq):
"""
Filter `seq` by `iteratee`, yielding only the elements that the iteratee returns truthy for.
Note:
This function is like the builtin ``filter`` except it converts `iteratee` into
a fnc-style predicate.
Examples:
>>> result = filter({'a': 1}, [{'a': 1}, {'b': 2}, {'a': 1, 'b': 3}])
>>> list(result) == [{'a': 1}, {'a': 1, 'b': 3}]
True
>>> list(filter(lambda x: x >= 3, [1, 2, 3, 4]))
[3, 4]
Args:
iteratee (object): Iteratee applied per iteration.
seq (Iterable): Iterable to filter.
Yields:
Filtered elements.
"""
return _filter(fnc.iteratee(iteratee), seq)
def find(iteratee, seq):
"""
Iterates over elements of `seq`, returning the first element that the iteratee returns truthy
for.
Examples:
>>> find(lambda x: x >= 3, [1, 2, 3, 4])
3
>>> find(lambda x: x >= 5, [1, 2, 3, 4]) is None
True
>>> find({'a': 1}, [{'a': 1}, {'b': 2}, {'a': 1, 'b': 2}])
{'a': 1}
>>> result = find({'a': 1}, [{'b': 2}, {'a': 1, 'b': 2}, {'a': 1}])
>>> result == {'a': 1, 'b': 2}
True
Args:
iteratee (object): Iteratee applied per iteration.
seq (Iterable): Iterable to iterate over.
Returns:
First element found or ``None``.
"""
for item in filter(iteratee, seq):
return item
def findindex(iteratee, seq):
"""
Return the index of the element in `seq` that returns ``True`` for `iteratee`. If no match is
found, ``-1`` is returned.
Examples:
>>> findindex(lambda x: x >= 3, [1, 2, 3, 4])
2
>>> findindex(lambda x: x > 4, [1, 2, 3, 4])
-1
Args:
iteratee (object): Iteratee applied per iteration.
seq (Iterable): Iterable to process.
Returns:
int: Index of found item or ``-1`` if not found.
"""
iteratee = fnc.iteratee(iteratee)
return next((i for i, value in enumerate(seq) if iteratee(value)), -1)
def findlast(iteratee, seq):
"""
This function is like :func:`find` except it iterates over elements of `seq` from right to left.
Examples:
>>> findlast(lambda x: x >= 3, [1, 2, 3, 4])
4
>>> findlast(lambda x: x >= 5, [1, 2, 3, 4]) is None
True
>>> result = findlast({'a': 1}, [{'a': 1}, {'b': 2}, {'a': 1, 'b': 2}])
>>> result == {'a': 1, 'b': 2}
True
Args:
iteratee (object): Iteratee applied per iteration.
seq (Iterable): Iterable to iterate over.
Returns:
Last element found or ``None``.
"""
return find(iteratee, reversed(seq))
def findlastindex(iteratee, seq):
"""
Return the index of the element in `seq` that returns ``True`` for `iteratee`. If no match is
found, ``-1`` is returned.
Examples:
>>> findlastindex(lambda x: x >= 3, [1, 2, 3, 4])
3
>>> findlastindex(lambda x: x > 4, [1, 2, 3, 4])
-1
Args:
iteratee (object): Iteratee applied per iteration.
seq (Iterable): Iterable to process.
Returns:
int: Index of found item or ``-1`` if not found.
"""
iteratee = fnc.iteratee(iteratee)
return next((i for i, value in reversed(tuple(enumerate(seq))) if iteratee(value)), -1)
def flatten(*seqs):
"""
Flatten iterables a single level deep.
Examples:
>>> list(flatten([[1], [2, [3]], [[4]]]))
[1, 2, [3], [4]]
>>> list(flatten([[1], [2, [3]], [[4]]], [5, [6, 7]]))
[1, 2, [3], [4], 5, 6, 7]
Args:
*seqs (Iterables): Iterables to flatten.
Yields:
        Elements from the flattened iterable.
"""
for item in itertools.chain.from_iterable(seqs):
if iscollection(item):
yield from item
else:
yield item
def flattendeep(*seqs):
"""
Recursively flatten iterables.
Examples:
>>> list(flattendeep([[1], [2, [3]], [[4]]]))
[1, 2, 3, 4]
>>> list(flattendeep([[1], [2, [3]], [[4]]], [5, [6, 7]]))
[1, 2, 3, 4, 5, 6, 7]
>>> list(flattendeep([[1], [2, [3]], [[4]]], [5, [[[[6, [[[7]]]]]]]]))
[1, 2, 3, 4, 5, 6, 7]
Args:
*seqs (Iterables): Iterables to flatten.
Yields:
Flattened elements.
"""
for item in itertools.chain.from_iterable(seqs):
if iscollection(item):
yield from flattendeep(item)
else:
yield item
def groupall(iteratees, seq):
"""
This function is like :func:`groupby` except it supports nested grouping by multiple iteratees.
If only a single iteratee is given, it is like calling :func:`groupby`.
Examples:
>>> result = groupall(
... ['shape', 'qty'],
... [
... {'shape': 'square', 'color': 'red', 'qty': 5},
... {'shape': 'square', 'color': 'blue', 'qty': 10},
... {'shape': 'square', 'color': 'orange', 'qty': 5},
... {'shape': 'circle', 'color': 'yellow', 'qty': 5},
... {'shape': 'circle', 'color': 'pink', 'qty': 10},
... {'shape': 'oval', 'color': 'purple', 'qty': 5}
... ]
... )
>>> expected = {
... 'square': {
... 5: [
... {'shape': 'square', 'color': 'red', 'qty': 5},
... {'shape': 'square', 'color': 'orange', 'qty': 5}
... ],
... 10: [{'shape': 'square', 'color': 'blue', 'qty': 10}]
... },
... 'circle': {
... 5: [{'shape': 'circle', 'color': 'yellow', 'qty': 5}],
... 10: [{'shape': 'circle', 'color': 'pink', 'qty': 10}]
... },
... 'oval': {
... 5: [{'shape': 'oval', 'color': 'purple', 'qty': 5}]
... }
... }
>>> result == expected
True
Args:
iteratees (Iterable): Iteratees to group by.
seq (Iterable): Iterable to iterate over.
Returns:
dict: Results of recursively grouping by all `iteratees`.
"""
if not iteratees:
return seq
head, *rest = iteratees
return fnc.mapvalues(partial(groupall, rest), groupby(head, seq))
def groupby(iteratee, seq):
"""
Return a ``dict`` composed of keys generated from the results of running each element of `seq`
through the `iteratee`.
Examples:
>>> result = groupby('a', [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}])
>>> result == {1: [{'a': 1, 'b': 2}], 3: [{'a': 3, 'b': 4}]}
True
>>> result = groupby({'a': 1}, [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}])
>>> result == {False: [{'a': 3, 'b': 4}], True: [{'a': 1, 'b': 2}]}
True
Args:
iteratee (object): Iteratee applied per iteration.
seq (Iterable): Iterable to iterate over.
Returns:
dict: Results of grouping by `iteratee`.
"""
result = {}
iteratee = fnc.iteratee(iteratee)
for item in seq:
result.setdefault(iteratee(item), []).append(item)
return result
def intercalate(value, seq):
"""
Insert `value` between each element in `seq` and concatenate the results.
Examples:
>>> list(intercalate('x', [1, [2], [3], 4]))
[1, 'x', 2, 'x', 3, 'x', 4]
>>> list(intercalate(', ', ['Lorem', 'ipsum', 'dolor']))
['Lorem', ', ', 'ipsum', ', ', 'dolor']
>>> ''.join(intercalate(', ', ['Lorem', 'ipsum', 'dolor']))
'Lorem, ipsum, dolor'
>>> list(intercalate([0,0,0], [[1,2,3],[4,5,6],[7,8,9]]))
[1, 2, 3, 0, 0, 0, 4, 5, 6, 0, 0, 0, 7, 8, 9]
Args:
value (object): Element to insert.
seq (Iterable): Iterable to intercalate.
Yields:
Elements of the intercalated iterable.
"""
return flatten(intersperse(value, seq))
def interleave(*seqs):
"""
Merge multiple iterables into a single iterable by inserting the next element from each iterable
by sequential round-robin.
Examples:
>>> list(interleave([1, 2, 3], [4, 5, 6], [7, 8, 9]))
[1, 4, 7, 2, 5, 8, 3, 6, 9]
Args:
*seqs (Iterable): Iterables to interleave.
Yields:
Elements of the interleaved iterable.
"""
queue = deque(iter(seq) for seq in seqs)
while queue:
seq = queue.popleft()
try:
yield next(seq)
except StopIteration:
pass
else:
queue.append(seq)
def intersection(seq, *seqs):
"""
Computes the intersection of all the passed-in iterables.
Note:
This function is like ``set.intersection()`` except it works with both hashable
and unhashable values and preserves the ordering of the original iterables.
Examples:
>>> list(intersection([1, 2, 3], [1, 2, 3, 4, 5], [2, 3]))
[2, 3]
>>> list(intersection([1, 2, 3]))
[1, 2, 3]
Args:
seq (Iterable): Iterable to compute intersection against.
*seqs (Iterable): Other iterables to compare with.
Yields:
Elements that itersect.
"""
yield from intersectionby(None, seq, *seqs)
def intersectionby(iteratee, seq, *seqs):
"""
Like :func:`intersection` except that an `iteratee` is used to modify each element in the
sequences. The modified values are then used for comparison.
Note:
This function is like ``set.intersection()`` except it works with both hashable
and unhashable values and preserves the ordering of the original iterables.
Examples:
>>> list(intersectionby(
... 'a',
... [{'a': 1}, {'a': 2}, {'a': 3}],
... [{'a': 1}, {'a': 2}, {'a': 3}, {'a': 4}, {'a': 5}],
... [{'a': 2}, {'a': 3}]
... ))
[{'a': 2}, {'a': 3}]
Args:
iteratee (object): Iteratee applied per iteration.
seq (Iterable): Iterable to compute intersection against.
*seqs (Iterable): Other iterables to compare with.
Yields:
Elements that intersect.
"""
if not seqs:
yield from unionby(iteratee, seq)
return
if iteratee is not None:
iteratee = fnc.iteratee(iteratee)
yielded = Container()
# Map iteratee to each item in each other sequence and compute intersection of those
# values to reduce number of times iteratee is called. The resulting sequence will
# be an intersection of computed values which will be used to compare to the primary
    # sequence. We'll store these values into an iterable in case any of the sequences
# are a generator/iterator that would get exhausted if we tried to iterate over it
# more than once.
others = Container(intersection(*(map(iteratee, other) for other in seqs)))
for item in seq:
if iteratee is not None:
value = iteratee(item)
else:
value = item
if value in yielded:
continue
if value in others:
yield item
yielded.add(value)
def intersperse(value, seq):
"""
Insert a separating element between each element in `seq`.
Examples:
>>> list(intersperse('x', [1, [2], [3], 4]))
[1, 'x', [2], 'x', [3], 'x', 4]
Args:
value (object): Element to insert.
seq (Iterable): Iterable to intersperse.
Yields:
Elements of the interspersed iterable.
"""
seq = iter(seq)
try:
yield next(seq)
except StopIteration:
return
for item in seq:
yield value
yield item
def keyby(iteratee, seq):
"""
Return a ``dict`` composed of keys generated from the results of running each element of `seq`
through the `iteratee`.
Examples:
>>> results = keyby('a', [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}])
>>> results == {1: {'a': 1, 'b': 2}, 3: {'a': 3, 'b': 4}}
True
Args:
iteratee (object): Iteratee applied per iteration.
seq (Iterable): Iterable to iterate over.
Returns:
dict: Results of indexing by `iteratee`.
"""
iteratee = fnc.iteratee(iteratee)
return {iteratee(value): value for value in seq}
def map(iteratee, *seqs):
"""
Map `iteratee` to each element of iterable and yield the results. If additional iterable
arguments are passed, `iteratee` must take that many arguments and is applied to the items from
all iterables in parallel.
Note:
This function is like the builtin ``map`` except it converts `iteratee` into a
fnc-style predicate.
Examples:
>>> list(map(str, [1, 2, 3, 4]))
['1', '2', '3', '4']
>>> list(map('a', [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}, {'a': 5, 'b': 6}]))
[1, 3, 5]
>>> list(map('0.1', [[[0, 1]], [[2, 3]], [[4, 5]]]))
[1, 3, 5]
>>> list(map('a.b', [{'a': {'b': 1}}, {'a': {'b': 2}}]))
[1, 2]
>>> list(map('a.b[1]', [{'a': {'b': [0, 1]}}, {'a': {'b': [2, 3]}}]))
[1, 3]
Args:
iteratee (object): Iteratee applied per iteration.
*seqs (Iterable): Iterables to map.
Yields:
Mapped elements.
"""
return _map(fnc.iteratee(iteratee), *seqs)
def mapcat(iteratee, *seqs):
"""
Map an `iteratee` to each element of each iterable in `seqs` and concatenate the results into a
single iterable.
Examples:
>>> list(mapcat(lambda x: list(range(x)), range(4)))
[0, 0, 1, 0, 1, 2]
Args:
iteratee (object): Iteratee to apply to each element.
*seqs (Iterable): Iterable to map and concatenate.
Yields:
Elements resulting from concat + map operations.
"""
return concat(*map(iteratee, *seqs))
def mapflat(iteratee, *seqs):
"""
Map an `iteratee` to each element of each iterable in `seqs` and flatten the results.
Examples:
>>> list(mapflat(lambda n: [[n, n]], [1, 2]))
[[1, 1], [2, 2]]
Args:
iteratee (object): Iteratee applied per iteration.
*seqs (Iterable): Iterables to iterate over.
Yields:
        Elements resulting from flatten + map operations.
"""
return flatten(map(iteratee, *seqs))
def mapflatdeep(iteratee, *seqs):
"""
Map an `iteratee` to each element of each iterable in `seqs` and recurisvely flatten the
results.
Examples:
>>> list(mapflatdeep(lambda n: [[n, n]], [1, 2]))
[1, 1, 2, 2]
Args:
iteratee (object): Iteratee applied per iteration.
*seqs (Iterable): Iterables to iterate over.
Yields:
        Elements resulting from recursive flatten + map operations.
"""
return flattendeep(map(iteratee, *seqs))
def partition(iteratee, seq):
"""
Return a ``tuple`` of 2 lists containing elements from `seq` split into two groups where the
first group contains all elements the `iteratee` returned truthy for and the second group
    contains the falsey elements.
Examples:
>>> partition(lambda x: x % 2, [1, 2, 3, 4])
([1, 3], [2, 4])
Args:
iteratee (object): Iteratee applied per iteration.
seq (Iterable): Iterable to iterate over.
Returns:
tuple[list]
"""
iteratee = fnc.iteratee(iteratee)
successes = []
failures = []
for item in seq:
if iteratee(item):
successes.append(item)
else:
failures.append(item)
return successes, failures
def reject(iteratee, seq):
"""
The opposite of :func:`filter` this function yields the elements of `seq` that the `iteratee`
returns falsey for.
Examples:
>>> list(reject(lambda x: x >= 3, [1, 2, 3, 4]))
[1, 2]
>>> list(reject('a', [{'a': 0}, {'a': 1}, {'a': 2}]))
[{'a': 0}]
>>> list(reject({'a': 1}, [{'a': 0}, {'a': 1}, {'a': 2}]))
[{'a': 0}, {'a': 2}]
Args:
iteratee (object): Iteratee applied per iteration.
seq (Iterable): Iterable to iterate over.
Yields:
Rejected elements.
"""
iteratee = fnc.iteratee(iteratee)
return filter(fnc.compose(iteratee, not_), seq)
def union(seq, *seqs):
"""
Computes the union of the passed-in iterables (sometimes referred to as ``unique``).
Note:
This function is like ``set.union()`` except it works with both hashable and
unhashable values and preserves the ordering of the original iterables.
Examples:
>>> list(union([1, 2, 3, 1, 2, 3]))
[1, 2, 3]
>>> list(union([1, 2, 3], [2, 3, 4], [3, 4, 5]))
[1, 2, 3, 4, 5]
Args:
seq (Iterable): Iterable to compute union against.
*seqs (Iterable): Other iterables to compare with.
Yields:
Each unique element from all iterables.
"""
yield from unionby(None, seq, *seqs)
def unionby(iteratee, seq, *seqs):
"""
Like :func:`union` except that an `iteratee` is used to modify each element in the sequences.
The modified values are then used for comparison.
Note:
This function is like ``set.union()`` except it works with both hashable and
unhashable values and preserves the ordering of the original iterables.
Examples:
>>> list(unionby(
... 'a',
... [{'a': 1}, {'a': 2}, {'a': 3}, {'a': 1}, {'a': 2}, {'a': 3}]
... ))
[{'a': 1}, {'a': 2}, {'a': 3}]
Args:
iteratee (object): Iteratee applied per iteration.
seq (Iterable): Iterable to compute union against.
*seqs (Iterable): Other iterables to compare with.
Yields:
Each unique element from all iterables.
"""
if iteratee is not None:
iteratee = fnc.iteratee(iteratee)
seen = Container()
for item in itertools.chain(seq, *seqs):
if iteratee is not None:
value = iteratee(item)
else:
value = item
if value not in seen:
yield item
seen.add(value)
def unzip(seq):
"""
The inverse of the builtin ``zip`` function, this method transposes groups of elements into new
groups composed of elements from each group at their corresponding indexes.
Examples:
>>> list(unzip([(1, 4, 7), (2, 5, 8), (3, 6, 9)]))
[(1, 2, 3), (4, 5, 6), (7, 8, 9)]
>>> list(unzip(unzip([(1, 4, 7), (2, 5, 8), (3, 6, 9)])))
[(1, 4, 7), (2, 5, 8), (3, 6, 9)]
Args:
seq (Iterable): Iterable to unzip.
Yields:
tuple: Each transposed group.
"""
return zip(*seq)
def without(values, seq):
"""
Exclude elements in `seq` that are in `values`.
Examples:
>>> list(without([2, 4], [1, 2, 3, 2, 4, 4, 3]))
[1, 3, 3]
Args:
values (mixed): Values to remove.
seq (Iterable): List to filter.
Yields:
Elements not in `values`.
"""
for item in seq:
if item not in values:
yield item
def xor(seq, *seqs):
"""
    Computes the symmetric difference of the provided iterables where the elements are only in one
    of the iterables.
Note:
This function is like ``set.symmetric_difference()`` except it works with both
hashable and unhashable values and preserves the ordering of the original
iterables.
Warning:
While this function returns a generator object, internally it will create
intermediate non-generator iterables which may or may not be a performance
concern depending on the sizes of the inputs.
Examples:
>>> list(xor([1, 3, 4], [1, 2, 4], [2]))
[3]
Args:
seq (Iterable): Iterable to compute symmetric difference against.
*seqs (Iterable): Other iterables to compare with.
Yields:
Elements from the symmetric difference.
"""
if not seqs:
yield from seq
return
head, *rest = seqs
if isgenerator(seq):
seq = tuple(seq)
if isgenerator(head):
head = tuple(head)
a = union(seq, head)
b = tuple(intersection(seq, head))
d = difference(a, b)
yield from xor(d, *rest)
| StarcoderdataPython |
1695285 | from ttracker.model.items.match import MatchResult
class Player:
def __init__(self, content):
self.user_id = content['userId']
self.player_name = content['playerName']
self.system_seat_id = content['systemSeatId']
self.team_id = content['teamId']
def __repr__(self):
return f'<{self.__class__.__name__}>'
def __str__(self):
return f'<{self.player_name}>'
class GameRoomConfig:
def __init__(self, content):
self.event_id = content['eventId']
self.reserved_players = self.get_reserved_players(content)
self.match_id = content['matchId']
@staticmethod
def get_reserved_players(content):
if content.get('reservedPlayers'):
return [Player(player) for player in content['reservedPlayers']]
def __repr__(self):
return f'<{self.__class__.__name__}>'
def __str__(self):
return f'<{self.match_id}>'
class MatchGameRoomStateChangedEvent:
def __init__(self, content):
self.transaction_id = content['transactionId']
self.timestamp = content['timestamp']
game_room_info = content['matchGameRoomStateChangedEvent'][
'gameRoomInfo']
self.game_room_config = GameRoomConfig(
game_room_info['gameRoomConfig'])
self.state_type = game_room_info['stateType']
self.final_match_result = MatchResult(
game_room_info.get('finalMatchResult', None))
def __repr__(self):
return f'<{self.__class__.__name__}>'
    def __str__(self):
        return f'<{self.game_room_config.match_id}>'
| StarcoderdataPython |
3265142 | from tracker.app import app as application
| StarcoderdataPython |
1720739 | <gh_stars>0
import vk_api
import datetime
import colorama
from colorama import Fore
colorama.init()
editnumber = [('0', '0⃣'), ('1', '1⃣'), ('2', '2⃣'), ('3', '3⃣'), ('4', '4⃣'), ('5', '5⃣'), ('6', '6⃣'), ('7', '7⃣'), ('8', '8⃣'), ('9', '9⃣')]
ballnumber = [('0', ''), ('1', '❶'), ('2', '❷'), ('3', '❸'), ('4', '❹'), ('5', '❺'), ('6', '❻'), ('7', '❼'), ('8', '❽'), ('9', '❾')]
holeballnumber = [('0', ''), ('1', '➀'), ('2', '➁'), ('3', '➂'), ('4', '➃'), ('5', '➄'), ('6', '➅'), ('7', '➆'), ('8', '➇'), ('9', '➈')]
class status(object):
"""docstring for status"""
def __init__(self, token, decor=0, debug=0, online = 0, friends = 0):
super(status, self).__init__()
self.decor = decor
if token != '' and debug == 0:
self.token = True
self.debug = 0
else:
self.token = False
self.debug = 1
self.online = online
self.friends = friends
vk = vk_api.VkApi(token=token)
self.vk = vk
        # account
name = vk.method('account.getProfileInfo')
self.first_name = name['first_name']
self.last_name = name['last_name']
self.screen_name = name['screen_name']
        # your followers
self.followerson = len(vk.method('users.getFollowers', {'count': '1000'})["items"])
self.friendsonline = len(vk.method('friends.getOnline'))
self.bannedon = vk.method('account.getBanned', {'count': '200'})["count"]
        # your dialogs
self.unreadmessageon = vk.method('account.getCounters', {'filter': 'messages'})['messages']
        # your account statistics
self.avalikes = vk.method('photos.get', {'album_id': 'profile', 'rev': 1, 'extended': 1, 'count': 1})["items"][0]["likes"]["count"]
self.giftscounton = vk.method('gifts.get', {'count': '200'})["count"]
        # date and time
t = datetime.datetime.now()
self.time = t.strftime("%H:%M")
def date(self):
t = datetime.datetime.now()
return f'[{Fore.CYAN}{t}{Fore.RESET}]'
def get_status(self, title):
if self.decor == 1:
for old, new in editnumber:
title = title.replace(old, new)
if self.decor == 2:
for old, new in ballnumber:
title = title.replace(old, new)
if self.decor == 3:
for old, new in holeballnumber:
title = title.replace(old, new)
try:
if (len(title)< 140):
if self.debug == 0:
self.vk.method("status.set", {"text": title}) #отправка запроса на установку статуса
print(f'\n{self.date()} <@{self.screen_name}> {Fore.GREEN}{title} {Fore.YELLOW}{self.token} {Fore.MAGENTA}{self.debug} {Fore.CYAN}{self.online} {Fore.BLUE}{self.friends}{Fore.RESET}')
if self.debug == 1:
print(f'{self.date()} <@{self.screen_name}> {Fore.GREEN}{title} {Fore.YELLOW}{self.token} {Fore.MAGENTA}{self.debug} {Fore.CYAN}{self.online} {Fore.BLUE}{self.friends}{Fore.RESET}')
print(f"| {len(title)} / 140 символов")
else:
                print(f'\n{self.date()} | the status of @{self.screen_name} cannot be set because it exceeds the 140 character limit')
                print(f'{len(title)} / 140 characters')
except Exception as e:
print(f'e: {Fore.YELLOW}{e}{Fore.RESET}')
if self.online == 1:
self.vk.method("account.setOnline") #отправляет запрос на установку статуса "Онлайн" на 5 минут
else:
self.vk.method("account.setOffline") # отправляет запрос на установку статуса "Оффлайн" каждые 5 минут
        # Removing incoming friend requests
if self.friends == 1:
self.vk.method("friends.deleteAllRequests") #удаляет все входящие заявки в друзья
| StarcoderdataPython |
76335 | <filename>LiveFeedback/CentralLinePhantom1.py
import os
import keras
import sys
import time
import numpy as np
from keras.models import Sequential
from keras.layers import Activation, GlobalAveragePooling2D
from keras.layers.core import Dense, Dropout, Flatten
from keras.optimizers import Adam, SGD
from keras.metrics import categorical_crossentropy
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import *
from keras.utils import Sequence
import cv2
import matplotlib.pyplot as plt
from pyIGTLink import pyIGTLink
# Parameters
image_size = 128
# Check command line arguments
if len(sys.argv) < 2:
print("Usage: {} WEIGHTS_FILE".format(sys.argv[0]))
sys.exit()
weights_file_name = sys.argv[1]
print("Loading weights from: {}".format(weights_file_name))
# Building the model. Should be the same as the weights to be loaded.
model = Sequential()
model.add(Conv2D(16, (3, 3), padding='same', activation='relu', input_shape=(image_size, image_size, 1)))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(.2))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(.2))
model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(.4))
model.add(Dense(2, activation='sigmoid'))
model.load_weights(weights_file_name)
print("Server starting...")
client = pyIGTLink.PyIGTLinkClient(host="127.0.0.1")
client.start()
print("Server running...")
try:
image_squeezed = np.zeros([image_size, image_size]).astype(np.uint8)
image_padded = np.zeros([1, image_size, image_size, 1]).astype(np.uint8)
image_input = np.zeros([1, image_size, image_size, 1]).astype(np.uint8)
cv2.circle(image_input[0,:,:,0], (10,10), 15, 255, 5)
cv2.imshow("image", image_input[0,:,:,0])
cv2.waitKey(10)
while True:
messages = client.get_latest_messages()
if len(messages) > 0:
for message in messages:
if message._type == "IMAGE":
image = message._image
image = np.flip(image, 1)
image_squeezed = np.squeeze(image)
image_padded[0,:,:,0] = cv2.resize(image_squeezed, (image_size, image_size)).astype(np.uint8)
image_input = image_padded / 255.0
prediction = model.predict(image_input).tolist()
print("Predicted center line: " + str(prediction[0]))
cx = int(image_size*prediction[0][0])
cy = int(image_size*prediction[0][1])
cv2.circle(image_input[0,:,:,0], (cx, cy), 2, 255, thickness=1)
cv2.imshow("image", image_input[0,:,:,0])
cv2.waitKey(10)
client.send_message(pyIGTLink.StringMessage(str(prediction[0]), device_name=message._device_name+"Predicted"))
time.sleep(0.1)
except KeyboardInterrupt:
pass
client.stop()
| StarcoderdataPython |
3328103 | <reponame>shiburizu/concerto-direct
#core
from winpty import PtyProcess #pywinpty
import os, sys, time, re, threading, logging
from functools import partial
from datetime import datetime
logging.basicConfig(filename='concerto.log', encoding='utf-8', level=logging.DEBUG)
# Pyinstaller path helper
def resource_path(relative_path):
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
#Kivy core
import kivy
from kivy.config import Config
Config.set('graphics', 'width', '600')
Config.set('graphics', 'height', '400')
Config.set('graphics', 'resizable', False)
Config.set(
"kivy",
"default_font",
[
"Tex Gyre",
resource_path("texgyreheros-bolditalic.otf"),
],
)
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')
Config.set('kivy','window_icon','concertoicon.ico')
from kivy.app import App
from kivy.lang import Builder
from kivy.resources import resource_add_path, resource_find
#Kivy widgets
from kivy.properties import ObjectProperty
from kivy.uix.screenmanager import Screen, ScreenManager
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.stacklayout import StackLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from kivy.uix.popup import Popup
from kivy.uix.modalview import ModalView
from kivy.clock import Clock
#clean caster output of ANSI escape codes
ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
class loghelper():
dateTimeObj = datetime.now()
timestampStr = 'Concerto_' + dateTimeObj.strftime("%d-%b-%Y-%H-%M-%S") + '.txt'
def write(self,s):
with open(self.timestampStr,'a') as log:
log.write(s)
logger = loghelper()
class Caster():
adr = None #current game IP
playing = False #TODO True when Netplaying
rf = -1 #user rollback frames
df = -1 #user delay frames
rs = -1 #suggested rollback frames
ds = -1 #suggested delay frames
p1 = None #TODO p1 side name
p2 = None #TODO p2 side name
aproc = None #active caster
#n = [i for i in re.findall('[0-9]+',' '.join(scon)) if int(i) < 15]
def isValidRead(self,scon):
if "rollback:" in scon:
sconlst = scon.split()
rbn = 0
for r in reversed(sconlst):
if r != 'rollback:':
rbn += 1
elif r == 'rollback:':
break
if rbn > 0:
rblst = re.sub("[^0-9]", "",''.join(sconlst[-rbn:]))
if len(rblst) > 0: #only checking if a number exists, not using rblst anywhere
p = re.findall('\d+\.\d+',scon)
for m in p:
if m in sconlst:
sconlst.remove(m)
n = [i for i in re.findall('[0-9]+',' '.join(sconlst)) if int(i) < 15]
if len(n) >= 2:
logger.write('\nrblst: %s\n' % rblst)
logger.write('\nn: %s\n' % n)
logger.write('\nVALID READ:\n%s\n' % scon.split())
return n
return False
def host(self,sc): #sc is active screen to trigger frame delay select
proc = PtyProcess.spawn('cccaster.v3.0.exe -n 0')
self.aproc = proc
logger.write('\n== Host ==\n')
while proc.isalive():
ip = re.findall(r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{,5}',proc.read()) #find IP and port combo for host
if ip != []:
self.adr = str(ip[0])
break
logger.write('IP: %s\n' % self.adr)
curCon = ""
lastCon = ""
con = ""
while proc.isalive():
curCon = ansi_escape.sub('',str(proc.read()))
con += lastCon + curCon
if self.playing == False and self.rs == -1 and self.ds == -1:
n = self.isValidRead(con)
if n != False:
logger.write('\n=================================\n')
logger.write(str(con.split()))
self.ds = int(n[-2]) - int(n[-1])
self.rs = int(n[-1])
r = []
namecheck = False #try to read names from caster output
for x in reversed(con.split()):
if namecheck == False and x == "connected":
namecheck = True
elif namecheck == True and x == '*':
break
elif namecheck == True and x.replace('*','') != '':
r.insert(0,x)
p = re.findall('\d+\.\d+',con) #find all floats in caster output and use the last one [-1] to make sure we get caster text
sc.frameset(' '.join(r),n[-2],p[-1])
while True: #todo optimize frameset stutter by making this a thread
if self.rf != -1 and self.df != -1:
proc.write('\x08') #two backspace keys for edge case of >9 frames
proc.write('\x08')
proc.write(str(self.rf))
proc.write('\x0D')
time.sleep(0.1) #slight delay to let caster refresh screen
proc.write('\x08')
proc.write('\x08')
proc.write(str(self.df))
proc.write('\x0D')
self.playing = True #set netplaying to avoid more reads
break
else:
if lastCon != curCon:
lastCon = curCon
continue
def join(self,ip,sc,*args):
proc = PtyProcess.spawn('cccaster.v3.0.exe -n %s' % ip)
self.aproc = proc
curCon = ""
lastCon = ""
con = ""
logger.write('\n== Join %s ==\n' % ip)
while proc.isalive():
curCon = ansi_escape.sub('',str(proc.read()))
con += lastCon + curCon
if self.playing == False and self.rs == -1 and self.ds == -1:
n = self.isValidRead(con)
if n != False:
logger.write('\n=================================\n')
logger.write(str(con.split()))
self.ds = int(n[-2]) - int(n[-1])
self.rs = int(n[-1])
r = []
namecheck = False #try to read names from caster output
for x in con.split():
if x == "to" and namecheck == False:
namecheck = True
elif x == '*' and namecheck == True:
break
elif namecheck == True and x.replace('*','') != '':
r.append(x)
p = re.findall('\d+\.\d+',con) #find all floats in caster output and use the last one [-1] to make sure we get caster text
sc.frameset(' '.join(r),n[-2],p[-1])
while True:
if self.rf != -1 and self.df != -1:
proc.write('\x08')
proc.write('\x08')
proc.write(str(self.rf))
proc.write('\x0D')
time.sleep(0.1)
proc.write('\x08')
proc.write('\x08')
proc.write(str(self.df))
proc.write('\x0D')
self.playing = True
break
else:
if lastCon != curCon:
lastCon = curCon
continue
def watch(self,ip,*args):
proc = PtyProcess.spawn('cccaster.v3.0.exe -n -s %s' % ip)
self.aproc = proc
curCon = ""
lastCon = ""
con = ""
logger.write('\n== Watch %s ==\n' % ip)
while proc.isalive():
curCon = ansi_escape.sub('',str(proc.read()))
con += lastCon + curCon
if "fast-forward)" in con:
logger.write('\n=================================\n')
logger.write(str(con.split()))
proc.write('1') #start spectating, find names after
r = []
for x in reversed(con.split()):
if x == '*' and len(r) > 0:
if r[0] == "Spectating":
break
elif x != '*' and x.replace('*','') != '':
r.insert(0,x)
CApp.DirectScreen.activePop.modalTxt.text = ' '.join(r) #replace connecting text with match name in caster
break
else:
if lastCon != curCon:
lastCon = curCon
continue
def training(self):
proc = PtyProcess.spawn('cccaster.v3.0.exe')
self.aproc = proc
while proc.isalive():
if "Offline" in proc.read():
proc.write('4') #4 is offline
time.sleep(0.1)
proc.write('1')
break
def local(self):
proc = PtyProcess.spawn('cccaster.v3.0.exe')
self.aproc = proc
while proc.isalive():
if "Offline" in proc.read():
proc.write('4')
time.sleep(0.1)
proc.write('2')
break
def tournament(self):
proc = PtyProcess.spawn('cccaster.v3.0.exe')
self.aproc = proc
while proc.isalive():
if "Offline" in proc.read():
proc.write('4')
time.sleep(0.1)
proc.write('4')
break
def replays(self):
proc = PtyProcess.spawn('cccaster.v3.0.exe')
self.aproc = proc
while proc.isalive():
if "Offline" in proc.read():
proc.write('4')
time.sleep(0.1)
proc.write('5')
break
class GameModal(ModalView):
modalTxt = ObjectProperty(None)
closeBtn = ObjectProperty(None)
class FrameModal(ModalView):
frameTxt = ObjectProperty(None)
r_input = ObjectProperty(None)
d_input = ObjectProperty(None)
startBtn = ObjectProperty(None)
closeBtn = ObjectProperty(None)
class DirectScreen(Screen):
userIP = ObjectProperty(None) #IP input field
activePop = None #active popup on the screen
def host(self, *args):
caster = threading.Thread(target=CApp.game.host,args=[self],daemon=True)
caster.start()
while True:
if CApp.game.adr is not None:
popup = GameModal()
popup.modalTxt.text = 'Hosting to IP: %s\nAddress copied to clipboard.' % CApp.game.adr
popup.closeBtn.text = 'Stop Hosting'
popup.closeBtn.bind(on_release=partial(self.dismiss,t=caster,p=popup))
self.activePop = popup
popup.open()
break
def join(self, *args):
caster = threading.Thread(target=CApp.game.join,args=[self.userIP.text,self],daemon=True)
caster.start()
popup = GameModal()
popup.modalTxt.text = 'Connecting to IP: %s' % self.userIP.text
popup.closeBtn.text = 'Stop Playing'
popup.closeBtn.bind(on_release=partial(self.dismiss,t=caster,p=popup))
self.activePop = popup
popup.open()
def frameset(self,name,delay,ping):
fpopup = FrameModal()
fpopup.frameTxt.text = 'Connected to: %s\nPing: %s Network Delay: %s, Suggested: Rollback %s, Delay %s' % (name, ping, delay, CApp.game.rs, CApp.game.ds)
fpopup.r_input.text = str(CApp.game.rs)
fpopup.d_input.text = str(CApp.game.ds)
fpopup.startBtn.bind(on_release=partial(self.confirm,p=fpopup,r=fpopup.r_input,d=fpopup.d_input,n=name))
fpopup.closeBtn.bind(on_release=partial(self.dismiss,t=CApp.game.aproc,p=fpopup))
fpopup.open()
def confirm(self,obj,r,d,p,n,*args):
CApp.game.rf = int(r.text)
CApp.game.df = int(d.text)
CApp.DirectScreen.activePop.modalTxt.text += "\nConnected to: %s" % n
p.dismiss()
def training(self, *args):
caster = threading.Thread(target=CApp.game.training,daemon=True)
caster.start()
popup = GameModal()
popup.modalTxt.text = 'Training mode started.'
popup.closeBtn.text = 'Close game'
popup.closeBtn.bind(on_release=partial(self.dismiss,t=caster,p=popup))
self.activePop = popup
popup.open()
def replays(self, *args):
caster = threading.Thread(target=CApp.game.replays,daemon=True)
caster.start()
popup = GameModal()
popup.modalTxt.text = 'Replay Theater started.'
popup.closeBtn.text = 'Close game'
popup.closeBtn.bind(on_release=partial(self.dismiss,t=caster,p=popup))
self.activePop = popup
popup.open()
def local(self, *args):
caster = threading.Thread(target=CApp.game.local,daemon=True)
caster.start()
popup = GameModal()
popup.modalTxt.text = 'Local VS started.'
popup.closeBtn.text = 'Close game'
popup.closeBtn.bind(on_release=partial(self.dismiss,t=caster,p=popup))
self.activePop = popup
popup.open()
def tournament(self, *args):
caster = threading.Thread(target=CApp.game.tournament,daemon=True)
caster.start()
popup = GameModal()
popup.modalTxt.text = 'Tournament Local VS started.'
popup.closeBtn.text = 'Close game'
popup.closeBtn.bind(on_release=partial(self.dismiss,t=caster,p=popup))
self.activePop = popup
popup.open()
def watch(self, *args):
popup = GameModal()
self.activePop = popup
caster = threading.Thread(target=CApp.game.watch,args=[self.userIP.text],daemon=True)
caster.start()
popup.modalTxt.text = 'Watching IP: %s' % self.userIP.text
popup.closeBtn.text = 'Close game'
popup.closeBtn.bind(on_release=partial(self.dismiss,t=caster,p=popup))
popup.open()
#TODO prevent players from dismissing caster until MBAA is open to avoid locking issues
def dismiss(self,obj,t,p,*args):
CApp.game.adr = None
CApp.game.rs = -1
CApp.game.ds = -1
CApp.game.rf = -1
CApp.game.df = -1
os.system('start /min taskkill /f /im cccaster.v3.0.exe')
del(t)
p.dismiss()
if self.activePop != None:
self.activePop.dismiss()
self.activePop = None
CApp.game.aproc = None
CApp.game.playing = False
class Concerto(App):
def __init__(self,c,**kwargs):
super(Concerto, self).__init__(**kwargs)
self.sm = ScreenManager()
self.game = c #expects Caster object
def build(self):
self.DirectScreen = DirectScreen()
self.sm.add_widget(self.DirectScreen)
self.DirectScreen.ids.background.source = resource_path("bg.png")
return self.sm
def checkPop(self,*args):
while True:
if self.game.aproc != None:
if self.game.aproc.isalive():
pass
else:
self.DirectScreen.activePop.dismiss()
self.DirectScreen.activePop = None
CApp.game.aproc = None
CApp.game.playing = False
CApp.game.adr = None
CApp.game.rs = -1
CApp.game.ds = -1
CApp.game.rf = -1
CApp.game.df = -1
os.system('start /min taskkill /f /im cccaster.v3.0.exe')
time.sleep(0.5) #checks for netplaying every 1 second by polling isalive() from game.aproc
CApp = Concerto(Caster())
if __name__ == '__main__':
Builder.load_file(resource_path("Concerto.kv")) #concerto.kv defines UI
netwatch = threading.Thread(target=CApp.checkPop,daemon=True) #netplay watchdog
netwatch.start()
CApp.run()
| StarcoderdataPython |
3222602 | import datetime
import typing
from typing_extensions import TypeGuard
from .. import spec
from .. import exceptions
from . import timelength_units
def detect_timelength_representation(
timelength: spec.Timelength,
) -> spec.TimelengthRepresentation:
"""return str name of Timelength representation"""
if is_timelength_seconds(timelength):
return 'TimelengthSeconds'
elif is_timelength_seconds_precise(timelength):
return 'TimelengthSecondsPrecise'
elif is_timelength_label(timelength):
return 'TimelengthLabel'
elif is_timelength_clock(timelength):
return 'TimelengthClock'
elif is_timelength_phrase(timelength):
return 'TimelengthPhrase'
elif is_timelength_clock_phrase(timelength):
return 'TimelengthClockPhrase'
elif is_timelength_timedelta(timelength):
return 'TimelengthTimedelta'
else:
raise exceptions.RepresentationDetectionException(
'could not determine Timelength representation: ' + str(timelength)
)
def is_timelength(timelength: typing.Any) -> TypeGuard[spec.Timelength]:
"""return bool of whether input is Timelength"""
try:
detect_timelength_representation(timelength)
return True
except exceptions.RepresentationDetectionException:
return False
def is_timelength_seconds(
timelength: typing.Any,
) -> TypeGuard[spec.TimelengthSeconds]:
"""return bool of whether input is TimelengthSeconds"""
return isinstance(timelength, int)
def is_timelength_seconds_precise(
timelength: typing.Any,
) -> TypeGuard[spec.TimelengthSecondsPrecise]:
"""return bool of whether input is TimelengthSecondsPrecise"""
return isinstance(timelength, float)
def is_timelength_label(
timelength: typing.Any,
) -> TypeGuard[spec.TimelengthLabel]:
"""return bool of whether input is TimelengthLabel"""
if not isinstance(timelength, str) or len(timelength) < 2:
return False
try:
int(timelength[:-1])
letter = timelength[-1]
return letter.isalnum()
except ValueError:
return False
def is_timelength_clock(
timelength: typing.Any,
) -> TypeGuard[spec.TimelengthClock]:
"""return bool of whether input is TimelengthClock"""
if not isinstance(timelength, str):
return False
numbers = timelength.split(':')
try:
for number in numbers[:-1]:
int(number)
float(numbers[-1])
return True
except ValueError:
return False
def is_timelength_phrase(
timelength: typing.Any,
) -> TypeGuard[spec.TimelengthPhrase]:
"""return bool of whether input is TimelengthPhrase"""
if not isinstance(timelength, str):
return False
unit_names_to_labels = timelength_units.get_unit_labels()
pieces = timelength.split(', ')
try:
for piece in pieces:
amount, unit_name = piece.split(' ')
float(amount)
if unit_name not in unit_names_to_labels:
return False
return True
except Exception:
return False
def is_timelength_clock_phrase(
timelength: typing.Any,
) -> TypeGuard[spec.TimelengthClockPhrase]:
"""return bool of whether input is TimelengthClockPhrase"""
if not isinstance(timelength, str):
return False
pieces = timelength.split(', ')
if ':' in pieces[-1]:
clock = pieces[-1]
if not is_timelength_clock(clock):
return False
phrase = ', '.join(pieces[:-1])
else:
phrase = ', '.join(pieces)
return is_timelength_phrase(phrase)
def is_timelength_timedelta(
timelength: typing.Any,
) -> TypeGuard[spec.TimelengthTimedelta]:
"""return bool of whether input is TimelengthTimedelta"""
return isinstance(timelength, datetime.timedelta)
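# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# The calls below only use functions defined in this file and assume the package's
# relative imports (`spec`, `exceptions`, `timelength_units`) resolve as usual.
# Phrase detection depends on whatever `timelength_units.get_unit_labels()` returns,
# so no phrase example is asserted here.
#
#     detect_timelength_representation(90)         # -> 'TimelengthSeconds'
#     detect_timelength_representation(90.5)       # -> 'TimelengthSecondsPrecise'
#     detect_timelength_representation('90s')      # -> 'TimelengthLabel'
#     detect_timelength_representation('1:30:00')  # -> 'TimelengthClock'
#     is_timelength(object())                      # -> False (no representation matches)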
| StarcoderdataPython |
3270854 | <filename>openprescribing/pipeline/management/commands/org_codes.py
from io import BytesIO
import requests
from zipfile import ZipFile
import datetime
import os
from django.conf import settings
from django.core.management import BaseCommand
"""Practice and CCG metadata, keyed by code.
Similar data, pertaining to specific points in time, is also found in
the files downloaded to `data/raw_data/T<datestamp>ADDR+BNFT.CSV`.
We prefer data from these files to the `ADDR+BNFT` versions, but the
data we download here is only available as current data; this means we
would lack address information for historic data points if we only
relied on these org files.
This data is therefore worth updating every month.
"""
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("--ccg", action="store_true")
parser.add_argument("--practice", action="store_true")
parser.add_argument("--postcode", action="store_true")
parser.add_argument("--region", action="store_true")
parser.add_argument("--pcn", action="store_true")
def handle(self, *args, **kwargs):
self.verbose = kwargs["verbosity"] > 1
if kwargs["practice"]:
self.fetch_and_extract_zipped_file("epraccur", "practice_details")
if kwargs["ccg"]:
self.fetch_and_extract_zipped_file("eccg", "ccg_details")
if kwargs["postcode"]:
self.fetch_and_extract_zipped_file("gridall", "nhs_postcode_file")
if kwargs["region"]:
self.fetch_and_extract_zipped_file("eauth", "region_details")
if kwargs["pcn"]:
self.fetch_and_extract_zipped_file("epcn", "pcn_details")
def fetch_and_extract_zipped_file(self, base_filename, dest_dirname):
"""Grab a zipfile from a url, and extract a single file from it."""
zip_filename = base_filename + ".zip"
if base_filename == "epcn":
url = "https://nhs-prod.global.ssl.fastly.net/binaries/content/assets/website-assets/services/ods/data-downloads-other-nhs-organisations/epcn-.zip"
filename = "ePCN.xlsx"
else:
url = "https://files.digital.nhs.uk/assets/ods/current/" + zip_filename
filename = base_filename + ".csv"
buf = BytesIO()
buf.write(requests.get(url).content)
buf.flush()
zipfile = ZipFile(buf)
dest_dir = os.path.join(
settings.PIPELINE_DATA_BASEDIR,
dest_dirname,
datetime.datetime.today().strftime("%Y_%m"),
)
zipfile.extract(filename, dest_dir)
| StarcoderdataPython |
141861 | <reponame>tonnkie/USC-Second-hand-Group
# coding:utf-8
import re
def emoji_print(rawstring):
flag = True
while flag:
start = rawstring.find('<span')
end = rawstring.find('></span>')
if start < 0 or end < 0:
flag = False
break
emojistr = rawstring[start : end + 8]
try:
emojicode = re.search(' emoji(.+?)\"', emojistr).group(1)
emoji = chr(int(emojicode, 16))
except:
emoji = ""
rawstring = rawstring.replace(emojistr, emoji)
return rawstring
s = u'【买】<span class="emoji emoji1f604"></span>过滤掉了'
print emoji_print(s)
| StarcoderdataPython |
3257332 | <filename>run.py
import itertools
import json
import random
import sys
import time
import networkx as nx
import eon
if hasattr(time, 'process_time_ns'):
process_time = time.process_time_ns
else:
process_time = time.process_time
def run(algorithm, seed, weight='distance', log=1):
if algorithm == 'filtered':
dijkstra = eon.dijkstra_filtered
elif algorithm == 'generic':
dijkstra = eon.dijkstra_generic
else:
raise ValueError
slot_bw = 1
max_m = 4
print('seed', 'nodes', 'topo_num', 'edges', 'units', 'mean_demand', 'n', 'bad', 'cum_demand', 'cum_util', 'src', 'dst', 'demand', 'paths', 'path_len', 'cu_start', 'elapsed')
for mean_demand_fraction in [10, 20]:
for nodes in range(25, 525, 25):
for topo_num in range(0, 10):
for units in range(100, 1100, 100):
mean_demand = units // mean_demand_fraction
rng_demand = random.Random(seed)
rng_path_choice = random.Random(seed)
topo_name = 'gabriel/%s/%s' % (nodes, topo_num)
g = nx.node_link_graph(json.load(open('topo_lib/%s.json' % topo_name)))
if not isinstance(g, nx.DiGraph):
g = g.to_directed()
edges = 0
for edge in g.edges.values():
edge['au'] = (1 << units) - 1
edge['cost'] = 1
edges += 1
demands_keys = []
demands_values = []
if 'demands' in g.graph:
for src, demands in g.graph['demands'].items():
for dst, demand in demands.items():
demands_keys.append((src, dst))
demands_values.append(demand)
if not demands_keys:
demands_keys = [pair for pair in itertools.permutations(g, 2)]
n = 0
bad = 0
cum_time = 0
cum_demand = 0
cum_util = 0
reach_1 = 0
for line in open('topo_lib/%s.tex' % topo_name):
if 'Max. length of disjoint shortest paths' in line:
sp_length = float(line.split()[-1])
reach_1 = max(reach_1, sp_length * 1.5)
reach_m = reach_1 / 2 ** (max_m - 1)
while True:
if demands_values:
src, dst = rng_demand.choices(demands_keys, demands_values)[0]
else:
src, dst = rng_demand.choice(demands_keys)
demand = rng_demand.randint(1, mean_demand * 2 - 1)
start_time = process_time()
paths = dijkstra(g, {src}, dst, demand, units, slot_bw, reach_m, max_m, weight=weight)
elapsed = process_time() - start_time
if isinstance(elapsed, float):
elapsed = int(elapsed * 1000000000)
if paths[1]:
cu_start, cu_end, path = rng_path_choice.choice(sorted(paths[1]))
cu_set = ((1 << demand) - 1) << cu_start
if log > 2:
print((cu_start, cu_start + demand), path)
for u, v, key in path:
# noinspection PyProtectedMember
g._succ[u][v][key]['au'] ^= cu_set
else:
cu_start, path = 0, []
if log:
print(seed, nodes, topo_num, edges, units, mean_demand, n, bad, cum_demand, cum_util, src, dst, demand, len(paths), len(path), cu_start, elapsed)
n += 1
cum_time += elapsed
if path:
cum_demand += demand
cum_util += demand * len(path)
else:
bad += 1
if cum_util / (edges * units) > 0.6:
break
if log == 0:
print(seed, nodes, topo_num, edges, units, mean_demand, n, bad, cum_demand, cum_util / (edges * units), '-', '-', '-', '-', '-', '-', cum_time)
# exit()
if __name__ == '__main__':
run(sys.argv[1], int(sys.argv[2]))
| StarcoderdataPython |
1695264 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This example demonstrates the use of Convolution1D for text classification.
# Classify the sentiment of IMDB movie reviews with a 1D-CNN model
# Output after 5 epochs on CPU(i5-7500)/GPU(1050Ti): ~0.8773
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb
max_features = 5000  # vocabulary size
maxlen = 400  # length of each sample sequence
batch_size = 32  # mini-batch size
embedding_dims = 50  # word-embedding dimension
filters = 250  # number of 1D-CNN convolution kernels (i.e. the output dimension)
kernel_size = 3  # int, or list/tuple of a single int: length of the spatial/temporal convolution window
hidden_dims = 250  # number of neurons in the hidden layer
epochs = 5  # number of training epochs
# The dataset is IMDB movie reviews: 50,000 reviews labelled as positive or negative
# Each review is encoded as a sequence of word indices (smaller index = more frequent word)
# num_words: word indices in each sample must not exceed num_words
print('========== 1.Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print('----- train sequences', len(x_train))
print('----- test sequences', len(x_test))
# Align each word-index sequence to the same length: extra words at the front/back are dropped; shorter sequences are padded with 0
print('========== 2.Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('----- x_train shape:', x_train.shape)
print('----- x_test shape:', x_test.shape)
# Build the neural network model
print('========== 3.Build model...')
model = Sequential()
# input_dim=max_features is the vocabulary size, output_dim=embedding_dims=50 is the word-embedding dimension, input_length=maxlen is the length of each sample
model.add(Embedding(max_features, embedding_dims, input_length=maxlen))  # output (*, 400, 50)
model.add(Dropout(0.2))
# 1D convolution layer: output dimension is filters, stride is strides
model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1))  # output (*, 398, 250)
# global max pooling over the temporal dimension
model.add(GlobalMaxPooling1D())  # output (*, 250)
model.add(Dense(hidden_dims))  # output (*, 250)
model.add(Dropout(0.2))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
# Compile the network, train it, and evaluate performance on the test set
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test))
| StarcoderdataPython |
1660041 | from sko.GA import GA
def demo_func(x):
x1, x2, x3 = x
return x1 ** 2 + (x2 - 0.05) ** 2 + x3 ** 2
ga = GA(func=demo_func, lb=[-1, -10, -5], ub=[2, 10, 2], max_iter=500)
best_x, best_y = ga.fit()
| StarcoderdataPython |
3206616 | from datetime import time
import time as t
from heatmiserV3.devices import Master, Device
from heatmiserV3.config import Config
import logging
import logging.config
from heatmiserV3.protocol_manager import ProtocolManager
def main():
log_config = Config.LOG_CONFIG
logging.config.dictConfig(log_config)
logger = logging.getLogger(__name__)
master = Master(Config.MASTER_IRQ_ADDRESS)
location = Config.MASTER_LOCATION['location']
if Config.MASTER_LOCATION['type'].casefold() == 'ip'.casefold():
master.connect_ip(location)
elif Config.MASTER_LOCATION['type'].casefold() == 'device'.casefold():
master.connect_device(location)
else:
raise ValueError("Unrecognized value for Config.MASTER_LOCATION.type, try ip or device",
Config.MASTER_LOCATION[
'type'])
tm1 = Device("tm1", "Boat Timer", 0)
protocol = ProtocolManager().get_protocol("tm1")
# # sync time always
# logger.info("Syncing time")
# dow_time = ProtocolManager.get_dow_time()
# response = master.update_field(tm1, "Current time", dow_time)
# logger.info("Time synced, response={}".format(ProtocolManager().to_hex_str(response)))
# t.sleep(1)
# logger.info("Updating weekday schedule")
# timer_block = ProtocolManager().get_timer_block(
# [[time(hour=5, minute=8), time(hour=11, minute=17)], [time(hour=19), time(hour=21, minute=9)]])
# response = master.update_field(tm1, "Weekday", timer_block)
# logger.info("Updated weekday schedule, response={}".format(ProtocolManager().to_hex_str(response)))
# #
t.sleep(1)
# master.update_field(tm1, "On/Off", 1)
#master.update_field(tm1, "Current timer state", 2) #1=on 2=off
#
response = master.send_request_all(tm1)
parsed_response = protocol.parse_response(response)
print("parsed response:")
for k, v in sorted(parsed_response.items()):
print(k, v)
master.close_connection()
if __name__ == '__main__':
main()
| StarcoderdataPython |
1703437 | <reponame>cyphermaster/beholder
#!/usr/bin/python2.7
# -*- coding: utf8 -*-
"""
Copyright [2014,2015] [beholder developers]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import redis
import argparse
import bz2
import Queue
import sys
import os
import time
import ConfigParser
from termcolor import colored
from threading import Thread
REDIS_TIMEOUT = 120
class Rainbow:
def __init__(self, cmdln_args, redis_servers, redis_timeout):
self.args = cmdln_args
self.redis_timeout = redis_timeout
self.redis_servers = redis_servers
# Shared queue to store query results from Redis
self.output_queue = Queue.Queue()
def run(self):
if self.args.available:
self.redis_get_all_keys()
else:
self.redis_get_specific_key(self.args.file)
def formated_output(self, now, hostname, redis_ts, mtime_ts, line):
"""Colored output of a Redis entry plus metadata"""
redis_ts_str = time.strftime('%H:%M',
time.localtime(int(redis_ts)))
mtime_ts_str = time.strftime('%Y-%m-%dT%H:%M',
time.localtime(int(mtime_ts)))
if self.args.color:
hostname = colored(hostname, 'cyan')
diff = now - float(redis_ts)
if diff < 600:
redis_ts_str = colored(redis_ts_str, 'green')
elif diff > 3600:
redis_ts_str = colored(redis_ts_str, 'red')
else:
redis_ts_str = colored(redis_ts_str, 'yellow')
mtime_ts_str = colored(mtime_ts_str, 'blue')
line = colored(line, 'white', 'on_grey')
print "%s (%s/%s) %s" % (hostname, redis_ts_str, mtime_ts_str, line)
def redis_query(self, redis_server, method, key):
""" Query a Redis server (within a thread)
and write results to the shared queue"""
print "Querying_redis_server %s" % redis_server
r = redis.Redis(host=redis_server, port=6379)
if method == "GET":
# Fetch all keys matching the requested pattern
items = r.keys('*#%s' % key)
for item in items:
result = r.get(item)
if result:
# Split the key name, which format is <hostname>#<file>
try:
hostname, key = item.split('#')
except ValueError:
print "Warning - Skipping key with invalid \
format: %s" % item
continue
self.output_queue.put((hostname, result))
elif method == "KEYS":
# Fetch all available keys
items = r.keys('*')
for item in items:
# Split the key name, which format is <hostname>#<file>
try:
hostname, key = item.split('#')
except ValueError:
print "Warning - Skipping key with invalid \
format: %s" % item
continue
self.output_queue.put(key)
else:
print "Not supported. You're doing it wrong."
def redis_get_all_keys(self):
""" Fetch available keys from all Redis servers"""
threads = []
for redis_server in self.redis_servers:
worker = Thread(target=self.redis_query,
args=(redis_server, 'KEYS', '',))
worker.start()
threads.append(worker)
for t in threads:
t.join(REDIS_TIMEOUT)
self.display_redis_all_keys()
def display_redis_all_keys(self):
""" Print a nicely formatted summary of all keys found on
all the Redis servers
"""
uniq_files = {}
while not self.output_queue.empty():
i = self.output_queue.get()
self.output_queue.task_done()
if i in uniq_files:
uniq_files[i] += 1
else:
uniq_files[i] = 1
if self.args.color:
for k in sorted(uniq_files):
# 13 here because of color escape characters \033[xx(x)
print "%13s %s" % (colored(uniq_files[k], 'blue'),
colored(k, 'cyan'))
else:
for k in sorted(uniq_files):
print "%5s %s" % (uniq_files[k], k)
def display_redis_specific_key(self):
""" Display contents of a specific key through all the Redis servers"""
# Actual timestamp of Redis query
now = time.time()
while not self.output_queue.empty():
(hostname, result) = self.output_queue.get()
self.output_queue.task_done()
# Here we split our custom Redis value format:
# <base64_encoded_data> <epoch_redis> <epoch_mtime> <file_checksum>
fields = result.split(' ')
base64_data = fields[0]
try:
epoch_redis = fields[1]
epoch_mtime = fields[2]
file_md5 = fields[3]
except IndexError:
epoch_redis = 0
epoch_mtime = 0
file_md5 = "no_signature"
try:
c = bz2.decompress(base64_data.decode("base64"))
except:
c = "<error while decompressing bz2 data"
if self.args.signature:
self.formated_output(now, hostname, epoch_redis,
epoch_mtime, file_md5)
else:
for line in c.split('\n'):
self.formated_output(now, hostname, epoch_redis,
epoch_mtime, line)
def redis_get_specific_key(self, key):
""" Fetch contents of a specific key over all the Redis servers"""
threads = []
for redis_server in self.redis_servers:
worker = Thread(target=self.redis_query,
args=(redis_server, 'GET', key,))
worker.start()
threads.append(worker)
for t in threads:
t.join(REDIS_TIMEOUT)
keys_found = self.output_queue.qsize()
self.display_redis_specific_key()
print "### Keys found: %s ###" % keys_found
if keys_found == 0:
print 'try the -ac switch for a list of all available keys'
def autogen_conf(f):
config.add_section('query_servers')
config.set('query_servers', 'hostnames', 'redis1.example.org, \
redis2.example.org')
config.write(open(f, 'w'))
print("Created an empty config file at %s." % f)
print("Please modify it & re-run this command.")
sys.exit(1)
def test_default_conf(f):
if redis_servers[0] == 'redis1.example.org':
print("Please set your own redis instance in %s file \
and re-run this command." % f)
sys.exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("-f", "--file",
help="File to query")
group.add_argument("-a", "--available",
help="Show available files", action='store_true')
parser.add_argument("-c", "--color",
help="Colorize output", action='store_true')
parser.add_argument("-s", "--signature",
help="MD5 of file", action='store_true')
parser.add_argument("-C", "--conffile",
help="Configuration file to Use")
args = parser.parse_args()
config = ConfigParser.RawConfigParser()
if args.conffile:
conffile = args.conffile
else:
conffile = os.path.abspath(os.path.expanduser('~/.rainbow.cf'))
if not os.path.exists(conffile):
autogen_conf(conffile)
config.read(conffile)
redis_servers = config.get('query_servers', 'hostnames').split(',')
test_default_conf(conffile)
rb = Rainbow(args, redis_servers, REDIS_TIMEOUT)
rb.run()
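# --- Illustrative sketch of the expected Redis value format (an assumption added for clarity, not part of this tool) ---
# display_redis_specific_key() above expects each "<hostname>#<file>" key to hold a value of the
# form "<base64(bz2(contents))> <epoch_redis> <epoch_mtime> <md5>". A producer written in the same
# Python 2 style as this script could build such a value roughly like this:
#
#     import bz2, hashlib, os, time
#     data = open('/etc/motd').read()
#     encoded = bz2.compress(data).encode('base64').replace('\n', '')
#     value = "%s %d %d %s" % (encoded, time.time(), os.path.getmtime('/etc/motd'),
#                              hashlib.md5(data).hexdigest())
#     # then: redis.Redis('redis1.example.org').set('myhost#/etc/motd', value)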
| StarcoderdataPython |
1605870 | <gh_stars>0
a,b = map(int, input().split())
if (b % a == 0) or (a % b == 0):
print("Sao Multiplos")
else:
print("Nao sao Multiplos") | StarcoderdataPython |
4815472 | <reponame>Elec5616/prophessor<filename>phabricator/api.py
import subprocess
import json
from local_settings import PHAB_API_ADDRESS, PHAB_API_TOKEN
phab_api_templates = {
"remove_user_from_project": {
"method": "project.edit",
"data": "api.token=" + PHAB_API_TOKEN + "&transactions[0][type]=members.remove&transactions[0][value][0]=%s&objectIdentifier=%s",
"args": ("user_phid", "group_phid")
},
"add_user_to_project": {
"method": "project.edit",
"data": "api.token=" + PHAB_API_TOKEN + "&transactions[0][type]=members.add&transactions[0][value][0]=%s&objectIdentifier=%s",
"args": ("user_phid", "group_phid")
},
"make_project_subproject": {
"method": "project.edit",
"data": "api.token=" + PHAB_API_TOKEN + "&transactions[0][type]=parent&transactions[0][value]=%s&objectIdentifier=%s",
"args": ("parent_phid", "child_phid")
},
"get_project_details": {
"method": "project.query",
"data": "api.token=" + PHAB_API_TOKEN + "&phids[]=%s",
"args": ("group_name")
},
"create_project": {
"method": "project.create",
"data": "api.token=" + PHAB_API_TOKEN + "&name=%s&icon=%s&color=%s%s",
"args": ("name", "icon", "color", "members[]=%s")
},
"phid_lookup": {
"method": "phid.lookup",
"data": "api.token=" + PHAB_API_TOKEN + "&%s",
"args": ("name[]=%s",)
},
"create_repository": {
"method": "repository.create",
"data": "api.token=" + PHAB_API_TOKEN + "&name=%s&callsign=%s&uri=%s&vcs=%s",
"args": ("name", "callsign", "uri", "vcs")
},
"create_raw_diff": {
"method": "differential.createrawdiff",
"data": "api.token=" + PHAB_API_TOKEN + "&diff=%s&viewPolicy=%s",
"args": ("diff", "view_policy")
},
}
class Call():
def raw(self, method, data_string):
# print("API CALL: %s -- %s" % (method, data_string))
return subprocess.check_output(["curl", "-s", PHAB_API_ADDRESS + "/api/" + method, "-d", data_string])
def template(self, template_name, args):
response = json.loads(self.raw(phab_api_templates[template_name]["method"],
phab_api_templates[template_name]["data"] % args).decode("ascii"))
if response["error_code"]:
raise Exception("Call to API Template %s resulted in an error code: %s (%s)" % (
template_name, str(response["error_code"]), response["error_info"]))
return response["result"]
api_call = Call()
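# --- Illustrative sketch (added for clarity): expanding one of the templates above ---
# The PHIDs below are made-up placeholders.
#
#     api_call.template(
#         "add_user_to_project",
#         ("PHID-USER-aaaaaaaaaaaa", "PHID-PROJ-bbbbbbbbbbbb"),
#     )
#     # POSTs to <PHAB_API_ADDRESS>/api/project.edit with the filled-in
#     # transactions[0][type]=members.add / transactions[0][value][0]=<user_phid> /
#     # objectIdentifier=<group_phid> form data, returns the decoded "result"
#     # payload, and raises if the response carries an error_code.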
| StarcoderdataPython |
3248685 | <filename>tests/conftest.py
from asyncio import get_event_loop
import pytest
@pytest.fixture
def event_loop():
return get_event_loop()
| StarcoderdataPython |
1605738 | <reponame>agaldemas/ngsi-timeseries-api
"""
Data structures and operations to manage work queues.
Notice these data structures and operations abstract away the underlying
RQ implementation so clients don't have to depend on the RQ API.
"""
from enum import Enum
from typing import Callable, Iterable, List, Optional
from pydantic import BaseModel
from rq.job import Job, JobStatus
from wq.core.task import WorkQ, _tasklet_from_rq_job, RqExcMan
from wq.core.rqutils import RqJobId, find_job_ids, find_failed_job_ids, \
find_successful_job_ids, find_pending_job_ids, load_jobs, delete_jobs, \
starts_with_matcher, count_jobs, count_pending_jobs, count_failed_jobs, \
count_successful_jobs
class TaskStatus(Enum):
"""
Enumerate the states a task can be in.
"""
PENDING = 'pending'
"""
A task is in the pending state from the time it gets enqueued to the
time it gets executed for the last time, i.e. until it gets retried
for the last time if previous runs failed.
"""
SUCCEEDED = 'succeeded'
"""
A task is in the succeeded state if it ran to completion successfully.
"""
FAILED = 'failed'
"""
A task is in the failed state if it failed permanently, i.e. there
was an error on every configured retry.
"""
UNKNOWN = 'unknown'
"""
A task is in the unknown state if its actual state (pending, succeeded,
or failed) couldn't be determined. This can happen momentarily as the
task is moved from state to state because transitions aren't atomic.
"""
def _task_status_from_job_status(s: JobStatus) -> TaskStatus:
if s in (JobStatus.QUEUED, JobStatus.STARTED,
JobStatus.DEFERRED, JobStatus.SCHEDULED):
return TaskStatus.PENDING
if s == JobStatus.FINISHED:
return TaskStatus.SUCCEEDED
if s == JobStatus.FAILED:
return TaskStatus.FAILED
return TaskStatus.UNKNOWN
class TaskRuntimeInfo(BaseModel):
"""
Runtime info about the task such as its work queue ID and status.
"""
task_id: str
task_type: str
status: TaskStatus
retries_left: Optional[int]
errors: List[str] = []
class TaskInfo(BaseModel):
"""
Aggregate of task runtime info and input, i.e. the data the task got
as input for processing.
"""
runtime: TaskRuntimeInfo
input: BaseModel
def _task_info_from_rq_job(j: Job) -> TaskInfo:
tasklet = _tasklet_from_rq_job(j)
status = _task_status_from_job_status(j.get_status())
errors = [repr(e) for e in RqExcMan.list_exceptions(j)]
return TaskInfo(
runtime=TaskRuntimeInfo(
task_id=tasklet.task_id().id_repr(),
task_type=str(type(tasklet)),
status=status,
retries_left=j.retries_left,
errors=errors
),
input=tasklet.task_input()
)
class QMan:
"""
Operations to manage a given work queue.
"""
def __init__(self, q: WorkQ):
self._q = q
self._pending_jid_finder = lambda m: find_pending_job_ids(q, m)
self._successful_jid_finder = lambda m: find_successful_job_ids(q, m)
self._failed_jid_finder = lambda m: find_failed_job_ids(q, m)
@staticmethod
def _load(jid_finder: Callable[[str], Iterable[RqJobId]],
task_id_prefix: str) -> Iterable[TaskInfo]:
matcher = starts_with_matcher(task_id_prefix)
job_ids = jid_finder(matcher)
js = load_jobs(job_ids)
for j in js:
yield _task_info_from_rq_job(j)
@staticmethod
def _count_tasks(jid_finder: Callable[[str], Iterable[RqJobId]],
task_id_prefix: str) \
-> int:
matcher = starts_with_matcher(task_id_prefix)
job_ids = jid_finder(matcher)
return len(list(job_ids))
@staticmethod
def load_tasks(task_id_prefix: str) -> Iterable[TaskInfo]:
"""
Load all the tasks with an ID having the same prefix as the input.
Stream data, i.e. don't load all tasks in memory but fetch them on
demand as the consumer iterates the result set.
:param task_id_prefix: the task ID prefix to match.
:return: a generator to iterate the matching tasks.
"""
return QMan._load(find_job_ids, task_id_prefix)
@staticmethod
def load_tasks_runtime_info(task_id_prefix: str) \
-> Iterable[TaskRuntimeInfo]:
"""
Same as ``load_tasks`` but only return task runtime info without
inputs.
"""
for t in QMan.load_tasks(task_id_prefix):
yield t.runtime
@staticmethod
def delete_tasks(task_id_prefix: str):
"""
Delete all the tasks with an ID having the same prefix as the input.
:param task_id_prefix: the task ID prefix to match.
"""
matcher = starts_with_matcher(task_id_prefix)
job_ids = find_job_ids(matcher)
delete_jobs(job_ids)
def count_all_tasks(self, task_id_prefix: Optional[str]) -> int:
"""
Count all the tasks with an ID having the same prefix as the input
if given, otherwise return the total number of tasks linked to the
work queue.
:param task_id_prefix: the task ID prefix to match.
:return: the number of matching tasks.
"""
if task_id_prefix is None:
return count_jobs(self._q)
return self._count_tasks(find_job_ids, task_id_prefix)
def count_pending_tasks(self, task_id_prefix: Optional[str]) -> int:
"""
Count all the pending tasks with an ID having the same prefix as
the input if given, otherwise return the total number of tasks
linked to the work queue that are in the pending state.
:param task_id_prefix: the task ID prefix to match.
:return: the number of matching tasks.
"""
if task_id_prefix is None:
return count_pending_jobs(self._q)
return self._count_tasks(self._pending_jid_finder, task_id_prefix)
def count_successful_tasks(self, task_id_prefix: Optional[str]) -> int:
"""
Count all the tasks with an ID having the same prefix as the input
that executed successfully, i.e. tasks in the succeeded state. If
the input is ``None``, count all tasks in the succeeded state that
are linked to the queue.
:param task_id_prefix: the task ID prefix to match.
:return: the number of matching tasks.
"""
if task_id_prefix is None:
return count_successful_jobs(self._q)
return self._count_tasks(self._successful_jid_finder, task_id_prefix)
def count_failed_tasks(self, task_id_prefix: Optional[str]) -> int:
"""
Count all the failed tasks with an ID having the same prefix as
the input if given, otherwise return the total number of tasks
linked to the work queue that are in the failed state.
:param task_id_prefix: the task ID prefix to match.
:return: the number of matching tasks.
"""
if task_id_prefix is None:
return count_failed_jobs(self._q)
return self._count_tasks(self._failed_jid_finder, task_id_prefix)
def load_pending_tasks(self, task_id_prefix: str) -> Iterable[TaskInfo]:
"""
Load all the pending tasks with an ID having the same prefix as
the input.
Stream data, i.e. don't load all tasks in memory but fetch them on
demand as the consumer iterates the result set.
:param task_id_prefix: the task ID prefix to match.
:return: a generator to iterate the matching tasks.
"""
return self._load(self._pending_jid_finder, task_id_prefix)
def load_successful_tasks(self, task_id_prefix: str) -> Iterable[TaskInfo]:
"""
Load all the tasks with an ID having the same prefix as the input
that executed successfully, i.e. tasks in the succeeded state.
Stream data, i.e. don't load all tasks in memory but fetch them on
demand as the consumer iterates the result set.
:param task_id_prefix: the task ID prefix to match.
:return: a generator to iterate the matching tasks.
"""
return self._load(self._successful_jid_finder, task_id_prefix)
def load_failed_tasks(self, task_id_prefix: str) -> Iterable[TaskInfo]:
"""
Load all the failed tasks with an ID having the same prefix as
the input.
Stream data, i.e. don't load all tasks in memory but fetch them on
demand as the consumer iterates the result set.
:param task_id_prefix: the task ID prefix to match.
:return: a generator to iterate the matching tasks.
"""
return self._load(self._failed_jid_finder, task_id_prefix)
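# --- Minimal usage sketch (added for clarity; assumes an existing WorkQ instance `q`
# and a reachable RQ/Redis backend; the "etl:" prefix is a made-up example) ---
#
#     man = QMan(q)
#     man.count_all_tasks(None)          # every task linked to the queue
#     man.count_failed_tasks("etl:")     # failed tasks whose ID starts with "etl:"
#     for task in man.load_failed_tasks("etl:"):
#         print(task.runtime.task_id, task.runtime.errors)
#     QMan.delete_tasks("etl:")          # drop every task with that ID prefix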
| StarcoderdataPython |
117289 | #!/usr/bin/env python3
import os
import pathlib
import re
import subprocess
import sys
from typing import List
def main():
os.chdir(str(get_toplevel_dir()))
check_unstaged_changes()
files = get_tracked_files()
check_go_source(files)
make('build')
make('test')
make('test-large')
def check_go_source(files: List[pathlib.Path]):
go_files = [f for f in files if f.match('*.go')]
if not go_files:
return
check_gofmt(go_files)
check_golint('./...')
# TODO: change govet path to ./... when openapi-generator is removed
check_govet('./internal/go/...')
def check_gofmt(go_files: List[pathlib.Path]):
unformatted_files = gofmt(go_files)
if unformatted_files:
print(" Go files must be formatted with gofmt. Please run:")
for unformatted_file in unformatted_files:
print(" gofmt -s -w {}".format(unformatted_file))
sys.exit(1)
def check_golint(go_files: str):
warnings = golint(go_files)
# TODO: remove filter when openapi-generator is removed
def ignore(w):
        for p in [r'logger\.go', r'model_.+\.go', r'routers\.go']:
if re.search(p, w):
return True
return False
warnings = [w for w in warnings if not ignore(w)]
if warnings:
print(" Go files must pass golint. Please fix:")
for warning in warnings:
print(" {}".format(warning))
sys.exit(1)
def check_govet(package: str):
warnings = govet(package)
if warnings:
print(" Go files must pass go vet. Please fix:")
for warning in warnings:
print(" {}".format(warning))
sys.exit(1)
def check_unstaged_changes():
unstaged_changes = get_unstaged_changes()
if unstaged_changes:
print("Cannot push with unstaged changes:")
for unstaged_change in unstaged_changes:
print(" {}".format(unstaged_change))
print("Please run either:")
print(" git add -A && git commit")
print(" git stash -k -u")
sys.exit(1)
def get_tracked_files() -> List[pathlib.Path]:
result = subprocess.run(
['git', 'ls-tree', '-r', 'HEAD', '--name-only'],
check=True,
stdout=subprocess.PIPE)
files = result.stdout.decode('utf-8').rstrip('\n').splitlines()
return [pathlib.Path(f) for f in files]
def get_toplevel_dir() -> pathlib.Path:
result = subprocess.run(
['git', 'rev-parse', '--show-toplevel'],
check=True,
stdout=subprocess.PIPE)
return pathlib.Path(result.stdout.decode('utf-8').rstrip('\n'))
def get_unstaged_changes() -> List[str]:
result = subprocess.run(
['git', 'status', '--porcelain'],
check=True,
stdout=subprocess.PIPE)
statuses = result.stdout.decode('utf-8').rstrip('\n')
matches = re.findall(r"^((.D|.M|\?\?).+)$", statuses, re.MULTILINE)
return [m[0] for m in matches]
def gofmt(files: List[pathlib.Path]) -> List[str]:
print("gofmt")
files_arg = [str(f) for f in files]
result = subprocess.run(
['gofmt', '-l', '-s'] + files_arg,
check=True,
stdout=subprocess.PIPE)
return result.stdout.decode('utf-8').rstrip('\n').splitlines()
def golint(files: str) -> List[str]:
print("golint")
result = subprocess.run(
['golint', files],
check=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
return result.stdout.decode('utf-8').rstrip('\n').splitlines()
def govet(package: str) -> List[str]:
print("go vet")
result = subprocess.run(
['go', 'vet', package],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
return result.stdout.decode('utf-8').rstrip('\n').rstrip("exit status 1").splitlines()
def make(command: str):
print("make {}".format(command))
result = subprocess.run(
['make', command],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
if result.returncode != 0:
print("make {} failed:".format(command))
print(result.stdout.decode('utf-8').rstrip('\n'))
sys.exit(1)
main()
| StarcoderdataPython |
163764 | <filename>dynn/activations.py
#!/usr/bin/env python3
"""
Activation functions
====================
Common activation functions for neural networks.
Most of those are wrappers around standard dynet operations
(eg. ``rectify`` -> ``relu``)
"""
import dynet as dy
def identity(x):
"""The identity function
:math:`y=x`
Args:
x (:py:class:`dynet.Expression`): Input expression
Returns:
:py:class:`dynet.Expression`: :math:`x`
"""
return x
def tanh(x):
"""The hyperbolic tangent function
:math:`y=\\tanh(x)`
Args:
x (:py:class:`dynet.Expression`): Input expression
Returns:
:py:class:`dynet.Expression`: :math:`\\tanh(x)`
"""
return dy.tanh(x)
def sigmoid(x):
"""The sigmoid function
:math:`y=\\frac{1}{1+e^{-x}}`
Args:
x (:py:class:`dynet.Expression`): Input expression
Returns:
:py:class:`dynet.Expression`: :math:`\\frac{1}{1+e^{-x}}`
"""
return dy.logistic(x)
def relu(x):
"""The REctified Linear Unit
:math:`y=\max(0,x)`
Args:
x (:py:class:`dynet.Expression`): Input expression
Returns:
        :py:class:`dynet.Expression`: :math:`\\max(0,x)`
"""
return dy.rectify(x)
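# Minimal usage sketch (illustrative addition, not part of the original module;
# assumes a working DyNet install):
if __name__ == "__main__":
    dy.renew_cg()
    x = dy.inputVector([-1.0, 0.5, 2.0])
    print(relu(x).npvalue())     # approximately [0.  0.5 2. ]
    print(sigmoid(x).npvalue())  # elementwise logistic, values in (0, 1)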
| StarcoderdataPython |
1773770 | <gh_stars>0
# Promote all symbols from submodules to the top-level package (TODO is this bad practice?)
from .api import *
from .core import *
from .kubernetes import *
from .settings import *
from .slo import *
| StarcoderdataPython |
1782253 | # -*- coding: utf-8 -*-
from scipy import sparse as spspa
from scipy.sparse import linalg as spspalinalg
from time import time
from root.config.main import cOmm, rAnk, mAster_rank
from tools.linear_algebra.data_structures.global_matrix.main import LocallyFullVector
def ___scipy_sparse_linalg_v0___(A, b, COD=None):
"""
:param A:
:param b:
:param COD: clean old data?
:return: Return a tuple of 5 outputs:
1. (LocallyFullVector) results -- The result vector.
2. (0) info -- The info which provides convergence information:
* 0 : successful exit
* >0 : convergence to tolerance not achieved, number of iterations
* -1 : divergence
3. (0) beta -- The residual.
4. (0) ITER -- The number of outer iterations.
5. (str) message
"""
cOmm.barrier()
t0 = time()
# ...
if COD is None: COD = True
A = A.do.gather_M_to_core(core=mAster_rank, clean_local=COD)
b = b.do.gather_V_to_core(core=mAster_rank, clean_local=COD)
if rAnk == mAster_rank:
shape = A.shape[0]
assert shape == b.shape[0], f"A:{A.shape} does not match b{b.shape}."
x = spspalinalg.spsolve(A, b)
x = spspa.csr_matrix(x).T
else:
x = None
x = cOmm.bcast(x, root=mAster_rank)
x = LocallyFullVector(x)
t3 = time()
message = f'scipy_sparse_linalg_v0_direct = [SYSTEM-SHAPE: {A.shape}] >>> costs {int((t3-t0)*100)/100}s.'
return x, 0, 0, 0, message | StarcoderdataPython |
186654 | # Generated with Iteration
#
from enum import Enum
from enum import auto
class Iteration(Enum):
""""""
COLUMN = auto()
GRID = auto()
def label(self):
if self == Iteration.COLUMN:
return "Column"
if self == Iteration.GRID:
return "Grid" | StarcoderdataPython |
3324372 | from rdflib import XSD, Literal
class TestTokenDatatype:
def test1(self):
lit2 = Literal("\two\nw", datatype=XSD.normalizedString)
lit = Literal("\two\nw", datatype=XSD.string)
assert str(lit) != str(lit2)
def test2(self):
lit = Literal("\tBeing a Doctor Is\n\ta Full-Time Job\r", datatype=XSD.token)
st = Literal("Being a Doctor Is a Full-Time Job", datatype=XSD.string)
assert Literal.eq(st, lit) is False
assert str(lit) == str(st)
def test3(self):
lit = Literal(" hey\nthere ", datatype=XSD.token).n3()
assert lit == '"hey there"^^<http://www.w3.org/2001/XMLSchema#token>'
def test4(self):
lit = Literal("hey\nthere\ta tab\rcarriage return", datatype=XSD.token)
expected = Literal("""hey there a tab carriage return""", datatype=XSD.string)
assert str(lit) == str(expected)
def test_whitespace_is_collapsed_and_trailing_whitespace_is_stripped(self):
lit = Literal(
"\n hey - white space is collapsed for xsd:token and preceding and trailing whitespace is stripped ",
datatype=XSD.token,
)
expected = Literal(
"hey - white space is collapsed for xsd:token and preceding and trailing whitespace is stripped",
datatype=XSD.string,
)
assert str(lit) == str(expected)
| StarcoderdataPython |
3282507 | # coding: utf-8
__author__ = 'cleardusk'
import sys
sys.path.append('..')
import cv2
import numpy as np
import os.path as osp
from Sim3DR import rasterize, rasterize_adv
from _3DDFA_V2.utils.functions import plot_image
from _3DDFA_V2.utils.io import _load, _dump
from _3DDFA_V2.utils.tddfa_util import _to_ctype
make_abs_path = lambda fn: osp.join(osp.dirname(osp.realpath(__file__)), fn)
def calc_ncc_code():
from bfm import bfm
# formula: ncc_d = ( u_d - min(u_d) ) / ( max(u_d) - min(u_d) ), d = {r, g, b}
u = bfm.u
u = u.reshape(3, -1, order='F')
for i in range(3):
u[i] = (u[i] - u[i].min()) / (u[i].max() - u[i].min())
_dump('../configs/ncc_code.npy', u)
def pncc(img, ver_lst, tri, show_flag=False, wfp=None, with_bg_flag=True):
ncc_code = _load(make_abs_path('../configs/ncc_code.npy'))
if with_bg_flag:
overlap = img.copy()
else:
overlap = np.zeros_like(img)
# rendering pncc
for ver_ in ver_lst:
ver = _to_ctype(ver_.T) # transpose
#(3, 38365) ncc_code
_, _,ref_triangles,buffer= rasterize_adv(ver, tri, ncc_code.T, bg=overlap, alpha=0.5) # m x 3
buffer=buffer-0.1
# print(np.sum(ref_triangles<0))
# print(ver[:,2])
overlap, used_area,ref_triangles,buffer= rasterize_adv(ver, tri, ncc_code.T, bg=overlap, alpha=0.5,buffer=buffer) # m x 3
print(np.sum(ref_triangles>0))
#(112, 112, 3)
#print(np.shape(used_area))
overlap=overlap.astype(np.uint8)
#print(np.sum(used_area>0)/3)
if wfp is not None:
cv2.imwrite(wfp, overlap)
print(f'Save visualization result to {wfp}')
if show_flag:
plot_image(overlap)
return overlap
def main():
# `configs/ncc_code.npy` is generated by `calc_nnc_code` function
# calc_ncc_code()
pass
if __name__ == '__main__':
main()
| StarcoderdataPython |
3203640 | # This algorithm searches for the way to move disks from one tower to another under a single rule:
# you can't put a big disk on top of a smaller one.
# Input: height, initTower, middleTower, finalTower
# Output: initTower, middleTower, finalTower
# All three towers are returned to show that every disk was moved from initTower to finalTower.
def hanoi(height):
    initTower = list(range(height, 0, -1))  # disks stacked largest (bottom) to smallest (top)
    finalTower = []
    middleTower = []
def hanoiAux(height, initTower, middleTower, finalTower):
if height > 0:
hanoiAux(height - 1, initTower, finalTower, middleTower)
finalTower.append(initTower.pop())
hanoiAux(height - 1, middleTower, initTower, finalTower)
return initTower, middleTower, finalTower
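    # Added note: hanoi(3), for example, returns ([], [], [3, 2, 1]);
    # every disk ends up on finalTower with the largest at the bottom.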
return hanoiAux(height,initTower,middleTower,finalTower) | StarcoderdataPython |
182336 | <filename>src/local_run.py
__author__ = 'david'
from davesite.app.factory import create_app
app = create_app()
app.run("", 8080) | StarcoderdataPython |
1745900 | <reponame>kingformatty/E2E-NLG-Project
import os
import logging
import numpy as np
import torch
import torch.nn as nn
from components.utils.visualize import plot_train_progress, plot_lcurve
from components.data.common import cuda_if_gpu
from torch.autograd import Variable
from components.trainer import BaseTrainer
logger = logging.getLogger('experiment')
class E2EMLPTrainer(BaseTrainer):
def set_train_criterion(self, vocab_size, pad_id, nos_option):
"""
NMT Criterion from: https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/Loss.py
:return:
"""
weight = torch.ones(vocab_size)
weight[pad_id] = 0
# self.criterion = nn.NLLLoss(weight, size_average=True)
self.criterion = nn.NLLLoss(weight, reduction="mean")
if self.use_cuda:
self.criterion = self.criterion.cuda()
def train_step(self, model, datum):
datum = [cuda_if_gpu(Variable(torch.LongTensor(t)).transpose(0, 1))
for t in datum] # [SL x B, TL x B]
logits = model.forward(datum) # TL x B x TV
loss_var = self.calc_loss(logits, datum, model.nos_option) # have to compute log_logits, since using NLL loss
return loss_var
def calc_loss(self, logits, datum, nos_option):
batch_y_var = datum[1]
vocab_size = logits.size()[-1]
logits = logits.contiguous().view(-1, vocab_size)
if nos_option == 2:
nos_num = (batch_y_var == 41).sum(dim = 0)
nos_input = nos_num + vocab_size - 7
targets = torch.cat([nos_input.unsqueeze(0), batch_y_var], dim=0).contiguous().view(-1,1).squeeze(1)
loss = self.criterion(logits, targets)
else:
targets = batch_y_var.contiguous().view(-1,1).squeeze(1)
loss = self.criterion(logits, targets)
return loss
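    # Shape note (added): `logits` arrives as (target_len, batch, vocab) and is flattened to
    # (target_len * batch, vocab); the gold targets are flattened to (target_len * batch,)
    # so both line up for NLLLoss.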
def plot_lcurve(self):
fig_fname = os.path.join(self.model_dir, "lcurve.pdf")
title = self.config['modeltype']
plot_lcurve(self.train_losses, self.dev_losses, img_title=title, save_path=fig_fname, show=False)
def plot_training_results(self):
losses = np.asarray([self.train_losses, self.dev_losses]).transpose()
plot_train_progress(scores=(losses,
self.bleu_scores,
self.nist_scores,
self.cider_scores,
self.rouge_scores,
self.meteor_scores),
names=self.get_plot_names(),
img_title=self.config['modeltype'],
save_path=os.path.join(self.model_dir, "lcurve_scores.pdf"),
show=False)
def get_plot_names(self):
return [['TrainLoss', 'DevLoss'], 'BLEU', 'NIST', 'CIDEr', 'ROUGE_L', 'METEOR']
component = E2EMLPTrainer
| StarcoderdataPython |
190889 | <gh_stars>0
from urllib import request
class Request:
def __init__(self, url):
self.url = url
class Client:
def __init__(self, req):
self.req = req
def get(self):
source = request.urlopen(self.req.url)
data = source.read()
return data | StarcoderdataPython |
138988 | <gh_stars>0
import os
from setuptools import setup
import crudbuilder
# Allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='django-crudbuilder',
version=crudbuilder.VERSION,
packages=['crudbuilder'],
include_package_data=True,
license='BSD License',
description='A simple Django CRUD builder',
url='https://github.com/asifpy/django-crudbuilder',
author='<NAME>',
author_email='<EMAIL>',
long_description=read('README.md'),
install_requires=[
'django_tables2',
],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.9',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content'
]
)
| StarcoderdataPython |
99266 | import sys, configparser
from math import floor
from fractions import Fraction
from .matrixOp import frange
from collections import OrderedDict
# NOTE File path starts where main.py executes
config = configparser.ConfigParser()
filePath = 'config.ini'
# Reads config file and returns variables
# TODO enforce input types here
def readConfig(typ='DEFAULT'):
global config
config.read(filePath)
configType = config[typ]
return typ, OrderedDict([('LambdaStartValue',float(configType['LambdaStartValue'])),
('LambdaEndValue',float(configType['LambdaEndValue'])),
('LambdaIncrValue',float(configType['LambdaIncrValue'])),
('CSVFile',configType['CSVFile']),
('Labels',configType['Labels']),
('OneHot',configType['OneHot']),
('Skip',configType['Skip']),
('RandomSeed',int(configType['randomSeed'])),
('SampleSize',float(configType['SampleSize'])),
('RatioTrainData',float(-1 if configType['RatioTrainData'] == '' else Fraction(configType['RatioTrainData']))),
('RatioValidData',float(-1 if configType['RatioValidData'] == '' else Fraction(configType['RatioValidData']))),
('Mode',int(configType['Mode'])),
('Models',configType['Models']),
('LogFile',configType['LogFile'])])
# sets the configuration variables for the run
# Input: User input string of the configuration name
# Output: OrderedDict of config variables
def setConfig():
return readConfig(sys.argv[1])
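# Illustrative config.ini sketch (added; key names follow readConfig above, values are placeholders):
#   [DEFAULT]
#   LambdaStartValue = 0.0
#   LambdaEndValue = 1.0
#   LambdaIncrValue = 0.1
#   CSVFile = data.csv
#   Labels = label_column
#   OneHot = yes
#   Skip =
#   randomSeed = 42
#   SampleSize = 1.0
#   RatioTrainData = 3/4
#   RatioValidData = 1/4
#   Mode = 1
#   Models = model_a,model_b
#   LogFile = run.log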
| StarcoderdataPython |
3391494 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from context import bbfsa
#read csv file
spectra = pd.read_csv('./tests/workflow/input/700cow.csv', delimiter=',', names= ['wn', 'ab'])
# cristallinity index
s = bbfsa.slice(spectra,700,400) #slice for baseline
b = bbfsa.baseline(s) #baseline
s2 = bbfsa.slice(b,620,530) #slice for peak picking
pp = bbfsa.peakpickMin(s2)  # needs test if 2 values
pn = bbfsa.peakpickMin(s2)  # needs test if 1 value
# nearest peak: absorbance of the picked minimum whose wavenumber is closest to 500
NV = pp.loc[(pp['wn'] - 500).abs().idxmin(), 'ab']
print (NV)
plotmeX = s2.wn
plotmeY = s2.ab
plt.plot(plotmeX, plotmeY)
plt.plot(pp.wn, pp.ab,'ro')
plt.xlim(max(plotmeX)+100, min(plotmeX)-100)
plt.ylim(min(plotmeY), max(plotmeY)+0.1)
#plt.show()
| StarcoderdataPython |
1747627 | from machine import Pin
import utime
button = Pin(16, Pin.IN, Pin.PULL_UP)
while True:
b1 = button.value()
if not b1:
print('Button pressed!')
utime.sleep(0.5)
| StarcoderdataPython |
5689 | <gh_stars>0
from __future__ import absolute_import
import six
from sentry.utils.safe import get_path, trim
from sentry.utils.strings import truncatechars
from .base import BaseEvent
def get_crash_location(exception, platform=None):
default = None
for frame in reversed(get_path(exception, 'stacktrace', 'frames', filter=True) or ()):
fn = frame.get('filename') or frame.get('abs_path')
if fn:
func = frame.get('function')
if func is not None:
from sentry.interfaces.stacktrace import trim_function_name
func = trim_function_name(func, frame.get('platform') or platform)
if frame.get('in_app'):
return fn, func
if default is None:
default = fn, func
return default
class ErrorEvent(BaseEvent):
key = 'error'
def has_metadata(self, data):
exception = get_path(data, 'exception', 'values', -1)
return exception and any(v is not None for v in six.itervalues(exception))
def get_metadata(self, data):
exception = get_path(data, 'exception', 'values', -1)
if not exception:
return {}
loc = get_crash_location(exception, data.get('platform'))
rv = {
'value': trim(get_path(exception, 'value', default=''), 1024),
}
# If the exception mechanism indicates a synthetic exception we do not
# want to record the type and value into the metadata.
if not get_path(exception, 'mechanism', 'synthetic'):
rv['type'] = trim(get_path(exception, 'type', default='Error'), 128)
# Attach crash location if available
if loc is not None:
fn, func = loc
if fn:
rv['filename'] = fn
if func:
rv['function'] = func
return rv
def get_title(self, metadata):
ty = metadata.get('type')
if ty is None:
return metadata.get('function') or '<unknown>'
if not metadata.get('value'):
return ty
return u'{}: {}'.format(
ty,
truncatechars(metadata['value'].splitlines()[0], 100),
)
def get_location(self, metadata):
return metadata.get('filename')
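# Illustrative example (added, not part of the original module): for metadata such as
#   {'type': 'ValueError', 'value': 'bad input\nmore detail', 'filename': 'app/views.py'}
# get_title() yields "ValueError: bad input" and get_location() yields "app/views.py".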
| StarcoderdataPython |
194791 | <reponame>cgarciae/tf-interface
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __coconut_hash__ = 0xaa0f5a8b
# Compiled with Coconut version 1.2.3-post_dev1 [Colonel]
# Coconut Header: --------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
import sys as _coconut_sys, os.path as _coconut_os_path
_coconut_file_path = _coconut_os_path.dirname(_coconut_os_path.abspath(__file__))
_coconut_sys.path.insert(0, _coconut_file_path)
from __coconut__ import _coconut, _coconut_MatchError, _coconut_tail_call, _coconut_tco, _coconut_igetitem, _coconut_compose, _coconut_pipe, _coconut_starpipe, _coconut_backpipe, _coconut_backstarpipe, _coconut_bool_and, _coconut_bool_or, _coconut_minus, _coconut_map, _coconut_partial
from __coconut__ import *
_coconut_sys.path.remove(_coconut_file_path)
# Compiled Coconut: ------------------------------------------------------
| StarcoderdataPython |
3227639 | # Licensed Materials - Property of IBM
# Copyright IBM Corp. 2017
import unittest
import sys
import itertools
import time
import os
from streamsx.topology.topology import *
from streamsx.topology.tester import Tester
from streamsx.topology import schema
import streamsx.topology.context
import streamsx.spl.op as op
class TestPending(unittest.TestCase):
_multiprocess_can_split_ = True
""" Test pending connections.
"""
def setUp(self):
Tester.setup_distributed(self)
def test_simple_map(self):
"""Test pending connection simple case.
"""
data = ['A1','B1', 'A2', 'A3', 'C1', 'C1']
expected = [ e + "PC" for e in data ]
topo = Topology()
pending = PendingStream(topo)
ap = pending.stream.map(lambda s : s + "PC")
self.assertFalse(pending.is_complete())
pending.complete(topo.source(data))
self.assertTrue(pending.is_complete())
tester = Tester(topo)
tester.contents(ap, expected)
tester.test(self.test_ctxtype, self.test_config)
def test_simple_filter(self):
"""Test pending connection simple case.
"""
data = ['A1','B1', 'A2', 'A3', 'C1', 'C1']
expected = ['A3']
topo = Topology()
pending = PendingStream(topo)
ap = pending.stream.filter(lambda s : s.startswith('A'))
ap = ap.filter(lambda s : s.endswith('3'))
self.assertFalse(pending.is_complete())
pending.complete(topo.source(data))
self.assertTrue(pending.is_complete())
tester = Tester(topo)
tester.contents(ap, expected)
tester.test(self.test_ctxtype, self.test_config)
def test_fan_in_out(self):
"""Test pending connection fan in/out.
"""
data1 = ['A1','B1', 'A2', 'A3', 'C1', 'C1']
data2 = ['X','Y', 'Z', 'Q', 'T', 'X']
all_data = data1 + data2
expected_pc = [ e + "PC" for e in all_data ]
expected_cp = [ "CP" + e for e in all_data ]
expected_su = [ "SU" + e + "US" for e in all_data ]
topo = Topology()
pending = PendingStream(topo)
apc = pending.stream.map(lambda s : s + "PC")
acp = pending.stream.map(lambda s : 'CP' + s)
self.assertFalse(pending.is_complete())
s1 = topo.source(data1)
s2 = topo.source(data2)
su = s1.union({s2})
asu = su.map(lambda s : 'SU' + s + 'US')
pending.complete(su)
self.assertTrue(pending.is_complete())
tester = Tester(topo)
tester.contents(apc, expected_pc, ordered=False)
tester.contents(acp, expected_cp, ordered=False)
tester.contents(asu, expected_su, ordered=False)
tester.test(self.test_ctxtype, self.test_config)
def test_feedback_loop(self):
topo = Topology()
data = ['A','B', 'A', 'A', 'X', 'C', 'C', 'D', 'A', 'A', 'E']
expected = ['B', 'X', 'C', 'C', 'D', 'A', 'A', 'E']
s = topo.source(data)
s = s.filter(lambda t : time.sleep(1) or True).as_string();
feedback = PendingStream(topo)
df = op.Invoke(topo, 'spl.utility::DynamicFilter',
inputs = [s, feedback.stream],
schemas= [schema.CommonSchema.String])
df.params['key'] = df.attribute(s, 'string')
df.params['addKey'] = df.attribute(feedback.stream, 'string')
delayed_out = op.Map('spl.utility::Delay', df.outputs[0], params={'delay': 0.05}).stream
x = delayed_out.filter(lambda s : s == 'X').map(lambda s : 'A').as_string()
i = topo.source(['B', 'X', 'C', 'D', 'E']).as_string()
x = x.union({i})
feedback.complete(x)
result = delayed_out
result.print()
#streamsx.topology.context.submit('TOOLKIT', topo)
tester = Tester(topo)
tester.contents(result, expected)
tester.test(self.test_ctxtype, self.test_config)
class TestSasPending(TestPending):
def setUp(self):
Tester.setup_streaming_analytics(self, force_remote_build=True)
class TestPendingCompileOnly(unittest.TestCase):
@unittest.skipIf("STREAMS_INSTALL" not in os.environ, "STREAMS_INSTALL not set")
def test_pure_loop(self):
topo = Topology()
feedback = PendingStream(topo)
df = op.Map('spl.utility::Custom',
feedback.stream,
schema=schema.CommonSchema.String)
delayed_out = op.Map('spl.utility::Delay', df.stream, params={'delay': 0.05}).stream
feedback.complete(delayed_out)
sr = streamsx.topology.context.submit('BUNDLE', topo)
self.assertEqual(0, sr['return_code'])
os.remove(sr.bundlePath)
| StarcoderdataPython |
152418 | <filename>server/athenian/api/models/web/pull_request_event.py
from athenian.api.models.web.base_model_ import Enum, Model
class PullRequestEvent(Model, metaclass=Enum):
"""PR's modelled lifecycle events."""
CREATED = "created"
COMMITTED = "committed"
REVIEW_REQUESTED = "review_requested"
REVIEWED = "reviewed"
APPROVED = "approved"
CHANGES_REQUESTED = "changes_requested"
MERGED = "merged"
RELEASED = "released"
REJECTED = "rejected"
DEPLOYED = "deployed"
| StarcoderdataPython |
4818088 | <reponame>tao-shen/FedGraph
import torch.nn as nn
from dgl.nn.pytorch import GraphConv
import torch.nn.functional as F
import dgl.function as f
class MLP(nn.Module):
def __init__(self, in_feats, n_hidden, num_classes, n_layers, dropout):
super(MLP, self).__init__()
self.activation = F.relu
self.layers = nn.ModuleList()
self.layers.append(nn.Linear(in_feats, n_hidden))
for i in range(n_layers - 1):
self.layers.append(nn.Linear(n_hidden, n_hidden))
self.layers.append(nn.Linear(n_hidden, num_classes))
self.dropout = nn.Dropout(p=dropout)
def forward(self, none, h):
for i, layer in enumerate(self.layers):
if i != 0:
h = self.dropout(h)
h = self.activation(layer(h))
return h
class DotProductPredictor(nn.Module):
def forward(self, graph, h):
# h contains the node representations computed from the GNN defined
# in the node classification section (Section 5.1).
with graph.local_scope():
graph.ndata['h'] = h
graph.apply_edges(f.u_dot_v('h', 'h', 'score'))
return graph.edata['score']
class GCN(nn.Module):
def __init__(self,
in_feats,
n_hidden,
num_classes,
n_layers,
dropout):
super(GCN, self).__init__()
# self.g = g
self.layers = nn.ModuleList()
self.activation = F.relu
# input layer
self.layers.append(
GraphConv(in_feats, n_hidden, activation=self.activation, allow_zero_in_degree=True))
# hidden layers
for i in range(n_layers - 1):
self.layers.append(
GraphConv(n_hidden, n_hidden, activation=self.activation, allow_zero_in_degree=True))
# output layer
self.layers.append(GraphConv(n_hidden, num_classes, allow_zero_in_degree=True))
self.dropout = nn.Dropout(p=dropout)
def forward(self, g, features):
h = features
for i, layer in enumerate(self.layers):
if i != 0:
h = self.dropout(h)
h = layer(g, h)
return h
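# Minimal usage sketch (illustrative addition; graph, feature sizes and hyperparameters are arbitrary):
if __name__ == "__main__":
    import dgl
    import torch
    g = dgl.graph(([0, 1, 2], [1, 2, 0]), num_nodes=3)  # tiny directed cycle
    feats = torch.randn(3, 5)
    model = GCN(in_feats=5, n_hidden=8, num_classes=2, n_layers=1, dropout=0.1)
    print(model(g, feats).shape)  # torch.Size([3, 2])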
| StarcoderdataPython |
1786691 | <reponame>fengjixuchui/androguard
import hashlib
import os
import sys
import collections
from androguard.core.analysis.analysis import Analysis
from androguard.core.bytecodes.dvm import DalvikVMFormat, DalvikOdexVMFormat
from androguard.core.bytecodes.apk import APK
from androguard.decompiler.decompiler import DecompilerDAD
from androguard.core import androconf
import pickle
import logging
import datetime
log = logging.getLogger("androguard.session")
def Save(session, filename=None):
"""
save your session to use it later.
Returns the filename of the written file.
    If no filename is given, a file named `androguard_session_<DATE>.ag` will
be created in the current working directory.
`<DATE>` is a timestamp with the following format: `%Y-%m-%d_%H%M%S`.
This function will overwrite existing files without asking.
If the file could not written, None is returned.
example::
s = session.Session()
session.Save(s, "msession.ag")
:param session: A Session object to save
:param filename: output filename to save the session
:type filename: string
"""
if not filename:
filename = "androguard_session_{:%Y-%m-%d_%H%M%S}.ag".format(datetime.datetime.now())
if os.path.isfile(filename):
log.warning("{} already exists, overwriting!")
# Setting the recursion limit according to the documentation:
# https://docs.python.org/3/library/pickle.html#what-can-be-pickled-and-unpickled
#
# Some larger APKs require a high recursion limit.
# Tested to be above 35000 for some files, setting to 50k to be sure.
# You might want to set this even higher if you encounter problems
reclimit = sys.getrecursionlimit()
sys.setrecursionlimit(50000)
saved = False
try:
with open(filename, "wb") as fd:
pickle.dump(session, fd)
saved = True
except RecursionError:
log.exception("Recursion Limit hit while saving. "
"Current Recursion limit: {}. "
"Please report this error!".format(sys.getrecursionlimit()))
# Remove partially written file
os.unlink(filename)
sys.setrecursionlimit(reclimit)
return filename if saved else None
def Load(filename):
"""
load your session!
example::
s = session.Load("mysession.ag")
:param filename: the filename where the session has been saved
:type filename: string
:rtype: the elements of your session :)
"""
with open(filename, "rb") as fd:
return pickle.load(fd)
class Session:
"""
A Session is able to store multiple APK, DEX or ODEX files and can be pickled
to disk in order to resume work later.
The main function used in Sessions is probably :meth:`add`, which adds files
to the session and performs analysis on them.
Afterwards, the files can be gathered using methods such as
:meth:`get_objects_apk`, :meth:`get_objects_dex` or :meth:`get_classes`.
example::
s = Session()
digest = s.add("some.apk")
print("SHA256 of the file: {}".format(digest))
a, d, dx = s.get_objects_apk("some.apk", digest)
print(a.get_package())
# Reset the Session for a fresh set of files
s.reset()
digest2 = s.add("classes.dex")
print("SHA256 of the file: {}".format(digest2))
for h, d, dx in s.get_objects_dex():
print("SHA256 of the DEX file: {}".format(h))
"""
def __init__(self, export_ipython=False):
"""
Create a new Session object
:param export_ipython: set to True in order to create attributes for the
use in iPython
"""
self._setup_objects()
self.export_ipython = export_ipython
def save(self, filename=None):
"""
Save the current session, see also :func:`~androguard.session.Save`.
"""
return Save(self, filename)
def _setup_objects(self):
self.analyzed_files = collections.defaultdict(list)
self.analyzed_digest = dict()
self.analyzed_apk = dict()
# Stores Analysis Objects
# needs to be ordered to return the outermost element when searching for
# classes
self.analyzed_vms = collections.OrderedDict()
# Dict of digest and DalvikVMFormat/DalvikOdexFormat
# Actually not needed, as we have Analysis objects which store the DEX
# files as well, but we do not remove it here for legacy reasons
self.analyzed_dex = dict()
def reset(self):
"""
Reset the current session, delete all added files.
"""
self._setup_objects()
def isOpen(self):
"""
Test if any file was analyzed in this session
:return: `True` if any file was analyzed, `False` otherwise
"""
return len(self.analyzed_digest) > 0
def show(self):
"""
Print information to stdout about the current session.
Gets all APKs, all DEX files and all Analysis objects.
"""
print("APKs in Session: {}".format(len(self.analyzed_apk)))
for d, a in self.analyzed_apk.items():
print("\t{}: {}".format(d, a))
print("DEXs in Session: {}".format(len(self.analyzed_dex)))
for d, dex in self.analyzed_dex.items():
print("\t{}: {}".format(d, dex))
print("Analysis in Session: {}".format(len(self.analyzed_vms)))
for d, a in self.analyzed_vms.items():
print("\t{}: {}".format(d, a))
def addAPK(self, filename, data):
"""
Add an APK file to the Session and run analysis on it.
:param filename: (file)name of APK file
:param data: binary data of the APK file
:return: a tuple of SHA256 Checksum and APK Object
"""
digest = hashlib.sha256(data).hexdigest()
log.debug("add APK:%s" % digest)
apk = APK(data, True)
self.analyzed_apk[digest] = [apk]
self.analyzed_files[filename].append(digest)
self.analyzed_digest[digest] = filename
dx = Analysis()
self.analyzed_vms[digest] = dx
for dex in apk.get_all_dex():
# we throw away the output... FIXME?
self.addDEX(filename, dex, dx, postpone_xref=True)
# Postponed
dx.create_xref()
log.debug("added APK:%s" % digest)
return digest, apk
def addDEX(self, filename, data, dx=None, postpone_xref=False):
"""
Add a DEX file to the Session and run analysis.
:param filename: the (file)name of the DEX file
:param data: binary data of the dex file
:param dx: an existing Analysis Object (optional)
:param postpone_xref: True if no xref shall be created, and will be called manually
:return: A tuple of SHA256 Hash, DalvikVMFormat Object and Analysis object
"""
digest = hashlib.sha256(data).hexdigest()
log.debug("add DEX:%s" % digest)
log.debug("Parsing format ...")
d = DalvikVMFormat(data)
log.debug("added DEX:%s" % digest)
self.analyzed_files[filename].append(digest)
self.analyzed_digest[digest] = filename
self.analyzed_dex[digest] = d
if dx is None:
dx = Analysis()
dx.add(d)
if not postpone_xref:
dx.create_xref()
        # TODO: If multidex: this will be called many times per dex, even if already set
for d in dx.vms:
# TODO: allow different decompiler here!
d.set_decompiler(DecompilerDAD(d, dx))
d.set_vmanalysis(dx)
self.analyzed_vms[digest] = dx
if self.export_ipython:
log.debug("Exporting in ipython")
d.create_python_export()
return digest, d, dx
def addDEY(self, filename, data, dx=None):
"""
Add an ODEX file to the session and run the analysis
"""
digest = hashlib.sha256(data).hexdigest()
log.debug("add DEY:%s" % digest)
d = DalvikOdexVMFormat(data)
log.debug("added DEY:%s" % digest)
self.analyzed_files[filename].append(digest)
self.analyzed_digest[digest] = filename
self.analyzed_dex[digest] = d
if self.export_ipython:
d.create_python_export()
if dx is None:
dx = Analysis()
dx.add(d)
dx.create_xref()
for d in dx.vms:
# TODO: allow different decompiler here!
d.set_decompiler(DecompilerDAD(d, dx))
d.set_vmanalysis(dx)
self.analyzed_vms[digest] = dx
return digest, d, dx
def add(self, filename, raw_data=None, dx=None):
"""
Generic method to add a file to the session.
This is the main method to use when adding files to a Session!
If an APK file is supplied, all DEX files are analyzed too.
For DEX and ODEX files, only this file is analyzed (what else should be
analyzed).
Returns the SHA256 of the analyzed file.
:param filename: filename to load
:param raw_data: bytes of the file, or None to load the file from filename
:param dx: An already exiting :class:`~androguard.core.analysis.analysis.Analysis` object
:return: the sha256 of the file or None on failure
"""
if not raw_data:
log.debug("Loading file from '{}'".format(filename))
with open(filename, "rb") as fp:
raw_data = fp.read()
ret = androconf.is_android_raw(raw_data)
log.debug("Found filetype: '{}'".format(ret))
if not ret:
return None
if ret == "APK":
digest, _ = self.addAPK(filename, raw_data)
elif ret == "DEX":
digest, _, _ = self.addDEX(filename, raw_data, dx)
elif ret == "DEY":
digest, _, _ = self.addDEY(filename, raw_data, dx)
else:
return None
return digest
def get_classes(self):
"""
Returns all Java Classes from the DEX objects as an array of DEX files.
"""
for idx, digest in enumerate(self.analyzed_vms):
dx = self.analyzed_vms[digest]
for vm in dx.vms:
filename = self.analyzed_digest[digest]
yield idx, filename, digest, vm.get_classes()
def get_analysis(self, current_class):
"""
Returns the :class:`~androguard.core.analysis.analysis.Analysis` object
which contains the `current_class`.
:param current_class: The class to search for
:type current_class: androguard.core.bytecodes.dvm.ClassDefItem
:rtype: androguard.core.analysis.analysis.Analysis
"""
for digest in self.analyzed_vms:
dx = self.analyzed_vms[digest]
if dx.is_class_present(current_class.get_name()):
return dx
return None
def get_format(self, current_class):
"""
Returns the :class:`~androguard.core.bytecodes.dvm.DalvikVMFormat` of a
given :class:`~androguard.core.bytecodes.dvm.ClassDefItem`.
:param current_class: A ClassDefItem
"""
return current_class.CM.vm
def get_filename_by_class(self, current_class):
"""
Returns the filename of the DEX file where the class is in.
Returns the first filename this class was present.
For example, if you analyzed an APK, this should return the filename of
the APK and not of the DEX file.
:param current_class: ClassDefItem
:returns: None if class was not found or the filename
"""
for digest, dx in self.analyzed_vms.items():
if dx.is_class_present(current_class.get_name()):
return self.analyzed_digest[digest]
return None
def get_digest_by_class(self, current_class):
"""
Return the SHA256 hash of the object containing the ClassDefItem
Returns the first digest this class was present.
For example, if you analyzed an APK, this should return the digest of
the APK and not of the DEX file.
"""
for digest, dx in self.analyzed_vms.items():
if dx.is_class_present(current_class.get_name()):
return digest
return None
def get_strings(self):
"""
Yields all StringAnalysis for all unique Analysis objects
"""
seen = []
for digest, dx in self.analyzed_vms.items():
if dx in seen:
continue
seen.append(dx)
yield digest, self.analyzed_digest[digest], dx.get_strings_analysis()
def get_nb_strings(self):
"""
Return the total number of strings in all Analysis objects
"""
nb = 0
seen = []
for digest, dx in self.analyzed_vms.items():
if dx in seen:
continue
seen.append(dx)
nb += len(dx.get_strings_analysis())
return nb
def get_all_apks(self):
"""
Yields a list of tuples of SHA256 hash of the APK and APK objects
of all analyzed APKs in the Session.
"""
for digest, a in self.analyzed_apk.items():
yield digest, a
def get_objects_apk(self, filename=None, digest=None):
"""
Returns APK, DalvikVMFormat and Analysis of a specified APK.
You must specify either `filename` or `digest`.
It is possible to use both, but in this case only `digest` is used.
example::
s = Session()
digest = s.add("some.apk")
a, d, dx = s.get_objects_apk(digest=digest)
example::
s = Session()
filename = "some.apk"
digest = s.add(filename)
a, d, dx = s.get_objects_apk(filename=filename)
:param filename: the filename of the APK file, only used of digest is None
:param digest: the sha256 hash, as returned by :meth:`add` for the APK
:returns: a tuple of (APK, [DalvikVMFormat], Analysis)
"""
if not filename and not digest:
raise ValueError("Must give at least filename or digest!")
if digest is None:
digests = self.analyzed_files.get(filename)
# Negate to reduce tree
if not digests:
return None, None, None
digest = digests[0]
a = self.analyzed_apk[digest][0]
dx = self.analyzed_vms[digest]
return a, dx.vms, dx
def get_objects_dex(self):
"""
Yields all dex objects inclduing their Analysis objects
:returns: tuple of (sha256, DalvikVMFormat, Analysis)
"""
# TODO: there is no variant like get_objects_apk
for digest, d in self.analyzed_dex.items():
yield digest, d, self.analyzed_vms[digest]
| StarcoderdataPython |
1607661 | <gh_stars>100-1000
from __future__ import absolute_import
import logging
import numpy as np
from .import utils
from .import sampling
from sklearn.preprocessing import MultiLabelBinarizer, LabelBinarizer
from sklearn.model_selection import StratifiedShuffleSplit
logger = logging.getLogger(__name__)
class Dataset(object):
def __init__(self, inputs, labels, test_indices=None, **kwargs):
"""Encapsulates all pieces of data to run an experiment. This is basically a bag of items that makes it
easy to serialize and deserialize everything as a unit.
Args:
inputs: The raw model inputs. This can be set to None if you dont want
to serialize this value when you save the dataset.
labels: The raw output labels.
test_indices: The optional test indices to use. Ideally, this should be generated one time and reused
across experiments to make results comparable. `generate_test_indices` can be used generate first
time indices.
**kwargs: Additional key value items to store.
"""
self.X = np.array(inputs)
self.y = np.array(labels)
for key, value in kwargs.items():
setattr(self, key, value)
self._test_indices = None
self._train_indices = None
self.test_indices = test_indices
self.is_multi_label = isinstance(labels[0], (set, list, tuple))
self.label_encoder = MultiLabelBinarizer() if self.is_multi_label else LabelBinarizer()
self.y = self.label_encoder.fit_transform(self.y).flatten()
def update_test_indices(self, test_size=0.1):
"""Updates `test_indices` property with indices of `test_size` proportion.
Args:
test_size: The test proportion in [0, 1] (Default value: 0.1)
"""
if self.is_multi_label:
self._train_indices, self._test_indices = sampling.multi_label_train_test_split(self.y, test_size)
else:
sss = StratifiedShuffleSplit(n_splits=1, test_size=test_size)
self._train_indices, self._test_indices = next(sss.split(self.X, self.y))
def save(self, file_path):
"""Serializes this dataset to a file.
Args:
file_path: The file path to use.
"""
utils.dump(self, file_path)
def train_val_split(self, split_ratio=0.1):
"""Generates train and validation sets from the training indices.
Args:
split_ratio: The split proportion in [0, 1] (Default value: 0.1)
Returns:
The stratified train and val subsets. Multi-label outputs are handled as well.
"""
if self.is_multi_label:
train_indices, val_indices = sampling.multi_label_train_test_split(self.y, split_ratio)
else:
sss = StratifiedShuffleSplit(n_splits=1, test_size=split_ratio)
train_indices, val_indices = next(sss.split(self.X, self.y))
return self.X[train_indices], self.X[val_indices], self.y[train_indices], self.y[val_indices]
@staticmethod
def load(file_path):
"""Loads the dataset from a file.
Args:
file_path: The file path to use.
Returns:
The `Dataset` instance.
"""
return utils.load(file_path)
@property
def test_indices(self):
return self._test_indices
@test_indices.setter
def test_indices(self, test_indices):
if test_indices is None:
self._train_indices = np.arange(0, len(self.y))
else:
self._test_indices = test_indices
self._train_indices = np.setdiff1d(np.arange(0, len(self.y)), self.test_indices)
@property
def train_indices(self):
return self._train_indices
@property
def labels(self):
return self.label_encoder.classes_
@property
def num_classes(self):
if len(self.y.shape) == 1:
return 1
else:
return len(self.labels)
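# Illustrative usage sketch (added; not directly runnable from this file because of the
# relative imports above -- names and data below are placeholders):
#   ds = Dataset(['good', 'fine', 'bad', 'poor'], ['pos', 'pos', 'neg', 'neg'])
#   ds.update_test_indices(test_size=0.5)
#   X_train, X_val, y_train, y_val = ds.train_val_split(split_ratio=0.5)
#   ds.save('dataset.bin')  # later: Dataset.load('dataset.bin')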
| StarcoderdataPython |
3209885 | # -*- coding: utf-8 -*-
from six import text_type
from typing import Dict, Union
from zerver.lib.test_classes import WebhookTestCase
class BitbucketHookTests(WebhookTestCase):
STREAM_NAME = 'bitbucket'
URL_TEMPLATE = "/api/v1/external/bitbucket?payload={payload}&stream={stream}"
FIXTURE_DIR_NAME = 'bitbucket'
EXPECTED_SUBJECT = u"Repository name"
EXPECTED_SUBJECT_BRANCH_EVENTS = u"Repository name / master"
def test_bitbucket_on_push_event(self):
# type: () -> None
fixture_name = 'push'
self.url = self.build_url(fixture_name)
commit_info = u'* [25f93d2](https://bitbucket.org/kolaszek/repository-name/commits/25f93d22b719e2d678a7ad5ee0ef0d1fcdf39c12): c'
expected_message = u"kolaszek pushed to branch master\n\n{}".format(commit_info)
self.send_and_test_stream_message(fixture_name, self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, **self.api_auth(self.TEST_USER_EMAIL))
def test_bitbucket_on_push_commits_above_limit_event(self):
# type: () -> None
fixture_name = 'push_commits_above_limit'
self.url = self.build_url(fixture_name)
commit_info = u'* [25f93d2](https://bitbucket.org/kolaszek/repository-name/commits/25f93d22b719e2d678a7ad5ee0ef0d1fcdf39c12): c\n'
expected_message = u"kolaszek pushed to branch master\n\n{}[and 40 more commit(s)]".format(commit_info * 10)
self.send_and_test_stream_message(fixture_name, self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, **self.api_auth(self.TEST_USER_EMAIL))
def test_bitbucket_on_force_push_event(self):
# type: () -> None
fixture_name = 'force_push'
self.url = self.build_url(fixture_name)
expected_message = u"kolaszek [force pushed](https://bitbucket.org/kolaszek/repository-name)"
self.send_and_test_stream_message(fixture_name, self.EXPECTED_SUBJECT, expected_message, **self.api_auth(self.TEST_USER_EMAIL))
def get_body(self, fixture_name):
# type: (text_type) -> Union[text_type, Dict[str, text_type]]
return {}
def get_payload(self, fixture_name):
# type: (text_type) -> Union[text_type, Dict[str, text_type]]
return self.fixture_data(self.FIXTURE_DIR_NAME, fixture_name)
def build_webhook_url(self):
# type: () -> text_type
return ''
def build_url(self, fixture_name):
# type: (text_type) -> text_type
return self.URL_TEMPLATE.format(payload=self.get_payload(fixture_name), stream=self.STREAM_NAME)
| StarcoderdataPython |
1612278 | <gh_stars>1-10
import logging
logging.basicConfig(filename='test_deleteme.log', filemode='w', level=logging.DEBUG)
"""
Acquire data for training a Machine-Learning algorithm
"""
import pandas as pd
from .ArXivTrainingData import ArXivTrainingData
from .CrossRefTrainingData import CrossRefTrainingData
from .TrainingRow import TrainingRow
from .config import Config as config
from .CleanTrainingData import CleanTrainingData
import os
import sys
import logging
logger = logging.getLogger(__name__)
class TrainingData():
"""
Given that we have CrossRef and ArXiv API data, combine
to make a training dataset.
"""
def knit_training_dataframe(self, arx_training_data, cr_training_data):
training_data = []
logger.info('ArXiv training data, shape:{}'.format(arx_training_data.shape))
logger.info('CrossRef training data, length {}'.format(len(cr_training_data)))
# now splice the arxiv data AND the CrossRef search results
# into 1 dataframe
k=0
for i,row in arx_training_data.iterrows():
# doi = row['doi']
pid = row['id']
works_records = cr_training_data.get(pid,None)
            if works_records is not None:
query_article = dict(row)
for works_record in works_records:
# print('DETAILS:',details)
# print('Match article:',match_article)
training_row = TrainingRow(works_record=works_record,
query_article=query_article
).to_dict()
training_data.append(training_row)
k+=1
if k>0 and k%1000==0:
logger.info(f'{k} rows of data knitted together.')
return pd.DataFrame(training_data)
def load_gen_training_df(self):
if os.path.exists(config.training_dataloc):
df = pd.read_csv(config.training_dataloc, error_bad_lines=False) #, dtype=str)
logger.debug('Training data found. Shape:{}'.format(df.shape))
else:
logger.debug('Training data not found. Generating from available data.')
df = self.gen_training_df()
df.to_csv(config.training_dataloc, index=False, encoding = 'utf-8-sig')
return df
def load_gen_clean_training_df(self):
if os.path.exists(config.clean_training_dataloc):
df = pd.read_csv(config.clean_training_dataloc, error_bad_lines=False) #, dtype=str)
logger.debug('Clean training data found. Shape:{}'.format(df.shape))
else:
logger.debug('Clean training data not found. Generating from available data.')
df = self.load_gen_training_df()
df = CleanTrainingData(df).clean_df
df.to_csv(config.clean_training_dataloc, index=False, encoding = 'utf-8-sig')
logger.debug('Clean training data shape:{}'.format(df.shape))
return df
def gen_training_df(self):
logger.info('Acquiring ArXiv data.')
arx_training_data = ArXivTrainingData().oai_to_df()
# print('Acquiring CrossRef training data')
logger.info('Acquiring CrossRef training data')
cr_training_data = CrossRefTrainingData().build_json_training_data(arx_training_data)
logger.info('Done.')
return self.knit_training_dataframe(arx_training_data, cr_training_data) | StarcoderdataPython |
4809123 | # Generated by Django 3.2.4 on 2021-08-12 23:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0066_rename_patron_level_profile_patronage'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='bkzzt_topics',
field=models.TextField(blank=True, editable=False, max_length=2000),
),
migrations.AlterField(
model_name='profile',
name='closer_look_nominations',
field=models.TextField(blank=True, editable=False, max_length=2000),
),
migrations.AlterField(
model_name='profile',
name='closer_look_selections',
field=models.TextField(blank=True, editable=False, max_length=2000),
),
migrations.AlterField(
model_name='profile',
name='guest_stream_selections',
field=models.TextField(blank=True, editable=False, max_length=2000),
),
migrations.AlterField(
model_name='profile',
name='stream_poll_nominations',
field=models.TextField(blank=True, max_length=2000),
),
migrations.AlterField(
model_name='profile',
name='stream_selections',
field=models.TextField(blank=True, editable=False, max_length=2000),
),
]
| StarcoderdataPython |
1692767 | # Generated by Django 3.1.2 on 2020-11-04 08:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bills', '0007_auto_20201104_0251'),
]
operations = [
migrations.AddField(
model_name='storeunion',
name='is_credit_union',
field=models.BooleanField(db_column='isCreditUnion', default=False),
),
]
| StarcoderdataPython |
1672920 | <filename>src/sage/rings/padics/padic_extension_leaves.py
"""
p-Adic Extension Leaves
The final classes for extensions of Zp and Qp (ie classes that are not
just designed to be inherited from).
AUTHORS:
- <NAME>
"""
#*****************************************************************************
# Copyright (C) 2008 <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.rings.integer_ring import ZZ
from sage.rings.finite_rings.integer_mod_ring import Zmod
from .pow_computer_ext import PowComputer_ext_maker
from .pow_computer_flint import PowComputer_flint_maker
from sage.libs.ntl.ntl_ZZ_pX import ntl_ZZ_pX
from .unramified_extension_generic import UnramifiedExtensionGeneric
from .eisenstein_extension_generic import EisensteinExtensionGeneric
#from padic_general_extension_generic import pAdicGeneralExtensionGeneric
from .generic_nodes import pAdicCappedRelativeRingGeneric, \
pAdicCappedRelativeFieldGeneric, \
pAdicCappedAbsoluteRingGeneric, \
pAdicFixedModRingGeneric, \
pAdicFloatingPointRingGeneric, \
pAdicFloatingPointFieldGeneric
#from unramified_extension_absolute_element import UnramifiedExtensionAbsoluteElement
#from unramified_extension_capped_relative_element import UnramifiedExtensionCappedRelativeElement
#from unramified_extension_lazy_element import UnramifiedExtensionRelaxedElement
#from eisenstein_extension_absolute_element import EisensteinExtensionAbsoluteElement
#from eisenstein_extension_capped_relative_element import EisensteinExtensionCappedRelativeElement
#from eisenstein_extension_lazy_element import EisensteinExtensionRelaxedElement
#from padic_general_extension_absolute_element import pAdicGeneralExtensionAbsoluteElement
#from padic_general_extension_capped_relative_element import pAdicGeneralExtensionCappedRelativeElement
#from padic_general_extension_lazy_element import pAdicGeneralExtensionRelaxedElement
from .padic_ZZ_pX_FM_element import pAdicZZpXFMElement
from .padic_ZZ_pX_CR_element import pAdicZZpXCRElement
from .padic_ZZ_pX_CA_element import pAdicZZpXCAElement
from .qadic_flint_CR import qAdicCappedRelativeElement
from .qadic_flint_CA import qAdicCappedAbsoluteElement
from .qadic_flint_FM import qAdicFixedModElement
from .qadic_flint_FP import qAdicFloatingPointElement
def _make_integral_poly(exact_modulus, p, prec):
"""
Convert a defining polynomial into one with integral coefficients.
INPUT:
- ``exact_modulus`` -- a univariate polynomial
- ``p`` -- a prime
- ``prec`` -- the precision
EXAMPLES::
sage: from sage.rings.padics.padic_extension_leaves import _make_integral_poly
sage: R.<x> = QQ[]
sage: f = _make_integral_poly(x^2 - 2, 5, 3); f
x^2 - 2
sage: f.parent()
Univariate Polynomial Ring in x over Integer Ring
sage: f = _make_integral_poly(x^2 - 2/7, 5, 3); f
x^2 + 89
sage: f.parent()
Univariate Polynomial Ring in x over Integer Ring
"""
try:
return exact_modulus.change_ring(ZZ)
except TypeError:
return exact_modulus.change_ring(Zmod(p**prec)).change_ring(ZZ)
class UnramifiedExtensionRingCappedRelative(UnramifiedExtensionGeneric, pAdicCappedRelativeRingGeneric):
"""
TESTS::
sage: R.<a> = ZqCR(27,10000)
sage: TestSuite(R).run(skip='_test_log',max_runs=4)
"""
def __init__(self, exact_modulus, poly, prec, print_mode, shift_seed, names, implementation='FLINT'):
"""
A capped relative representation of Zq.
INPUT:
- ``exact_modulus`` -- the original polynomial defining the extension.
This could be a polynomial with integer coefficients, for example,
while ``poly`` has coefficients in a `p`-adic ring.
        - ``poly`` -- the polynomial with coefficients in :meth:`base_ring`
defining this extension
- ``prec`` -- the precision cap of this ring
- ``print_mode`` -- a dictionary of print options
- ``shift_seed`` -- unused
- ``names`` -- a 4-tuple, ``(variable_name, residue_name,
unramified_subextension_variable_name, uniformizer_name)``
EXAMPLES::
sage: R.<a> = ZqCR(27,10000); R #indirect doctest
3-adic Unramified Extension Ring in a defined by x^3 + 2*x + 1
sage: R.<a> = ZqCR(next_prime(10^30)^3, 3); R.prime()
1000000000000000000000000000057
"""
self._shift_seed = None
self._exact_modulus = exact_modulus
self._implementation = implementation
if implementation == 'NTL':
ntl_poly = ntl_ZZ_pX([a.lift() for a in poly.list()], poly.base_ring().prime()**prec)
if prec <= 30:
self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), prec, prec, prec, False, ntl_poly, "small", "u")
else:
self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), 30, prec, prec, False, ntl_poly, "big", "u")
element_class = pAdicZZpXCRElement
else:
Zpoly = _make_integral_poly(exact_modulus, poly.base_ring().prime(), prec)
cache_limit = min(prec, 30)
self.prime_pow = PowComputer_flint_maker(poly.base_ring().prime(), cache_limit, prec, prec, False, Zpoly, prec_type='capped-rel')
element_class = qAdicCappedRelativeElement
UnramifiedExtensionGeneric.__init__(self, poly, prec, print_mode, names, element_class)
if implementation != 'NTL':
from .qadic_flint_CR import pAdicCoercion_ZZ_CR, pAdicConvert_QQ_CR
self.register_coercion(pAdicCoercion_ZZ_CR(self))
self.register_conversion(pAdicConvert_QQ_CR(self))
class UnramifiedExtensionFieldCappedRelative(UnramifiedExtensionGeneric, pAdicCappedRelativeFieldGeneric):
"""
TESTS::
sage: R.<a> = QqCR(27,10000)
sage: TestSuite(R).run(skip='_test_log',max_runs=4)
"""
def __init__(self, exact_modulus, poly, prec, print_mode, shift_seed, names, implementation='FLINT'):
r"""
A representation of Qq.
INPUT:
- ``exact_modulus`` -- the original polynomial defining the extension.
This could be a polynomial with rational coefficients, for example,
while ``poly`` has coefficients in a `p`-adic field.
- ``poly`` -- the polynomial with coefficients in :meth:`base_ring`
defining this extension
- ``prec`` -- the precision cap of this ring
- ``print_mode`` -- a dictionary of print options
- ``shift_seed`` -- unused
- ``names`` -- a 4-tuple, ``(variable_name, residue_name,
unramified_subextension_variable_name, uniformizer_name)``
EXAMPLES::
sage: R.<a> = Qq(27,10000); R #indirect doctest
3-adic Unramified Extension Field in a defined by x^3 + 2*x + 1
sage: R.<a> = Qq(next_prime(10^30)^3, 3); R.prime()
1000000000000000000000000000057
"""
# Currently doesn't support polynomials with non-integral coefficients
self._shift_seed = None
self._exact_modulus = exact_modulus
self._implementation = implementation
if implementation == 'NTL':
ntl_poly = ntl_ZZ_pX([a.lift() for a in poly.list()], poly.base_ring().prime()**prec)
if prec <= 30:
self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), prec, prec, prec, True, ntl_poly, "small", "u")
else:
self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), 30, prec, prec, True, ntl_poly, "big", "u")
element_class = pAdicZZpXCRElement
else:
Zpoly = _make_integral_poly(exact_modulus, poly.base_ring().prime(), prec)
cache_limit = min(prec, 30)
self.prime_pow = PowComputer_flint_maker(poly.base_ring().prime(), cache_limit, prec, prec, True, Zpoly, prec_type='capped-rel')
element_class = qAdicCappedRelativeElement
UnramifiedExtensionGeneric.__init__(self, poly, prec, print_mode, names, element_class)
if implementation != 'NTL':
from .qadic_flint_CR import pAdicCoercion_ZZ_CR, pAdicCoercion_QQ_CR
self.register_coercion(pAdicCoercion_ZZ_CR(self))
self.register_coercion(pAdicCoercion_QQ_CR(self))
def _coerce_map_from_(self, R):
r"""
Return a coercion from ``R`` into this ring or ``True`` if the default
conversion map can be used to perform a coercion.
EXAMPLES::
sage: R.<a> = QqCR(27)
sage: R.coerce_map_from(ZqCR(27,names='a')) # indirect doctest
Ring morphism:
From: 3-adic Unramified Extension Ring in a defined by x^3 + 2*x + 1
To: 3-adic Unramified Extension Field in a defined by x^3 + 2*x + 1
sage: R.coerce_map_from(ZqCA(27,names='a')) # indirect doctest
Ring morphism:
From: 3-adic Unramified Extension Ring in a defined by x^3 + 2*x + 1
To: 3-adic Unramified Extension Field in a defined by x^3 + 2*x + 1
"""
if isinstance(R, UnramifiedExtensionRingCappedRelative) and R.fraction_field() is self:
from sage.rings.padics.qadic_flint_CR import pAdicCoercion_CR_frac_field
return pAdicCoercion_CR_frac_field(R, self)
if isinstance(R, UnramifiedExtensionRingCappedAbsolute) and R.fraction_field() is self:
from sage.rings.padics.qadic_flint_CA import pAdicCoercion_CA_frac_field
return pAdicCoercion_CA_frac_field(R, self)
return super(UnramifiedExtensionFieldCappedRelative, self)._coerce_map_from_(R)
class UnramifiedExtensionRingCappedAbsolute(UnramifiedExtensionGeneric, pAdicCappedAbsoluteRingGeneric):
"""
TESTS::
sage: R.<a> = ZqCA(27,10000)
sage: TestSuite(R).run(skip='_test_log',max_runs=4)
"""
def __init__(self, exact_modulus, poly, prec, print_mode, shift_seed, names, implementation='FLINT'):
"""
A capped absolute representation of Zq.
INPUT:
- ``exact_modulus`` -- the original polynomial defining the extension.
This could be a polynomial with integer coefficients, for example,
          while ``poly`` has coefficients in a `p`-adic ring.
- ``poly`` -- the polynomial with coefficients in :meth:`base_ring`
defining this extension
- ``prec`` -- the precision cap of this ring
- ``print_mode`` -- A dictionary of print options
- ``shift_seed`` -- unused
- ``names`` -- a 4-tuple, ``(variable_name, residue_name,
unramified_subextension_variable_name, uniformizer_name)``
EXAMPLES::
sage: R.<a> = ZqCA(27,10000); R #indirect doctest
3-adic Unramified Extension Ring in a defined by x^3 + 2*x + 1
sage: R.<a> = ZqCA(next_prime(10^30)^3, 3); R.prime()
1000000000000000000000000000057
"""
# Currently doesn't support polynomials with non-integral coefficients
self._shift_seed = None
self._exact_modulus = exact_modulus
self._implementation = implementation
if implementation == 'NTL':
ntl_poly = ntl_ZZ_pX([a.lift() for a in poly.list()], poly.base_ring().prime()**prec)
if prec <= 30:
self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), prec, prec, prec, True, ntl_poly, "small", "u")
else:
self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), 30, prec, prec, True, ntl_poly, "big", "u")
element_class = pAdicZZpXCAElement
else:
Zpoly = _make_integral_poly(exact_modulus, poly.base_ring().prime(), prec)
cache_limit = min(prec, 30)
self.prime_pow = PowComputer_flint_maker(poly.base_ring().prime(), cache_limit, prec, prec, False, Zpoly, prec_type='capped-abs')
element_class = qAdicCappedAbsoluteElement
UnramifiedExtensionGeneric.__init__(self, poly, prec, print_mode, names, element_class)
if implementation != 'NTL':
from .qadic_flint_CA import pAdicCoercion_ZZ_CA, pAdicConvert_QQ_CA
self.register_coercion(pAdicCoercion_ZZ_CA(self))
self.register_conversion(pAdicConvert_QQ_CA(self))
class UnramifiedExtensionRingFixedMod(UnramifiedExtensionGeneric, pAdicFixedModRingGeneric):
"""
TESTS::
sage: R.<a> = ZqFM(27,10000)
sage: TestSuite(R).run(skip='_test_log',max_runs=4) # long time
"""
def __init__(self, exact_modulus, poly, prec, print_mode, shift_seed, names, implementation='FLINT'):
"""
A fixed modulus representation of Zq.
INPUT:
- ``exact_modulus`` -- the original polynomial defining the extension.
This could be a polynomial with integer coefficients, for example,
while ``poly`` has coefficients in a `p`-adic field.
- ``poly`` -- the polynomial with coefficients in :meth:`base_ring`
defining this extension
- ``prec`` -- the precision cap of this ring
- ``print_mode`` -- a dictionary of print options
- ``shift_seed`` -- unused
- ``names`` -- a 4-tuple, ``(variable_name, residue_name, unramified_subextension_variable_name, uniformizer_name)``
EXAMPLES::
sage: R.<a> = ZqFM(27,10000); R #indirect doctest
3-adic Unramified Extension Ring in a defined by x^3 + 2*x + 1
sage: R.<a> = ZqFM(next_prime(10^30)^3, 3); R.prime()
1000000000000000000000000000057
"""
self._shift_seed = None
self._exact_modulus = exact_modulus
self._implementation = implementation
if implementation == 'NTL':
ntl_poly = ntl_ZZ_pX([a.lift() for a in poly.list()], poly.base_ring().prime()**prec)
self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), max(min(prec - 1, 30), 1), prec, prec, False, ntl_poly, "FM", "u")
element_class = pAdicZZpXFMElement
else:
Zpoly = _make_integral_poly(exact_modulus, poly.base_ring().prime(), prec)
cache_limit = 0 # prevents caching
self.prime_pow = PowComputer_flint_maker(poly.base_ring().prime(), cache_limit, prec, prec, False, Zpoly, prec_type='fixed-mod')
element_class = qAdicFixedModElement
UnramifiedExtensionGeneric.__init__(self, poly, prec, print_mode, names, element_class)
if implementation != 'NTL':
from .qadic_flint_FM import pAdicCoercion_ZZ_FM, pAdicConvert_QQ_FM
self.register_coercion(pAdicCoercion_ZZ_FM(self))
self.register_conversion(pAdicConvert_QQ_FM(self))
#def coerce_map_explicit(self, S):
# from sage.rings.padics.morphism import Morphism_ZZ_UnrFM, Morphism_ZpFM_UnrFM
# if S is ZZ:
# return Morphism_ZZ_UnrFM(self)
# elif isinstance(S, pAdicRingFixedMod) and S.prime() == self.prime():
# return Morphism_ZpFM_UnrFM(S, self)
# return None
class UnramifiedExtensionRingFloatingPoint(UnramifiedExtensionGeneric, pAdicFloatingPointRingGeneric):
"""
TESTS::
sage: R.<a> = ZqFP(27,10000); R == loads(dumps(R))
True
"""
def __init__(self, exact_modulus, poly, prec, print_mode, shift_seed, names, implementation='FLINT'):
"""
A floating point representation of Zq.
INPUT:
- ``exact_modulus`` -- the original polynomial defining the extension.
This could be a polynomial with integer coefficients, for example,
while ``poly`` has coefficients in Zp.
- ``poly`` -- the polynomial with coefficients in :meth:`base_ring`
defining this extension
- ``prec`` -- the precision cap of this ring
- ``print_mode`` -- a dictionary of print options
- ``shift_seed`` -- unused
- ``names`` -- a 4-tuple, ``(variable_name, residue_name, unramified_subextension_variable_name, uniformizer_name)``
EXAMPLES::
sage: R.<a> = ZqFP(27,10000); R #indirect doctest
3-adic Unramified Extension Ring in a defined by x^3 + 2*x + 1
sage: R.<a> = ZqFP(next_prime(10^30)^3, 3); R.prime()
1000000000000000000000000000057
TESTS:
Check that :trac:`23228` has been resolved::
sage: a % R.prime()
a
"""
self._shift_seed = None
self._exact_modulus = exact_modulus
self._implementation = implementation
if implementation == 'NTL':
raise NotImplementedError
Zpoly = _make_integral_poly(exact_modulus, poly.base_ring().prime(), prec)
cache_limit = min(prec, 30)
self.prime_pow = PowComputer_flint_maker(poly.base_ring().prime(), cache_limit, prec, prec, False, Zpoly, prec_type='floating-point')
UnramifiedExtensionGeneric.__init__(self, poly, prec, print_mode, names, qAdicFloatingPointElement)
from .qadic_flint_FP import pAdicCoercion_ZZ_FP, pAdicConvert_QQ_FP
self.register_coercion(pAdicCoercion_ZZ_FP(self))
self.register_conversion(pAdicConvert_QQ_FP(self))
class UnramifiedExtensionFieldFloatingPoint(UnramifiedExtensionGeneric, pAdicFloatingPointFieldGeneric):
"""
TESTS::
sage: R.<a> = QqFP(27,10000); R == loads(dumps(R))
True
"""
def __init__(self, exact_modulus, poly, prec, print_mode, shift_seed, names, implementation='FLINT'):
"""
A representation of Qq.
INPUT:
- ``exact_modulus`` -- the original polynomial defining the extension.
This could be a polynomial with rational coefficients, for example,
while ``poly`` has coefficients in a `p`-adic field.
- ``poly`` -- the polynomial with coefficients in :meth:`base_ring`
defining this extension
- ``prec`` -- the precision cap of this ring
- ``print_mode`` -- a dictionary of print options
- ``shift_seed`` -- unused
- ``names`` -- a 4-tuple, ``(variable_name, residue_name, unramified_subextension_variable_name, uniformizer_name)``
EXAMPLES::
sage: R.<a> = QqFP(27,10000); R #indirect doctest
3-adic Unramified Extension Field in a defined by x^3 + 2*x + 1
sage: R.<a> = Qq(next_prime(10^30)^3, 3); R.prime()
1000000000000000000000000000057
"""
# Currently doesn't support polynomials with non-integral coefficients
self._shift_seed = None
self._exact_modulus = exact_modulus
self._implementation = implementation
if implementation == 'NTL':
raise NotImplementedError
Zpoly = _make_integral_poly(exact_modulus, poly.base_ring().prime(), prec)
cache_limit = min(prec, 30)
self.prime_pow = PowComputer_flint_maker(poly.base_ring().prime(), cache_limit, prec, prec, True, Zpoly, prec_type='floating-point')
UnramifiedExtensionGeneric.__init__(self, poly, prec, print_mode, names, qAdicFloatingPointElement)
from .qadic_flint_FP import pAdicCoercion_ZZ_FP, pAdicCoercion_QQ_FP
self.register_coercion(pAdicCoercion_ZZ_FP(self))
self.register_coercion(pAdicCoercion_QQ_FP(self))
def _coerce_map_from_(self, R):
r"""
Return a coercion from ``R`` into this ring or ``True`` if the default
conversion map can be used to perform a coercion.
EXAMPLES::
sage: R.<a> = QqFP(27)
sage: R.coerce_map_from(ZqFP(27,names='a')) # indirect doctest
Ring morphism:
From: 3-adic Unramified Extension Ring in a defined by x^3 + 2*x + 1
To: 3-adic Unramified Extension Field in a defined by x^3 + 2*x + 1
"""
if isinstance(R, UnramifiedExtensionRingFloatingPoint) and R.fraction_field() is self:
from sage.rings.padics.qadic_flint_FP import pAdicCoercion_FP_frac_field
return pAdicCoercion_FP_frac_field(R, self)
return super(UnramifiedExtensionFieldFloatingPoint, self)._coerce_map_from_(R)
class EisensteinExtensionRingCappedRelative(EisensteinExtensionGeneric, pAdicCappedRelativeRingGeneric):
"""
TESTS::
sage: R = Zp(3, 10000, print_pos=False); S.<x> = ZZ[]; f = x^3 + 9*x - 3
sage: W.<w> = R.ext(f)
sage: TestSuite(R).run(skip='_test_log',max_runs=4)
"""
def __init__(self, exact_modulus, poly, prec, print_mode, shift_seed, names, implementation='NTL'):
"""
A capped relative representation of an eisenstein extension of Zp.
INPUT:
- ``exact_modulus`` -- the original polynomial defining the extension.
This could be a polynomial with integer coefficients, for example,
while ``poly`` has coefficients in a `p`-adic ring.
- ``poly`` -- the polynomial with coefficients in :meth:`base_ring`
defining this extension
- ``prec`` -- the precision cap of this ring
- ``print_mode`` -- a dictionary of print options
- ``shift_seed`` -- unused
- ``names`` -- a 4-tuple, ``(variable_name, residue_name, unramified_subextension_variable_name, uniformizer_name)``
EXAMPLES::
sage: R = Zp(3, 10000, print_pos=False); S.<x> = ZZ[]; f = x^3 + 9*x - 3
sage: W.<w> = R.ext(f); W #indirect doctest
3-adic Eisenstein Extension Ring in w defined by x^3 + 9*x - 3
sage: W.precision_cap()
30000
sage: R.<p> = Zp(next_prime(10^30), 3, print_pos=False); S.<x> = ZZ[]; f = x^3 + p^2*x - p
sage: W.<w> = R.ext(f); W.prime()
1000000000000000000000000000057
sage: W.precision_cap()
9
"""
unram_prec = (prec + poly.degree() - 1) // poly.degree()
ntl_poly = ntl_ZZ_pX([a.lift() for a in poly.list()], poly.base_ring().prime()**unram_prec)
shift_poly = ntl_ZZ_pX([a.lift() for a in shift_seed.list()], shift_seed.base_ring().prime()**unram_prec)
if unram_prec <= 30:
self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), unram_prec, unram_prec, prec, False, ntl_poly, "small", "e", shift_poly)
else:
self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), 30, unram_prec, prec, False, ntl_poly, "big", "e", shift_poly)
self._shift_seed = shift_seed
self._exact_modulus = exact_modulus
self._implementation = implementation
EisensteinExtensionGeneric.__init__(self, poly, prec, print_mode, names, pAdicZZpXCRElement)
class EisensteinExtensionFieldCappedRelative(EisensteinExtensionGeneric, pAdicCappedRelativeFieldGeneric):
"""
TESTS::
sage: R = Qp(3, 10000, print_pos=False); S.<x> = ZZ[]; f = x^3 + 9*x - 3
sage: W.<w> = R.ext(f)
sage: TestSuite(R).run(skip='_test_log',max_runs=4)
"""
def __init__(self, exact_modulus, poly, prec, print_mode, shift_seed, names, implementation='NTL'):
"""
A capped relative representation of an eisenstein extension of Qp.
INPUT:
- ``exact_modulus`` -- the original polynomial defining the extension.
This could be a polynomial with rational coefficients, for example,
while ``poly`` has coefficients in a `p`-adic field.
- ``poly`` -- the polynomial with coefficients in :meth:`base_ring`
defining this extension
- ``prec`` -- the precision cap of this ring
- ``print_mode`` -- a dictionary of print options
- ``shift_seed`` -- unused
- ``names`` -- a 4-tuple, ``(variable_name, residue_name, unramified_subextension_variable_name, uniformizer_name)``
EXAMPLES::
sage: R = Qp(3, 10000, print_pos=False); S.<x> = ZZ[]; f = x^3 + 9*x - 3
sage: W.<w> = R.ext(f); W #indirect doctest
3-adic Eisenstein Extension Field in w defined by x^3 + 9*x - 3
sage: W.precision_cap()
30000
sage: R.<p> = Qp(next_prime(10^30), 3, print_pos=False); S.<x> = ZZ[]; f = x^3 + p^2*x - p
sage: W.<w> = R.ext(f); W.prime()
1000000000000000000000000000057
sage: W.precision_cap()
9
"""
# Currently doesn't support polynomials with non-integral coefficients
unram_prec = (prec + poly.degree() - 1) // poly.degree()
ntl_poly = ntl_ZZ_pX([a.lift() for a in poly.list()], poly.base_ring().prime()**unram_prec)
shift_poly = ntl_ZZ_pX([a.lift() for a in shift_seed.list()], shift_seed.base_ring().prime()**unram_prec)
if unram_prec <= 30:
self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), unram_prec, unram_prec, prec, True, ntl_poly, "small", "e", shift_poly)
else:
self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), 30, unram_prec, prec, True, ntl_poly, "big", "e", shift_poly)
self._shift_seed = shift_seed
self._exact_modulus = exact_modulus
self._implementation = implementation
EisensteinExtensionGeneric.__init__(self, poly, prec, print_mode, names, pAdicZZpXCRElement)
class EisensteinExtensionRingCappedAbsolute(EisensteinExtensionGeneric, pAdicCappedAbsoluteRingGeneric):
"""
TESTS::
sage: R = ZpCA(3, 10000, print_pos=False); S.<x> = ZZ[]; f = x^3 + 9*x - 3
sage: W.<w> = R.ext(f)
sage: TestSuite(R).run(skip='_test_log',max_runs=4)
"""
def __init__(self, exact_modulus, poly, prec, print_mode, shift_seed, names, implementation):
"""
A capped absolute representation of an eisenstein extension of Zp.
INPUT:
- ``exact_modulus`` -- the original polynomial defining the extension.
This could be a polynomial with integer coefficients, for example,
while ``poly`` has coefficients in a `p`-adic ring.
- ``poly`` -- the polynomial with coefficients in :meth:`base_ring`
defining this extension
- ``prec`` -- the precision cap of this ring
- ``print_mode`` -- a dictionary of print options
- ``shift_seed`` -- unused
- ``names`` -- a 4-tuple, ``(variable_name, residue_name, unramified_subextension_variable_name, uniformizer_name)``
EXAMPLES::
sage: R = ZpCA(3, 10000, print_pos=False); S.<x> = ZZ[]; f = x^3 + 9*x - 3
sage: W.<w> = R.ext(f); W
3-adic Eisenstein Extension Ring in w defined by x^3 + 9*x - 3
sage: W.precision_cap()
30000
sage: R.<p> = ZpCA(next_prime(10^30), 3, print_pos=False); S.<x> = ZZ[]; f = x^3 + p^2*x - p
sage: W.<w> = R.ext(f); W.prime()
1000000000000000000000000000057
sage: W.precision_cap()
9
"""
unram_prec = (prec + poly.degree() - 1) // poly.degree()
ntl_poly = ntl_ZZ_pX([a.lift() for a in poly.list()], poly.base_ring().prime()**unram_prec)
shift_poly = ntl_ZZ_pX([a.lift() for a in shift_seed.list()], shift_seed.base_ring().prime()**unram_prec)
if unram_prec <= 30:
self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), unram_prec, unram_prec, prec, False, ntl_poly, "small", "e", shift_poly)
else:
self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), 30, unram_prec, prec, False, ntl_poly, "big", "e", shift_poly)
self._shift_seed = shift_seed
self._exact_modulus = exact_modulus
self._implementation = implementation
EisensteinExtensionGeneric.__init__(self, poly, prec, print_mode, names, pAdicZZpXCAElement)
class EisensteinExtensionRingFixedMod(EisensteinExtensionGeneric, pAdicFixedModRingGeneric):
"""
TESTS::
sage: R = ZpFM(3, 10000, print_pos=False); S.<x> = ZZ[]; f = x^3 + 9*x - 3
sage: W.<w> = R.ext(f)
sage: TestSuite(R).run(skip='_test_log',max_runs=4)
"""
def __init__(self, exact_modulus, poly, prec, print_mode, shift_seed, names, implementation='NTL'):
"""
A fixed modulus representation of an eisenstein extension of Zp.
INPUT:
- ``exact_modulus`` -- the original polynomial defining the extension.
This could be a polynomial with integer coefficients, for example,
while ``poly`` has coefficients in a `p`-adic ring.
- ``poly`` -- the polynomial with coefficients in :meth:`base_ring`
defining this extension
- ``prec`` -- the precision cap of this ring
- ``print_mode`` -- a dictionary of print options
- ``shift_seed`` -- unused
- ``names`` -- a 4-tuple, ``(variable_name, residue_name, unramified_subextension_variable_name, uniformizer_name)``
EXAMPLES::
sage: R = ZpFM(3, 10000, print_pos=False); S.<x> = ZZ[]; f = x^3 + 9*x - 3
sage: W.<w> = R.ext(f); W #indirect doctest
3-adic Eisenstein Extension Ring in w defined by x^3 + 9*x - 3
sage: W.precision_cap()
30000
sage: R.<p> = ZpFM(next_prime(10^30), 3, print_pos=False); S.<x> = ZZ[]; f = x^3 + p^2*x - p
sage: W.<w> = R.ext(f); W.prime()
1000000000000000000000000000057
sage: W.precision_cap()
9
"""
unram_prec = (prec + poly.degree() - 1) // poly.degree()
ntl_poly = ntl_ZZ_pX([a.lift() for a in poly.list()], poly.base_ring().prime()**unram_prec)
shift_poly = ntl_ZZ_pX([a.lift() for a in shift_seed.list()], shift_seed.base_ring().prime()**unram_prec)
#print poly.base_ring().prime(), prec, poly.degree(), ntl_poly
# deal with prec not a multiple of e better.
self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), max(min(unram_prec - 1, 30), 1), unram_prec, prec, False, ntl_poly, "FM", "e", shift_poly)
self._shift_seed = shift_seed
self._exact_modulus = exact_modulus
self._implementation = implementation
EisensteinExtensionGeneric.__init__(self, poly, prec, print_mode, names, pAdicZZpXFMElement)
def fraction_field(self):
"""
Eisenstein extensions with fixed modulus do not support fraction fields.
EXAMPLES::
sage: S.<x> = ZZ[]
sage: R.<a> = ZpFM(5).extension(x^2 - 5)
sage: R.fraction_field()
Traceback (most recent call last):
...
TypeError: This implementation of the p-adic ring does not support fields of fractions.
"""
raise TypeError("This implementation of the p-adic ring does not support fields of fractions.")
#def coerce_map_explicit(self, S):
# from sage.rings.padics.morphism import Morphism_ZZ_EisFM, Morphism_ZpFM_EisFM
# if S is ZZ:
# return Morphism_ZZ_EisFM(self)
# elif isinstance(S, pAdicRingFixedMod) and S.prime() == self.prime():
# return Morphism_ZpFM_EisFM(S, self)
# return None
| StarcoderdataPython |
1618228 | <filename>test/integration/ggrc/converters/test_import_assessment_templates.py
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# pylint: disable=maybe-no-member
"""Test Assessment Template import."""
from collections import OrderedDict
from ggrc import models
from ggrc.converters import errors
from ggrc.utils import errors as common_errors
from integration.ggrc import TestCase
from integration.ggrc.models import factories
class TestAssessmentTemplatesImport(TestCase):
"""Assessment Template import tests."""
def setUp(self):
"""Set up for Assessment Template test cases."""
super(TestAssessmentTemplatesImport, self).setUp()
self.client.get("/login")
def test_valid_import(self):
"""Test valid import."""
response = self.import_file("assessment_template_no_warnings.csv")
expected_messages = {
"Assessment Template": {
"rows": 4,
"updated": 0,
"created": 4,
}
}
self._check_csv_response(response, expected_messages)
people = {p.email: p.id for p in models.Person.query.all()}
template = models.AssessmentTemplate.query \
.filter(models.AssessmentTemplate.slug == "T-2") \
.first()
self.assertEqual(
template.default_people["verifiers"],
[people["<EMAIL>"], people["<EMAIL>"]],
)
def test_modify_over_import(self):
"""Test import modifies Assessment Template and does not fail."""
self.import_file("assessment_template_no_warnings.csv")
slug = "T-1"
response = self.import_data(OrderedDict([
("object_type", "Assessment_Template"),
("Code*", slug),
("Audit*", "Audit"),
("Title", "Title"),
("Object Under Assessment", 'Control'),
]))
template = models.AssessmentTemplate.query \
.filter(models.AssessmentTemplate.slug == slug) \
.first()
self._check_csv_response(response, {})
self.assertEqual(template.default_people['verifiers'], 'Auditors')
self.assertEqual(template.default_people['assignees'], 'Admin')
def test_modify_persons_over_import(self):
"""Test import modifies Assessment Template and does not fail."""
self.import_file("assessment_template_no_warnings.csv")
slug = "T-1"
response = self.import_data(OrderedDict([
("object_type", "Assessment_Template"),
("Code*", slug),
("Audit*", "Audit"),
("Title", "Title"),
("Object Under Assessment", 'Control'),
("Default Verifiers", "Secondary Contacts")
]))
template = models.AssessmentTemplate.query \
.filter(models.AssessmentTemplate.slug == slug) \
.first()
self._check_csv_response(response, {})
self.assertEqual(template.default_people['verifiers'],
"Secondary Contacts")
def test_invalid_import(self):
"""Test invalid import."""
data = "assessment_template_with_warnings_and_errors.csv"
response = self.import_file(data, safe=False)
expected_messages = {
"Assessment Template": {
"rows": 5,
"updated": 0,
"created": 4,
"row_warnings": {
errors.UNKNOWN_USER_WARNING.format(
line=12,
column_name="Default Verifiers",
email="<EMAIL>",
),
errors.UNKNOWN_USER_WARNING.format(
line=12,
column_name="Default Verifiers",
email="<EMAIL>"
),
},
"row_errors": {
errors.ERROR_TEMPLATE.format(
line=15,
message=common_errors.DUPLICATE_RESERVED_NAME.format(
attr_name="ASSESSMENT PROCEDURE"
),
)
},
}
}
self._check_csv_response(response, expected_messages)
def test_duplicated_gcad_import(self):
"""Test import of LCAD with same name as GCAD."""
cad_title = "Test GCA"
with factories.single_commit():
factories.CustomAttributeDefinitionFactory(
definition_type="assessment",
title=cad_title,
)
audit = factories.AuditFactory()
response = self.import_data(OrderedDict([
("object_type", "Assessment_Template"),
("Code*", ""),
("Audit*", audit.slug),
("Default Assignees", "<EMAIL>"),
("Default Verifiers", "<EMAIL>"),
("Title", "Title"),
("Object Under Assessment", "Control"),
("Custom Attributes", "Text, {}".format(cad_title)),
]))
expected_messages = {
"Assessment Template": {
"rows": 1,
"updated": 0,
"created": 0,
"row_warnings": set(),
"row_errors": {
errors.ERROR_TEMPLATE.format(
line=3,
message=common_errors.DUPLICATE_GCAD_NAME.format(
attr_name=cad_title
),
)
},
}
}
self._check_csv_response(response, expected_messages)
def test_duplicated_acr_import(self):
"""Test import of LCAD with same name as GCAD."""
acr_name = "Test ACR"
with factories.single_commit():
factories.AccessControlRoleFactory(
object_type="Assessment",
name=acr_name,
)
audit = factories.AuditFactory()
response = self.import_data(OrderedDict([
("object_type", "Assessment_Template"),
("Code*", ""),
("Audit*", audit.slug),
("Default Assignees", "<EMAIL>"),
("Default Verifiers", "<EMAIL>"),
("Title", "Title"),
("Object Under Assessment", "Control"),
("Custom Attributes", "Text, {}".format(acr_name)),
]))
expected_messages = {
"Assessment Template": {
"rows": 1,
"updated": 0,
"created": 0,
"row_warnings": set(),
"row_errors": {
errors.ERROR_TEMPLATE.format(
line=3,
message=common_errors.DUPLICATE_CUSTOM_ROLE.format(
role_name=acr_name
),
)
},
}
}
self._check_csv_response(response, expected_messages)
| StarcoderdataPython |
3219319 | <reponame>Minkov/python-oop-2021-02<filename>interators_generators/iterators.py
ll = [1, 2, 3, 4, 5]
for x in ll:
print(x)
print([[x + 1] for x in ll])
ll_iter_1 = iter(ll)
ll_iter_2 = iter(ll)
print(ll_iter_1)
# print(f'Iter1: {next(ll_iter_1)}')
# print(f'Iter1: {next(ll_iter_1)}')
# print(f'Iter1: {next(ll_iter_1)}')
# print(f'Iter1: {next(ll_iter_1)}')
# print(f'Iter2: {next(ll_iter_2)}')
#
# print(f'Iter1: {next(ll_iter_1)}')
while True:
try:
print(next(ll_iter_1))
except StopIteration:
break
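# A minimal sketch (not part of the original lesson file) of the protocol the
# loop above relies on: an object is iterable because __iter__ returns an
# iterator whose __next__ eventually raises StopIteration.
class Countdown:
    def __init__(self, start):
        self.current = start
    def __iter__(self):
        return self
    def __next__(self):
        if self.current <= 0:
            raise StopIteration
        value = self.current
        self.current -= 1
        return value
print(list(Countdown(3)))  # prints [3, 2, 1]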
| StarcoderdataPython |
1799010 | <reponame>SilvaneiMartins/api_user_python_flask
from flask.json import JSONEncoder
class USERS(object):
new_id = 1
def __init__(self, name, email, password):
self.name = name
self.email = email
self.password = password
self.id = USERS.new_id
USERS.new_id += 1
class USERSEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, USERS):
return obj.__dict__
return super(
USERSEncoder, self
).default(obj)
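# Minimal usage sketch (not part of the original module); it assumes only the
# standard-library ``json`` module and the classes defined above.
if __name__ == "__main__":
    import json
    user = USERS("Ana", "ana@example.com", "secret")
    # ``cls=USERSEncoder`` lets json.dumps serialize USERS instances via __dict__.
    print(json.dumps(user, cls=USERSEncoder))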
| StarcoderdataPython |
3350010 | #!/usr/bin/env python3
import argparse as arp
import os
import sys
##### PARSING COMMAND LINE ARGUMENTS #####
prs = arp.ArgumentParser()
prs.add_argument('sp_data_path', type = str, help = 'path of spatial data')
prs.add_argument('cuda_device', type = str, help = "index of cuda device ID or cpu")
prs.add_argument('-m', '--model_path', default = "model/", type = str, help = 'path to regression model')
prs.add_argument('-o','--out_dir', default = os.getcwd() ,
type = str, help = 'model and proportions output directory')
prs.add_argument('-e', '--epochs', default=2500, type = int, help = "number of epochs to fit the model")
args = prs.parse_args()
cuda_device = args.cuda_device
sp_data_path = args.sp_data_path
output_folder = args.out_dir
assert (cuda_device.isdigit() or cuda_device == "cpu"), "invalid device input"
print("Parameters\n==========")
print("Training epochs: {}".format(args.epochs))
print("==========")
##### MAIN PART #####
if cuda_device.isdigit():
os.environ["CUDA_VISIBLE_DEVICES"]=cuda_device
import scanpy as sc
import numpy as np
from scvi.model import CondSCVI, DestVI
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.use('Agg')
# silence scanpy that prints a lot of warnings
import warnings
warnings.filterwarnings('ignore')
print("Reading in spatial data from " + sp_data_path + "...")
st_adata = sc.read_h5ad(sp_data_path)
st_adata.layers["counts"] = st_adata.X.copy()
print("Reading in the sc model...")
sc_model = CondSCVI.load(args.model_path)
if st_adata.shape[1] != sc_model.adata.shape[1]:
print("The number of genes do not match. Subsetting spatial data...")
st_adata = st_adata[:, st_adata.var_names.isin(sc_model.adata.var_names)].copy()
# Prepare anndata
print("Setting up spatial model...")
DestVI.setup_anndata(st_adata, layer="counts")
# Set up model
st_model = DestVI.from_rna_model(st_adata, sc_model)
st_model.view_anndata_setup()
st_model.train(max_epochs=args.epochs)
# Save training figure
st_model.history["elbo_train"].iloc[5:].plot()
plt.savefig("train.png")
# Export proportion file
st_model.get_proportions().to_csv(os.path.join(output_folder, 'proportions.tsv'), sep="\t")
| StarcoderdataPython |
3289045 | from auto_yolo import envs
readme = "Testing air variational autoencoder with math."
distributions = None
durations = dict()
n_digits = 2
largest_digit = n_digits * 9
n_classes = largest_digit + 1
config = dict(
n_train=16000, min_digits=n_digits, max_digits=n_digits,
max_time_steps=n_digits, run_all_time_steps=True,
largest_digit=largest_digit, n_classes=n_classes)
envs.run_experiment(
"test_math", config, readme, alg="air_2stage_math",
task="arithmetic", durations=durations, distributions=distributions
)
| StarcoderdataPython |
3212936 | """Handles for Broden-like datasets.
The original Broad and Densely Labeled Dataset (Broden_) was introduced in
[Bau2017]_ as a combination of several existing semantic segmentation and
classification datasets on overlapping image sets.
For more details on Broden and its encoding see :py:class:`BrodenHandle`.
.. note::
The original Broden dataset is not required for usage of this code.
Used datasets just must use a format as is used by the Broden dataset.
.. _Broden:
https://github.com/CSAILVision/NetDissect-Lite/blob/master/script/dlbroden.sh
.. [Bau2017] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. 2017.
“Network Dissection: Quantifying Interpretability of Deep Visual
Representations.”
In Proc. 2017 IEEE Conf. Comput. Vision and Pattern Recognition, 3319–3327.
Honolulu, HI, USA: IEEE Computer Society.
https://doi.org/10.1109/CVPR.2017.354.
"""
# Copyright (c) 2020 Continental Automotive GmbH
import os
from typing import NamedTuple, Optional, Dict, Tuple, List, Sequence, Union, \
Set, Any, Callable
import PIL.Image
import numpy as np
import pandas as pd
import torch
import torchvision as tv
from tqdm import tqdm
from .. import transforms as trafo
from ..base import BaseDataset
class BrodenLabel(NamedTuple):
"""Information needed to load the annotation of a Broden label."""
name: str
"""The (unique) name of the label in the annotations."""
number: int
"""The label ID."""
category: str
"""The category from which to select samples for the label"""
class BrodenHandle(BaseDataset):
"""Handle to collect a sub-dataset of a dataset following Broden format.
.. note::
The original Broden dataset is not required for usage of this handle.
Used datasets just must use a format as is used by the Broden dataset.
In the following, the format specifics relevant for the datasets that
can be handled are explained, using the original Broden Dataset as
role model. *(No code from the original datasets was used.)*
**About the Original Broden Dataset**
The Broden dataset is the broad and densely labeled dataset initially
prepared for the paper
`Network Dissection <http://arxiv.org/abs/1704.05796>`_.
It is a combination of the following datasets:
- `ADE (scene, object, part)
<https://groups.csail.mit.edu/vision/datasets/ADE20K/>`_
- `Pascal-Context (object)
<https://cs.stanford.edu/~roozbeh/pascal-context/>`_
- `Pascal-Part (part)
<http://roozbehm.info/pascal-parts/pascal-parts.html>`_
- `OpenSurfaces (material)
<http://opensurfaces.cs.cornell.edu/>`_
- `DTD (texture)
<https://www.robots.ox.ac.uk/~vgg/data/dtd/>`_
- and a generated color dataset, with 11 human selected colors
The original Broden data features both
pixel-level semantic segmentation annotations
(for categories see :py:attr:`SEG_CATS`), and
image-level classification annotations
(for categories see :py:attr:`CLS_CATS`).
The :py:attr:`annotations` attribute stores the raw annotation information
as :py:class:`pandas.DataFrame` as it is loaded from the index file
(see :py:attr:`INDEX_CSV_FILE`) within the
:py:attr:`~hybrid_learning.datasets.base.BaseDataset.dataset_root`.
For the format of the annotations see :py:attr:`annotations` directly.
.. note::
To create sub-sets, one can also provide the annotations information
on init.
**Default Output Format**
The :py:meth:`~hybrid_learning.datasets.base.BaseDataset.getitem` method
yields tuples of input image and a dictionary ``{label_name: annotation}``
containing the annotations for all specified labels.
For the exact output format of the annotations have a look at the
:py:meth:`getitem` doc.
By default, for classification, the annotation is ``bool``, and for
segmentation, it is a :py:class:`numpy.ndarray` binary mask for the
label. If the label information is missing for the selected item,
``None`` is returned instead.
This output is transformed by
:py:attr:`~hybrid_learning.datasets.base.BaseDataset.transforms` before
yielding it as output of
:py:meth:`~hybrid_learning.datasets.base.BaseDataset.__getitem__`.
.. note::
- To collect a single custom label/merged annotations from the Broden
dataset, refer to the :py:meth:`custom_label` builder.
- To modify the internal annotations table after init, use
:py:meth:`prune` or directly modify :py:attr:`annotations`.
"""
CAT_SEP = ">>"
"""Separator string if the category is specified for a label.
Then the format is ``"{label}{sep}{category}"``."""
LABEL_CSV_FILE: str = "label.csv"
"""Path to the file containing meta-information about the labels, relative
to a dataset root.
For details on the encoding see :py:meth:`label_info_for`."""
INDEX_CSV_FILE: str = "index.csv"
"""Path to the file containing the annotation information, relative to a
dataset root.
For the encoding see the documentation of this class."""
IMAGES_ROOT: str = "images"
"""Root directory for annotated image files.
Relative to the
:py:attr:`~hybrid_learning.datasets.base.BaseDataset.dataset_root`.
Annotations can be found in :py:attr:`INDEX_CSV_FILE`.
"""
SEG_CATS = ('object', 'part', 'color', 'material')
"""Categories that provide segmentation data."""
CLS_CATS = ('scene', 'texture')
"""Categories that provide classification data."""
def __init__(self,
labels: Sequence[BrodenLabel],
dataset_root: str,
annotations: pd.DataFrame = None,
prune_na: bool = True, prune_na_rule: str = 'all',
broden_split: Optional[str] = None,
max_num_samples: Optional[int] = None,
shuffle: bool = False,
**dataset_args):
"""Init.
For further arguments see the details in :py:meth:`standard_prune`.
.. warning::
Currently, no labels with duplicate names are allowed.
Therefore, a label may only occur for one category.
:param labels: list of labels to collect for each sample.
:param dataset_root: the path to the root directory holding the
annotation files and the images/ directory with the images and
segmentations
:param annotations: optional initializer for :py:attr:`annotations`,
which is by default loaded from :py:const:`INDEX_CSV_FILE`;
use to create sub-sets
:param dataset_args: arguments to
:py:class:`~hybrid_learning.datasets.base.BaseDataset`.
"""
if annotations is not None and len(annotations) <= 0:
raise ValueError("Empty annotations!")
if len(labels) == 0:
raise ValueError("Empty labels!")
self._default_transforms = self.datum_to_tens
"""The default transformation will return tensors."""
super(BrodenHandle, self).__init__(dataset_root=dataset_root,
**dataset_args)
self.annotations: pd.DataFrame = annotations \
if annotations is not None \
else self.load_annotations_table(self.dataset_root)
"""The actual annotation (meta-)information.
The columns used here are described below.
.. rubric:: Preliminary Remarks
- All file-paths are relative to
:py:attr:`~hybrid_learning.datasets.base.BaseDataset.dataset_root`
``/images``.
- Several files or class labels may be given, separated by semi-colon.
- A mask for a category is an RGB-image encoding segmentation masks for
all different labels of that category.
For the encoding see :py:meth:`process_seg_mask`.
- An annotation may have labels in different categories
(i.e. entries in these category columns). If annotation information
for a category is missing, this column is ``None``.
.. rubric:: The Columns
The following columns are used here:
- *image*: The file-path to the original image file of this annotation
- *split*: The dataset split for which this annotation was used
(``train`` or ``val``)
- category columns:
- *color*: color mask file-path
- *object*: object mask file-path (semantic object segmentation)
- *part*: part mask file-path (same as object masks, only parts
belong to a super-object)
- *material*: material mask file-path
- *scene*: label number of the depicted scene
- *texture*: texture label numbers
"""
if len(self) == 0:
raise RuntimeError("Loaded annotations information is empty!")
label_infos: pd.DataFrame = pd.read_csv(
os.path.join(self.dataset_root, self.LABEL_CSV_FILE))
self.labels: List[BrodenLabel] = \
[self.parse_label(label_spec, label_infos)
for label_spec in labels]
"""The labels to load the values for in each line of the Broden
annotations."""
# Check for duplicate labels:
for label in self.labels:
duplicates: List[BrodenLabel] = [lab for lab in self.labels if
lab.name == label.name]
if self.labels.count(label) > 1:
raise ValueError(
"Duplicate label names for labels {}".format(duplicates))
# Prune annotations
self.standard_prune(max_num_samples=max_num_samples, prune_na=prune_na,
prune_na_rule=prune_na_rule,
broden_split=broden_split, shuffle=shuffle)
def standard_prune(self, max_num_samples: Optional[int] = None,
prune_na: bool = True, prune_na_rule: str = 'all',
broden_split: Optional[str] = None,
shuffle: bool = False) -> 'BrodenHandle':
"""Apply the specified standard pruning operations.
Pruning is applied to the :py:attr:`annotations` table.
:param prune_na: whether to prune all entries (rows) from the
:py:attr:`annotations` table in which ``'all'`` or ``'any'`` of
the covered label categories are ``NaN`` (see also ``prune_rule``)
:param prune_na_rule: if ``prune_na`` is ``True``, rule by which to
select candidates for pruning:
- ``'all'``: all categories occurring in the specified labels must
be ``NaN``
- ``'any'``: any must be ``NaN``
:param broden_split: the original dataset had a fix split into
training and validation data; choose the corresponding original
split (see also :py:attr:`annotations`, where the split
meta-information is stored in)
:param max_num_samples: the maximum number of samples to select;
if set to ``None``, no restriction is applied
:param shuffle: whether to shuffle the dataset (before restricting to
``max_num_samples``)
:return: self
"""
# region Value checks
if broden_split is not None and broden_split not in ('train', 'val'):
raise ValueError(("broden_split must be one of ('train', 'val'), "
"but was: {}").format(broden_split))
if prune_na and prune_na_rule not in ('all', 'any'):
raise ValueError(("prune_na_rule must be one of ('all', 'any'), "
"but was {}").format(prune_na_rule))
# endregion
# Prune NaN values
if prune_na:
na_selector = \
self.annotations[{la.category for la in self.labels}].isna()
if prune_na_rule == 'all':
na_selector = na_selector.all(axis=1)
else:
na_selector = na_selector.any(axis=1)
self.annotations: pd.DataFrame = self.annotations.loc[~na_selector]
# Restrict to the selected split
if broden_split is not None:
self.annotations = \
self.annotations.loc[self.annotations['split'] == broden_split]
# Restrict to the selected number of samples (and shuffle)
if max_num_samples is None or max_num_samples <= 0 or \
max_num_samples > len(self.annotations):
max_num_samples = len(self.annotations)
if shuffle:
self.annotations = self.annotations.sample(n=max_num_samples
).reset_index(drop=True)
self.annotations = self.annotations.iloc[:max_num_samples]
# Final sanity check
if len(self) == 0:
raise RuntimeError("Annotations information is now empty after "
"standard pruning!")
return self
@classmethod
def load_annotations_table(cls, dataset_root: str,
index_file: str = None) -> pd.DataFrame:
"""Load the annotation information from the ``index_file``
under ``dataset_root``.
For simplicity of parsing, all category and the ``"image"`` column
are parsed to string.
:param dataset_root: the root directory under which to find the
index file
:param index_file: the file name / relative path under ``dataset_root``
of the index CSV file to load the annotations from;
defaults to :py:attr:`INDEX_CSV_FILE`
:return: annotations table with correct types of the category columns
"""
index_file = index_file or cls.INDEX_CSV_FILE
return pd.read_csv(os.path.join(dataset_root, index_file),
dtype={col: str for col in
[*cls.CLS_CATS, *cls.SEG_CATS, "image"]})
def parse_label(self, label_spec: Union[str, BrodenLabel],
label_infos: pd.DataFrame,
) -> BrodenLabel:
"""Given a label specifier, parse it to a :py:class:`BrodenLabel` given
``label_infos``.
:param label_spec: the label specifier to turn into a
:py:class:`BrodenLabel`
:param label_infos: the meta-information about all Broden labels;
contains the information about available labels
:return: the :py:class:`BrodenLabel` instance with information of
the ``label_spec``
"""
# Already in correct format:
if isinstance(label_spec, BrodenLabel):
return label_spec
category: Optional[str] = None
# region collect category information from label_spec if available
if self.CAT_SEP not in label_spec:
label_name: str = label_spec
        elif len(label_spec.split(self.CAT_SEP)) == 2:
label_name, category = label_spec.split(self.CAT_SEP)
else:
raise ValueError(
("Wrong label format of label specifier {}: expected exactly 1 "
"occurrence of {}").format(label_spec, self.CAT_SEP))
# endregion
# select category
label_info: pd.Series = self.label_info_for(label_name, label_infos)
categories: Dict[str, int] = self._to_cat_info(label_info['category'])
category: str = category or list(categories.keys())[0]
# region validate category
if category not in categories:
raise ValueError(("Category {} not available for labels {}; "
"choose one of {}"
).format(category, self.labels, categories))
if category not in [*self.SEG_CATS, *self.CLS_CATS]:
raise ValueError("Label {} has invalid category {}; allowed: {}"
.format(label_spec, category,
[*self.SEG_CATS, *self.CLS_CATS]))
if category not in self.annotations.columns:
raise ValueError(("Category {} of label {} not available in "
"annotations; found cols: {}"
).format(category, label_spec,
self.annotations.columns))
# endregion
return BrodenLabel(name=label_name, number=label_info.number,
category=category)
@staticmethod
def label_info_for(label_name: str, label_infos: pd.DataFrame) -> pd.Series:
"""Obtain information for label given by name from label information.
A label may have samples in different categories.
The output features the following information (compare Broden README):
:number: the label ID (used for annotation in the segmentation masks)
:name: the trivial unique name
:category:
the categories the labels have samples in, specified as
semi-colon separated list of entries in
``{'color', 'object', 'material', 'part', 'scene', 'texture'}``,
each entry followed by the total amount of samples for the label
for that category;
use :py:meth:`_to_cat_info` to process those
:frequency: total number of images having that label over all categories
:coverage: the mean(?) pixels per image
:syns: synonyms
:param label_name: the name of the label
:param label_infos: the meta-information on all Broden labels as can
by default be loaded from :py:const:`LABEL_CSV_FILE`.
:returns: :py:class:`pandas.Series` with above fields filled
:raises: :py:exc:`ValueError` if the label is not unique or cannot
be found
"""
label_info = label_infos[label_infos['name'] == label_name]
if len(label_info) < 1:
raise ValueError("Label {} not found".format(label_name))
if len(label_info) > 1:
raise ValueError("Label {} ambiguous: {} occurrences"
.format(label_name, len(label_info)))
label_info = label_info.iloc[0]
return label_info
@staticmethod
def _to_cat_info(cat_info_str: str):
"""Transform category info str of cat1(freq1);cat2(freq2);... to a dict.
:meta public:
"""
cats_freq: List[Tuple[str, ...]] = [tuple(cf.split('(')) for cf in
cat_info_str.split(';')]
for cat_freq in (cf for cf in cats_freq if not len(cf) == 2):
raise ValueError(("Unknown format for category: {} (full category"
"info: {})").format('('.join(cat_freq),
cat_info_str))
return {c: f.rstrip(')') for c, f in cats_freq}
def __len__(self):
return len(self.annotations)
def getitem(self, i: int) -> Tuple[PIL.Image.Image,
Dict[str, Union[bool, np.ndarray]]]:
"""Provide tuple of input image and dictionary with annotations for
all labels. (See :py:attr:`labels`).
Used for
:py:meth:`~hybrid_learning.datasets.base.BaseDataset.__getitem__`.
The output format is a tuple of
``(input_image, {label_name: annotation})``.
The return type is as follows:
The input image is an RGB image as :py:class:`~PIL.Image.Image`;
        For the annotations dictionary, the following holds:
- Each label from :py:attr:`labels` is considered, and the annotation
for a label is
- for classification: a ``bool`` value
- for segmentation: a binary mask as :py:class:`numpy.ndarray`
- In case the label is not available, its value in the annotations dict
is ``None``.
        The overall return value is a tuple of the input
        :py:class:`~PIL.Image.Image` and the annotations dict.
:return: tuple of input image and annotations dict
"""
img: PIL.Image.Image = PIL.Image.open(self.image_filepath(i))
anns: Dict[str, Union[bool, np.ndarray]] = self.load_anns(i)
return img, anns
def load_anns(self, i: int) -> Dict[str, Union[bool, np.ndarray]]:
"""Load all annotation information for row ``i``.
Information is retrieved from :py:attr:`annotations`.
For details on the output format see :py:meth:`load_ann`."""
loaded_rgb_masks = {}
raw_ann_row: pd.Series = self.annotations.iloc[i]
anns: Dict[str, Union[bool, np.ndarray]] = {
label.name: self.load_ann(label, raw_ann_row=raw_ann_row,
loaded_rgb_masks=loaded_rgb_masks)
for label in self.labels
}
return anns
@staticmethod
def datum_to_tens(img: PIL.Image.Image, anns: Dict[bool, np.ndarray]
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""This transformation will convert an output tuple of image, label dict
to a tensor. For the input format see :py:meth:`getitem`.
Any ``None`` entries in the annotations dictionary will remain ``None``.
"""
img_t = tv.transforms.ToTensor()(img)
# pylint: disable=no-member
anns_t = {k: (torch.as_tensor(a, dtype=torch.float)
if a is not None else None) for k, a in anns.items()}
# pylint: enable=no-member
return img_t, anns_t
def image_filepath(self, i: int) -> str:
"""Get the path to the image file for row ``i``.
Information is retrieved from :py:attr:`annotations`."""
return os.path.join(self.dataset_root, self.IMAGES_ROOT,
self.annotations.iloc[i]['image'])
def load_ann(self, label: BrodenLabel, i: Optional[int] = None,
raw_ann_row: pd.Series = None,
loaded_rgb_masks: Dict[str, List[PIL.Image.Image]] = None
) -> Optional[Union[bool, np.ndarray]]:
"""Load the annotation information for ``label`` at row ``i``.
Information is retrieved from :py:attr:`annotations`.
If the annotation information is missing for the given label category,
return ``None``.
.. note::
If ``loaded_rgb_masks`` is given, this function has the side effect
of updating this dict with newly loaded masks!
This is used to speed up loading of several labels from the same
mask.
:param label: the label to restrict the annotation to
:param i: the index of the row in the annotations information
:py:attr:`annotations` which holds the information for this
single annotation of interest
:param raw_ann_row: optionally directly hand over the row of interest
instead of providing its index (see ``i``)
:param loaded_rgb_masks: RGB segmentation masks loaded so far
(for speed-up); gets updated with any newly loaded masks
:return: One of
- ``None`` if category information is missing,
- the binary segmentation mask for the label in case of a
segmentation category,
- the boolean truth value whether the label holds for the image in
case of a classification category
"""
if i is None and raw_ann_row is None:
raise ValueError("Either index i or the annotation row raw_ann_row"
" must be given but both were None")
if loaded_rgb_masks is None:
loaded_rgb_masks: Dict[str, List[PIL.Image.Image]] = {}
if raw_ann_row is None:
raw_ann_row: pd.Series = self.annotations.iloc[i]
raw_ann: Union[str, float] = raw_ann_row[label.category]
# Missing annotation: return None
if pd.isnull(raw_ann):
return None
raw_anns: List[str] = raw_ann.split(';')
# raw_anns is list of file paths:
if label.category in self.SEG_CATS:
# RGB masks with label information encoded in red and green channel
if label.category not in loaded_rgb_masks:
# Update loaded mask list with newly loaded mask
loaded_rgb_masks[label.category] = [
PIL.Image.open(
os.path.join(self.dataset_root, self.IMAGES_ROOT, fp))
for fp in raw_anns]
ann = self.process_seg_mask(label, loaded_rgb_masks[label.category])
return ann
if label.category in self.CLS_CATS:
# raw_anns is list of classification label numbers
return str(label.number) in raw_anns
raise ValueError("Unknown category for label {}; known ones: {}"
.format(label, [*self.SEG_CATS, *self.CLS_CATS]))
def process_seg_mask(self, label: BrodenLabel,
rgb_masks: List[PIL.Image.Image]) -> np.ndarray:
"""Collect the binary segmentation mask for ``label`` from given
relative file paths.
Pixels belonging to the given ``label`` are 1, others 0.
:param label: the label to look for
(:py:attr:`~BrodenLabel.number` needed)
:param rgb_masks: a list of RGB masks with label information encoded in
red and green channel; for details on encoding see
:py:meth:`to_seg_mask`
        :return: binary segmentation mask for ``label`` merged from the
            given RGB segmentation masks
:raises: :py:exc:`ValueError` for invalid label category
"""
if len(rgb_masks) == 0:
raise ValueError("Empty relative file path list rel_fp!")
# Convert to binary masks only for self.label:
masks_np = [self.to_seg_mask(ext_mask, label_num=label.number)
for ext_mask in rgb_masks]
# Add up masks
return (np.sum(masks_np, axis=0) > 0) \
if len(masks_np) > 1 else masks_np[0]
@staticmethod
def to_seg_mask(seg: PIL.Image.Image, label_num: int) -> np.ndarray:
"""Given a Broden RGB segmentation, reduce it to a binary mask for
``label_num``.
        Broden segmentations are saved as RGB images, where the label
number of a pixel is
``(256 * green + red)`` with ``red`` the red channel value of the pixel,
and ``green`` its green channel value.
A label number of ``0`` means background.
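        For example, a pixel with ``red=4`` and ``green=1`` encodes label
        number ``256 * 1 + 4 = 260``.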
The label number is the ``'number'`` field from
:py:attr:`label_info_for` respectively the
:py:attr:`BrodenLabel.number` attribute.
        The label number must be given as a single ``int``.
:param seg: the original RGB segmentation mask encoded as described
above
:param label_num: the label number to restrict the mask to
        :return: binary segmentation mask for the given label number
"""
# noinspection PyTypeChecker
seg_np = np.array(seg)
red, green = seg_np[..., 0], seg_np[..., 1]
binary_seg_np = (256 * green + red) == label_num
return binary_seg_np
def prune(self, condition: Callable[[Tuple[Any, Any]], bool],
by_target: bool = False,
show_progress_bar: bool = False) -> 'BrodenHandle':
"""Prune all items that fulfill ``condition`` from this dataset.
For this, :py:attr:`annotations` is modified accordingly.
:param condition: callable that accepts the output of
:py:meth:`~hybrid_learning.datasets.base.BaseDataset.__getitem__`
and returns a ``bool`` stating whether this item is to be pruned
:param show_progress_bar: whether to show a progress bar while
collecting the selector for ``condition``
:param by_target: only load the target annotations of each item
(the
:py:attr:`~hybrid_learning.datasets.base.BaseDataset.transforms`
are applied with dummy input) and apply ``condition`` to the target;
asserts that transforms yields a tuple of ``(input, target)``;
this is useful to avoid the costly loading of input images if they
do not contribute to the transformations or the ``condition``.
:return: this instance (with modified :py:attr:`annotations`)
"""
selector: np.ndarray = self._selector_for(
condition,
show_progress_bar=show_progress_bar,
by_target=by_target)
self.annotations = self.annotations[~selector]
return self
def balance(self, condition: Callable[[Tuple[Any, Any]], bool],
proportion: float = 0.5,
by_target: bool = False,
show_progress_bar: bool = False) -> 'BrodenHandle':
"""Restrict this dataset to a subset with an exact ``proportion``
fulfilling ``condition``.
For this, :py:attr:`annotations` is modified accordingly.
After splitting the dataset by ``condition``, the half which is too
large to fulfill ``proportion`` is reduced by random sub-sampling,
determining the final size of the dataset.
If there is only one class in the dataset, only shuffling is applied.
:param condition: callable that accepts the output of
:py:meth:`~hybrid_learning.datasets.base.BaseDataset.__getitem__`
and returns a ``bool`` stating whether this item belongs to the
first split
:param proportion: the aimed-for proportion of the first split on
the final dataset
:param show_progress_bar: whether to show a progress bar while
collecting the selector for ``condition``
:param by_target: only load the target annotations of each item
(the
:py:attr:`~hybrid_learning.datasets.base.BaseDataset.transforms`
are applied with dummy input) and apply ``condition`` to the target;
asserts that transforms yields a tuple of ``(input, target)``;
this is useful to avoid the costly loading of input images if they
do not contribute to the transformations or the ``condition``.
:return: self
"""
selector: np.ndarray = self._selector_for(
condition,
by_target=by_target,
show_progress_bar=show_progress_bar)
# Reduce positives
pos: pd.DataFrame = self.annotations.loc[selector]
if len(pos) / len(self.annotations) > proportion:
to_reduce: pd.DataFrame = pos
to_keep: pd.DataFrame = self.annotations.loc[~selector]
prop_to_keep: float = 1 - proportion
# Reduce negatives
else:
to_reduce: pd.DataFrame = self.annotations.loc[~selector]
to_keep: pd.DataFrame = pos
prop_to_keep: float = proportion
# Is there only one class in the dataset?
if np.allclose(prop_to_keep, 0):
return self.shuffle()
# Calc the final amounts of samples for each slice
num_to_keep: int = len(to_keep)
num_all: int = int(num_to_keep / prop_to_keep)
num_to_reduce: int = max(1, num_all - num_to_keep)
# Subsample, shuffle:
self.annotations: pd.DataFrame = pd.concat(
[to_reduce.sample(n=num_to_reduce),
to_keep.sample(n=num_to_keep)],
ignore_index=True)
self.shuffle()
return self
def _selector_for(self, condition: Callable[[Tuple[Any, Any]], bool],
show_progress_bar: bool = False,
by_target: bool = False) -> np.ndarray:
"""Provide ``bool`` list matching indices of this dataset for which
``condition`` holds.
Optionally show a progress bar while processing the data.
:param by_target: only load target
(transforms is applied with dummy input) and apply
condition to target; asserts that transforms yields a tuple of
``(input, target)``
"""
if by_target:
dummy_img: PIL.Image.Image = PIL.Image.open(self.image_filepath(0))
load_fn: Callable[[int], Any] = \
(lambda i: self.transforms(dummy_img, self.load_anns(i))[1])
else:
load_fn: Callable[[int], Any] = lambda i: self[i]
selector: List[bool] = []
iterator = range(len(self))
if show_progress_bar:
iterator = tqdm(iterator,
desc="Iterating " + self.__class__.__name__)
for i in iterator:
selector.append(condition(load_fn(i)))
return np.array(selector, dtype=bool)
def shuffle(self) -> 'BrodenHandle':
"""Shuffle the held annotations and return self."""
self.annotations = self.annotations.sample(frac=1
).reset_index(drop=True)
return self
@classmethod
def custom_label(cls, dataset_root: str, label: str,
prune_empty: Union[bool, str] = True,
balance_pos_to: Optional[float] = None,
verbose: bool = False,
**init_args):
# pylint: disable=line-too-long
"""Return a :py:class:`BrodenHandle` instance with output restricted to
single ``label``.
The transformations in
:py:attr:`~hybrid_learning.datasets.base.BaseDataset.transforms` will be chosen
such that :py:meth:`~hybrid_learning.datasets.base.BaseDataset.__getitem__`
outputs a tuple of ``(input_image, annotation)`` where
- ``input_image`` is encoded as :py:class:`torch.Tensor`
- ``annotation`` is a :py:class:`torch.Tensor` holding either the
binary mask for the specified label or the bool classification value.
The label may either be a label as would be specified in
:py:class:`__init__ <BrodenHandle>` or a string representation of a
:py:class:`~hybrid_learning.datasets.transforms.dict_transforms.Merge` operation.
:param dataset_root: the ``dataset_root`` parameter for init of the
:py:class:`BrodenHandle`
:param label: the label to restrict to; may either be a valid string
label name, a valid
:py:class:`BrodenLabel`, or a valid string representation of a
:py:class:`~hybrid_learning.datasets.transforms.dict_transforms.Merge` operation
the
:py:class:`~hybrid_learning.datasets.transforms.dict_transforms.Merge.all_in_keys`
of which are all valid string label names;
:param init_args: further init arguments to the :py:class:`BrodenHandle`
:param balance_pos_to: if a value given, balance the resulting
:py:class:`BrodenHandle` instance such that the proportion of the
``True`` entries is this value;
only use for classification examples
:param prune_empty: whether to prune empty entries
(``None`` values and empty masks) using :py:meth:`prune`
:param verbose: show progress bars
:return: :py:class:`BrodenHandle` instance for ``dataset_root`` with
:py:attr:`~hybrid_learning.datasets.base.BaseDataset.transforms` and
:py:class:`~BrodenHandle.labels`
selected such that the output of :py:meth:`getitem` is
transformed to the format specified above
"""
# pylint: enable=line-too-long
# region Value checks
if "labels" in init_args:
raise ValueError(("init_args must not contain labels key, "
"but were {}").format(init_args))
# endregion
merge_op: Optional[trafo.Merge] = None # Merge op before flatten
# region Parse the label (and collect Merge operation if necessary):
# collect: labels, merge_op, final_key (=the final key to which to
# restrict the dict)
if isinstance(label, BrodenLabel):
labels: List[BrodenLabel] = [label]
final_key: str = label.name
elif isinstance(label, trafo.Merge):
merge_op: trafo.Merge = label
labels: Set[str] = merge_op.all_in_keys
final_key: str = merge_op.out_key
elif isinstance(label, str):
# Can be parsed to merge operation?
parsed_label: Union[str, trafo.Merge] = trafo.Merge.parse(label)
if isinstance(parsed_label, str):
labels: List[str] = [label]
final_key: str = label
else:
merge_op: trafo.Merge = parsed_label
labels: Set[str] = merge_op.all_in_keys
final_key: str = merge_op.out_key
else:
raise ValueError("label {} has unknown format {}"
.format(label, type(label)))
assert final_key != ""
assert len(labels) > 0
# endregion
# region Collect the transformation
trafos: List[trafo.TupleTransforms] = []
trafos += [trafo.OnTarget(merge_op),
trafo.OnTarget(trafo.RestrictDict([final_key]))] \
if merge_op else []
trafos += [cls.datum_to_tens,
trafo.OnTarget(trafo.FlattenDict(final_key))]
user_defined_trafo = init_args.pop('transforms', None)
# endregion
broden_inst = cls(dataset_root=dataset_root, labels=labels, **init_args)
# specify separately for IDE type inference:
broden_inst.transforms = trafo.Compose(trafos)
if prune_empty:
broden_inst.prune(
lambda a: a is None or (a.dim() > 0 and a.sum() == 0),
by_target=True, show_progress_bar=verbose)
if balance_pos_to is not None:
broden_inst.balance(lambda a: a, proportion=balance_pos_to,
by_target=True, show_progress_bar=verbose)
# Append the user-defined transforms
# (after pruning, since this requires control over the output format!)
if user_defined_trafo is not None:
broden_inst.transforms.append(user_defined_trafo)
return broden_inst
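# -----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The dataset root and
# label name below are placeholder assumptions: they presume a Broden-format
# dataset on disk and are not values prescribed by this package.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    _data = BrodenHandle.custom_label(
        dataset_root="path/to/broden_dataset",  # assumption: local Broden-format copy
        label="sky",                            # assumption: any available label name
        prune_empty=True,
        verbose=True,
    )
    # __getitem__ yields (image_tensor, mask_or_bool_tensor) pairs for the label.
    _img, _ann = _data[0]
    print(_img.shape, _ann.shape if _ann.dim() > 0 else bool(_ann))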
| StarcoderdataPython |
19460 | <reponame>ceshine/pytorch-helper-bot
""" Finetuning BERT using DeepSpeed's ZeRO-Offload
"""
import json
import dataclasses
from pathlib import Path
from functools import partial
import nlp
import torch
import typer
import deepspeed
import numpy as np
from transformers import BertTokenizerFast
from transformers import BertForSequenceClassification
from sklearn.model_selection import train_test_split
from pytorch_helper_bot import (
DeepSpeedBot, MovingAverageStatsTrackerCallback, CheckpointCallback,
LearningRateSchedulerCallback, MultiStageScheduler, Top1Accuracy,
LinearLR, CosineAnnealingScheduler
)
CACHE_DIR = Path("cache/")
CACHE_DIR.mkdir(exist_ok=True)
APP = typer.Typer()
class SST2Dataset(torch.utils.data.Dataset):
def __init__(self, entries_dict):
super().__init__()
self.entries_dict = entries_dict
def __len__(self):
return len(self.entries_dict["label"])
def __getitem__(self, idx):
return (
self.entries_dict["input_ids"][idx],
self.entries_dict["attention_mask"][idx],
self.entries_dict["token_type_ids"][idx],
self.entries_dict["label"][idx]
)
@dataclasses.dataclass
class SST2Bot(DeepSpeedBot):
log_dir = CACHE_DIR / "logs"
def __post_init__(self):
super().__post_init__()
self.loss_format = "%.6f"
@staticmethod
def extract_prediction(output):
return output[0]
class Object(object):
pass
def convert_to_features(tokenizer, example_batch):
# Tokenize contexts and questions (as pairs of inputs)
encodings = tokenizer.batch_encode_plus(
example_batch['sentence'], padding='max_length', max_length=64, truncation=True)
return encodings
@APP.command(
context_settings={"allow_extra_args": True, "ignore_unknown_options": True}
)
def main(arch="bert-base-uncased", config="gpu.json"):
# Reference:
#
# * https://github.com/huggingface/nlp/blob/master/notebooks/Overview.ipynb
with open(config) as fin:
config_params = json.load(fin)
dataset = nlp.load_dataset('glue', "sst2")
print(set([x['label'] for x in dataset["train"]]))
tokenizer = BertTokenizerFast.from_pretrained(arch)
# Format our dataset to outputs torch.Tensor to train a pytorch model
columns = ['input_ids', 'token_type_ids', 'attention_mask', "label"]
for subset in ("train", "validation"):
dataset[subset] = dataset[subset].map(
partial(convert_to_features, tokenizer), batched=True)
dataset[subset].set_format(type='torch', columns=columns)
print(tokenizer.decode(dataset['train'][6]["input_ids"].numpy()))
print(dataset['train'][0]["attention_mask"])
valid_idx, test_idx = train_test_split(
list(range(len(dataset["validation"]))), test_size=0.5, random_state=42)
train_dict = {
"input_ids": dataset['train']["input_ids"],
"attention_mask": dataset['train']["attention_mask"],
"token_type_ids": dataset['train']["token_type_ids"],
"label": dataset['train']["label"]
}
valid_dict = {
"input_ids": dataset['validation']["input_ids"][valid_idx],
"attention_mask": dataset['validation']["attention_mask"][valid_idx],
"token_type_ids": dataset['validation']["token_type_ids"][valid_idx],
"label": dataset['validation']["label"][valid_idx]
}
test_dict = {
"input_ids": dataset['validation']["input_ids"][test_idx],
"attention_mask": dataset['validation']["attention_mask"][test_idx],
"token_type_ids": dataset['validation']["token_type_ids"][test_idx],
"label": dataset['validation']["label"][test_idx]
}
# Instantiate a PyTorch Dataloader around our dataset
train_loader = torch.utils.data.DataLoader(
SST2Dataset(train_dict), batch_size=config_params["train_batch_size"], shuffle=True)
valid_loader = torch.utils.data.DataLoader(
SST2Dataset(valid_dict), batch_size=config_params["train_batch_size"], drop_last=False)
test_loader = torch.utils.data.DataLoader(
SST2Dataset(test_dict), batch_size=config_params["train_batch_size"], drop_last=False)
model = BertForSequenceClassification.from_pretrained(arch)
# torch.nn.init.kaiming_normal_(model.classifier.weight)
# torch.nn.init.constant_(model.classifier.bias, 0)
# torch.nn.init.kaiming_normal_(model.bert.pooler.dense.weight)
# torch.nn.init.constant_(model.bert.pooler.dense.bias, 0);
args = Object()
setattr(args, "local_rank", 0)
setattr(args, "deepspeed_config", config)
if config[:3] == "cpu":
if "optimizer" in config_params:
model, optimizer, _, _ = deepspeed.initialize(
args=args,
model=model,
model_parameters=model.parameters()
)
else:
from deepspeed.ops.adam import DeepSpeedCPUAdam
optimizer = DeepSpeedCPUAdam(model.parameters(), lr=2e-5)
model, optimizer, _, _ = deepspeed.initialize(
args=args,
model=model,
model_parameters=model.parameters(),
optimizer=optimizer
)
else:
model, optimizer, _, _ = deepspeed.initialize(
args=args,
model=model,
model_parameters=model.parameters()
# optimizer=optimizer
)
total_steps = len(train_loader) * 3
# checkpoints = CheckpointCallback(
# keep_n_checkpoints=1,
# checkpoint_dir=CACHE_DIR / "model_cache/",
# monitor_metric="accuracy"
# )
lr_durations = [
int(total_steps*0.2),
int(np.ceil(total_steps*0.8))
]
break_points = [0] + list(np.cumsum(lr_durations))[:-1]
callbacks = [
MovingAverageStatsTrackerCallback(
avg_window=len(train_loader) // 8,
log_interval=len(train_loader) // 10
),
LearningRateSchedulerCallback(
MultiStageScheduler(
[
LinearLR(optimizer, 0.01, lr_durations[0]),
CosineAnnealingScheduler(optimizer, lr_durations[1])
],
start_at_epochs=break_points
)
),
# checkpoints
]
bot = SST2Bot(
model=model,
train_loader=train_loader,
valid_loader=valid_loader,
clip_grad=10.,
optimizer=optimizer, echo=True,
criterion=torch.nn.CrossEntropyLoss(),
callbacks=callbacks,
pbar=False,
use_tensorboard=False,
# use_amp=APEX_AVAILABLE,
metrics=(Top1Accuracy(),)
)
print(total_steps)
bot.train(
total_steps=total_steps,
checkpoint_interval=len(train_loader) // 2
)
# bot.load_model(checkpoints.best_performers[0][1])
# checkpoints.remove_checkpoints(keep=0)
# TARGET_DIR = CACHE_DIR / "sst2_bert_uncased"
# TARGET_DIR.mkdir(exist_ok=True)
# bot.model.save_pretrained(TARGET_DIR)
bot.eval(valid_loader)
bot.eval(test_loader)
if __name__ == "__main__":
APP()
| StarcoderdataPython |
1778542 | #!/usr/bin/env python
# Author: <NAME>
# License: Open Source based, so open source distribution.
import sys
import pdfkit
from barcode import Code128
from barcode.writer import ImageWriter
BARCODE_OPTIONS = {
'font_size': 12,
'text_distance': 2.0
}
PDF_OPTIONS = {
'page-size': 'A4',
'margin-top': '0.75in',
'margin-right': '0.75in',
'margin-bottom': '0.75in',
'margin-left': '0.75in',
}
def main():
if len(sys.argv) >= 2:
ns = str(sys.argv[1])
        # Generate the barcode image
with open('imgs/barcode.png', 'wb') as f:
Code128(ns, writer=ImageWriter()).write(f, options=BARCODE_OPTIONS)
        # Generate the PDF from the HTML template
pdfkit.from_file('template.html', "etiquetas/" + ns + ".pdf", options=PDF_OPTIONS)
if __name__ == '__main__':
main()
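# Illustrative invocation (the script name and serial number below are assumptions):
#   python generate_label.py 123456789012
# which would write imgs/barcode.png and etiquetas/123456789012.pdf rendered from
# template.html using the barcode and PDF options defined above.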
| StarcoderdataPython |
93330 | <reponame>mindsolve/pySymProxy
import logging
import logging.config
import logging.handlers
import json
import os
def findConfigFile(candidates):
for location in candidates:
if os.path.isfile(location):
return location
return candidates[-1]
def findConfigValue(rootDict, name, required = False, default = None):
curElement = rootDict
elements = name.split(".")
for element in elements:
curElement = curElement.get(element)
if (curElement == None):
break
if (curElement == None):
if (required):
raise Exception("Configuration value missing: " + name)
curElement = default
return curElement
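# Illustrative example of the dotted-name lookup (the dict below is hypothetical,
# not part of pySymProxy):
#   cfg = {"identity": {"name": "symproxy01"}}
#   findConfigValue(cfg, "identity.name")                 -> "symproxy01"
#   findConfigValue(cfg, "identity.port", default=80)     -> 80
#   findConfigValue(cfg, "identity.port", required=True)  -> raises Exception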
class Config:
def __init__(self, configFile):
# Load configuration information
self._configFile = configFile
with open(configFile) as data_file:
self._configData = json.load(data_file)
logging.config.dictConfig(self.loggingConfig())
def configFile(self):
return self._configFile
def name(self):
return self.findConfigValue("identity.name")
def host(self):
return self.findConfigValue("identity.host")
def administrator(self):
return self.findConfigValue("identity.administrator")
def sympath(self):
return self.findConfigValue("identity.default_sympath")
def servers(self):
return self.findConfigValue("servers")
def cacheLocation(self):
return self.findConfigValue("general.cacheLocation")
def blacklist(self):
return self.findConfigValue("general.blacklist")
def loggingConfig(self):
return self.findConfigValue("logging", required=False, default={})
def extractLogFiles(self, logger, logfiles):
for handler in logger.handlers:
if isinstance(handler, logging.FileHandler):
logfiles.append(handler.baseFilename)
if isinstance(handler, logging.handlers.RotatingFileHandler):
for x in range(0, handler.backupCount):
logfiles.append(handler.baseFilename + "." + str(x))
def logfiles(self):
logfiles = []
for loggerName in logging.Logger.manager.loggerDict:
logger = logging.getLogger(loggerName)
self.extractLogFiles(logger, logfiles)
self.extractLogFiles(logger.root, logfiles)
logfiles = list(set(logfiles))
logfiles = [f for f in logfiles if os.path.exists(f)]
logfiles.sort()
return logfiles
def findConfigValue(self, name, required=True, default=None):
return findConfigValue(self._configData, name, required, default)
| StarcoderdataPython |
102430 | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('frontend.urls')), # home page react
path('', include('leads.urls')), # leads page api
path('', include('accounts.urls')), # accounts page api
]
| StarcoderdataPython |
1683761 | import os
import nbformat
from textwrap import dedent
from .preprocessor import Preprocessor
from ..utils import (
is_grade, is_solution, is_description,
get_task_info, get_valid_name, get_points)
class AddTaskHeader(Preprocessor):
def get_header(self, idx, points):
header = nbformat.v4.new_markdown_cell()
header.metadata['nbgrader'] = {
'grade_id': 'taskheader_{}'.format(idx),
'locked': True,
'solution': False,
'grade': False,
'task': False,
'schema_version': 3
}
header.source = dedent("""
---
# Task {}
**[{} Point(s)]**
""".format(idx, points))
return header
def get_sub_header(self, idx, sub_idx, points):
header = nbformat.v4.new_markdown_cell()
header.metadata['nbgrader'] = {
'grade_id': 'taskheader_{}_{}'.format(idx, sub_idx),
'locked': True,
'solution': False,
'grade': False,
'task': False,
'schema_version': 3
}
header.source = dedent("""
## Task {}.{}
**[{} Point(s)]**
""".format(idx, sub_idx, points))
return header
def add_headers(self, nb, idx):
total_points = sum([get_points(cell) for cell in nb.cells])
task = get_task_info(nb)
if len(task['subtasks']) < 1:
return nb
new_cells = []
header = self.get_header(idx, total_points)
new_cells.append(header)
if 'header' in task:
new_cells.append(nb.cells[task['header']])
if len(task['subtasks']) == 1:
new_cells.extend([nb.cells[i] for i in task['subtasks'][0]])
if 'other' in task:
new_cells.extend([nb.cells[i] for i in task['other']])
nb.cells = new_cells
return nb
if len(task['subtasks']) > 1:
for sub_idx, subtask in enumerate(task['subtasks']):
points = sum([get_points(nb.cells[i]) for i in subtask])
new_cells.append(self.get_sub_header(idx, sub_idx+1, points))
new_cells.extend([nb.cells[i] for i in subtask])
if 'other' in task:
new_cells.extend([nb.cells[i] for i in task['other']])
nb.cells = new_cells
return nb
def preprocess(self, resources):
if not resources['exercise_options']['task-headers']:
return
idx = 0
for task in resources['tasks']:
task_path = os.path.join(
resources['tmp_dir'],
'tasks',
task
)
notebooks = [file for file in os.listdir(task_path) \
if file.endswith('.ipynb')]
for nb_file in notebooks:
idx += 1
task_nb = nbformat.read(os.path.join(task_path, nb_file),
as_version=4)
task_nb = self.add_headers(task_nb, idx)
nbformat.write(task_nb, os.path.join(task_path, nb_file)) | StarcoderdataPython |
1614329 | <gh_stars>0
from numba.core import dispatcher, compiler
from numba.core.registry import cpu_target, dispatcher_registry
import numba_dppy.config as dppy_config
class DpplOffloadDispatcher(dispatcher.Dispatcher):
targetdescr = cpu_target
def __init__(self, py_func, locals={}, targetoptions={}, impl_kind='direct', pipeline_class=compiler.Compiler):
if dppy_config.dppy_present:
from numba_dppy.compiler import DPPLCompiler
targetoptions['parallel'] = True
dispatcher.Dispatcher.__init__(self, py_func, locals=locals,
targetoptions=targetoptions, impl_kind=impl_kind, pipeline_class=DPPLCompiler)
else:
print("---------------------------------------------------------------------")
print("WARNING : DPPL pipeline ignored. Ensure OpenCL drivers are installed.")
print("---------------------------------------------------------------------")
dispatcher.Dispatcher.__init__(self, py_func, locals=locals,
targetoptions=targetoptions, impl_kind=impl_kind, pipeline_class=pipeline_class)
dispatcher_registry['__dppl_offload_gpu__'] = DpplOffloadDispatcher
dispatcher_registry['__dppl_offload_cpu__'] = DpplOffloadDispatcher
| StarcoderdataPython |
3299489 | <reponame>jerinka/Redis_OpenCV
import redis
import cv2
import numpy as np
import time
import io
import uuid
r = redis.StrictRedis.from_url('redis://')
img_path ="redis.png"
uid = str(uuid.uuid1())
img1 = cv2.imread(img_path, 1)
retval, buffer = cv2.imencode('.png', img1,[cv2.IMWRITE_PNG_COMPRESSION, 0])
img1_bytes = np.array(buffer).tobytes()  # tobytes() replaces the deprecated tostring()
# Write into redis server
r.set(uid, img1_bytes)
# Reading Redis
img1_bytes_ = r.get(uid)
# Decoding CV2+Redis
decoded = cv2.imdecode(np.frombuffer(img1_bytes_, np.uint8), 1)
cv2.imwrite("cv2_redis.png", decoded)
| StarcoderdataPython |
149830 | <gh_stars>1-10
import os
import numpy as np
from taskinit import tb
def cpxx2yy(tb_in=[]):
if not tb_in:
        print('tb_in not provided. Abort...')
        return
if type(tb_in) is str:
tb_in = [tb_in]
tb.open(tb_in[0] + '/SPECTRAL_WINDOW', nomodify=False)
nspw = tb.nrows()
tb.close()
for ctb in tb_in:
tb.open(ctb, nomodify=False)
for s in range(nspw):
subt = tb.query("DATA_DESC_ID==" + str(s))
model_d = subt.getcol('MODEL_DATA')
# cp xx to yy
model_d[1] = model_d[0]
subt.putcol('MODEL_DATA', model_d)
subt.close()
tb.close()
def concat(tb_in=[], tb_out=None):
if not tb_in:
        print('tb_in not provided. Abort...')
        return
if os.path.exists(tb_out):
os.system('rm -r ' + tb_out)
# os.system('cp -r '+tb_in[0]+' '+tb_out)
os.system('cp -r ' + tb_in[0] + ' ' + tb_out)
tb.open(tb_out + '/SPECTRAL_WINDOW', nomodify=True)
nspw = tb.nrows()
tb.close()
tim = []
fld = []
spw = []
ant1 = []
ant2 = []
intv = []
scan = []
obid = []
cpar = []
para = []
flag = []
snr = []
# wght=[]
for ctb in tb_in:
tb.open(ctb, nomodify=True)
cols = tb.colnames()
tim0 = tb.getcol(cols[0])
if len(tim0) == 0:
continue
else:
tim.append(tb.getcol(cols[0]))
fld.append(tb.getcol(cols[1]))
spw.append(tb.getcol(cols[2]))
ant1.append(tb.getcol(cols[3]))
ant2.append(tb.getcol(cols[4]))
intv.append(tb.getcol(cols[5]))
scan.append(tb.getcol(cols[6]))
obid.append(tb.getcol(cols[7]))
cpar.append(tb.getcol(cols[8]))
para.append(tb.getcol(cols[9]))
flag.append(tb.getcol(cols[10]))
snr.append(tb.getcol(cols[11]))
# wght.append(tb.getcol(cols[12]))
tb.close()
if len(tim) == 0:
print('tables have no data. Return')
return -1
else:
tim = np.concatenate(tim)
fld = np.concatenate(fld)
spw = np.concatenate(spw)
ant1 = np.concatenate(ant1)
ant2 = np.concatenate(ant2)
intv = np.concatenate(intv)
scan = np.concatenate(scan)
obid = np.concatenate(obid)
cpar = np.concatenate(cpar, axis=2)
para = np.concatenate(para, axis=2)
flag = np.concatenate(flag, axis=2)
snr = np.concatenate(snr, axis=2)
# wght=np.concatenate(wght)
tb.open(tb_out, nomodify=False)
nrows = tb.nrows()
nrows_new = len(tim)
tb.addrows(nrows_new - nrows)
tb.putcol(cols[0], tim)
tb.putcol(cols[1], fld)
tb.putcol(cols[2], spw)
tb.putcol(cols[3], ant1)
tb.putcol(cols[4], ant2)
tb.putcol(cols[5], intv)
tb.putcol(cols[6], scan)
tb.putcol(cols[7], obid)
tb.putcol(cols[8], cpar)
tb.putcol(cols[9], para)
tb.putcol(cols[10], flag)
tb.putcol(cols[11], snr)
tb.close()
return tb_out
| StarcoderdataPython |
4839786 | <reponame>mrTavas/owasp-fstm-auto
import sys
import logging
import os
import json
import binascii
from pwn import *
import random
# Unicorn imports
# require unicorn moudle
from unicorn import *
from unicorn.arm_const import *
from unicorn.arm64_const import *
from unicorn.x86_const import *
from unicorn.mips_const import *
# custom module import
from hook.hook_loader import *
from fuzz.fuzz_loader import *
from crash.crash_loader import *
# Name of the index file
CONTEXT_JSON = "_index.json"
UNICORN_PAGE_SIZE = 0x1000
MAX_ALLOWABLE_SEG_SIZE = 1024 * 1024 * 1024
ALIGN_PAGE_DOWN = lambda x: x & ~(UNICORN_PAGE_SIZE - 1)
ALIGN_PAGE_UP = lambda x: (x + UNICORN_PAGE_SIZE - 1) & ~(UNICORN_PAGE_SIZE-1)
LITTLE2BIG = lambda num : int( num.decode('hex')[::-1].encode('hex') , 16)
COMPILE_GCC = 1
COMPILE_MSVC = 2
BASE = 0x0400000
class Firmcorn( Uc ): # Firmcorn object inherit from Uc object
'''
    The Firmcorn object is the main object of the Firmcorn framework.
'''
def __init__(self ,compiler = COMPILE_GCC , enable_debug = True):
self.enable_debug = enable_debug
self.trace_start_addr = 0
self.trace_end_addr = 0
self.dbg_addr_list = []
self.skip_func_list = None
self.unresolved_funcs = None
self.fuzztarget = None
self.compiler = compiler
self.instrs = []
def load_context(self , context_dir , binary , libc):
self.context_dir = context_dir
self.elf = ELF(binary)
self.libc = ELF(libc)
self.got = self.elf.got
self.get_arch_info()
Uc.__init__(self, self.uc_arch, self.uc_mode + self.uc_endian)
def rand_seed(self , seed_len):
sa = []
for i in range(seed_len):
sa.append( chr(random.randint(0,255)))
return ''.join(sa)
def _load_context(self):
"""
        Actually load the context and the binary into the emulator.
"""
self.get_arch_info()
context_json = os.path.join( self.context_dir, CONTEXT_JSON)
if not os.path.isfile(context_json):
raise Exception("Contex json not found")
context_json_file = open(context_json , "r")
context = json.load(context_json_file) # load _index.json
context_json_file.close()
regs_map = self.get_regs_by_arch(self.arch)
regs = context['regs']
self.init_class()
self.get_common_regs()
# endian to uc_endian
if self.endian == "big":
self.uc_endian = UC_MODE_BIG_ENDIAN
else:
self.uc_endian = UC_MODE_LITTLE_ENDIAN
# init uc object
Uc.__init__(self, self.uc_arch, self.uc_mode + self.uc_endian)
# setup registers
if not self.set_reg(regs , regs_map):
raise Exception("Error in setup registers")
# setup segment
segments_list = context['segments'] #
if not self.set_memory(segments_list):
raise Exception("Error in setup memory")
# init got
self.init_got()
self.rebased_got()
def get_arch_info(self):
"""
get uc_arch , uc_mode , endian
"""
context_json = os.path.join( self.context_dir, CONTEXT_JSON)
if not os.path.isfile(context_json):
raise Exception("Contex json not found")
# load context from json
context_json_file = open(context_json , "r")
context = json.load(context_json_file) # load _index.json
context_json_file.close()
self.arch = context['arch']['arch']
self.endian = context['arch']['endian']
# arch to uc_arch
if self.arch == "x64":
self.uc_arch = UC_ARCH_X86
self.uc_mode = UC_MODE_64
elif self.arch == "x86":
self.uc_arch = UC_ARCH_X86
self.uc_mode = UC_MODE_32
elif self.arch == "mips":
self.uc_arch = UC_ARCH_MIPS
self.uc_mode = UC_MODE_32
elif self.arch == "arm":
self.uc_arch = UC_ARCH_ARM
self.uc_mode = UC_MODE_32
else:
raise Exception("Error arch")
# endian to uc_endian
if self.endian == "big":
self.uc_endian = UC_MODE_BIG_ENDIAN
else:
self.uc_endian = UC_MODE_LITTLE_ENDIAN
def load_library(self , libc):
self.libc = ELF(libc)
def init_got(self , enable_debug = True):
"""
read GOT table entries in memory
"""
print "=====================Init GOT Table Start========================"
print self.got.items()
self.mem_got = dict()
for name , addr in self.got.items():
_addr = str(self.mem_read(addr , self.size)).encode("hex")
if self.endian == "little":
_addr = LITTLE2BIG(_addr)
else:
_addr = int(_addr , 16)
self.mem_got.update({ _addr : name})
print "Name : {:<40} Addr : {:<10} Value: {:<10}".format( name, hex(addr) , hex(_addr))
print "======================Init GOT Table End========================="
def rebased_got(self):
"""
reload GOT table entries
"""
self.rebase_got = dict()
print "====================Rebase GOT Table Start======================="
for addr , name in self.mem_got.items():
if int(addr) & 0xff000000 != 0:
dl_resolve_addr = addr
dl_resolve_name = name
break
print self.libc
libc_base = dl_resolve_addr - self.libc.symbols[dl_resolve_name]
print "libc_base : {}".format(hex(libc_base))
for addr , name in self.mem_got.items():
if self.libc.symbols.has_key(name):
self.rebase_got.update( { name : libc_base + self.libc.symbols[name] })
print "Name : {:<40} Rebase addr : {}".format(name , hex(libc_base + self.libc.symbols[name]) )
#raw_input()
print "=====================Rebase GOT Table End========================"
    def dbg_hook_code(self, mu, address, size, user_data):
print('>>> Tracing instruction at 0x%x, instruction size = 0x%x' %(address, size))
def debug_moudle(self , start_addr , end_addr):
"""
debug
"""
dbg_mu = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN)
dbg_mu.hook_add(UC_HOOK_CODE, self.dbg_hook_code)
        dbg_mu.emu_start(start_addr, end_addr)
def set_reg(self , regs, regs_map , debug_func = True ):
self.enable_debug = debug_func
# setup register
for register , value in regs.iteritems():
if self.enable_debug and value is not None:
print "Reg {0} start_address = {1}".format(register, hex(value))
pass
if not regs_map.has_key(register.lower()):
if self.enable_debug:
print "Skip Reg:{0}".format(register)
else:
# ====copy from unicorn loader.py=====
#
reg_write_retry = True
try:
self.reg_write(regs_map[register.lower()], value)
reg_write_retry = False
except Exception as e:
if self.enable_debug:
print "ERROR writing register: {}, value: {} -- {}".format(register, value, repr(e))
if reg_write_retry:
if self.enable_debug:
print "Trying to parse value ({}) as hex string".format(value)
try:
self.reg_write(regs_map[register.lower()], int(value, 16))
except Exception as e:
if self.enable_debug:
print "ERROR writing hex string register: {}, value: {} -- {}".format(register, value, repr(e))
return True
def set_memory(self , segments_list , debug_func = True ):
"""
        Setting up memory needs two steps:
        1. mu.mem_map
        2. mu.mem_write
        Before mem_map, we must check the region is not already mapped
        (adapted from __map_segments).
"""
self.enable_debug = debug_func
for segment in segments_list:
seg_name = segment['name']
seg_start = segment['start']
seg_end = segment['end']
perms = \
(UC_PROT_READ if segment['permissions']['r'] == True else 0) | \
(UC_PROT_WRITE if segment['permissions']['w'] == True else 0) | \
(UC_PROT_EXEC if segment['permissions']['x'] == True else 0)
if self.enable_debug:
print "Handling segment {}".format(seg_name)
"""
before map memory , do some check
there are 3 cases:
======= 1 =======
+----------------------------+ <-----+ mem_start
| |
| +----------------------+<----+ seg_start
| | | |
| | | |
| +----------------------+<----+ seg_end
| |
+----------------------------+ <-----+ mem_end
for this case, shoud't map memory
======= 2 =======
+-----------------------------+<-----+ mem_start
| |
| |
+------------------------------<----+ seg_start
| |
| |
| |
+------------------------------<-----+ mem_end=tmp
|-----------------------------|
|--------------------------------------------->map area
|-----------------------------|
+------------------------------<----+ seg_end
======= 3 =======
+------------------------------<----+ seg_start
|-----------------------------|
|--------------------------------------------->map area
|-----------------------------|
+------------------------------<-----+ mem_start=tmp
| |
| |
| |
+------------------------------<----+ seg_end
| |
| |
| |
+-----------------------------+<-----+ mem_end
"""
found = False
overlap_start = False
overlap_end = False
tmp = 0
for (mem_start, mem_end, mem_perm) in self.mem_regions():
mem_end = mem_end + 1
if seg_start >= mem_start and seg_end < mem_end:
found = True
break
if seg_start >= mem_start and seg_start < mem_end:
overlap_start = True
tmp = mem_end
break
if seg_end >= mem_start and seg_end < mem_end:
overlap_end = True
tmp = mem_start
break
# Map memory into the address space if it is of an acceptable size.
if (seg_end - seg_start) > MAX_ALLOWABLE_SEG_SIZE:
if self.enable_debug:
print "Skipping segment (LARGER THAN {0}) from {1:016x} - {2:016x} with perm={3}: {4}".format(MAX_ALLOWABLE_SEG_SIZE, seg_start, seg_end, perms, name)
continue
elif not found: # Make sure it's not already mapped
if overlap_start: # Partial overlap (start) case 3
self.map_segment(seg_name, tmp, seg_end - tmp, perms)
elif overlap_end: # Patrial overlap (end) case 2
self.map_segment(seg_name, seg_start, tmp - seg_start, perms)
else: # Not found
self.map_segment(seg_name, seg_start, seg_end - seg_start, perms)
else:
if self.enable_debug:
print "Segment {} already mapped. Moving on.".format(seg_name)
# Load the content (*.bin)
# directly copy from unicorn_loader.py
if 'content_file' in segment and len(segment['content_file']) > 0:
content_file_path = os.path.join(self.context_dir, segment['content_file'])
if not os.path.isfile(content_file_path):
raise Exception("Unable to find segment content file. Expected it to be at {}".format(content_file_path))
if self.enable_debug:
print "Loading content for segment {} from {}".format(seg_name, segment['content_file'])
content_file = open(content_file_path, 'rb')
compressed_content = content_file.read()
content_file.close()
self.mem_write(seg_start, zlib.decompress(compressed_content))
else:
if self.enable_debug:
print("No content found for segment {0} @ {1:016x}".format(seg_name, seg_start))
self.mem_write(seg_start, '\x00' * (seg_end - seg_start))
return True
def map_segment(self , name, address, size, perms , debug_func = True ):
self.enable_debug = debug_func
map_start = address
map_end = address + size
# page alingn
map_start_align = ALIGN_PAGE_DOWN(map_start)
map_end_align = ALIGN_PAGE_UP(map_end)
if self.enable_debug:
print " segment name: {}".format(name)
print " segment start: {0:016x} -> {1:016x}".format(map_start, map_start_align)
print " segment end: {0:016x} -> {1:016x}".format(map_end, map_end_align)
if map_start_align < map_end_align:
self.mem_map(map_start_align , map_end_align - map_start_align , perms) # map memory
# pass
def func_skip(self , skip_list = None):
self.skip_func_list = skip_list
def set_trace(self , trace_start_addr , trace_end_addr , debug_func=True):
self.trace_start_addr = trace_start_addr
self.trace_end_addr = trace_end_addr
def _set_trace(self , uc , address , size , user_data):
if address >= self.trace_start_addr and address <=self.trace_end_addr:
# print('>>> Tracing instruction at 0x%x, instruction size = 0x%x' %(address, size))
print "{} ".format(hex(address)) ,
instr = self.mem_read(address, size)
# context.arch = 'i386'
context.endian = str(self.endian)
# context.os = 'linux'
# context.word_size = 32
# print ("0x%x %s" % (address - BASE , disasm(instr)) )
if self.arch == "x86":
print "{}".format( disasm(instr , arch="{}".format("i386")))
elif self.arch == "x64":
print "{}".format( disasm(instr , arch="{}".format("amd64")))
elif self.arch == "mips":
print "{}".format( disasm(instr , arch="{}".format("mips")))
elif self.arch == "arm":
print "{}".format( disasm(instr , arch="{}".format("arm")))
else:
raise Exception("arch not found")
def show_debug_info(self , dbg_addr_list):
self.dbg_addr_list = dbg_addr_list
def _show_debug_info(self, uc , address , size , user_data ):
"""
show registers and memory info when debug
"""
if address in self.dbg_addr_list:
self.show_reg_value()
self.show_memory_layout()
def show_reg_value(self):
context_json = os.path.join( self.context_dir, CONTEXT_JSON)
if not os.path.isfile(context_json):
raise Exception("Contex json not found")
# load context from json
context_json_file = open(context_json , "r")
context = json.load(context_json_file) # load _index.json
context_json_file.close()
regs_map = self.get_regs_by_arch(self.arch)
regs = context['regs']
# show registers value
print("=========================Registers Value=========================")
for register , value in regs.iteritems():
try:
print("Reg {} --> {:<51} {}".format(register.lower() ,hex(self.reg_read(regs_map[register.lower()])), "||"))
except Exception as e:
# print "ERROR writing register: {}, value: {} -- {}".format(register, value, repr(e))
pass
print("=================================================================")
def show_memory_layout(self):
print("=========================Memory Layout===========================")
# show stack memory
for i in range(6):
# reg_sp = self.reg_read(self.REG_SP , size)
#stack_addr = reg_sp + 0x14 + 4*i
# print self.size
stack_addr = 0x7fffffffd870 - 8*i
mem_cont = self.mem_read(stack_addr, self.size)
print("{} --> {:<41} {}".format( hex(stack_addr) ,str(mem_cont).encode("hex") , "||"))
print("=================================================================")
def show_instrs(self):
"""
print crash location instruction
"""
print "=========================Instructions=========================="
for instr in self.instrs[:-50:-1]:
print('>>> Tracing instruction at 0x%x, instruction size = 0x%x' %(instr, self.size))
print "==============================================================="
def log_instrs(self , uc , address , size , user_data):
self.instrs.append(address)
def add_func(self , func_list = None):
self.unresolved_funcs = func_list
def add_fuzz(self, fuzzTarget):
"""
        add a fuzz target object
"""
self.fuzztarget = fuzzTarget
def start_find(self , start_address , end_address):
print " ______ _____ _____ __ __ _____ ____ _____ _ _ "
print " | ____|_ _| __ \| \/ |/ ____/ __ \| __ \| \ | | "
print " | |__ | | | |__) | \ / | | | | | | |__) | \| | "
print " | __| | | | _ /| |\/| | | | | | | _ /| . ` | "
print " | | _| |_| | \ \| | | | |___| |__| | | \ \| |\ | "
print " |_| |_____|_| \_\_| |_|\_____\____/|_| \_\_| \_| "
print " "
# uc_result = self.emu_start(start_address , end_address)
self.unresolved_funcs = []
rounds = 0
while True:
self._load_context()
#raw_input()
"""
some hook function
"""
#raw_input()
last_round_list_len = len(self.unresolved_funcs)
if self.skip_func_list is not None:
self.hook_add(UC_HOOK_CODE , self.hookcode._func_skip)
if self.dbg_addr_list is not None:
self.hook_add(UC_HOOK_CODE, self._show_debug_info)
if self.trace_start_addr!=0 and self.trace_end_addr!=0:
self.hook_add(UC_HOOK_CODE , self._set_trace)
if self.unresolved_funcs is not None:
self.hook_add(UC_HOOK_CODE , self.hookcode.hook_unresolved_func)
self.hook_add(UC_HOOK_CODE , self.log_instrs)
self.hook_add(UC_HOOK_CODE , self.hookcode.hookauto.record_last_func)
self.hook_add( UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED , self.hookcode.hookauto.find_unresolved_func)
try:
uc_result = self.emu_start(start_address , end_address)
except UcError as e:
print "next round"
print "Round : {}".format(rounds)
rounds += 1
print "find all unresolved funcs : {}".format(self.unresolved_funcs)
# raw_input()
# raw_input()
if len(self.unresolved_funcs) == last_round_list_len:
print self.unresolved_funcs
print "End Find!"
break
def start_run(self , start_address , end_address):
self.start_find(start_address , end_address)
print "=================End Find================="
print "start run!"
# raw_input()
rounds = 0
while True:
self._load_context()
if self.fuzztarget is not None:
self.fuzztarget.init(self)
self.hook_add(UC_HOOK_CODE , self.fuzztarget.find_magic_num)
if self.skip_func_list is not None:
self.hook_add(UC_HOOK_CODE , self.hookcode._func_skip)
if self.dbg_addr_list is not None:
self.hook_add(UC_HOOK_CODE, self._show_debug_info)
if self.trace_start_addr!=0 and self.trace_end_addr!=0:
self.hook_add(UC_HOOK_CODE , self._set_trace)
if self.got is not None:
# self.hook_add(UC_HOOK_CODE , self.hookcode.func_alt_auto_libc)
pass
if self.unresolved_funcs is not None:
self.hook_add(UC_HOOK_CODE , self.hookcode.hook_unresolved_func)
# self.hook_add( UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED , self.crash.mem_crash_check)
# self.hook_add(UC_ERR_FETCH_UNMAPPED , self.crash.crash_check_dbg)
self.hook_add(UC_HOOK_CODE , self.log_instrs)
# try:
# uc_result = self.emu_start(start_address , end_address)
# except UcError as e:
# # if e.errno == UC_ERR_READ_UNMAPPED:
# print(" \033[1;31;40m !!! about to bail due to bad fetch... here's the data at PC: {} \033[0m ".format( binascii.hexlify(self.mem_read(self.reg_read(self.REG_PC), self.size))) )
# # print(binascii.hexlify(self.mem_read(self.reg_read(self.REG_PC), self.size)))
# self.show_instrs()
import datetime
oldtime=datetime.datetime.now()
try:
uc_result = self.emu_start(start_address , end_address)
except UcError as e:
print e.errno
if e.errno == UC_ERR_FETCH_UNMAPPED:
print " \033[1;31;40m !!! Find Crash !!! \033[0m "
self.crash.crash_log()
break
newtime=datetime.datetime.now()
print "time : {}".format( (newtime-oldtime).microseconds )
print "Round : {}".format(rounds)
rounds += 1
# raw_input()
def init_class(self):
"""
import other classes
"""
self.hookcode = HookLoader(self)
self.crash = CrashLoader(self)
def get_regs_by_arch(self , arch):
if arch == "arm64le" or arch == "arm64be":
arch = "arm64"
elif arch == "armle" or arch == "armbe" or "thumb" in arch:
arch = "arm"
elif arch == "mipsel":
arch = "mips"
registers = {
"x64" : {
"rax": UC_X86_REG_RAX,
"rbx": UC_X86_REG_RBX,
"rcx": UC_X86_REG_RCX,
"rdx": UC_X86_REG_RDX,
"rsi": UC_X86_REG_RSI,
"rdi": UC_X86_REG_RDI,
"rbp": UC_X86_REG_RBP,
"rsp": UC_X86_REG_RSP,
"r8": UC_X86_REG_R8,
"r9": UC_X86_REG_R9,
"r10": UC_X86_REG_R10,
"r11": UC_X86_REG_R11,
"r12": UC_X86_REG_R12,
"r13": UC_X86_REG_R13,
"r14": UC_X86_REG_R14,
"r15": UC_X86_REG_R15,
"rip": UC_X86_REG_RIP,
"rsp": UC_X86_REG_RSP,
"efl": UC_X86_REG_EFLAGS,
"cs": UC_X86_REG_CS,
"ds": UC_X86_REG_DS,
"es": UC_X86_REG_ES,
"fs": UC_X86_REG_FS,
"gs": UC_X86_REG_GS,
"ss": UC_X86_REG_SS,
},
"x86" : {
"dil": UC_X86_REG_DIL,
"ip": UC_X86_REG_IP ,
"fs": UC_X86_REG_FS ,
"eip": UC_X86_REG_EIP,
"bh": UC_X86_REG_BH ,
"edi": UC_X86_REG_EDI,
"ah": UC_X86_REG_AH ,
"al": UC_X86_REG_AL ,
"cs": UC_X86_REG_CS ,
"cx": UC_X86_REG_CX ,
"eax": UC_X86_REG_EAX,
"di": UC_X86_REG_DI ,
"ebp": UC_X86_REG_EBP,
"edx": UC_X86_REG_EDX,
"ebx": UC_X86_REG_EBX,
"cl": UC_X86_REG_CL ,
"ecx": UC_X86_REG_ECX,
"ch": UC_X86_REG_CH ,
"bp": UC_X86_REG_BP ,
"dl": UC_X86_REG_DL ,
"esp": UC_X86_REG_ESP,
"eiz": UC_X86_REG_EIZ,
"fpsw": UC_X86_REG_FPSW,
"bpl": UC_X86_REG_BPL,
"dh": UC_X86_REG_DH ,
"gs": UC_X86_REG_GS ,
"ax": UC_X86_REG_AX ,
"eflags": UC_X86_REG_EFLAGS,
"ds": UC_X86_REG_DS ,
"es": UC_X86_REG_ES ,
"bx": UC_X86_REG_BX ,
"dx": UC_X86_REG_DX ,
"bl": UC_X86_REG_BL ,
"esi": UC_X86_REG_ESI
},
"arm" : {
"r0": UC_ARM_REG_R0,
"r1": UC_ARM_REG_R1,
"r2": UC_ARM_REG_R2,
"r3": UC_ARM_REG_R3,
"r4": UC_ARM_REG_R4,
"r5": UC_ARM_REG_R5,
"r6": UC_ARM_REG_R6,
"r7": UC_ARM_REG_R7,
"r8": UC_ARM_REG_R8,
"r9": UC_ARM_REG_R9,
"r10": UC_ARM_REG_R10,
"r11": UC_ARM_REG_R11,
"r12": UC_ARM_REG_R12,
"pc": UC_ARM_REG_PC,
"sp": UC_ARM_REG_SP,
"lr": UC_ARM_REG_LR,
"cpsr": UC_ARM_REG_CPSR
},
"arm64" : {
"x0": UC_ARM64_REG_X0,
"x1": UC_ARM64_REG_X1,
"x2": UC_ARM64_REG_X2,
"x3": UC_ARM64_REG_X3,
"x4": UC_ARM64_REG_X4,
"x5": UC_ARM64_REG_X5,
"x6": UC_ARM64_REG_X6,
"x7": UC_ARM64_REG_X7,
"x8": UC_ARM64_REG_X8,
"x9": UC_ARM64_REG_X9,
"x10": UC_ARM64_REG_X10,
"x11": UC_ARM64_REG_X11,
"x12": UC_ARM64_REG_X12,
"x13": UC_ARM64_REG_X13,
"x14": UC_ARM64_REG_X14,
"x15": UC_ARM64_REG_X15,
"x16": UC_ARM64_REG_X16,
"x17": UC_ARM64_REG_X17,
"x18": UC_ARM64_REG_X18,
"x19": UC_ARM64_REG_X19,
"x20": UC_ARM64_REG_X20,
"x21": UC_ARM64_REG_X21,
"x22": UC_ARM64_REG_X22,
"x23": UC_ARM64_REG_X23,
"x24": UC_ARM64_REG_X24,
"x25": UC_ARM64_REG_X25,
"x26": UC_ARM64_REG_X26,
"x27": UC_ARM64_REG_X27,
"x28": UC_ARM64_REG_X28,
"pc": UC_ARM64_REG_PC,
"sp": UC_ARM64_REG_SP,
"fp": UC_ARM64_REG_FP,
"lr": UC_ARM64_REG_LR,
"nzcv": UC_ARM64_REG_NZCV,
"cpsr": UC_ARM_REG_CPSR,
},
"mips" : {
"0" : UC_MIPS_REG_ZERO,
"at": UC_MIPS_REG_AT,
"v0": UC_MIPS_REG_V0,
"v1": UC_MIPS_REG_V1,
"a0": UC_MIPS_REG_A0,
"a1": UC_MIPS_REG_A1,
"a2": UC_MIPS_REG_A2,
"a3": UC_MIPS_REG_A3,
"t0": UC_MIPS_REG_T0,
"t1": UC_MIPS_REG_T1,
"t2": UC_MIPS_REG_T2,
"t3": UC_MIPS_REG_T3,
"t4": UC_MIPS_REG_T4,
"t5": UC_MIPS_REG_T5,
"t6": UC_MIPS_REG_T6,
"t7": UC_MIPS_REG_T7,
"t8": UC_MIPS_REG_T8,
"t9": UC_MIPS_REG_T9,
"s0": UC_MIPS_REG_S0,
"s1": UC_MIPS_REG_S1,
"s2": UC_MIPS_REG_S2,
"s3": UC_MIPS_REG_S3,
"s4": UC_MIPS_REG_S4,
"s5": UC_MIPS_REG_S5,
"s6": UC_MIPS_REG_S6,
"s7": UC_MIPS_REG_S7,
"s8": UC_MIPS_REG_S8,
"k0": UC_MIPS_REG_K0,
"k1": UC_MIPS_REG_K1,
"gp": UC_MIPS_REG_GP,
"pc": UC_MIPS_REG_PC,
"sp": UC_MIPS_REG_SP,
"fp": UC_MIPS_REG_FP,
"ra": UC_MIPS_REG_RA,
"hi": UC_MIPS_REG_HI,
"lo": UC_MIPS_REG_LO
}
}
return registers[arch]
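    # For example (based on the tables above): get_regs_by_arch("mips")["pc"] maps to
    # UC_MIPS_REG_PC. Arch aliases are folded before lookup: "arm64le"/"arm64be" ->
    # "arm64", "armle"/"armbe"/"thumb..." -> "arm", and "mipsel" -> "mips".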
def get_common_regs(self):
"""
get some common register
REG_PC: IP
REG_SP: stack pointer
REG_RA: return address (just like arm $lr and mips $ra)
REG_ARGS: args
REG_RES: return value
arch to uc_arch
"""
if self.uc_arch == UC_ARCH_X86:
if self.uc_mode == UC_MODE_16:
self.size = 2
self.pack_fmt = '<H'
self.REG_PC = UC_X86_REG_IP
self.REG_SP = UC_X86_REG_SP
self.REG_RA = 0
self.REG_RES = UC_X86_REG_AX
self.REG_ARGS = []
elif self.uc_mode == UC_MODE_32:
self.size = 4
self.pack_fmt = '<I'
self.REG_PC = UC_X86_REG_EIP
self.REG_SP = UC_X86_REG_ESP
self.REG_RA = 0
self.REG_RES = UC_X86_REG_EAX
self.REG_ARGS = []
elif self.uc_mode == UC_MODE_64:
self.size = 8
self.pack_fmt = '<Q'
self.REG_PC = UC_X86_REG_RIP
self.REG_SP = UC_X86_REG_RSP
self.REG_RA = 0
self.REG_RES = UC_X86_REG_RAX
if self.compiler == COMPILE_GCC:
self.REG_ARGS = [UC_X86_REG_RDI, UC_X86_REG_RSI, UC_X86_REG_RDX, UC_X86_REG_RCX,
UC_X86_REG_R8, UC_X86_REG_R9]
# print "test"
elif self.compiler == COMPILE_MSVC:
self.REG_ARGS = [UC_X86_REG_RCX, UC_X86_REG_RDX, UC_X86_REG_R8, UC_X86_REG_R9]
elif self.uc_arch == UC_ARCH_ARM:
if self.uc_mode == UC_MODE_ARM:
self.size = 4
self.pack_fmt = '<I'
elif self.uc_mode == UC_MODE_THUMB:
self.size = 2
self.pack_fmt = '<H'
self.REG_PC = UC_ARM_REG_PC
self.REG_SP = UC_ARM_REG_SP
self.REG_RA = UC_ARM_REG_LR
self.REG_RES = UC_ARM_REG_R0
self.REG_ARGS = [UC_ARM_REG_R0, UC_ARM_REG_R1, UC_ARM_REG_R2, UC_ARM_REG_R3]
elif self.uc_arch == UC_ARCH_ARM64:
self.size = 8
self.pack_fmt = '<Q'
self.REG_PC = UC_ARM64_REG_PC
self.REG_SP = UC_ARM64_REG_SP
self.REG_RA = UC_ARM64_REG_LR
self.REG_RES = UC_ARM64_REG_X0
self.REG_ARGS = [UC_ARM64_REG_X0, UC_ARM64_REG_X1, UC_ARM64_REG_X2, UC_ARM64_REG_X3,
UC_ARM64_REG_X4, UC_ARM64_REG_X5, UC_ARM64_REG_X6, UC_ARM64_REG_X7]
elif self.uc_arch == UC_ARCH_MIPS:
self.size = 4
self.pack_fmt = "<I"
self.REG_PC = UC_MIPS_REG_PC
self.REG_SP = UC_MIPS_REG_SP
self.REG_RA = UC_MIPS_REG_RA
self.REG_RES = [UC_MIPS_REG_V0, UC_MIPS_REG_V1,UC_MIPS_REG_V1]
self.REG_ARGS = [UC_MIPS_REG_A0, UC_MIPS_REG_A1, UC_MIPS_REG_A2, UC_MIPS_REG_A3] | StarcoderdataPython |
3242012 | from django import forms
class PlaceholderForm(forms.Form):
"""
A base form for automatically adding placeholder text.
Forms that extend this form will by default have all text, password, and
date input widgets display placeholder text equal to their label. This can
be overridden per form field by simply specifying a different placeholder
attribute.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name in self.fields:
field = self.fields.get(field_name)
if field:
if type(field.widget) in (forms.TextInput, forms.PasswordInput, forms.EmailInput, forms.DateInput):
field.widget.attrs.update(
{ 'placeholder': field.widget.attrs.get('placeholder', field.label or field_name) }
)
class PlaceholderFormMixin(object):
"""
A form mixin for automatically adding placeholder text.
Forms that use this mixin will by default have all text, password, and
date input widgets display placeholder text equal to their label. This can
be overridden per form field by simply specifying a different placeholder
attribute.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name in self.fields:
field = self.fields.get(field_name)
if field:
if type(field.widget) in (forms.TextInput, forms.PasswordInput, forms.EmailInput, forms.DateInput):
field.widget.attrs.update(
{ 'placeholder': field.widget.attrs.get('placeholder', field.label or field_name) }
)
_bootstrap_formgroup = """
<div class="form-group %(css_classes)s">
<div class="input-group">
%(label)s
%(field)s%(help_text)s
</div>
</div>
"""
class BootstrapFormMixin(object):
"""
A form mixin to generate Bootstrap input-groups
"""
def as_inputgroup(self):
"Returns this form rendered as Bootstrap input-groups."
for field_name in self.fields:
field = self.fields.get(field_name)
if field:
field.widget.attrs.update(
{ 'class': field.widget.attrs.get('class', '') + ' form-control' }
)
bf = self[field_name]
bf.label_tag = self.style_label_tag(bf.label_tag)
return self._html_output(
normal_row=_bootstrap_formgroup,
error_row='%s',
row_ender='</div>',
help_text_html=' <span class="helptext">%s</span>',
errors_on_separate_row=True)
def style_label_tag(self, label_tag):
def inner(contents=None, attrs=None, label_suffix=None):
attrs = attrs or {}
if 'class' in attrs:
attrs['class'] += ' input-group-addon'
else:
attrs['class'] = 'input-group-addon'
return label_tag(contents, attrs, label_suffix)
return inner
| StarcoderdataPython |
119933 |
## bisenetv2
cfg = dict(
model_type='bisenetv2',
num_aux_heads=4,
lr_start = 5e-2,
weight_decay=5e-4,
warmup_iters = 1000,
max_iter = 150000,
im_root='./datasets/coco',
train_im_anns='./datasets/coco/train.txt',
val_im_anns='./datasets/coco/val.txt',
scales=[0.5, 1.5],
cropsize=[512, 512],
ims_per_gpu=8,
use_fp16=True,
use_sync_bn=False,
respth='./res',
)
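# The training entry point (not shown here) is expected to read hyper-parameters
# from this dict, e.g. cfg['model_type'] -> 'bisenetv2', cfg['max_iter'] -> 150000.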
| StarcoderdataPython |
1661238 | from django.conf.urls import url
from project import views
urlpatterns = [
url(r'^urlinfo/', views.urlinfo),
url(r'^urlinfoselect/', views.urlinfoselect),
url(r'^baseinfo/', views.baseinfo),
url(r'^getproname/', views.getproname),
url(r'^getmodelname/', views.getmodelname),
]
| StarcoderdataPython |
106245 | import argparse
import asyncio
import logging
from pathlib import Path
from dvdp.ha_433 import HA433Light
from dvdp.recorder_433 import RECORDINGS_DIR, get_recordings
from dvdp.ha_mqtt.client import MQTTClient
def main():
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG,
)
parser = argparse.ArgumentParser(
'Allow Home assistant to control 433 devices over MQTT.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
'brokerip',
help=f'IP of broker.',
type=str
)
parser.add_argument(
'--name',
'-n',
help=f'Name of client.',
default='devices_433',
type=str
)
parser.add_argument(
'--username',
'-u',
help='Username to use when connecting to MQTT broker.',
type=str,
)
parser.add_argument(
'--password',
'-p',
help='Password to use when connecting to MQTT broker.',
type=str,
)
parser.add_argument(
'--pin',
default=14,
help='BCM pin on rasberry pi used for transmission',
type=int,
)
parser.add_argument(
'--recordings',
'-r',
help='Directory containing recordings.',
type=Path,
default=RECORDINGS_DIR,
)
args = parser.parse_args()
broker_ip = args.brokerip
client_name = args.name
pin = args.pin
source_dir = args.recordings
recordings = get_recordings(source_dir)
username = args.username
password = args.password
mqtt_client = MQTTClient(broker_ip, client_name, username, password)
devices = [
HA433Light(name, mqtt_client, pin, source_dir)
for name in recordings.keys()
]
loop = asyncio.get_event_loop()
tasks = [
loop.create_task(device.start())
for device in devices
]
loop.run_until_complete(
asyncio.gather(
*tasks,
)
)
if __name__ == '__main__':
main()
| StarcoderdataPython |
75043 | <filename>deprecated/db_functions.py
# This script stores common database operations
import sys
import MySQLdb
global DEBUG
DEBUG = 1
global c, conn
conn = MySQLdb.connect(host="localhost", \
user="root", \
passwd="<PASSWORD>", \
db='twins') # use correct db
c = conn.cursor()
conn.autocommit(True)
# get all package name
# global packages
def get_packages():
sql = "SELECT package FROM apps ORDER BY package"
try:
c.execute(sql)
if DEBUG == 1: print "SUCCESS !!! " + sql
except:
print "Something wrong" + sql
sys.exit(1)
packages = []
for p in c.fetchall():
packages.append(p[0])
if DEBUG == 1: print "Found " + str(len(packages)) + " packages."
return packages
def get_filenames():
sql = "SELECT filename FROM apps ORDER BY package"
try:
c.execute(sql)
if DEBUG == 1: print "SUCCESS !!! " + sql
except:
print "Something wrong" + sql
sys.exit(1)
filenames = []
for p in c.fetchall():
filenames.append(p[0])
if DEBUG == 1: print "Found " + str(len(filenames)) + " filenames."
return filenames
def package2filename():
sql = "SELECT package, filename FROM apps ORDER BY package"
try:
c.execute(sql)
if DEBUG == 1: print "SUCCESS !!! " + sql
except:
print "Something wrong" + sql
sys.exit(1)
p2f = {}
for p in c.fetchall():
p2f[p[0]]=p[1]
return p2f
def filename2package():
sql = "SELECT package, filename FROM apps ORDER BY package"
try:
c.execute(sql)
if DEBUG == 1: print "SUCCESS !!! " + sql
except:
print "Something wrong" + sql
sys.exit(1)
f2p = {}
for f in c.fetchall():
f2p[f[1]]=f[0]
return f2p
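# Illustrative usage sketch (values are hypothetical; requires the 'twins' MySQL
# database connected above):
#   packages = get_packages()        # e.g. ['com.example.app', ...]
#   p2f = package2filename()
#   p2f.get('com.example.app')       # -> the APK filename stored for that package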
| StarcoderdataPython |
115856 | import gym
import tensorflow as tf
from rl.agent import util
class A3CModel(tf.keras.Model):
def __init__(self, env):
super().__init__()
self.action_is_continuous, self.action_size, self.action_low, self.action_high = util.parse_env(
env
)
# Policy (actor) layer
self.policy_dense1 = tf.keras.layers.Dense(32, activation="relu")
if self.action_is_continuous:
# According to A3C paper, they use a linear and softplus layer for
# continuous output to emulate mu and sigma^2 (mean and variance)
# of a normal distribution. To make things simple, I'll emulate
# standard deviation sigma instead of sigma^2. This should be
# interesting...
self.policy_output1 = tf.keras.layers.Dense(
self.action_size, activation="tanh"
)
self.policy_output2 = tf.keras.layers.Dense(
self.action_size, activation="softplus"
)
else:
self.policy_output1 = tf.keras.layers.Dense(self.action_size)
# Value layers
self.value_dense1 = tf.keras.layers.Dense(32, activation="relu")
self.value_output = tf.keras.layers.Dense(1)
def call(self, inputs):
# Forward pass on the two paths on the network
with tf.variable_scope("value_scope"):
v1 = self.value_dense1(inputs)
values = self.value_output(v1)
with tf.variable_scope("actor_scope"):
x = self.policy_dense1(inputs)
logits1 = self.policy_output1(x)
if self.action_is_continuous:
logits2 = self.policy_output2(x)
return (logits1, logits2), values
return logits1, values
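# Minimal usage sketch (assumptions: eager TensorFlow and a Gym env with a discrete
# action space; the env name and dummy observation below are illustrative only).
#   env = gym.make("CartPole-v1")
#   model = A3CModel(env)
#   obs = tf.convert_to_tensor([[0.0, 0.0, 0.0, 0.0]], dtype=tf.float32)
#   logits, values = model(obs)   # discrete case: logits has shape (1, action_size)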
| StarcoderdataPython |
1629935 | <filename>src/umassstembot/coronavirus.py
from bs4 import BeautifulSoup
import overlay as over
import dateutil.parser
import dateutil.utils
import discord
import requests
import os
import tempfile
FINNHUB_CORONA_TOKEN = os.environ.get('FINNHUB_API_TOKEN_5')
us_areas = {'AL': ['Alabama', '4903185'],
'AK': ['Alaska', '731545'],
'AZ': ['Arizona', '7278717'],
'AR': ['Arkansas', '3017804'],
'CA': ['California', '39512223'],
'CO': ['Colorado', '5758736'],
'CT': ['Connecticut', '3565278'],
'DE': ['Delaware', '973764'],
'FL': ['Florida', '21477737'],
'GA': ['Georgia', '10617423'],
'HI': ['Hawaii', '1415872'],
'ID': ['Idaho', '1787065'],
'IL': ['Illinois', '12671821'],
'IN': ['Indiana', '6732219'],
'IA': ['Iowa', '3155070'],
'KS': ['Kansas', '2913314'],
'KY': ['Kentucky', '4467673'],
'LA': ['Louisiana', '4648794'],
'ME': ['Maine', '1344212'],
'MD': ['Maryland', '6045680'],
'MA': ['Massachusetts', '6892503'],
'MI': ['Michigan', '9986857'],
'MN': ['Minnesota', '5639632'],
'MS': ['Mississippi', '2976149'],
'MO': ['Missouri', '6137428'],
'MT': ['Montana', '1068778'],
'NE': ['Nebraska', '1934408'],
'NV': ['Nevada', '3080156'],
'NH': ['New Hampshire', '1359711'],
'NJ': ['New Jersey', '8882190'],
'NM': ['New Mexico', '2096829'],
'NY': ['New York', '19453561'],
'NC': ['North Carolina', '10488084'],
'ND': ['North Dakota', '762062'],
'OH': ['Ohio', '11689100'],
'OK': ['Oklahoma', '3956971'],
'OR': ['Oregon', '4217737'],
'PA': ['Pennsylvania', '12801989'],
'RI': ['Rhode Island', '1059361'],
'SC': ['South Carolina', '5148714'],
'SD': ['South Dakota', '884659'],
'TN': ['Tennessee', '6829174'],
'TX': ['Texas', '28995881'],
'UT': ['Utah', '3205958'],
'VT': ['Vermont', '623989'],
'VA': ['Virginia', '8535519'],
'WA': ['Washington', '7614893'],
'WV': ['West Virginia', '1792147'],
'WI': ['Wisconsin', '5822434'],
'WY': ['Wyoming', '578759'],
'DC': ['District of Columbia', '705749']
}
UMASS_CASE_EPOCH = dateutil.parser.parse("2020-08-14 00:00:00")
async def coronavirus(ctx, sort_by_percentage):
"""Generate coronavirus statistics
Args:
- ctx: context that the command occured use this to access the message and other attributes
- sort_by_percentage: true when we want to sort by percentage infected, false when we want to sort by # of cases
- args: optional, if state is passed in return the states cases and deaths, if nothing then return the top 15
"""
if sort_by_percentage:
argument = ctx.message.content[7:].strip().strip('\"') # after '$covidp' remove spaces
else:
argument = ctx.message.content[6:].strip().strip('\"') # after '$covid' remove spaces
try:
data = requests.get('https://finnhub.io/api/v1/covid19/us?&token=' + FINNHUB_CORONA_TOKEN).json()
except:
print(requests.get('https://finnhub.io/api/v1/covid19/us?&token=' + FINNHUB_CORONA_TOKEN))
await ctx.channel.send(embed=discord.Embed(
description="API limit reached, please wait before running the command again.",
color=discord.Color.red()))
return
only_states_data = [block for block in data if block['state'] in get_states()]
if len(argument) < 1:
embed = discord.Embed(title='Coronavirus Statistics', color=discord.Color.teal())
i = 1
case_count = 0
pop_count = 0
death_count = 0
for state in sorted(only_states_data,
key=lambda state: state['case'] if not sort_by_percentage else (state['case']/get_pop(state['state'].strip())),
reverse=True): # iterate through the state blocks sorted by case number
case_count += state['case']
death_count += state['death']
pop_count += get_pop(state['state'].strip())
if i < 16:
state_name, cases_output, deaths_output = build_top_corona_output(state)
embed.add_field(
name = str(i) + '. ' + state_name + '\n',
value = 'Cases: ' + cases_output + 'Deaths: ' + deaths_output,
inline=True)
i += 1
embed.description = '-------= U.S Totals =-------\n' \
'Cases: {:,d} '.format(case_count) + '(' + str(round((case_count/pop_count) * 100, 4)) + '%)' + '\n' \
'Deaths: {:,d} '.format(death_count) + '(' + str(round((death_count/pop_count) * 100, 4)) + '%)'
await ctx.send(embed=embed)
else:
try:
state = us_areas[argument.upper()][0]
population = int(us_areas[argument.upper()][1])
except:
state = capitalize_all_words(argument)
if get_abbrev(state) == '':
await ctx.channel.send(embed=discord.Embed(
description="Invalid state, make sure you use the full name not the abbreviation",
color=discord.Color.red()))
return
population = get_pop(state)
description = ''
for block in only_states_data:
if str(block['state']).strip() == state:
pop_percentage = round((block['case']/population) * 100, 4)
description = 'Cases: ' + '{:,d}'.format(block['case'])
description += '\nInfected Percentage: ' + str(pop_percentage) + '%'
description += '\nDeaths: ' + '{:,d}'.format(block['death'])
description += '\nFatality Rate: ' + '{:,.2f}%'.format((block['death']/block['case']) * 100)
break
embed = discord.Embed(
title=state + ' Coronavirus Statistics',
description=description,
color=discord.Color.teal())
await ctx.send(embed=embed)
def get_states():
"""Returns a list of U.S states
"""
states = []
for abbrev, value in us_areas.items():
states.append(value[0])
return states
def get_abbrev(state):
"""Returns the abbreviation of the passed in state
Args:
- state: state in which to get the abbreviation
"""
abbrev_list = [key for key, value in us_areas.items() if state in value]
return abbrev_list[0] if len(abbrev_list) > 0 else ''
def get_pop(state):
"""Returns the population of the passed in state
Args:
- state: state in which to get the population
"""
abbrev = get_abbrev(state)
return int(us_areas[abbrev][1]) if abbrev != '' else -1
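# For example (using the us_areas table above): get_abbrev('Massachusetts') -> 'MA'
# and get_pop('Massachusetts') -> 6892503; an unknown state returns '' and -1 respectively.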
def capitalize_all_words(str):
"""Capitalizes all words in the string
Args:
- str: passed in string to be capitalized
"""
string_list = str.split()
output = ''
for string in string_list:
output += string.capitalize() + ' '
output = output[:-1]
return output
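# e.g. capitalize_all_words('new york') -> 'New York' (each whitespace-separated word
# is capitalized, then the trailing space added by the loop is stripped).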
def build_top_corona_output(state):
"""Returns the outputs needed for the top coronavirus embed
Args:
- state: state in which to get the statistics
"""
pop = get_pop(state['state'].strip())
cases_perc = state['case']/pop * 100
fatal_perc = state['death']/state['case'] * 100 # calculate fatality rate
cases_output = '{:,d}'.format(state['case']) + ' ({:,.2f}%)'.format(cases_perc) + '\n' # format integers with commas
deaths_output = '{:,d}'.format(state['death']) + ' ({:,.2f}%)'.format(fatal_perc)
return state['state'], cases_output, deaths_output
async def umass_coronavirus(ctx):
"""Generate UMass-specific coronavirus statistics
Args:
    - ctx: context in which the command occurred; use this to access the message and other attributes
"""
try:
data = requests.get('https://www.umass.edu/coronavirus/confirmed-cases-covid-19-umass-amherst').text
except:
print(requests.get('https://www.umass.edu/coronavirus/confirmed-cases-covid-19-umass-amherst'))
await ctx.channel.send(embed=discord.Embed(
description="Could not access reporting page, please wait before running the command again.",
color=discord.Color.red()))
return
total_cases = 0
most_recent_report = None
most_recent_count = None
soup = BeautifulSoup(data, 'html.parser')
for accordion in soup.select(".field-group-accordion-wrapper"):
date = accordion.select(".field--name-node-title")[0].get_text().strip()
date = dateutil.parser.parse(date)
case_count = accordion.select(".field--name-field__of-reported-cases")[0] \
.select(".field__item")[0] \
.get_text()
case_count = int(case_count)
if date >= UMASS_CASE_EPOCH:
if most_recent_report is None or date > most_recent_report:
most_recent_report = date
most_recent_count = case_count
total_cases += case_count
embed = discord.Embed(title='UMass Coronavirus Statistics', color=discord.Color.teal())
embed.description = "{} cases since {}".format(total_cases, UMASS_CASE_EPOCH.strftime("%Y-%m-%d")) + \
"\n" + \
"Most recent report: {} case(s) on {}.".format(most_recent_count, most_recent_report.strftime("%Y-%m-%d"))
await ctx.send(embed=embed)
async with ctx.typing():
with tempfile.TemporaryDirectory() as tmp:
path = os.path.join(tmp, 'days_elapsed.png')
image = over.draw_outbreak_sign((dateutil.utils.today() - most_recent_report).days)
image.save(path)
await ctx.send(file=discord.File(path))
| StarcoderdataPython |
3217494 | <reponame>bogdanvuk/pygears
from pygears import alternative, TypeMatchError, gear
from pygears.typing import Union
from pygears.lib import fmap as common_fmap
from pygears.lib.mux import mux
from pygears.lib.demux import demux_ctrl
from pygears.lib.ccat import ccat
from pygears.lib.shred import shred
def unionmap_check(dtype, f, mapping):
if not issubclass(dtype, Union):
return False
try:
num_f = len(f)
except TypeError:
raise TypeMatchError(
f'Union fmap argument "f" needs to be a sequence, received {f}')
if mapping is None:
num_types = len(list(dtype.types))
else:
num_types = max(mapping.values()) + 1
if num_types != num_f:
raise TypeMatchError(
'Number of union types different from the number of fmap functions'
)
return True
@alternative(common_fmap)
@gear(enablement=b'unionmap_check(din, f, mapping)')
def unionmap(din,
*,
f,
fdemux=demux_ctrl,
fmux=mux,
balance=None,
mapping=None,
use_dflt=True):
if mapping:
fdemux = fdemux(mapping=mapping)
fmux = fmux(mapping=mapping)
demux_dout = din | fdemux
ctrl = demux_dout[0]
branches = demux_dout[1:]
dout = []
for i, fd in enumerate(f):
if fd is None:
if balance is None:
dout.append(branches[i])
else:
dout.append(branches[i] | balance)
else:
dout.append(fd(branches[i]))
if dout[-1] is None or isinstance(dout[-1], tuple):
ret = 'none' if dout[-1] is None else f'{len(dout[-1])} outputs'
raise TypeMatchError(
f'Gear "{fd}" passed to the unionmap should have a single output, but returned {ret}'
)
# Situation where there is a default branch because of mapping
if len(branches) == len(dout) + 1 and mapping is not None:
if use_dflt:
dout.append(branches[-1])
else:
branches[-1] | shred
elif len(branches) > len(dout):
raise Exception
if balance is not None:
ctrl = ctrl | balance
if len(dout) == 1:
return ccat(*dout, ctrl) | Union
else:
return fmux(ctrl, *dout)
| StarcoderdataPython |
31013 | # from django.db.models.signals import post_save
# from django.dispatch import receiver
# from onadata.apps.logger.models import XForm
#
# from onadata.apps.fsforms.models import FieldSightXF
#
#
# @receiver(post_save, sender=XForm)
# def save_to_fieldsight_form(sender, instance, **kwargs):
# FieldSightXF.objects.create(xf=instance)
| StarcoderdataPython |
1752026 | from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/(?P<object_id>\d+)/$',
view = 'basic.bookmarks.views.bookmark_detail',
name = 'bookmark_detail',
),
url(r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/$',
view = 'basic.bookmarks.views.bookmark_archive_day',
name = 'bookmark_archive_day',
),
url(r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/$',
view = 'basic.bookmarks.views.bookmark_archive_month',
name = 'bookmark_archive_month',
),
url(r'^(?P<year>\d{4})/$',
view = 'basic.bookmarks.views.bookmark_archive_year',
name = 'bookmark_archive_year',
),
url(r'^$',
view = 'basic.bookmarks.views.bookmark_index',
name = 'bookmark_index',
),
) | StarcoderdataPython |
167459 | <reponame>ufkapano/planegeometry
#!/usr/bin/python
import unittest
from fractions import Fraction
from planegeometry.structures.points import Point
from planegeometry.structures.segments import Segment
from planegeometry.algorithms.bentleyottmann2 import BentleyOttmann
class TestBentleyOttmann(unittest.TestCase):
def setUp(self):
self.segments1 = []
self.segments1.append(Segment(3, -4, 15, -13))
self.segments1.append(Segment(17, -7, 25, 6))
self.segments1.append(Segment(20, 3, 24, -2))
self.segments1.append(Segment(5, -9, 8, -4))
self.segments1.append(Segment(14, 3, 27, 10))
self.segments1.append(Segment(-16, 2, -12, -3))
self.segments1.append(Segment(-9, 5, -4, -9))
self.segments1.append(Segment(-19, -10, -10, 10))
self.segments1.append(Segment(-5, 5, -1, 7))
self.segments1.append(Segment(4, 15, 11, -3))
self.segments1.append(Segment(-3, 9, -2, 5))
self.segments1.append(Segment(2, 4, 6, 13))
self.segments2 = []
self.segments2.append(Segment(9, 11, 0, 2))
self.segments2.append(Segment(4, 0, 11, 7))
self.segments2.append(Segment(10, 2, 1, 11))
self.segments2.append(Segment(2, 6, 7, 1))
self.segments3 = []
self.segments3.append(Segment(-10, 2, -2, 5))
self.segments3.append(Segment(2, -2, 13, 4))
self.segments4 = []
self.segments4.append(Segment(-10, 10, 20, 3))
self.segments4.append(Segment(-9, 4, -2, 3))
self.segments4.append(Segment(-3, 2, 17, 7))
self.segments4.append(Segment(3, 5, 6, 2))
self.segments4.append(Segment(-6, 6, 7, 13))
self.segments4.append(Segment(-15, 7, 5, 8))
def test_run(self):
points1 = [Point(Fraction(-1808, 125), Fraction(2, 25)),
Point(Fraction(-7, 3), Fraction(19, 3)),
Point(Fraction(722, 135), Fraction(173, 15)),
Point(Fraction(187, 29), Fraction(-191, 29)),
Point(Fraction(501, 23), Fraction(71, 92))]
self.assertEqual(BentleyOttmann(self.segments1).run(), points1)
points2 = [Point(3, 5), Point(5, 7), Point(6, 2), Point(8, 4)]
self.assertEqual(BentleyOttmann(self.segments2).run(), points2)
self.assertEqual(BentleyOttmann(self.segments3).run(), [])
points4 = [Point(Fraction(-385, 127), Fraction(965, 127)),
Point(Fraction(-610, 301), Fraction(350, 43)),
Point(Fraction(-5, 17), Fraction(263, 34)),
Point(Fraction(21, 5), Fraction(19, 5)),
Point(Fraction(295, 29), Fraction(307, 58))]
self.assertEqual(BentleyOttmann(self.segments4).run(), points4)
def tearDown(self): pass
if __name__ == "__main__":
unittest.main()
# EOF
| StarcoderdataPython |
1602556 | import os
from contextlib import contextmanager
@contextmanager
def cwd(path):
old_path = os.getcwd()
os.chdir(path() if callable(path) else path)
try:
yield
finally:
os.chdir(old_path)
def in_dir(path):
def decorator(fn):
def wrapper(*args, **kwargs):
with cwd(path):
return fn(*args, **kwargs)
return wrapper
return decorator
def preserve_cwd(func=None):
def decorator(fn):
def wrapper(*args, **kwargs):
with cwd(os.getcwd()):
return fn(*args, **kwargs)
return wrapper
if callable(func):
return decorator(func)
else:
return decorator
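# Illustrative usage (not part of the original module); '/tmp' is an assumption and
# simply stands in for any existing directory.
if __name__ == '__main__':
    @in_dir('/tmp')
    def list_tmp():
        # runs with the working directory switched to /tmp, then restores it
        return os.listdir('.')

    @preserve_cwd
    def wander():
        # may chdir freely; the decorator restores the original working directory
        os.chdir('/')

    with cwd('/tmp'):
        print(os.getcwd())   # inside /tmp (or its resolved path)
    list_tmp()
    wander()
    print(os.getcwd())       # back where we started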
| StarcoderdataPython |
3236717 | import random # because we'll need this ha, ha, ha
# the vocabulary could just be a single list but I've organized more-or-less
# in case I want to try a few grammaticaly rules later:
people = [
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'Jaybez',
'<NAME>',
'mar lady',
'mar mate',
'ar yuth',
'mayun',
'these do-gooders',
'may un mar lady',
'me Dad',
'me Mum',
'<NAME>',
'er upstairs',
'Slash from Guns \'n Roses',
]
places = [
'up Fegg Hayes',
'on Silverdale',
'up Anley duck',
'up Almerend',
'dayn Castle',
'up Albermarl reowd',
'in Fenton',
'me caravan in North Wayulz',
'Jamage Industriul Estate',
'on the A34',
'on the dee reowd',
'along the A500',
'Anley Museum',
'dayn the pub',
'dayn the club',
'dayn the tayn',
'dayn pit',
'me kayne-slice',
'dayn Westport Lake',
'up Unity House',
'up Potteries Shopping Center',
'dayn Fezzie Park',
'up at Victoria Orrl',
'up Auger\'s Bonk',
'bin up Neck End',
'over Porthill Bonk',
'over Adeley way, lark',
'Pot Banks',
'the colliery',
'Jamland',
'th\'appy-clappy',
]
put_downs = [
'are anna gorra clew',
'dooin me-yed in',
'conner stand im',
'conner stand er',
'yer wonner fight',
'oh yer startin',
'gerrin on me nerves',
'serves thee sen rayt',
'yull catch yer deather cold',
'rayt mard you are',
'yull get chin-cough',
'oh av yerd im',
'dead sad you are'
]
greetings = [
'ay up duck.',
'oow reet.',
'ay up mar mate.',
'oow at.',
'say thee!',
]
beverages = [
'a pint o Bass',
'a pint o Mayuld',
'a seowuderanlime',
'a brew',
'a birrer Kaynesul Pop',
'four tins of Steller',
]
cuisine = [
'oatcake',
'cheesey oatcake',
'bacon cheese oatcake',
'dead nice oatcakes',
'mmmmm, oatcakes',
'sausage, bacon and cheese oatcake',
'dead nice Balti tharriz',
'chayz butty',
'amunchayz butty',
'bacon bap',
'packeet o sult un vineeger crisps playz, duck.',
]
stop_words = [
'may',
'lark',
'ast',
'dunner',
'dunnerafter',
'yer woh',
'conner',
'anner',
'reet',
'onner',
'onneravvin',
'worrabite',
'bostid',
'abite',
'thee sen',
'me sen',
'a saggermaker\'s bottom knocker',
'ruddy',
'ruddy great',
'dirty great',
'fer',
'me duck',
'worrabite',
]
verbs = [
'gowin',
'avvin',
'ramblin ter me sen',
'ramblin on and on',
'gerrin',
'sez',
'put th\'binz ite',
'put ke\'ul on duck',
'graggin',
'werritin',
'shermozzlin',
'lozerkin',
'fang owd',
'purrer dayn!',
'purrim dayn!',
'get theesen',
'gowin wom',
'slopin off',
]
questions = [
'ast tha thote abite snappin?',
'ast tha got thee buz fair',
'av yer gorra neow meowbiyul',
'ast tha gorra wotch',
'ast tha got thee specs?',
'worrer yer doin neow?',
]
institutions = [
'Potteries Motor Traction',
'Scragg\'s Coaches',
'Stoke Poly',
'the Mitch',
'Stoke City',
'the Vale',
'on Radio Stoke',
'Signul',
'up the Crem',
'in the Creowun',
'deowun the Roe-Buck',
'in the Vine',
'the Snayd Arms',
'the Rigger',
'up Shelton Bar',
'in th\'Sen-nul',
'E.R.F',
'Foden\'s',
'Caudwell Communications',
'at Rists',
'<NAME>\'s Teapots',
'Spode\'s',
'Wedgewood\'s',
'the Adams family of potters',
]
random_statements = [
'cost kick a bo againt a wo an then it eet wi thee yed till eet bosses',
'ar anner gorra peowund!',
'thars anuther thray peowund up the wall',
'ar well, thars wun less day ter live',
'ahm swealterin',
'ahm frayzin ar ahm',
'conner get meeyed reowund eet ar conner',
'werretin on abite sommert',
'any reowd',
'pick up thee muskeet',
'it\'s black over Bill\'s Mother\'s',
'saves yer mauwlin with eet any reowd',
]
# the corpus is really just a list of the above lists:
corpus = [people, places, put_downs, greetings, beverages,
cuisine, stop_words, verbs, questions, institutions, random_statements]
# oh yeah, this is tricky to explain:
sentence_endings = ( '.', '. ', '?', '? ', '!', '! ')
# :
def capitalize(line):
"""Capitalize the first char in a string
string.capitalize messes up deliberate upper cases in the strings so
here's a dedicated function.
Args:
line (str): The string to capitalize
Returns:
str: The capitalized string
"""
return line[0].upper() + line[1:len(line)]
def generate_paragraph():
"""Generate a paragraph of North Staffordshire Gibberish
Args:
None
Returns:
str: A paragraph of North Staffordshire Gibberish
"""
text = ''
sentence_word_count = 0
for i in range(0, random.randint(20, 40)):
# chose a random section from the corpus
section = random.choice(corpus)
# chose a random string from the section
word = random.choice(section)
# if the paragraph so far ends with a completed sentence then
# the new selection needs to be capitalized:
if text == '' or text.endswith(sentence_endings):
word = capitalize(word)
        # now it can be appended to the paragraph
text += word
# reset the counter if we just finished a sentence:
if text.endswith(sentence_endings):
sentence_word_count = 0
else:
# otherwise increment it:
sentence_word_count += 1
            # if there are more than 6 "words" (keeping in mind a word
            # can actually contain several words) then reset the counter:
if sentence_word_count > 6:
sentence_word_count = 0
# And if the sentence is complete just add a space
if text.endswith(sentence_endings):
text += ' '
else:
# otherwise give it a full-stop:
text += '. '
else:
# if the sentence isn't yet 5 words increment the counter
# and add a space
text += ' '
sentence_word_count += 1
# when the paragraph is complete make sure it has a proper punctuation
# at the end before returning:
if text.endswith(sentence_endings):
return text.strip()
else:
return text.strip() + '.'
def generate_potters():
"""Generate a list of 3 paragraphs of North Staffordshire Gibberish
Args:
None
Returns:
list(str): A list of strings, each containing a paragraph
"""
paragraphs = []
for i in range(0, 3):
paragraphs.append(generate_paragraph())
return paragraphs | StarcoderdataPython |
1604131 | <gh_stars>0
import numpy.testing as npt
from numpy import (absolute, all, arange, array, cos, linspace, log, sin)
from ..nearshockapproximator import (NearShockApproximator,
NearShockFifthOrderApproximator)
class TestNearShockApproximator:
def test__two_points_away_from_shock__should_give_fifth_order(self):
r = 2.0
powers = arange(4, 8)
n_list = r**powers + 1
error_list = []
for n in n_list:
x, dx = linspace(0, 1.2, num=n, retstep=True)
y = sin(x)
approx = NearShockApproximator(dx)
result = approx.approximate_two_points_away_from_shock(y)
desired = cos(x[-3])
error = absolute(result - desired)
error_list.append(error)
errors = array(error_list)
observed_orders = log(errors[0:-1] / errors[1:]) / log(r)
min_order = 4.90
npt.assert_(all(observed_orders >= min_order))
def test__one_point_away_from_shock__should_give_fourth_order(self):
r = 2.0
powers = arange(2, 7)
n_list = 10.0 * r**powers
error_list = []
for n in n_list:
x, dx = linspace(0, 1.2, num=n, retstep=True)
y = sin(x)
approx = NearShockApproximator(dx)
result = approx.approximate_one_point_away_from_shock(y)
desired = cos(x[-2])
error = absolute(result - desired)
error_list.append(error)
errors = array(error_list)
observed_orders = log(errors[0:-1] / errors[1:]) / log(r)
min_order = 3.90
npt.assert_(all(observed_orders >= min_order))
def test__on_shock__should_give_fifth_order(self):
r = 2.0
powers = arange(2, 6)
n_list = 10.0 * r**powers
error_list = []
for n in n_list:
x, dx = linspace(0, 1.2, num=n, retstep=True)
y = sin(x)
approx = NearShockApproximator(dx)
result = approx.approximate_on_shock(y)
desired = cos(x[-1])
error = absolute(result - desired)
error_list.append(error)
errors = array(error_list)
observed_orders = log(errors[0:-1] / errors[1:]) / log(r)
min_order = 4.95
npt.assert_(all(observed_orders >= min_order))
class TestNearShockFifthOrderApproximator:
def test__one_point_away_from_shock__should_give_fifth_order(self):
r = 2.0
powers = arange(0, 5)
n_list = 10.0 * r**powers
error_list = []
for n in n_list:
x, dx = linspace(0, 1.2, num=n, retstep=True)
y = sin(x)
approx = NearShockFifthOrderApproximator(dx)
result = approx.approximate_one_point_away_from_shock(y)
desired = cos(x[-2])
error = absolute(result - desired)
error_list.append(error)
errors = array(error_list)
observed_orders = log(errors[0:-1] / errors[1:]) / log(r)
min_order = 5.0
npt.assert_(all(observed_orders >= min_order))
| StarcoderdataPython |
3342864 | # This file is Copyright 2009, 2010 <NAME>.
#
# This file is part of the Python-on-a-Chip program.
# Python-on-a-Chip is free software: you can redistribute it and/or modify
# it under the terms of the GNU LESSER GENERAL PUBLIC LICENSE Version 2.1.
#
# Python-on-a-Chip is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# A copy of the GNU LESSER GENERAL PUBLIC LICENSE Version 2.1
# is seen in the file COPYING in this directory.
## @file
# @copybrief sizeof
## @package sizeof
# @brief Provides PyMite's sizeof module.
#
# <b>USAGE</b>
#
# \code sizeof.sizeof(obj) \endcode
#
# Returns the size of the given object. If obj is an integer from 0..31,
# the size of the object type represented by that integer will be returned.
"""__NATIVE__
#include "pm.h"
"""
def sizeof(obj):
"""__NATIVE__
pPmObj_t pobj;
pPmObj_t psize;
int32_t n;
PmReturn_t retval = PM_RET_OK;
int32_t static size[] = {
sizeof(PmObj_t), /* None type */
sizeof(PmInt_t),
sizeof(PmFloat_t),
sizeof(PmString_t),
sizeof(PmTuple_t),
sizeof(PmCo_t),
sizeof(PmFunc_t), /* Module Obj uses func struct */
sizeof(PmClass_t),
sizeof(PmFunc_t),
sizeof(PmClass_t), /* Class instance */
0, /* CIM */
0, /* NIM */
sizeof(PmCo_t), /* NOB */
sizeof(PmThread_t),
sizeof(PmClass_t), /* Exception instance */
sizeof(PmBoolean_t),
sizeof(PmCodeImgObj_t),
sizeof(PmList_t),
sizeof(PmDict_t),
0,
0,
0,
0,
0,
0,
sizeof(PmFrame_t),
sizeof(PmBlock_t),
sizeof(Segment_t),
sizeof(Seglist_t),
sizeof(PmSeqIter_t),
sizeof(PmNativeFrame_t),
};
/* If wrong number of args, raise TypeError */
if (NATIVE_GET_NUM_ARGS() != 1)
{
PM_RAISE(retval, PM_RET_EX_TYPE);
return retval;
}
pobj = NATIVE_GET_LOCAL(0);
if (OBJ_GET_TYPE(pobj) == OBJ_TYPE_INT)
{
n = ((pPmInt_t)pobj)->val;
if ((n >= 0) && (n < 32))
{
/* Return the size of the type represented by the integer */
retval = int_new(size[n], &psize);
}
else
{
/* Return the size of an integer object */
retval = int_new(OBJ_GET_SIZE(pobj), &psize);
}
}
else
{
/* Return the size of the given non-integer object */
retval = int_new(OBJ_GET_SIZE(pobj), &psize);
}
NATIVE_SET_TOS(psize);
return retval;
"""
pass
def print_sizes():
types = (
'NON',
'INT',
'FLT',
'STR',
'TUP',
'COB',
'MOD',
'CLO',
'FXN',
'CLI',
'CIM',
'NIM',
'NOB',
'THR',
0,
'BOL',
'CIO',
'LST',
'DIC',
0, 0, 0, 0, 0, 0,
'FRM',
'BLK',
'SEG',
'SGL',
'SQI',
'NFM',
0,
)
for i in range(32):
if types[i] != 0:
print "sizeof(", types[i], ") = ", sizeof(i)
#:mode=c:
| StarcoderdataPython |
1700750 | from mldp.steps.transformers.base_transformer import BaseTransformer
from mldp.utils.helpers.validation import validate_field_names
from mldp.utils.helpers.nlp.sequences import compute_windows
from mldp.steps.transformers.nlp.helpers import create_new_field_name
from mlutils.helpers.general import listify
import numpy as np
class WindowSlider(BaseTransformer):
"""
Runs a rolling slider over a sequence. Creates a separate field for each
field to which the slider was applied.
Assumes 2D data, namely batch_size x sequences, where sequences can be
of different sizes.
"""
def __init__(self, field_names, window_size=5, step_size=1,
only_full_windows=False, new_window_field_name_suffix='window',
**kwargs):
"""
        :param field_names: str or list of str corresponding to fields
                            which should be slid over.
:param window_size: self-explanatory.
:param step_size: self-explanatory.
:param only_full_windows: if set to True guarantees that all windows
will be of the same size.
:param new_window_field_name_suffix: suffix for all newly created fields.
"""
try:
validate_field_names(field_names)
except Exception as e:
raise e
super(WindowSlider, self).__init__(**kwargs)
self.field_names = listify(field_names)
self.window_size = window_size
self.step_size = step_size
self.only_full_windows = only_full_windows
self.new_windw_fn_suffix = new_window_field_name_suffix
def _transform(self, data_chunk):
for fn in self.field_names:
field_vals = data_chunk[fn]
tmp = np.empty(len(field_vals), dtype='object')
for i, el in enumerate(field_vals):
window_elms = compute_windows(el,
window_size=self.window_size,
step_size=self.step_size,
only_full_windows=self.only_full_windows)
tmp[i] = window_elms
new_fn = create_new_field_name(fn, suffix=self.new_windw_fn_suffix)
data_chunk[new_fn] = tmp
return data_chunk
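# Illustrative sketch (an assumption, not part of the original module): what the slider yields
# for one sequence. The real transformer delegates to mldp's compute_windows helper; the tiny
# stand-in below mirrors only its full-window case with the same window_size/step_size meaning.
def _example_windows(seq=(1, 2, 3, 4, 5, 6), window_size=3, step_size=1):
    windows = []
    for start in range(0, len(seq) - window_size + 1, step_size):
        windows.append(list(seq[start:start + window_size]))
    return windows  # e.g. [[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6]]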
| StarcoderdataPython |
99496 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 31 12:44:45 2022
@author: Danie
DEPRECATED FILE DO NOT USE
Old boilerplate work for when stage 3 classification was just going to be selecting a set of representative
points created by Models A, B and C. Has terrible accuracy and should absolutely not be used.
"""
import tensorflow as tf
import numpy as np
import matplotlib as plt
import os
import csv
from test_iterator import TestIterator
import pandas as pd
import plotly.express as px
import plotly
from tqdm import tqdm
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from sklearn import svm
import random
def getLabel(base_dir, csvf, test_image):
label = 0
with open(os.path.join(base_dir, csvf), "r") as f:
reader = csv.reader(f)
for row in reader:
if row[0] == test_image+".JPG" or row[0] == test_image+".JPEG":
label = row[1]
break
return label
if __name__=="__main__":
base_dir = "E:\\Coding\\Dataset"
test_dir = "images_test"
label_csv = "test_labels.csv"
networkA = 'network_A_1'
networkB = 'network_B_1'
networkC = 'network_C_1'
base_dir = "E:\\Coding\\Dataset"
batch_size = 32
csvf = "test_labels.csv"
svc = svm.SVC()
totdf = pd.DataFrame()
if os.path.exists(os.path.join(os.getcwd(),networkA+'_best_weights.h5')):
modelA = tf.keras.models.load_model(networkA+'_best_weights.h5')
if os.path.exists(os.path.join(os.getcwd(),networkB+'_best_weights.h5')):
modelB = tf.keras.models.load_model(networkB+'_best_weights.h5')
if os.path.exists(os.path.join(os.getcwd(),networkC+'_best_weights.h5')):
modelC = tf.keras.models.load_model(networkC+'_best_weights.h5')
file_list = os.listdir(os.path.join(base_dir, "Test"))
random.shuffle(file_list)
for index, folder in tqdm(enumerate(file_list)):
if index > 10:
break
image_dir = os.path.join(base_dir, "Test", folder)
predictionsDict = {"File": [], "x": [], "y": [], "z": [], "label": []}
label = getLabel(base_dir, csvf, folder)
testIter = TestIterator(batch_size, label, image_dir, return_name=True)
for i in range(len(testIter)):
x, y, name = testIter.__getitem__(0)
predictionsA = np.array(modelA(x, training=False))
predictionsB = np.array(modelB(x, training=False))
predictionsC = np.array(modelC(x, training=False))
for idx, pred in enumerate(zip(predictionsA, predictionsB, predictionsC, y)):
# if pred[0] >= 0.3 or pred[1] >= 0.3 or pred[2] >= 0.3:
predictionsDict["File"].append(name[idx])
predictionsDict["x"].append(pred[0])
predictionsDict["y"].append(pred[1])
predictionsDict["z"].append(pred[2])
predictionsDict["label"].append(pred[3])
df=pd.DataFrame(data=predictionsDict)
if((df.max().drop('File') > 0.5).any()):
df.drop(df[(df.x < 0.5) & (df.y < 0.5) & (df.z < 0.5)].index, inplace=True)
df = df.head(n=4)
else:
df.drop(df[(df.x > 0.5) & (df.y > 0.5) & (df.z > 0.5)].index, inplace=True)
df = df.head(n=4)
totdf = totdf.append(df)
fig = plt.figure(figsize=(12, 9))
ax = Axes3D(fig)
Y = [value.item() for value in totdf['y'].to_numpy()]
X = [value.item() for value in totdf['x'].to_numpy()]
Z = [value.item() for value in totdf['z'].to_numpy()]
label = totdf['label'].tolist()
coords = list(zip(X,Y,Z))
svc.fit(coords, label)
ax.scatter(X, Y, Z)
plt.show()
| StarcoderdataPython |
1767807 | <gh_stars>1-10
from . import node
import json
import copy
class Edge :
"""
Edge
======
A simple edge, embedding a `id_a`, a `id_b` and a `modality`.
"""
def __init__(self, id_edge: str, a: node.Node, b: node.Node, modality: str = "1", directed: bool = False):
"""
Nodes are given instead of just their `id`, so we can ensure each id correspond to an actual node,
even if we only store their id in the struct
"""
self.id = str(id_edge)
self.id_a = str(a.id)
self.id_b = str(b.id)
self.modality = str(modality)
self.directed = directed
self.meta = {}
self.data = {}
def __str__(self):
return self.__repr__()
def __repr__(self):
return json.dumps(self.__dict__, indent=4, sort_keys=True)
def __copy__(self):
return copy.deepcopy(self)
def other_end(self, id_base):
if self.id_a == id_base :
return self.id_b
elif self.id_b == id_base :
return self.id_a
else :
return None
def replace_end(self, id_old:str, id_new) -> bool:
if self.id_a == id_old :
self.id_a = id_new
return True
elif self.id_b == id_old :
self.id_b = id_new
return True
else :
return False
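# Illustrative usage sketch (an assumption, not part of the original module). A minimal
# stand-in object with an `id` attribute is used because the real node.Node constructor is
# not shown in this file.
def _example_usage():
    class _FakeNode:  # hypothetical stand-in for node.Node
        def __init__(self, node_id):
            self.id = node_id
    a, b = _FakeNode("n1"), _FakeNode("n2")
    edge = Edge("e1", a, b, modality="2")
    assert edge.other_end("n1") == "n2"   # walk to the opposite endpoint
    assert edge.replace_end("n2", "n3")   # rewire one endpoint
    assert edge.other_end("n1") == "n3"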
| StarcoderdataPython |
3334621 | <reponame>objectnf/training-code<filename>Lead-to-New-Language/Python/tool - pyinstaller/basic - syntaxSugar01.py
a = 10; b = 5 # define multiple variables on a single line
c = [a, b][a > b] # assign the smaller of a and b to c
# if a > b is True, take [a, b][1] (i.e. b); if a > b is False, take [a, b][0] (i.e. a)
print("c = " + str(c))
# it can also be written in the following form:
c = a if a < b else b
del c # delete the variable
sum = 0
for i in range(2, 100):
    for j in range(2, i):
        if i % j == 0:
            break
    # the else block runs when the paired for loop finishes normally; it is skipped when break exits the loop
    else:
        sum += i
print("The sum of primes within 0 ~ 100 is: " + str(sum))
# functions can accept dicts and lists as arguments
list_arg = [1, 2, 3, 4, 5] # this is a list
dict_arg = {"name": "小明", "age": "12"}
def print_args(*args, **kwargs): # use * and ** to mark the kind of formal parameter
    print(args)
    print(kwargs)
print_args(*list_arg, **dict_arg)
# list comprehensions
list_a = [1, 4, 5, 3, 8, 10, 2] # this is a list
# do some work, e.g. pull out all the even elements to build a new list
# it can be written as follows
list_b = [i for i in list_a if i % 2 == 0] # for each element of list_a, keep it if it is 0 modulo 2
print("list_b is " + str(list_b))
# the same idea works for tuples, sets and dicts
# lambda expressions
# doesn't it look a lot like Wolfram
# parameters go before the colon, the expression goes after it
f = lambda x: x+1
print("f(12): " + str(f(12)))
# this makes sorting lists convenient, e.g. by absolute value regardless of sign
list_c = [15, -8, 12, 23, -7, -14, 4, 0, -3, -9, 2]
list_c.sort(key = lambda x: abs(x))
print("list_c is " + str(list_c))
# method chaining
# usable when each call returns the same kind of object
class Number():
def __init__(self, val):
self.number = val
def add(self):
self.number += 1
return self
def minus(self):
self.number -= 2
return self
num_tmp = Number(10)
print(num_tmp.add().minus().add().add().add().add().minus().add().number)
| StarcoderdataPython |
3363240 | from basetestcase import FormTestCase
from _test_app.forms import MyForm, MyBaseFormSet, MyFormSet
from _test_app.models import MyModel
class FormTestCaseTest(FormTestCase):
def form(self, *args, **kwargs):
form = MyForm(*args, **kwargs)
return form
def test_field_rendered(self):
self.form_field_test(
'my_field',
help_text='This is help text.',
label='My Field:',
widget_attrs={'maxlength': '255'}
)
def test_MyFormSet(self):
formset = MyFormSet()
self.formset_test(
baseformset=MyBaseFormSet,
field_data={'my_field': 'My Field'},
form=MyForm,
formset=MyFormSet,
) | StarcoderdataPython |
4816670 | <filename>run.py<gh_stars>1-10
from kernel_matrix_benchmarks.main import main
from multiprocessing import freeze_support
if __name__ == "__main__":
# Freeze_support is a Windows-only function that ensures compatibility
# with a ".py -> .exe" packaging method. On Linux, this has zero consequences.
freeze_support()
# The actual code:
main()
| StarcoderdataPython |
110693 | <reponame>etianen/py.sh<gh_stars>1-10
class StyleMapping:
def __init__(self, opts):
self._opts = opts
def __getitem__(self, key):
return getattr(self._opts, "style_{}".format(key), "").encode().decode("unicode_escape")
def apply_styles(opts, command):
return command.format_map(StyleMapping(opts))
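# Illustrative usage sketch (an assumption, not part of the original module). The opts object
# here is a hypothetical stand-in whose `style_*` attributes hold escape sequences as literal
# backslash text, which is exactly what StyleMapping's unicode_escape decoding expects.
def _example_usage():
    from types import SimpleNamespace
    opts = SimpleNamespace(style_bold="\\x1b[1m", style_reset="\\x1b[0m")
    return apply_styles(opts, "{bold}hello{reset}")  # -> "\x1b[1mhello\x1b[0m"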
| StarcoderdataPython |
143920 | <reponame>archerckk/PyTest<gh_stars>0
'''
1. Uses a random number
2. Shows a hint message when the user guesses correctly
3. When the guess is wrong, tells the user whether the guess was too high or too low
4. There are only 3 chances in total
5. The game ends once the user runs out of the three chances or guesses correctly
'''
import random
target=random.randint(1,10)
times=3
while times!=0:
    tmp=input('Please enter the number you want to guess: ')
    while not tmp.isdigit():
        tmp=input('Invalid input, please enter again: ')
    guess=int(tmp)
    times-=1
    if target==guess:
        print('Awesome, you guessed it right away')
        break
    else:
        if guess>target:
            print('Your guess is too high')
        elif guess<target:
            print('Your guess is too low')
        if times>0:
            print('You still have %d chance(s) left'%times)
        else:
            print('Your 3 chances are used up')
print('Game over')
| StarcoderdataPython |
1744766 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from queue import Queue
from threading import Thread
def coroutine(func):
    def start(*args, **kwargs):
        rc = func(*args, **kwargs)
        next(rc)  # prime the generator so it is ready to receive .send() (rc.next() is Python 2 only)
return rc
return start
@coroutine
def threaded(target):
messages = Queue() # message queue
def run_target():
while True:
item = messages.get() # A threads loop forever.pulling items out of
# the message queue and sending to the
# target
if item is GeneratorExit: # handle close so that threads shuts down correctly
target.close()
return
else:
target.send(item)
Thread(target=run_target).start()
try:
while True:
item = yield # receive items and pass them into the
# threads (via the queue)
messages.put(item)
except GeneratorExit:
messages.put(GeneratorExit)
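# Illustrative usage sketch (an assumption, not part of the original script): a simple sink
# coroutine fed through threaded(). Defined only; nothing here runs at import time.
@coroutine
def _example_printer():
    try:
        while True:
            item = yield
            print('got:', item)
    except GeneratorExit:
        print('printer closed')
def _example_usage():
    t = threaded(_example_printer())
    t.send('hello')   # handled by the worker thread
    t.close()         # shuts the worker down via the GeneratorExit sentinel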
def main():
pass
if __name__ == '__main__':
main()
| StarcoderdataPython |
1744234 | <filename>flask_cognito.py
from collections import OrderedDict
from functools import wraps
from flask import _request_ctx_stack, current_app, jsonify, request
from werkzeug.local import LocalProxy
from cognitojwt import CognitoJWTException, decode as cognito_jwt_decode
from jose.exceptions import JWTError
import logging
log = logging.getLogger(__name__)
CONFIG_DEFAULTS = {
'COGNITO_CHECK_TOKEN_EXPIRATION': True,
'COGNITO_JWT_HEADER_NAME': 'Authorization',
'COGNITO_JWT_HEADER_PREFIX': 'Bearer',
}
# user from pool
current_cognito_jwt = LocalProxy(lambda: getattr(_request_ctx_stack.top, 'cogauth_cognito_jwt', None))
# unused - could be a way to add mapping of cognito user to application user
current_user = LocalProxy(lambda: getattr(_request_ctx_stack.top, 'cogauth_current_user', None))
# access initialized cognito extension
_cog = LocalProxy(lambda: current_app.extensions['cognito_auth'])
class CognitoAuthError(Exception):
def __init__(self, error, description, status_code=401, headers=None):
self.error = error
self.description = description
self.status_code = status_code
self.headers = headers
def __repr__(self):
return f'CognitoAuthError: {self.error}'
def __str__(self):
return f'{self.error} - {self.description}'
class CognitoAuth(object):
identity_callback = None
def __init__(self, app=None, identity_handler=None):
self.app = app
if app is not None:
self.init_app(app, identity_handler=identity_handler)
def init_app(self, app, identity_handler=None):
for k, v in CONFIG_DEFAULTS.items():
app.config.setdefault(k, v)
# required configuration
self.region = self._get_required_config(app, 'COGNITO_REGION')
self.userpool_id = self._get_required_config(app, 'COGNITO_USERPOOL_ID')
self.jwt_header_name = self._get_required_config(app, 'COGNITO_JWT_HEADER_NAME')
self.jwt_header_prefix = self._get_required_config(app, 'COGNITO_JWT_HEADER_PREFIX')
self.identity_callback = identity_handler
# optional configuration
self.check_expiration = app.config.get('COGNITO_CHECK_TOKEN_EXPIRATION', True)
self.app_client_id = app.config.get('COGNITO_APP_CLIENT_ID')
# save for localproxy
app.extensions['cognito_auth'] = self
# handle CognitoJWTExceptions
# TODO: make customizable
app.errorhandler(CognitoAuthError)(self._cognito_auth_error_handler)
def _get_required_config(self, app, config_name):
val = app.config.get(config_name)
if not val:
raise Exception(f"{config_name} not found in app configuration but it is required.")
return val
def identity_handler(self, callback):
if self.identity_callback is not None:
raise Exception(
f"Trying to override existing identity_handler on CognitoAuth. You should only set this once.")
self.identity_callback = callback
return callback
def get_token(self):
"""Get token from request."""
auth_header_name = _cog.jwt_header_name
auth_header_prefix = _cog.jwt_header_prefix
# get token value from header
auth_header_value = request.headers.get(auth_header_name)
if not auth_header_value:
# no auth header found
return None
parts = auth_header_value.split()
if not auth_header_prefix:
if len(parts) > 1:
raise CognitoAuthError('Invalid Cognito JWT Header', 'Token contains spaces')
return auth_header_value
if parts[0].lower() != auth_header_prefix.lower():
raise CognitoAuthError('Invalid Cognito JWT header',
f'Unsupported authorization type. Header prefix "{parts[0].lower()}" does not match "{auth_header_prefix.lower()}"')
elif len(parts) == 1:
raise CognitoAuthError('Invalid Cognito JWT header', 'Token missing')
elif len(parts) > 2:
raise CognitoAuthError('Invalid Cognito JWT header', 'Token contains spaces')
return parts[1]
def get_user(self, jwt_payload):
"""Get application user identity from Cognito JWT payload."""
if not self.identity_callback:
return None
return self.identity_callback(jwt_payload)
def _cognito_auth_error_handler(self, error):
log.info('Authentication Failure', exc_info=error)
return jsonify(OrderedDict([
('error', error.error),
('description', error.description),
])), error.status_code, error.headers
def decode_token(self, token):
"""Decode token."""
try:
return cognito_jwt_decode(
token=token,
region=self.region,
app_client_id=self.app_client_id,
userpool_id=self.userpool_id,
testmode=not self.check_expiration,
)
except (ValueError, JWTError):
raise CognitoJWTException('Malformed Authentication Token')
def cognito_auth_required(fn):
"""View decorator that requires a valid Cognito JWT token to be present in the request."""
@wraps(fn)
def decorator(*args, **kwargs):
_cognito_auth_required()
return fn(*args, **kwargs)
return decorator
def cognito_check_groups(groups: list):
def decorator(function):
def wrapper(*args, **kwargs):
_cognito_check_groups(groups)
return function(*args, **kwargs)
return wrapper
return decorator
## This adds an alias to the above function to resolve issue #16
cognito_group_permissions = cognito_check_groups
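# Illustrative usage sketch (an assumption, not part of the original module); the config values
# are placeholders. Shows how an application might wire up CognitoAuth, register an identity
# handler and protect a view with the decorator defined above. Defined only, never called here.
def _example_app():
    from flask import Flask
    app = Flask(__name__)
    app.config.update(
        COGNITO_REGION='us-east-1',                 # placeholder
        COGNITO_USERPOOL_ID='us-east-1_example',    # placeholder
        COGNITO_APP_CLIENT_ID='example-client-id',  # placeholder
    )
    cogauth = CognitoAuth(app)
    @cogauth.identity_handler
    def lookup_user(payload):
        # map the verified JWT payload to an application-level identity
        return payload.get('sub')
    @app.route('/private')
    @cognito_auth_required
    def private_view():
        return jsonify({'sub': current_cognito_jwt['sub']})
    return app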
def _cognito_check_groups(groups: list):
"""
Does the actual work of verifying the user group to restrict access to some resources.
:param groups a list with the name of the groups of Cognito Identity Pool
:raise an exception if there is no group
"""
if 'cognito:groups' not in current_cognito_jwt or current_cognito_jwt['cognito:groups'] is None:
raise CognitoAuthError('Not Authorized',
'User doesn\'t have access to this resource',
status_code=403)
if all([i not in current_cognito_jwt['cognito:groups'] for i in groups]):
raise CognitoAuthError('Not Authorized',
'User doesn\'t have access to this resource',
status_code=403)
def _cognito_auth_required():
"""Does the actual work of verifying the Cognito JWT data in the current request.
This is done automatically for you by `cognito_jwt_required()` but you could call it manually.
Doing so would be useful in the context of optional JWT access in your APIs.
"""
token = _cog.get_token()
if token is None:
auth_header_name = _cog.jwt_header_name
auth_header_prefix = _cog.jwt_header_prefix
raise CognitoAuthError('Authorization Required',
f'Request does not contain a well-formed access token in the "{auth_header_name}" header beginning with "{auth_header_prefix}"')
try:
# check if token is signed by userpool
payload = _cog.decode_token(token=token)
except CognitoJWTException as e:
log.info('Authentication Failure', exc_info=e)
raise CognitoAuthError('Invalid Cognito Authentication Token', str(e)) from e
_request_ctx_stack.top.cogauth_cognito_jwt = payload
_request_ctx_stack.top.cogauth_current_user = _cog.get_user(payload)
| StarcoderdataPython |
3367463 | from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
#from builtins import str
from builtins import range
from quantities.quantity import Quantity
from quantities import mV, nA
import sciunit
from sciunit import Test,Score
try:
from sciunit import ObservationError
except:
from sciunit.errors import ObservationError
import hippounit.capabilities as cap
from sciunit.utils import assert_dimensionless# Converters.
from sciunit.scores import BooleanScore,ZScore # Scores.
try:
import numpy
except:
print("NumPy not loaded.")
#import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
#from neuron import h
import collections
import efel
import os
import multiprocessing
import multiprocessing.pool
import functools
import math
from scipy import stats
import json
from hippounit import plottools
import collections
try:
import pickle as pickle
except:
import pickle
import gzip
try:
import copy_reg
except:
import copyreg
from types import MethodType
from quantities import mV, nA, ms, V, s
from hippounit import scores
def _pickle_method(method):
func_name = method.__func__.__name__
obj = method.__self__
cls = method.__self__.__class__
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
try:
copy_reg.pickle(MethodType, _pickle_method, _unpickle_method)
except:
copyreg.pickle(MethodType, _pickle_method, _unpickle_method)
class PSPAttenuationTest(Test):
"""
Tests how much synaptic potential attenuates from the dendrite (different distances) to the soma.
Parameters
----------
config : dict
dictionary loaded from a JSON file, containing the parameters of the simulation
observation : dict
dictionary loaded from a JSON file, containing the experimental mean and std values for the features to be tested
force_run : boolean
        If False and the pickle files containing the model's response to the simulation exist, the simulation won't be run again; traces are loaded from the pickle files
base_directory : str
Results will be saved here
show_plot : boolean
If False, plots are not displayed but still saved
save_all : boolean
If False, only the JSON files containing the absolute feature values, the feature error scores and the final scores, and a log file are saved, but the figures and pickle files are not.
num_of_dend_locations : int
Number of randomly selected dendritic locations to be tested
random_seed: int
random seed for random dendritic location selection
trunk_origin : list
first element : name of the section from which the trunk originates, second element : position on section (E.g. ['soma[5]', 1]). If not set by the user, the end of the default soma section is used.
"""
def __init__(self, config = {},
observation = {},
name="PSP attenuation test" ,
force_run=False,
base_directory= None,
show_plot=True,
num_of_dend_locations = 15,
random_seed = 1,
save_all = True,
trunk_origin = None):
observation = self.format_data(observation)
Test.__init__(self,observation,name)
self.required_capabilities += (cap.ProvidesRandomDendriticLocations, cap.ReceivesEPSCstim)
self.force_run = force_run
self.show_plot = show_plot
self.save_all = save_all
self.base_directory = base_directory
self.path_temp_data = None #added later, because model name is needed
self.path_figs = None
self.path_results = None
self.trunk_origin = trunk_origin
self.logFile = None
self.test_log_filename = 'test_log.txt'
self.npool = multiprocessing.cpu_count() - 1
self.config = config
self.num_of_dend_locations = num_of_dend_locations
self.random_seed = random_seed
description = "Tests how much synaptic potential attenuates from the dendrite (different distances) to the soma."
score_type = scores.ZScore_PSPAttenuation
def format_data(self, observation):
for key, val in list(observation.items()):
try:
observation[key] = float(val)
except Exception as e:
quantity_parts = val.split(" ")
number = float(quantity_parts[0])
units = " ".join(quantity_parts[1:])
observation[key] = Quantity(number, units)
return observation
def run_stimulus(self, model, locations_weights, tau1, tau2):
dend, xloc, weight = locations_weights
traces = {}
if self.base_directory:
self.path_temp_data = self.base_directory + 'temp_data/' + 'PSP_attenuation/' + model.name + '/'
else:
self.path_temp_data= model.base_directory + 'temp_data/' + 'PSP_attenuation/'
try:
if not os.path.exists(self.path_temp_data) and self.save_all:
os.makedirs(self.path_temp_data)
except OSError as e:
if e.errno != 17:
raise
pass
file_name = self.path_temp_data + 'stimulus_at_' + dend+ '(' + str(xloc) + ')_weight_' + str(weight) + '.p'
if self.force_run or (os.path.isfile(file_name) is False):
print("input at: " + dend + "(" + str(xloc) + ") with weight: " + str(weight))
t, v, v_dend = model.run_EPSC_stim_get_vm([dend, xloc], weight, tau1, tau2)
if self.save_all:
pickle.dump([t, v, v_dend], gzip.GzipFile(file_name, "wb"))
else:
t, v, v_dend = pickle.load(gzip.GzipFile(file_name, "rb"))
traces[dend, xloc] = [t, v, v_dend] # dictionary, the key is the dendritic location
return traces
def calculate_weights(self, traces_no_input, EPSC_amp):
locations_weights = []
for key, value in traces_no_input.items():
s = int(len(value[2])*0.9)
Vm = numpy.mean(value[2][s:]) #calculate mean at the last 10% of the trace, measured at the dendrite
weight = - EPSC_amp/Vm
locations_weights.append([key[0], key[1], weight])
return locations_weights
def analyse_traces(self, model, traces_dict_no_input, traces_dict, locations_distances):
if self.base_directory:
self.path_figs = self.base_directory + 'figs/' + 'PSP_attenuation/' + model.name + '/'
else:
self.path_figs = model.base_directory + 'figs/' + 'PSP_attenuation/'
try:
if not os.path.exists(self.path_figs) and self.save_all:
os.makedirs(self.path_figs)
except OSError as e:
if e.errno != 17:
raise
pass
attenuation_values = {}
EPSP_amp_values = {}
'''
num_of_subplots = len(traces_dict.keys())
nrows = int(numpy.ceil(numpy.sqrt(num_of_subplots)))
ncols = int(numpy.ceil(numpy.sqrt(num_of_subplots)))
i=0
'''
dend_depols = {}
soma_depols = {}
for key, value in traces_dict.items():
if not numpy.array_equal(traces_dict[key][0], traces_dict_no_input[key][0]): #if the time vectors are not equal, the traces are resampled with fixed time step
dt = 0.025
time_vector = numpy.arange(traces_dict[key][0][0], traces_dict[key][0][-1], dt) #from the first to the last element of the original time vector
interp_trace_soma = numpy.interp(time_vector, traces_dict[key][0], traces_dict[key][1])
interp_trace_soma_no_input = numpy.interp(time_vector, traces_dict_no_input[key][0], traces_dict_no_input[key][1])
interp_trace_dend = numpy.interp(time_vector, traces_dict[key][0], traces_dict[key][2])
interp_trace_dend_no_input = numpy.interp(time_vector, traces_dict_no_input[key][0], traces_dict_no_input[key][2])
dend_depol = interp_trace_dend - interp_trace_dend_no_input
soma_depol = interp_trace_soma - interp_trace_soma_no_input
dend_depols[key] = dend_depol
soma_depols[key] = soma_depol
print("Voltage traces are resampled using linear interpolation")
else:
dend_depol = traces_dict[key][2] - traces_dict_no_input[key][2]
soma_depol = traces_dict[key][1] - traces_dict_no_input[key][1]
dend_depols[key] = dend_depol
soma_depols[key] = soma_depol
time_vector = traces_dict[key][0]
max_dend_depol = max(dend_depol)
max_soma_depol = max(soma_depol)
attenuation = max_soma_depol / max_dend_depol
attenuation_values[key] = attenuation
EPSP_amp_values[key] = {'soma' : max_soma_depol, 'dendrite' : max_dend_depol}
'''
plt.figure()
#plt.subplot(nrows, ncols , i+1)
plt.plot(traces_dict[key][0], soma_depol, label='SOMA')
plt.plot(traces_dict[key][0], dend_depol, label=key[0]+'('+str(key[1])+')')
plt.legend(loc = 0)
#i+=1
'''
#sorted_locations_distances = collections.OrderedDict(sorted(locations_distances.items(), key=lambda x: x[1])) # keys are the dendritic locations, values are they distances from soma
""" Plotting the traces"""
distances = self.config['target_distances']
tolerance = self.config['tolerance']
for dist in distances:
d = {key:value for (key,value) in list(locations_distances.items()) if value >= dist - tolerance and value <= dist + tolerance }
sorted_d = collections.OrderedDict(sorted(list(d.items()), key=lambda x: x[1])) # keys are the dendritic locations, values are they distances from soma
columns = 2
width_ratios=[1]*columns
frames = len(list(d.keys()))
if int(numpy.ceil(frames/float(columns))) < 5:
rows = 5
else:
rows = int(numpy.ceil(frames/float(columns)))
height_ratios=[1]*rows
#axs=[]
fig = plt.figure(figsize = (210/25.4, 297/25.4))
gs = matplotlib.gridspec.GridSpec(rows, columns, height_ratios=height_ratios, width_ratios=width_ratios)
gs.update(top=0.92, bottom=0.04, left=0.07, right=0.97, hspace=0.75, wspace=0.3)
#fig, axes = plt.subplots(nrows=int(round(len(traces_results)/2.0)), ncols=2)
#fig.tight_layout()
fig.suptitle('Input at ' + str(dist) + '$\pm$' + str(tolerance) + ' um from soma')
i=0
        ax = None # needs to be initialized, because for some distances we may not have a figure: avoids an 'ax' referenced before assignment error
for key, value in sorted_d.items():
label_added = False
#dend_depol = traces_dict[key][2] - traces_dict_no_input[key][2]
#soma_depol = traces_dict[key][1] - traces_dict_no_input[key][1]
#plt.subplot(gs[i])
ax = fig.add_subplot(gs[i])
if not label_added:
plt.plot(time_vector, soma_depols[key] , label='SOMA')
plt.plot(time_vector, dend_depols[key], label='dendrite')
label_added = True
else:
plt.plot(time_vector, soma_depols[key])
plt.plot(time_vector, dend_depols[key])
plt.title(key[0]+'('+str("%.2f" % key[1])+') ' + str("%.2f" % locations_distances[key]) + ' um')
plt.xlabel("ms")
plt.ylabel("mV")
i+=1 # next subplot
#lgd=plt.legend(bbox_to_anchor=(1.0, 1.0), loc = 'upper left')
if ax:
handles, labels = ax.get_legend_handles_labels()
lgd = fig.legend(handles, labels, loc = 'upper right')
if self.save_all:
plt.savefig(self.path_figs + 'traces_input_around_' + str(dist)+ '_um' + '.pdf', dpi=600, bbox_inches='tight')
#print attenuation_values
return attenuation_values, EPSP_amp_values
def calcs_and_plots(self, model, attenuation_values, locations_distances, EPSP_amp_values):
if self.base_directory:
self.path_figs = self.base_directory + 'figs/' + 'PSP_attenuation/' + model.name + '/'
else:
self.path_figs = model.base_directory + 'figs/' + 'PSP_attenuation/'
try:
if not os.path.exists(self.path_figs) and self.save_all:
os.makedirs(self.path_figs)
except OSError as e:
if e.errno != 17:
raise
pass
print("The figures are saved in the directory: ", self.path_figs)
distances = self.config['target_distances']
tolerance = self.config['tolerance']
observation = self.observation
obs_means = []
obs_stds = []
PSP_attenuation_features = {}
PSP_attenuation_mean_features = {}
EPSP_amps = {}
""" Plot EPSP amplitudes on soma and dendrite"""
plt.figure()
i=0 # not to have legend for all the dots
for key, value in EPSP_amp_values.items():
EPSP_amps[key] = {'EPSP_amp_soma' : value['soma'], 'EPSP_amp_dendrite' : value['dendrite'], 'distance' : locations_distances[key]}
if i==0:
plt.plot(locations_distances[key], value['dendrite'], label = 'dendrite', color= 'black', marker='^', linestyle='none' )
plt.plot(locations_distances[key], value['soma'], label = 'soma', color= 'black', marker='o', linestyle='none' )
i += 1
else:
plt.plot(locations_distances[key], value['soma'], color= 'black', marker='o', linestyle='none' )
plt.plot(locations_distances[key], value['dendrite'], color= 'black', marker='^', linestyle='none' )
plt.xlabel('Synapse distance from soma (um)')
plt.ylabel('Peak amplitude (mV)')
plt.title('EPSPs')
lgd = plt.legend(bbox_to_anchor=(1.0, 1.0), loc = 'upper left')
if self.save_all:
plt.savefig(self.path_figs + 'EPSP_amplitudes'+ '.pdf', dpi=800, bbox_extra_artists=(lgd,), bbox_inches='tight')
"""Plot attenuation values"""
for dist in distances:
obs_means.append(observation['mean_attenuation_soma/dend_'+str(dist)+'_um'])
obs_stds.append(observation['std_attenuation_soma/dend_'+str(dist)+'_um'])
plt.figure()
for key, value in attenuation_values.items():
PSP_attenuation_features[key] = {'attenuation_soma/dendrite' : value, 'distance' : locations_distances[key]}
plt.plot(locations_distances[key], value, label = key[0]+'('+str(key[1])+') at '+ str(locations_distances[key]) + ' um', marker='o', linestyle='none' )
plt.errorbar(distances, obs_means, yerr = obs_stds, label = 'experiment', marker='o', linestyle='none', color='r')
plt.xlabel('Distance from soma (um)')
plt.ylabel('Attenuation soma/dendrite')
plt.title('PSP attenuation')
lgd = plt.legend(bbox_to_anchor=(1.0, 1.0), loc = 'upper left')
if self.save_all:
plt.savefig(self.path_figs + 'PSP_attenuation'+ '.pdf', dpi=800, bbox_extra_artists=(lgd,), bbox_inches='tight')
""" Calculate and plot the mean of attenuation values"""
label_added = False
plt.figure()
for dist in distances:
att = numpy.array([])
for key, value in locations_distances.items():
if value >= dist - tolerance and value < dist + tolerance:
att = numpy.append(att, attenuation_values[key])
mean_att = numpy.mean(att)
std_att = numpy.std(att)
PSP_attenuation_mean_features['mean_attenuation_soma/dend_'+str(dist)+'_um']={'mean': float(mean_att), 'std' : float(std_att)}
if not label_added:
plt.errorbar(dist, mean_att, yerr = std_att, marker='o', linestyle='none', color = 'b', label=model.name)
label_added = True
else:
plt.errorbar(dist, mean_att, yerr = std_att, marker='o', linestyle='none', color = 'b')
plt.errorbar(distances, obs_means, yerr = obs_stds, label = 'experiment', marker='o', linestyle='none', color='r')
plt.xlabel('Distance from soma (um)')
plt.ylabel('Mean attenuation soma/dendrite')
plt.title(' Mean PSP attenuation')
lgd = plt.legend(bbox_to_anchor=(1.0, 1.0), loc = 'upper left')
if self.save_all:
plt.savefig(self.path_figs + 'mean_PSP_attenuation'+ '.pdf', dpi=800, bbox_extra_artists=(lgd,), bbox_inches='tight')
#print PSP_attenuation_features
return PSP_attenuation_features, PSP_attenuation_mean_features, EPSP_amps
""" observation contains ratio numbers, have no unit"""
'''
def validate_observation(self, observation):
for key, value in observation.iteritems():
try:
assert type(observation[key]) is Quantity
except Exception as e:
raise ObservationError(("Observation must be of the form "
"{'mean':float*mV,'std':float*mV}"))
'''
def generate_prediction(self, model, verbose=False):
"""Implementation of sciunit.Test.generate_prediction."""
efel.reset()
if self.base_directory:
self.path_results = self.base_directory + 'results/' + 'PSP_attenuation/' + model.name + '/'
else:
self.path_results = model.base_directory + 'results/' + 'PSP_attenuation/'
try:
if not os.path.exists(self.path_results):
os.makedirs(self.path_results)
except OSError as e:
if e.errno != 17:
raise
pass
distances = self.config['target_distances']
tolerance = self.config['tolerance']
dist_range = [min(distances) - tolerance, max(distances) + tolerance]
tau1 = self.config['tau_rise']
tau2 = self.config['tau_decay']
EPSC_amp = self.config['EPSC_amplitude']
locations, locations_distances = model.get_random_locations_multiproc(self.num_of_dend_locations, self.random_seed, dist_range, self.trunk_origin) # number of random locations , seed
#print dend_locations, actual_distances
print('Dendritic locations to be tested (with their actual distances):', locations_distances)
weight = 0.0
locations_weights = []
for locs in locations:
locs.append(weight)
locations_weights.append(locs)
#print locations_weights
""" run model without an input"""
pool = multiprocessing.Pool(self.npool, maxtasksperchild=1)
run_stimulus_ = functools.partial(self.run_stimulus, model, tau1 = tau1, tau2 = tau2)
traces_no_input = pool.map(run_stimulus_, locations_weights, chunksize=1)
pool.terminate()
pool.join()
del pool
traces_dict_no_input = dict(list(i.items())[0] for i in traces_no_input) # merge list of dicts into single dict
locations_weights = self.calculate_weights(traces_dict_no_input, EPSC_amp)
"""run model with inputs"""
pool = multiprocessing.Pool(self.npool, maxtasksperchild=1)
run_stimulus_ = functools.partial(self.run_stimulus, model, tau1 = tau1, tau2 = tau2)
traces = pool.map(run_stimulus_, locations_weights, chunksize=1)
pool.terminate()
pool.join()
del pool
traces_dict = dict(list(i.items())[0] for i in traces) # merge list of dicts into single dict
filepath = self.path_results + self.test_log_filename
        self.logFile = open(filepath, 'w') # if it is opened before multiprocessing, the multiprocessing won't work under python3
self.logFile.write('Dendritic locations to be tested (with their actual distances):\n'+ str(locations_distances)+'\n')
self.logFile.write("---------------------------------------------------------------------------------------------------\n")
#plt.close('all') #needed to avoid overlapping of saved images when the test is run on multiple models in a for loop
plt.close('all') #needed to avoid overlapping of saved images when the test is run on multiple models
attenuation_values, EPSP_amp_values = self.analyse_traces(model, traces_dict_no_input, traces_dict, locations_distances)
PSP_attenuation_model_features, PSP_attenuation_mean_model_features, EPSP_amps = self.calcs_and_plots(model, attenuation_values, locations_distances, EPSP_amp_values)
prediction = PSP_attenuation_mean_model_features
'''
for key, value in traces_dict.items():
plt.figure()
plt.plot(value[0], value[1], label='SOMA')
plt.plot(value[0], value[2], label=key[0]+'('+str(key[1])+')')
plt.legend(loc = 0)
'''
PSP_attenuation_model_features_json = {}
for key, value in PSP_attenuation_model_features.items():
PSP_attenuation_model_features_json[str(key)]=value
file_name_features = self.path_results + 'PSP_attenuation_model_features.json'
json.dump(PSP_attenuation_model_features_json, open(file_name_features, "w"), indent=4)
EPSP_amps_json = {}
for key, value in EPSP_amps.items():
EPSP_amps_json[str(key)]=value
file_name_EPSP_amps = self.path_results + 'EPSP_amps.json'
json.dump(EPSP_amps_json, open(file_name_EPSP_amps, "w"), indent=4)
if self.save_all:
file_name_features_p = self.path_results + 'PSP_attenuation_model_features.p'
pickle.dump(PSP_attenuation_model_features, gzip.GzipFile(file_name_features_p, "wb"))
file_name_EPSP_amps_p = self.path_results + 'EPSP_amps.p'
pickle.dump(EPSP_amps, gzip.GzipFile(file_name_EPSP_amps_p, "wb"))
file_name_mean_features = self.path_results + 'PSP_attenuation_mean_model_features.json'
json.dump(prediction, open(file_name_mean_features, "w"), indent=4)
efel.reset()
return prediction
def compute_score(self, observation, prediction, verbose=False):
"""Implementation of sciunit.Test.score_prediction."""
distances = self.config['target_distances']
score_avg, errors= scores.ZScore_PSPAttenuation.compute(observation,prediction, distances)
file_name=self.path_results+'PSP_attenuation_errors.json'
json.dump(errors, open(file_name, "w"), indent=4)
keys = []
values = []
plt.figure()
for key, value in errors.items():
keys.append(key)
values.append(value)
y=list(range(len(keys)))
y.reverse()
plt.plot(values, y, 'o')
plt.yticks(y, keys)
plt.title('PSP attenuation errors')
if self.save_all:
plt.savefig(self.path_figs + 'PSP_attenuation_errors'+ '.pdf', bbox_inches='tight')
if self.show_plot:
plt.show()
score_json= {'score' : score_avg}
file_name_score = self.path_results + 'PSP_att_final_score.json'
json.dump(score_json, open(file_name_score, "w"), indent=4)
score=scores.ZScore_PSPAttenuation(score_avg)
self.logFile.write(str(score)+'\n')
self.logFile.write("---------------------------------------------------------------------------------------------------\n")
self.logFile.close()
self.logFile = self.path_results + self.test_log_filename
return score
def bind_score(self, score, model, observation, prediction):
score.related_data["figures"] = [self.path_figs + 'PSP_attenuation.pdf', self.path_figs + 'mean_PSP_attenuation.pdf',
self.path_figs + 'PSP_attenuation_errors.pdf', self.path_results + 'PSP_attenuation_model_features.json',
self.path_results + 'PSP_attenuation_mean_model_features.json', self.path_results + 'PSP_attenuation_errors.json',
self.path_results + 'PSP_att_final_score.json', self.path_results + self.test_log_filename]
score.related_data["results"] = [self.path_results + 'PSP_attenuation_model_features.json', self.path_results + 'PSP_attenuation_mean_model_features.json', self.path_results + 'PSP_attenuation_errors.json', self.path_results + 'PSP_attenuation_model_features.p', self.path_results + 'PSP_att_final_score.json']
return score
| StarcoderdataPython |
3284042 | <filename>tests/multi_process/zeromq_queue.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the the zeromq queue."""
import unittest
from plaso.lib import errors
from plaso.multi_process import zeromq_queue
from tests import test_lib as shared_test_lib
class ZeroMQPullBindQueue(zeromq_queue.ZeroMQPullQueue):
"""A Plaso queue backed by a ZeroMQ PULL socket that binds to a port.
This queue may only be used to pop items, not to push.
"""
SOCKET_CONNECTION_TYPE = zeromq_queue.ZeroMQQueue.SOCKET_CONNECTION_BIND
class ZeroMQPushConnectQueue(zeromq_queue.ZeroMQPushQueue):
"""A Plaso queue backed by a ZeroMQ PUSH socket that connects to a port.
This queue may only be used to push items, not to pop.
"""
SOCKET_CONNECTION_TYPE = zeromq_queue.ZeroMQQueue.SOCKET_CONNECTION_CONNECT
class ZeroMQRequestBindQueue(zeromq_queue.ZeroMQRequestQueue):
"""A Plaso queue backed by a ZeroMQ REQ socket that binds to a port.
This queue may only be used to pop items, not to push.
"""
SOCKET_CONNECTION_TYPE = zeromq_queue.ZeroMQQueue.SOCKET_CONNECTION_BIND
class ZeroMQBufferedReplyConnectQueue(zeromq_queue.ZeroMQBufferedReplyQueue):
"""A Plaso queue backed by a ZeroMQ REP socket that connects to a port.
This queue may only be used to pop items, not to push.
"""
SOCKET_CONNECTION_TYPE = zeromq_queue.ZeroMQQueue.SOCKET_CONNECTION_CONNECT
class ZeroMQQueuesTest(shared_test_lib.BaseTestCase):
"""Tests for ZeroMQ queues."""
# pylint: disable=protected-access
_QUEUE_CLASSES = frozenset([
zeromq_queue.ZeroMQPushBindQueue, ZeroMQPullBindQueue,
ZeroMQRequestBindQueue])
def _testItemTransferred(self, push_queue, pop_queue):
"""Tests than item can be transferred between two queues."""
item = 'This is an item going from {0:s} to {1:s}.'.format(
push_queue.name, pop_queue.name)
push_queue.PushItem(item)
popped_item = pop_queue.PopItem()
self.assertEqual(item, popped_item)
def testBufferedReplyQueue(self):
"""Tests for the buffered reply queue."""
test_queue = zeromq_queue.ZeroMQBufferedReplyBindQueue(
name='bufferedreply_bind', delay_open=False, linger_seconds=1)
test_queue.PushItem('This is a test item.')
test_queue.Close(abort=True)
with self.assertRaises(errors.QueueAlreadyClosed):
test_queue.PushItem('This shouldn\'t work')
def testPushPullQueues(self):
"""Tests than an item can be transferred between push and pull queues."""
push_queue = zeromq_queue.ZeroMQPushBindQueue(
name='pushpull_pushbind', delay_open=False, linger_seconds=1)
pull_queue = zeromq_queue.ZeroMQPullConnectQueue(
name='pushpull_pullconnect', delay_open=False, port=push_queue.port,
linger_seconds=1)
self._testItemTransferred(push_queue, pull_queue)
push_queue.Close()
pull_queue.Close()
pull_queue = ZeroMQPullBindQueue(
name='pushpull_pullbind', delay_open=False, linger_seconds=1)
push_queue = ZeroMQPushConnectQueue(
name='pushpull_pushconnect', delay_open=False, port=pull_queue.port,
linger_seconds=1)
self._testItemTransferred(push_queue, pull_queue)
push_queue.Close()
pull_queue.Close()
def testQueueStart(self):
"""Tests that delayed creation of ZeroMQ sockets occurs correctly."""
for queue_class in self._QUEUE_CLASSES:
queue_name = 'queuestart_{0:s}'.format(queue_class.__name__)
test_queue = queue_class(
name=queue_name, delay_open=True, linger_seconds=1)
message = '{0:s} socket already exists.'.format(queue_name)
self.assertIsNone(test_queue._zmq_socket, message)
test_queue.Open()
self.assertIsNotNone(test_queue._zmq_socket)
test_queue.Close()
def testRequestAndBufferedReplyQueues(self):
"""Tests REQ and buffered REP queue pairs."""
reply_queue = zeromq_queue.ZeroMQBufferedReplyBindQueue(
name='requestbufferedreply_replybind', delay_open=False,
linger_seconds=1)
request_queue = zeromq_queue.ZeroMQRequestConnectQueue(
name='requestbufferedreply_requestconnect', delay_open=False,
port=reply_queue.port, linger_seconds=1)
self._testItemTransferred(reply_queue, request_queue)
reply_queue.Close()
request_queue.Close()
request_queue = ZeroMQRequestBindQueue(
name='requestbufferedreply_requestbind', delay_open=False,
linger_seconds=1)
reply_queue = ZeroMQBufferedReplyConnectQueue(
name='requestbufferedreply_replyconnect', delay_open=False,
port=request_queue.port, linger_seconds=0)
self._testItemTransferred(reply_queue, request_queue)
reply_queue.Close()
request_queue.Close()
def testEmptyBufferedQueues(self):
"""Tests the Empty method for buffered queues."""
queue = zeromq_queue.ZeroMQBufferedReplyBindQueue(
name='requestbufferedreply_replybind', delay_open=False,
linger_seconds=1, buffer_max_size=3, timeout_seconds=2,
buffer_timeout_seconds=1)
try:
while True:
queue.PushItem('item', block=False)
except errors.QueueFull:
# Queue is now full
pass
with self.assertRaises(errors.QueueFull):
queue.PushItem('item', block=False)
queue.Empty()
# We should now be able to push another item without an exception.
queue.PushItem('item')
queue.Empty()
queue.Close()
def testSocketCreation(self):
"""Tests that ZeroMQ sockets are created when a new queue is created."""
for queue_class in self._QUEUE_CLASSES:
queue_name = 'socket_creation_{0:s}'.format(queue_class.__name__)
test_queue = queue_class(
name=queue_name, delay_open=False, linger_seconds=1)
self.assertIsNotNone(test_queue._zmq_socket)
test_queue.Close()
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
4838102 | <reponame>bencarlisle15/AlexaControlledSamsungTV<filename>tvconfig.py<gh_stars>1-10
device_name = "Pi" # What shows up under devices in alexasmarttv.tk. Not that important unless you have multiple devices (not TVs) on your account
volume_step_size = 5 #how much your tv volume should go up by when you say 'Alexa, turn up the volume on my tv'
tvs = [
{
'host': "xxx.xxx.xxx.xxx", #ip address of tv
'tv_model' : "xxxxxxxxx", #9 digit tv model located on the back of your tv
'tv_mac_address': "xx:xx:xx:xx:xx", #mac address of your tv
        'tv_name' : 'TV', # leave as TV to reference this by just 'TV', e.g. 'Alexa, turn on the TV'. Change to e.g. 'Kitchen TV' if you want to say 'Alexa, turn on the kitchen TV'. You cannot have multiple TVs with the same name
        'prefer_HD': True, # if you say 'change the channel to ESPN', always attempt to use the HD channel number
}
]
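# Hypothetical example (commented out, values are placeholders): to control several sets,
# add another dict like this inside the tvs list above, each with its own 'tv_name',
# e.g. 'Alexa, turn on the kitchen TV'.
# {
#     'host': "xxx.xxx.xxx.xxx",
#     'tv_model': "xxxxxxxxx",
#     'tv_mac_address': "xx:xx:xx:xx:xx",
#     'tv_name': 'Kitchen TV',
#     'prefer_HD': True,
# },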
| StarcoderdataPython |