id (stringlengths, 1–265) | text (stringlengths, 6–5.19M) | dataset_id (stringclasses, 7 values)
---|---|---
157955 | import time
from unittest import TestCase
from fsmodels.models import Field, ValidationError
def generic_validator(x):
return x == 1, {'detail': 'x must be 1.'}
class TestField(TestCase):
def test___init__(self):
# no required fields
f = Field()
with self.assertRaises((ValidationError,)):
# validation must be callable
Field(validation=1)
# validation can be None or a function that returns a tuple(boolean, dict)
f = Field(validation=generic_validator)
def test_validate(self):
f = Field(default=1, required=True)
with self.assertRaises((ValidationError,)):
# if the field is required, value passed to validate cannot be None
f.validate(None)
f2 = Field(required=False)
# if the field is not required then the value passed to validate can be None
f2.name = 'named'
self.assertTrue(f2.validate(None)[0], "Field.validate did not return true.")
with self.assertRaises((ValidationError,)):
# field value must return True from validation function
f = Field(validation=generic_validator)
f.validate(2)
f = Field(validation=generic_validator)
# alternatively, we can prevent validation from raising an error.
self.assertFalse(f.validate(None, raise_error=False)[0], "Field.validate function should return false.")
def test_default(self):
f = Field()
        # default is None by default
self.assertIsNone(f.default())
f = Field(default=1)
# Field.default() should return simple values
self.assertEqual(f.default(), 1, "Field.default does not return default parameter")
f = Field(default=time.time)
        # the default parameter can be passed as a callable; Field.default() invokes it
self.assertAlmostEqual(f.default(), time.time(), 1, "Field.default does not call default function.")
f = Field(default=lambda x, y=1: x*y)
        # Field.default() forwards arbitrary args and kwargs to the user-defined default callable.
self.assertEqual(f.default(2, y=3), 6, "Field.default does not accept arbitrary args and kwargs.")
| StarcoderdataPython |
1712136 | <reponame>JayMonari/py-personal
def square_of_sum(number: int) -> int:
return sum([n for n in range(1, number + 1)]) ** 2
def sum_of_squares(number: int) -> int:
return sum([n**2 for n in range(1, number + 1)])
def difference_of_squares(number):
return square_of_sum(number) - sum_of_squares(number)
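# Worked example: for number = 10, square_of_sum(10) == 55 ** 2 == 3025 and
# sum_of_squares(10) == 385, so difference_of_squares(10) == 2640.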
| StarcoderdataPython |
75414 | <reponame>mattn/vint<gh_stars>1-10
import re
from vint.ast.node_type import NodeType
from vint.linting.level import Level
from vint.linting.policy.abstract_policy import AbstractPolicy
from vint.linting.policy_loader import register_policy
from vint.linting.policy.autocmd_event import AutoCmdEvents
@register_policy
class ProhibitAutocmdWithNoGroup(AbstractPolicy):
def __init__(self):
super(ProhibitAutocmdWithNoGroup, self).__init__()
self.description = 'autocmd should execute in augroup or execute with a group'
self.reference = ':help :autocmd'
self.level = Level.WARNING
self.is_inside_of_augroup = False
def listen_node_types(self):
return [NodeType.EXCMD]
def is_valid(self, node, lint_context):
""" Whether the specified node is valid.
        The autocmd family of commands should be called with a group.
"""
        # node['ea']['cmd'] is empty for a line-jump command such as ':1'
cmd_name = node['ea']['cmd'].get('name', None)
is_autocmd = cmd_name == 'autocmd'
if is_autocmd and not self.is_inside_of_augroup:
matched = re.match(r'au(?:tocmd)?!?\s+(\S+)', node['str'])
if not matched:
# Looks like autocmd with a bang
return True
has_no_group = matched.group(1) in AutoCmdEvents
return not has_no_group
is_augroup = cmd_name == 'augroup'
if is_augroup:
matched = re.match(r'aug(?:roup)?\s+END', node['str'])
is_augroup_end = bool(matched)
self.is_inside_of_augroup = not is_augroup_end
return True
| StarcoderdataPython |
3368443 | <filename>baseCoverter.py<gh_stars>0
import random
def convertQues(rng):
# num = random.randint(0, rng)
num = rng
modes = ["hex", "bin"]
mode = random.choice(modes)
wrongAnswer = True
if mode == "hex":
while wrongAnswer:
inp = input(f"What is hex {hex(num)[2:]} in decimal: ")
if int(inp, 10) == num:
wrongAnswer = False
else:
print("Wrong answer. Please try again")
if mode == 'bin':
while wrongAnswer:
inp = input(f"What is binary {bin(num)[2:]} in decimal: ")
if int(inp, 10) == num:
wrongAnswer = False
else:
print("Wrong answer. Please try again")
while True:
r = random.randint(0, 256)
print(f"{r}, {bin(r)}, {hex(r)}")
convertQues(r)
| StarcoderdataPython |
78801 | <reponame>jetannenbaum/micropython_ir
# sony.py Encoder for IR remote control using synchronous code
# Sony SIRC protocol.
# Author: <NAME>
# Copyright <NAME> 2020 Released under the MIT license
from micropython import const
from ir_tx import IR
class SONY_ABC(IR):
def __init__(self, pin, bits, freq, verbose):
super().__init__(pin, freq, 3 + bits * 2, 30, verbose)
if bits not in (12, 15, 20):
raise ValueError('bits must be 12, 15 or 20.')
self.bits = bits
def tx(self, addr, data, ext):
self.append(2400, 600)
bits = self.bits
v = data & 0x7f
if bits == 12:
v |= (addr & 0x1f) << 7
elif bits == 15:
v |= (addr & 0xff) << 7
else:
v |= (addr & 0x1f) << 7
v |= (ext & 0xff) << 12
for _ in range(bits):
self.append(1200 if v & 1 else 600, 600)
v >>= 1
# Sony specifies 40KHz
class SONY_12(SONY_ABC):
valid = (0x1f, 0x7f, 0) # Max addr, data, toggle
def __init__(self, pin, freq=40000, verbose=False):
super().__init__(pin, 12, freq, verbose)
class SONY_15(SONY_ABC):
valid = (0xff, 0x7f, 0) # Max addr, data, toggle
def __init__(self, pin, freq=40000, verbose=False):
super().__init__(pin, 15, freq, verbose)
class SONY_20(SONY_ABC):
valid = (0x1f, 0x7f, 0xff) # Max addr, data, toggle
def __init__(self, pin, freq=40000, verbose=False):
super().__init__(pin, 20, freq, verbose)
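# Worked bit-layout example (values are illustrative, not from the source):
# for SONY_12 with addr=0x05 and data=0x2A, tx() assembles
# v = (0x05 << 7) | 0x2A == 0x2AA and sends its 12 bits LSB-first, each bit
# as a 1200us (1) or 600us (0) mark followed by a 600us space.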
| StarcoderdataPython |
3379885 | from typing import List, Tuple
import numpy as np
from pyrep.objects.shape import Shape
from pyrep.objects.joint import Joint
from pyrep.objects.object import Object
from rlbench.backend.task import Task
from rlbench.backend.conditions import JointCondition
class OpenDoor(Task):
def init_task(self) -> None:
self.door_main = Shape('door_main')
self.door_main.set_dynamic(False)
door_joint = Joint('door_frame_joint')
handle_joint = Joint('door_handle_joint')
self.register_success_conditions(
[JointCondition(door_joint, np.deg2rad(25))])
self.door_unlock_cond = JointCondition(
handle_joint, np.deg2rad(25))
def init_episode(self, index: int) -> List[str]:
self.door_unlocked = False
return ['open the door',
'grip the handle and slide the door open',
'use the handle to move the door open']
def variation_count(self) -> int:
return 1
def step(self) -> None:
if not self.door_unlocked:
self.door_unlocked = self.door_unlock_cond.condition_met()[0]
if self.door_unlocked:
self.door_main.set_dynamic(True)
def base_rotation_bounds(self) -> Tuple[List[float], List[float]]:
return [0, 0, -3.14 / 4.], [0, 0, 3.14 / 4.]
def boundary_root(self) -> Object:
return Shape('boundary_root')
| StarcoderdataPython |
154887 | <reponame>saurabsa/azure-cli-old
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import tempfile
import unittest
import mock
from azure.cli.core._util import CLIError
from azure.cli.command_modules.vm._validators import (validate_ssh_key,
_is_valid_ssh_rsa_public_key,
_figure_out_storage_source,
_validate_admin_username,
_validate_admin_password)
class TestActions(unittest.TestCase):
def test_generate_specfied_ssh_key_files(self):
_, private_key_file = tempfile.mkstemp()
public_key_file = private_key_file + '.pub'
args = mock.MagicMock()
args.ssh_key_value = public_key_file
args.generate_ssh_keys = True
# 1 verify we generate key files if not existing
validate_ssh_key(args)
generated_public_key_string = args.ssh_key_value
self.assertTrue(bool(args.ssh_key_value))
self.assertTrue(_is_valid_ssh_rsa_public_key(generated_public_key_string))
self.assertTrue(os.path.isfile(private_key_file))
# 2 verify we load existing key files
        # for convenience we will reuse the generated file from the previous step
args2 = mock.MagicMock()
args2.ssh_key_value = generated_public_key_string
args2.generate_ssh_keys = False
validate_ssh_key(args2)
# we didn't regenerate
self.assertEqual(generated_public_key_string, args.ssh_key_value)
# 3 verify we do not generate unless told so
_, private_key_file2 = tempfile.mkstemp()
public_key_file2 = private_key_file2 + '.pub'
args3 = mock.MagicMock()
args3.ssh_key_value = public_key_file2
args3.generate_ssh_keys = False
with self.assertRaises(CLIError):
validate_ssh_key(args3)
# 4 verify file naming if the pub file doesn't end with .pub
_, public_key_file4 = tempfile.mkstemp()
public_key_file4 += '1' # make it nonexisting
args4 = mock.MagicMock()
args4.ssh_key_value = public_key_file4
args4.generate_ssh_keys = True
validate_ssh_key(args4)
self.assertTrue(os.path.isfile(public_key_file4 + '.private'))
self.assertTrue(os.path.isfile(public_key_file4))
def test_figure_out_storage_source(self):
test_data = 'https://av123images.blob.core.windows.net/images/TDAZBET.vhd'
src_blob_uri, src_disk, src_snapshot = _figure_out_storage_source('tg1', test_data)
self.assertFalse(src_disk)
self.assertFalse(src_snapshot)
self.assertEqual(src_blob_uri, test_data)
def test_validate_admin_username_linux(self):
# pylint: disable=line-too-long
err_invalid_char = r'admin user name cannot contain upper case character A-Z, special characters \/"[]:|<>+=;,?*@#()! or start with $ or -'
self._verify_username_with_ex('!@#', 'linux', err_invalid_char)
self._verify_username_with_ex('dav[', 'linux', err_invalid_char)
self._verify_username_with_ex('Adavid', 'linux', err_invalid_char)
self._verify_username_with_ex('-ddavid', 'linux', err_invalid_char)
self._verify_username_with_ex('', 'linux', 'admin user name can not be empty')
self._verify_username_with_ex('david', 'linux',
"This user name 'david' meets the general requirements, but is specifically disallowed for this image. Please try a different value.")
_validate_admin_username('d-avid1', 'linux')
_validate_admin_username('david1', 'linux')
_validate_admin_username('david1.', 'linux')
def test_validate_admin_username_windows(self):
# pylint: disable=line-too-long
err_invalid_char = r'admin user name cannot contain special characters \/"[]:|<>+=;,?*@# or ends with .'
self._verify_username_with_ex('!@#', 'windows', err_invalid_char)
self._verify_username_with_ex('dav[', 'windows', err_invalid_char)
self._verify_username_with_ex('dddivid.', 'windows', err_invalid_char)
self._verify_username_with_ex('john', 'windows',
"This user name 'john' meets the general requirements, but is specifically disallowed for this image. Please try a different value.")
_validate_admin_username('ADAVID', 'windows')
_validate_admin_username('d-avid1', 'windows')
_validate_admin_username('david1', 'windows')
def test_validate_admin_password_linux(self):
# pylint: disable=line-too-long
err_length = 'The pssword length must be between 12 and 72'
err_variety = 'Password must have the 3 of the following: 1 lower case character, 1 upper case character, 1 number and 1 special character'
self._verify_password_with_ex('te', 'linux', err_length)
self._verify_password_with_ex('P12' + '3' * 70, 'linux', err_length)
self._verify_password_with_ex('<PASSWORD>', 'linux', err_variety)
_validate_admin_password('<PASSWORD>', 'linux')
_validate_admin_password('<PASSWORD>!@#', 'linux')
def test_validate_admin_password_windows(self):
# pylint: disable=line-too-long
err_length = 'The pssword length must be between 12 and 123'
err_variety = 'Password must have the 3 of the following: 1 lower case character, 1 upper case character, 1 number and 1 special character'
self._verify_password_with_ex('P1', 'windows', err_length)
self._verify_password_with_ex('<PASSWORD>' + '3' * 120, 'windows', err_length)
self._verify_password_with_ex('<PASSWORD>', 'windows', err_variety)
_validate_admin_password('<PASSWORD>!!!', 'windows')
_validate_admin_password('<PASSWORD>' + '1' * 70, 'windows')
def _verify_username_with_ex(self, admin_username, is_linux, expected_err):
with self.assertRaises(CLIError) as context:
_validate_admin_username(admin_username, is_linux)
self.assertTrue(expected_err in str(context.exception))
def _verify_password_with_ex(self, admin_password, is_linux, expected_err):
with self.assertRaises(CLIError) as context:
_validate_admin_password(admin_password, is_linux)
self.assertTrue(expected_err in str(context.exception))
| StarcoderdataPython |
192541 | <filename>beagle/header/mpu_accel_fsr_t.py<gh_stars>0
ACCEL_FSR_2G = 0
ACCEL_FSR_4G = 1
ACCEL_FSR_8G = 2
ACCEL_FSR_16G = 3
| StarcoderdataPython |
3272806 | <filename>python/testData/completion/tupleParameterNamesNotSuggested.py
def func((foo, bar), baz):
pass
func(b<caret>) | StarcoderdataPython |
3267946 | <reponame>TejasAvinashShetty/krotov<gh_stars>0
r"""Routines for `check_convergence` in :func:`krotov.optimize.optimize_pulses`
A `check_convergence` function may be used to determine whether an optimization
is converged, and thus can be stopped before the maximum number of
iterations (`iter_stop`) is reached. A function suitable for
`check_convergence` must receive a :class:`.Result` object, and return a value
that evaluates as True or False in a Boolean context, indicating whether the
optimization has converged or not.
The :class:`.Result` object that the `check_convergence` function receives as
an argument will be up-to-date for the current iteration. That is, it will
already contain the current values from :func:`.optimize_pulses`'s `info_hook`
in :attr:`.Result.info_vals`, the current :attr:`~.Result.tau_vals`, etc. The
:attr:`.Result.optimized_controls` attribute will contain the current optimized
pulses (defined on the intervals of :attr:`~.Result.tlist`). The
`check_convergence` function should not modify the :class:`.Result` object it
receives in any way. The proper place for custom modifications after each
iteration in :func:`.optimize_pulses` is through the `info_hook` routine.
It is recommended that a `check_convergence` function returns None (which is
False in a Boolean context) if the optimization has not yet converged. If the
optimization has converged, `check_convergence` should return a message string
(which is True in a Boolean context). The returned string will be included in
the final :attr:`.Result.message`.
A typical usage for `check_convergence` is ending the optimization when the
optimization functional falls below a specified limit. Such a
`check_convergence` function can be generated by :func:`value_below`.
By default, this assumes that the `info_hook` passed to
:func:`.optimize_pulses` returns the value of the functional,
which is then stored in :attr:`.Result.info_vals`. Alternatively,
:func:`value_below` could be told to look at the :attr:`.Result.tau_vals`.
Similarly, one might stop the optimization when there is an insufficient
improvement between iterations. The :func:`delta_below` function generates a
`check_convergence` function for this purpose. Multiple convergence conditions
("stop optimization when :math:`J_T` reaches :math:`10^{-5}`, or if
:math:`\Delta J_T < 10^{-6}`") can be defined via :func:`Or`.
While Krotov's method is guaranteed to monotonically converge in the continuous
limit, this no longer strictly holds when time is discretized (in particular if
:attr:`~.PulseOptions.lambda_a` is too small). You can use
:func:`check_monotonic_error` or :func:`check_monotonic_fidelity` as a
`check_convergence` function that stops the optimization when monotonic
convergence is lost.
"""
from operator import xor
import glom
__all__ = [
'Or',
'value_below',
'delta_below',
'check_monotonic_error',
'check_monotonic_fidelity',
]
def Or(*funcs):
"""Chain multiple `check_convergence` functions together in a logical Or.
Each parameter must be a function suitable to pass to
:func:`~krotov.optimize.optimize_pulses` as `check_convergence`. It
must receive a :class:`.Result` object and should return None or a string
message.
Returns:
callable: A function ``check_convergence(result)`` that returns the
result of the first "non-passing" function in `*funcs`. A "non-passing"
result is one that evaluates to True in a Boolean context (should be a
string message)
"""
def check_convergence(result):
for func in funcs:
msg = func(result)
if bool(msg) is True:
return msg
return None
return check_convergence
def value_below(limit, spec=('info_vals', glom.T[-1]), name=None, **kwargs):
"""Constructor for routine that checks if a value is below `limit`
Args:
limit (float or str): A float value (or str-representation of a float)
against which to compare the value extracted from :class:`.Result`
spec: A :func:`~glom.glom` specification of the :class:`.Result`
attribute from which to extract the value to compare against
`limit`. Defaults to a spec extracting the last value in
:attr:`.Result.info_vals`.
name (str or None): A name identifying the checked value, used for the
message returned by the `check_convergence` routine. Defaults to
``str(spec)``.
**kwargs: Keyword arguments to pass to :func:`~glom.glom`
Returns:
callable: A function ``check_convergence(result)`` that extracts the
value specified by `spec` from the :class:`.Result` object, and checks
it against `limit`. If the value is below the `limit`, it returns an
appropriate message string. Otherwise, it returns None.
Note:
If you find :func:`~glom.glom`-specifications intimidating: A simple
callable acting on :class:`.Result` is also a valid `spec`. However, if
you use a callable `spec`, make sure to pass an appropriate `name`.
The `spec` in the following example is equivalent to the default
``spec=('info_vals', glom.T[-1])``.
Example:
>>> check_convergence = value_below(
... limit='1e-4',
... spec=lambda r: r.info_vals[-1],
... name='J_T'
... )
>>> r = krotov.result.Result()
>>> r.info_vals.append(1e-4)
>>> check_convergence(r) # returns None
>>> r.info_vals.append(9e-5)
>>> check_convergence(r)
'J_T < 1e-4'
"""
if name is None:
name = str(spec)
def check_convergence(result):
v = glom.glom(result, spec, **kwargs)
if v < float(limit):
return "%s < %s" % (name, limit)
else:
return None
return check_convergence
def delta_below(
limit,
spec1=('info_vals', glom.T[-1]),
spec0=('info_vals', glom.T[-2]),
absolute_value=True,
name=None,
**kwargs
):
r"""Constructor for a routine that checks if
$\Abs{v_1 - v_0} < \varepsilon$
Args:
limit (float or str): A float value (or str-representation of a float)
for $\varepsilon$
spec1: A :func:`~glom.glom` specification of the :class:`.Result`
attribute from which to extract $v_1$. Defaults to a spec
extracting the last value in :attr:`.Result.info_vals`.
spec0: A :func:`~glom.glom` specification of the :class:`.Result`
attribute from which to extract $v_0$. Defaults to a spec
extracting the last-but-one value in :attr:`.Result.info_vals`.
absolute_value (bool): If False, check for $v_1 - v_0 < \varepsilon$,
instead of the absolute value.
name (str or None): A name identifying the delta, used for the
message returned by the `check_convergence` routine. Defaults to
``"Δ({spec1},{spec0}"``.
**kwargs: Keyword arguments to pass to :func:`~glom.glom`
Note:
You can use :func:`delta_below` to implement a check for strict
monotonic convergence, e.g. when `info_hook` returns the optimization
error, by flipping `spec0` and `spec1`, setting `limit` to zero, and
setting `absolute_value` to False. See :func:`check_monotonic_error`.
Example:
>>> check_convergence = delta_below(limit='1e-4', name='ΔJ_T')
>>> r = krotov.result.Result()
>>> r.info_vals.append(9e-1)
>>> check_convergence(r) # None
>>> r.info_vals.append(1e-1)
>>> check_convergence(r) # None
>>> r.info_vals.append(4e-4)
>>> check_convergence(r) # None
>>> r.info_vals.append(2e-4)
>>> check_convergence(r) # None
>>> r.info_vals.append(1e-6)
>>> check_convergence(r) # None
>>> r.info_vals.append(1e-7)
>>> check_convergence(r)
'ΔJ_T < 1e-4'
"""
if name is None:
name = "Δ(%s,%s)" % (spec1, spec0)
def check_convergence(result):
delayed_exc = None
try:
v1 = glom.glom(result, spec1, **kwargs)
except (AttributeError, KeyError, IndexError, glom.GlomError) as exc:
v1 = None
delayed_exc = exc
try:
v0 = glom.glom(result, spec0, **kwargs)
except (AttributeError, KeyError, IndexError, glom.GlomError) as exc:
v0 = None
delayed_exc = exc
if xor((v1 is None), (v0 is None)):
# After the first iteration, there may not be enough data to get
# *both* v1 and v0. In this case, we just pass the check...
return None
else:
# ... However, if we can access neither v1 nor v0, then something
# is definitely wrong, and we should re-raise the original
# exception
if delayed_exc is not None:
raise delayed_exc
delta = v1 - v0
if absolute_value:
delta = abs(delta)
if delta < float(limit):
return "%s < %s" % (name, limit)
else:
return None
return check_convergence
_monotonic_convergence = delta_below(
limit=0,
spec1=('info_vals', glom.T[-2]),
spec0=('info_vals', glom.T[-1]),
absolute_value=False,
name="Loss of monotonic convergence; error decrease",
)
_monotonic_fidelity = delta_below(
limit=0,
spec1=('info_vals', glom.T[-1]),
spec0=('info_vals', glom.T[-2]),
absolute_value=False,
name="Loss of monotonic convergence; fidelity increase",
)
def check_monotonic_error(result):
"""Check for monotonic convergence with respect to the error
Check that the last value in :attr:`.Result.info_vals` is
smaller than the last-but-one value. If yes, return None. If no, return an
appropriate error message.
This assumes that the `info_hook` passed to :func:`.optimize_pulses`
returns the value of the functional, which is then available in
:attr:`.Result.info_vals`.
Example:
>>> r = krotov.result.Result()
>>> r.info_vals.append(9e-1)
>>> check_monotonic_error(r) # None
>>> r.info_vals.append(1e-1)
>>> check_monotonic_error(r) # None
>>> r.info_vals.append(2e-1)
>>> check_monotonic_error(r)
'Loss of monotonic convergence; error decrease < 0'
See also:
Use :func:`check_monotonic_fidelity` for when `info_hook` returns a
"fidelity", that is, a measure that should *increase* in each
iteration.
"""
# This is a wrapper for `_monotonic_convergence` just so that we can have
# `check_monotonic_convergence` with a nice docstring.
return _monotonic_convergence(result)
def check_monotonic_fidelity(result):
"""Check for monotonic convergence with respect to the fidelity
This is like :func:`check_monotonic_error`, but looking for a monotonic
*increase* in the values in :attr:`.Result.info_vals`. Thus, it is assumed
that the `info_hook` returns a fidelity, not an error.
Example:
>>> r = krotov.result.Result()
>>> r.info_vals.append(0.0)
>>> check_monotonic_fidelity(r) # None
>>> r.info_vals.append(0.2)
>>> check_monotonic_fidelity(r) # None
>>> r.info_vals.append(0.15)
>>> check_monotonic_fidelity(r)
'Loss of monotonic convergence; fidelity increase < 0'
"""
return _monotonic_fidelity(result)
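# A minimal sketch of chaining conditions as described in the module
# docstring ("stop when J_T reaches 1e-5, or if ΔJ_T < 1e-6"), assuming the
# `info_hook` stores the value of J_T in `Result.info_vals`:
_example_check_convergence = Or(
    value_below('1e-5', name='J_T'),
    delta_below('1e-6', name='ΔJ_T'),
    check_monotonic_error,
)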
| StarcoderdataPython |
1614332 | import time
from typing import Any, List, Union
from urllib.parse import urlparse
import pymemcache
from freiner.errors import FreinerConfigurationError
from freiner.types import Host
MemcachedClient = Union[pymemcache.Client, pymemcache.PooledClient, pymemcache.HashClient]
class MemcachedStorage:
"""
Rate limit storage with memcached as backend.
Depends on the `pymemcache` library.
"""
MAX_CAS_RETRIES = 10
def __init__(self, client: MemcachedClient) -> None:
self._client: MemcachedClient = client
@classmethod
def from_uri(cls, uri: str, **options: Any) -> "MemcachedStorage":
"""
:param uri: URI of the form `memcached://host:port,host:port`or `memcached:///run/path/to/sock`.
:param options: All remaining keyword arguments are passed directly to the constructor
of :class:`pymemcache.client.base.Client`.
:raises FreinerConfigurationError: When no hosts could be parsed from the supplied URI.
"""
parsed_uri = urlparse(uri)
hosts: List[Union[Host, str]] = []
for loc in parsed_uri.netloc.strip().split(","):
if not loc:
continue
host, port = loc.split(":")
hosts.append((host, int(port)))
else:
# filesystem path to UDS
if parsed_uri.path and not parsed_uri.netloc and not parsed_uri.port:
hosts = [parsed_uri.path]
if not hosts:
raise FreinerConfigurationError(f"No Memcached hosts parsed from URI: {uri}")
if len(hosts) > 1:
client = pymemcache.HashClient(hosts, **options)
else:
client = pymemcache.Client(*hosts, **options)
return cls(client)
def get(self, key: str) -> int:
"""
Retrieve the current request count for the given rate limit key.
:param key: The key to get the counter value for.
"""
return int(self._client.get(key) or 0)
def clear(self, key: str) -> None:
"""
Resets the rate limit for the given key.
:param key: The key to clear rate limits for.
"""
self._client.delete(key)
def incr(self, key: str, expiry: int, elastic_expiry: bool = False) -> int:
"""
Increments the counter for the given rate limit key.
:param key: The key to increment.
:param expiry: Amount in seconds for the key to expire in.
:param elastic_expiry: Whether to keep extending the rate limit window every hit.
:return: The number of hits currently on the rate limit for the given key.
"""
if self._client.add(key, 1, expiry, noreply=False):
self._set_expiry(key, expiry)
return 1
if not elastic_expiry:
return self._client.incr(key, 1) or 1
# TODO: There is a timing issue here.
# This code makes the assumption that because client.add() failed, the key must exist.
# That isn't necessarily true. It can expire between us calling client.add() and us
# calling client.gets(). If that happens, 'cas' will be None. If we pass cas=None to
# client.cas(), it gets very unhappy.
# This issue shows up occasionally in the test suite, both locally and on Github Actions.
# If it shows up in testing, it absolutely will show up in the real world.
# I believe the solution will be to "restart" the logic flow if 'cas is None'. However,
# that will require rewriting the method so that that can be achieved without recursion,
# and without the code looking like a nightmare.
value, cas = self._client.gets(key)
retry = 0
while (
not self._client.cas(key, int(value or 0) + 1, cas, expiry)
and retry < self.MAX_CAS_RETRIES
):
value, cas = self._client.gets(key)
retry += 1
self._set_expiry(key, expiry)
return int(value or 0) + 1
def _set_expiry(self, key: str, expiry: int):
self._client.set(key + "/expires", expiry + time.time(), expire=expiry, noreply=False)
def get_expiry(self, key: str) -> float:
"""
Retrieve the expected expiry time for the given rate limit key.
:param key: The key to get the expiry time for.
:return: The time at which the current rate limit for the given key ends.
"""
return float(self._client.get(key + "/expires") or time.time())
def check(self) -> bool:
"""
Check if the connection to the storage backend is healthy.
"""
try:
self._client.get("freiner-check")
return True
except: # noqa
return False
__all__ = [
"MemcachedClient",
"MemcachedStorage",
]
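# Hedged usage sketch; the server address and key below are illustrative
# assumptions, not values defined by this module.
def _example_usage() -> int:
    storage = MemcachedStorage.from_uri("memcached://localhost:11211")
    hits = storage.incr("rate-limit:client-123", expiry=60)  # 1 on the first hit
    window_ends_in = storage.get_expiry("rate-limit:client-123") - time.time()
    return hits if window_ends_in > 0 else 0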
| StarcoderdataPython |
3366115 | """
ID: fufa0001
LANG: PYTHON3
TASK: ride
"""
import string
fin = open ('ride.in', 'r')
fout = open ('ride.out', 'w')
ufo,group = fin.read().splitlines()
ufo = ufo.strip()
group = group.strip()
ufo_num = 1
for char in ufo:
#print(f"{char}: {string.ascii_uppercase.index(char)}")
ufo_num *= string.ascii_uppercase.index(char)+1
group_num = 1
for char in group:
#print(f"{char}: {string.ascii_uppercase.index(char)}")
group_num *= string.ascii_uppercase.index(char)+1
if group_num % 47 == ufo_num % 47:
#print("GO")
fout.write("GO\n")
else:
fout.write("STAY\n")
fout.close()
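# Worked check against the classic sample input: "COMETQ" -> 3*15*13*5*20*17
# = 994500 and "HVNGAT" -> 8*22*14*7*1*20 = 344960; both leave remainder 27
# mod 47, so the program writes "GO".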
| StarcoderdataPython |
3203174 | <filename>caiotte/items.py
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ScholarItem(scrapy.Item):
username = scrapy.Field()
name = scrapy.Field()
email = scrapy.Field()
title = scrapy.Field()
org = scrapy.Field()
biography = scrapy.Field()
study_field = scrapy.Field()
page_url = scrapy.Field()
img_url = scrapy.Field()
class FriendShip(scrapy.Item):
first_user = scrapy.Field()
second_user = scrapy.Field()
class ScholarPaper(scrapy.Item):
scholar = scrapy.Field()
papers = scrapy.Field()
| StarcoderdataPython |
130956 | from typing import Union, Tuple
from duration import Duration
class Transition:
# Initialization and instance variables
def __init__(self, source: str, destination: str, sgate: str = None, dgate: str = None, distribution: Union[dict, int] = 0) -> None:
self.source = source
self.source_gate = sgate
self.destination = destination
self.destination_gate = dgate
self.delay = Duration(distribution)
# Public methods
def get_next(self) -> Tuple[str, str, int]:
return self.destination, self.destination_gate, self.delay.generate()
# Private methods
def __repr__(self):
return ', '.join("%s: %s" % item for item in vars(self).items())
| StarcoderdataPython |
39256 | <gh_stars>100-1000
import math
import numpy as np
from keras import backend as K
from keras.layers import Conv2D, Concatenate, Activation, Add
from keras.engine import InputSpec
def logsoftmax(x):
''' Numerically stable log(softmax(x)) '''
m = K.max(x, axis=-1, keepdims=True)
return x - m - K.log(K.sum(K.exp(x - m), axis=-1, keepdims=True))
def pixelcnn_loss(target, output, img_rows, img_cols, img_chns, n_components):
''' Keras PixelCNN loss function. Use a lambda to fill in the last few
parameters
Args:
img_rows, img_cols, img_chns: image dimensions
n_components: number of mixture components
Returns:
log-loss
'''
assert img_chns == 3
# Extract out each of the mixture parameters (multiple of 3 b/c of image channels)
output_m = output[:, :, :, :3*n_components]
output_invs = output[:, :, :, 3*n_components:6*n_components]
output_logit_weights = output[:, :, :, 6*(n_components):]
# Repeat the target to match the number of mixture component shapes
x = K.reshape(target, (-1, img_rows, img_cols, img_chns))
slices = []
for c in range(img_chns):
slices += [x[:, :, :, c:c+1]] * n_components
x = K.concatenate(slices, axis=-1)
x_decoded_m = output_m
x_decoded_invs = output_invs
x_logit_weights = output_logit_weights
# Pixels rescaled to be in [-1, 1] interval
offset = 1. / 127.5 / 2.
centered_mean = x - x_decoded_m
cdfminus_arg = (centered_mean - offset) * K.exp(x_decoded_invs)
cdfplus_arg = (centered_mean + offset) * K.exp(x_decoded_invs)
cdfminus_safe = K.sigmoid(cdfminus_arg)
cdfplus_safe = K.sigmoid(cdfplus_arg)
# Generate the PDF (logistic) in case the `m` is way off (cdf is too small)
# pdf = e^(-(x-m)/s) / {s(1 + e^{-(x-m)/s})^2}
# logpdf = -(x-m)/s - log s - 2 * log(1 + e^(-(x-m)/s))
# = -mid_in - invs - 2 * softplus(-mid_in)
mid_in = centered_mean * K.exp(x_decoded_invs)
log_pdf_mid = -mid_in - x_decoded_invs - 2. * K.tf.nn.softplus(-mid_in)
# Use trick from PixelCNN++ implementation to protect against edge/overflow cases
# In extreme cases (cdfplus_safe - cdf_minus_safe < 1e-5), use the
# log_pdf_mid and assume that density is 1 pixel width wide (1/127.5) as
# the density: log(pdf * 1/127.5) = log(pdf) - log(127.5)
# Add on line of best fit (see notebooks/blog post) to the difference between
# edge case and the standard case
edge_case = log_pdf_mid - np.log(127.5) + 2.04 * x_decoded_invs - 0.107
# ln (sigmoid(x)) = x - ln(e^x + 1) = x - softplus(x)
# ln (1 - sigmoid(x)) = ln(1 / (1 + e^x)) = -softplus(x)
log_cdfplus = cdfplus_arg - K.tf.nn.softplus(cdfplus_arg)
log_1minus_cdf = -K.tf.nn.softplus(cdfminus_arg)
log_ll = K.tf.where(x <= -0.999, log_cdfplus,
K.tf.where(x >= 0.999, log_1minus_cdf,
K.tf.where(cdfplus_safe - cdfminus_safe > 1e-5,
K.log(K.maximum(cdfplus_safe - cdfminus_safe, 1e-12)),
edge_case)))
# x_weights * [sigma(x+0.5...) - sigma(x-0.5 ...) ]
# = log x_weights + log (...)
# Compute log(softmax(.)) directly here, instead of doing 2-step to avoid overflow
pre_result = logsoftmax(x_logit_weights) + log_ll
result = []
for chn in range(img_chns):
chn_result = pre_result[:, :, :, chn*n_components:(chn+1)*n_components]
v = K.logsumexp(chn_result, axis=-1)
result.append(v)
result = K.batch_flatten(K.stack(result, axis=-1))
return -K.sum(result, axis=-1)
def sigmoid(x):
# Protect overflow
if x < -20:
return 0.0
elif x > 20:
return 1.0
return 1 / (1 + math.exp(-x))
def logistic_cdf(x, loc, scale):
return sigmoid((x - loc) / scale)
def compute_pvals(m, invs):
pvals = []
for i in range(256):
if i == 0:
pval = logistic_cdf((0.5 - 127.5) / 127.5, loc=m, scale=1. / np.exp(invs))
elif i == 255:
pval = 1. - logistic_cdf((254.5 - 127.5) / 127.5, loc=m, scale=1. / np.exp(invs))
else:
pval = (logistic_cdf((i + 0.5 - 127.5) / 127.5, loc=m, scale=1. / np.exp(invs))
- logistic_cdf((i - 0.5 - 127.5) / 127.5, loc=m, scale=1. / np.exp(invs)))
pvals.append(pval)
return pvals
def compute_mixture(ms, invs, weights, n_comps):
components = []
for i in range(n_comps):
pvals = compute_pvals(ms[i], invs[i])
arr = np.array(pvals)
components.append(weights[i] * arr)
return np.sum(components, axis=0)
class PixelConv2D(Conv2D):
def __init__(self, ptype, *args, **kwargs):
# ptype corresponds to pixel type and mask type, e.g. ra, ga, ba, rb, gb, bb
assert ptype[0] in ['r', 'g', 'b'], ptype
assert ptype[1] in ['a', 'b'], ptype
self.ptype = ptype
super(PixelConv2D, self).__init__(*args, **kwargs)
def build_mask(self, kernel_shape):
# kernel_shape = kern_dim x kern_dim x total_filters
# = kern_dim x kern_dim x r_g_b_filters x filters_per_channel
assert kernel_shape[0] == kernel_shape[1], \
"{} must be equal in first two dims".format(kernel_shape)
assert kernel_shape[0] % 2 == 1, \
"{} must be odd size in first two dims".format(kernel_shape)
assert kernel_shape[2] % 3 == 0, \
"{} must be divisible by 3".format(kernel_shape)
data = np.ones(kernel_shape)
data.shape
mid = data.shape[0] // 2
if self.ptype[0] == 'r':
filt_prev = 0
filt_thres = int(data.shape[2] / 3)
elif self.ptype[0] == 'g':
filt_prev = int(data.shape[2] / 3)
filt_thres = int(2 * data.shape[2] / 3)
else:
assert self.ptype[0] == 'b', self.ptype
filt_prev = int(2 * data.shape[2] / 3)
filt_thres = data.shape[2]
for k1 in range(data.shape[0]):
for k2 in range(data.shape[1]):
for chan in range(data.shape[2]):
if (self.ptype[1] == 'a'
and filt_prev <= chan < filt_thres
and k1 == mid and k2 == mid):
# Handle the only difference between 'a' and 'b' ptypes
data[k1, k2, chan, :] = 0
elif k1 > mid or (k1 >= mid and k2 > mid) or chan >= filt_thres:
# Turn off anything:
                        # a) Below the current pixel
                        # b) Past the current pixel (scanning left to right, top to bottom)
# c) In a later filter
data[k1, k2, chan, :] = 0
return K.constant(np.ravel(data), dtype='float32', shape=kernel_shape)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.kernel_mask = self.build_mask(kernel_shape)
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.filters,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
# Set input spec.
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
masked_kernel = self.kernel * self.kernel_mask
outputs = K.conv2d(
inputs,
masked_kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def conv_block(input_tensor, filters, kernel_size, name, is_first=False):
outs = []
for t in ['rb', 'gb', 'bb']:
if is_first:
t = t[0] + 'a'
x = PixelConv2D(t, filters, kernel_size,
name='res' + name + t, padding='same')(input_tensor)
x = Activation('relu')(x)
outs.append(x)
return Concatenate()(outs)
def resnet_block(input_tensor, filters, stage, block, kernel=3):
name_base = str(stage) + block + '_branch'
filters1, filters2, filters3 = filters
x = input_tensor
x = conv_block(x, filters1, (1, 1), name=name_base + '_a-1x1')
x = conv_block(x, filters2, (kernel, kernel),
name=name_base + '_b-{}x{}'.format(kernel, kernel))
x = conv_block(x, filters3, (1, 1), name=name_base + '_c-1x1')
x = Add()([x, input_tensor])
return x
def final_block(input_tensor, filters, in_filters, name, kernel_size=(1, 1)):
outs = []
for t in ['rb', 'gb', 'bb']:
x = PixelConv2D(t, filters, kernel_size,
name='final' + name + '_' + t,
padding='same')(input_tensor)
x = Activation('relu')(x)
outs.append(x)
return Concatenate()(outs)
| StarcoderdataPython |
3362673 | <reponame>ramyasaimullapudi/WolfTrackPlus
class User:
"""
This class is a controller for the user database. It inherits properties from flask_restful.Resource
"""
def get(self, email, password):
"""
gets defails of the specific user
:param email: email of the user
:param password: <PASSWORD> user
:return: returns the details of the user
"""
def get_auth_user_dao(self, email):
"""
Checks if the user is passing the correct authentication request
:param email: email of the user
:return: checks if the user is authenticated
"""
def post(self, name, email, password, gender, location):
"""
Create a new user with the given details
:param name: name of the user
        :param email: email of the user
:param password: <PASSWORD>
:param gender: gender of the user it can be male or female
:param location: location string of the user
:return: returns the new user object that is created
"""
def put(self):
"""
Modifies the user details
        :return: returns the modified user details
"""
def delete(self):
"""
Delete the user From the database
:return: returns the deleted user details from the database
"""
def edit_profile(self, user_id, name, gender, location):
"""
Changing the user details in the database
:param user_id: user ID of the user
:param name: Modified name of the user
:param gender: modified gender of the user
:param location: modified location of the user
:return: returns the new details of the user
"""
| StarcoderdataPython |
3358352 | <reponame>dmoiseenko/kon-config<gh_stars>0
import yaml
def create_yaml_file_by_dictionary(file_path, dictionary):
with open(file_path, "w") as outfile:
yaml.dump(dictionary, outfile, default_flow_style=False)
def read_yaml_file_to_dictionary(file_path):
with open(file_path) as file:
y = yaml.safe_load(file)
return y
def update_yaml_file(file_path, field_path, new_value):
with open(file_path) as f:
y = yaml.safe_load(f)
obj = y
key_list = field_path.split(".")
for k in key_list[:-1]:
obj = obj[k]
obj[key_list[-1]] = new_value
with open(file_path, "w") as f:
f.write(yaml.dump(y, default_flow_style=False))
def get_yaml_file_field_value(file_path, field_path):
with open(file_path) as f:
y = yaml.safe_load(f)
obj = y
key_list = field_path.split(".")
for k in key_list[:-1]:
obj = obj[k]
return obj[key_list[-1]]
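# Illustrative round trip; the file name and keys are arbitrary examples:
def _example_round_trip() -> bool:
    create_yaml_file_by_dictionary("example.yaml", {"app": {"debug": False}})
    update_yaml_file("example.yaml", "app.debug", True)
    return get_yaml_file_field_value("example.yaml", "app.debug") is True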
| StarcoderdataPython |
19498 | <gh_stars>1-10
from datetime import datetime
from kivy.app import App
from kivy.factory import Factory
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.uix.button import Button
from electrum.gui.kivy.i18n import _
from electrum.bitcoin import Token
from electrum.util import parse_token_URI, InvalidTokenURI
from .choice_dialog import ChoiceDialog
Builder.load_string('''
#:import partial functools.partial
#:import _ electrum.gui.kivy.i18n._
<AddTokenDialog>
id: popup
title: _('Add Token')
contract_addr: ''
BoxLayout:
orientation: 'vertical'
BoxLabel:
text: _('Contract Address')
SendReceiveBlueBottom:
size_hint: 1, None
height: self.minimum_height
BlueButton:
text: popup.contract_addr
shorten: True
on_release: Clock.schedule_once(lambda dt: app.show_info(_('Copy and paste the contract address using the Paste button, or use the camera to scan a QR code.')))
BoxLayout:
size_hint: 1, None
height: '48dp'
Button:
text: _('Paste')
on_release: popup.do_paste()
IconButton:
id: qr
size_hint: 0.6, 1
on_release: Clock.schedule_once(lambda dt: app.scan_qr(on_complete=popup.on_qr))
icon: 'atlas://electrum/gui/kivy/theming/light/camera'
AddTokenItem:
my_addr: app.wallet.get_addresses_sort_by_balance()[0]
title: _('My Address:')
description: str(self.my_addr)
action: partial(root.address_select_dialog, self)
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.5
Button:
text: 'Cancel'
size_hint: 0.5, None
height: '48dp'
on_release: popup.dismiss()
Button:
text: 'OK'
size_hint: 0.5, None
height: '48dp'
on_release:
root.add_token()
popup.dismiss()
''')
class AddTokenDialog(Factory.Popup):
def __init__(self, app):
Factory.Popup.__init__(self)
self.app = app
self.wallet = self.app.wallet
self.addresses = self.wallet.get_addresses_sort_by_balance()
self.my_address = self.wallet.get_addresses_sort_by_balance()[0]
self._address_select_dialog = None
self.contract_addr = ''
def address_select_dialog(self, item, dt):
shorten_addresses = []
for address in self.addresses:
shorten_address = ''
shorten_address = address[0:7] + '.....' + address[-7:]
shorten_addresses.append(shorten_address)
address_number = self.addresses.index(self.my_address)
if self._address_select_dialog is None:
def cb(addr):
return_number = shorten_addresses.index(addr)
my_address = self.addresses[return_number]
item.my_addr = my_address
self.my_address = my_address
self._address_select_dialog = ChoiceDialog(_('My Address'), shorten_addresses, shorten_addresses[address_number], cb)
self._address_select_dialog.open()
def add_token(self):
contract_addr = self.contract_addr
bind_addr = self.my_address
if contract_addr == '':
self.app.show_info(_("Contract Address is empty"))
return
try:
r = self.app.network.run_from_another_thread(self.app.network.get_token_info(contract_addr))
name = r.get('name')
decimals = r.get('decimals')
symbol = r.get('symbol')
if not name or not symbol or not isinstance(decimals, int) or decimals is None:
self.app.show_info(_("token info not valid: {} {} {}").format(name, symbol, decimals))
return
token = Token(contract_addr, bind_addr, name, symbol, decimals, 0)
self.app.set_token(token)
except BaseException as e:
import traceback, sys
traceback.print_exc(file=sys.stderr)
self.app.show_info(e)
def search_token(self, contract_addr):
try:
token_data = self.app.network.run_from_another_thread(self.app.network.get_token_info(contract_addr))
except:
try:
token_data = self.app.network.run_from_another_thread(self.app.network.get_token_info(contract_addr))
except:
token_data = None
if token_data:
return True
return False
def do_paste(self):
from electrum.bitcoin import base_decode, is_address
data = self.app._clipboard.paste().strip()
if not data:
self.app.show_info(_("Clipboard is empty"))
return
if is_address(data) or data.startswith('vipstarcoin:'):
self.app.show_info(_("QR data is bitcoin URI."))
return
self.set_URI(data)
def set_URI(self, text):
if not self.app.wallet:
self.payment_request_queued = text
return
try:
uri = parse_token_URI(text)
except InvalidTokenURI as e:
self.app.show_error(_("Error parsing URI") + f":\n{e}")
return
address = uri.get('contract_addr', '')
if not self.search_token(address):
self.app.show_error(_("token not found"))
self.contract_addr = ''
return
self.contract_addr = address
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data) or data.startswith('vipstarcoin:'):
self.app.show_info(_("QR data is bitcoin URI."))
return
if self.search_token(data) or data.startswith('vipstoken:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.app.show_info(_("QR data is transaction."))
return
# show error
self.app.show_error(_("Unable to decode QR data"))
| StarcoderdataPython |
109523 | <reponame>monkeyman79/dfsimage
"""This module contains MMBEntry class."""
from typing import Protocol, IO, Union
from .consts import MMB_INDEX_ENTRY_SIZE
from .consts import MMB_STATUS_OFFSET, MMB_STATUS_LOCKED, MMB_STATUS_UNLOCKED
from .consts import MMB_STATUS_UNINITIALIZED, MMB_STATUS_UNINITIALIZED_MASK
from .enums import OpenMode, WarnMode
from .misc import MMBWarning
from .simplewarn import warn
from .conv import bbc_to_unicode, unicode_to_bbc
class MMBFileProtocol(Protocol):
"""Protocol for MMBFile class."""
filename: str
count: int
# pylint: disable=missing-function-docstring
def is_entry_modified(self, index: int) -> bool:
...
def set_entry_modified(self, index: int, value: bool):
...
def incref(self) -> IO[bytes]:
...
def close(self, save: bool = True):
...
def open_entry(self, entry: Union[int, 'MMBEntry'], open_mode: OpenMode = None,
warn_mode: WarnMode = None, catalog_only=False):
...
@property
def is_read_only(self) -> bool:
...
# pylint: enable=missing-function-docstring
class MMBEntry:
"""Represents entry in the **MMB** file catalog."""
def __init__(self, index: int, dataview: memoryview = None,
owner: MMBFileProtocol = None):
self._modified = False
#: int: Image index
self.index = index
self._offset = (index + 1) * MMB_INDEX_ENTRY_SIZE
self._dataview = (memoryview(bytearray(MMB_INDEX_ENTRY_SIZE))
if dataview is None else dataview)
#: :class:`MMBFile`: The :class:`MMBFile` object.
self.owner = owner
def open(self, open_mode: OpenMode = None, warn_mode: WarnMode = None,
catalog_only=False):
"""Open disk from **MMB** catalog entry.
Args:
open_mode (Optional[OpenMode]): File open mode.
Default is :data:`OpenMode.ALWAYS`.
warn_mode (Optional[WarnMode]):
Warning mode for validation.
catalog_only (bool): Open image only for reading catalog data.
Returns:
An :class:`Image` object
"""
if self.owner is None:
raise ValueError("no 'MMBFile' object")
return self.owner.open_entry(self, open_mode, warn_mode, catalog_only)
@property
def modified(self) -> bool:
"""**MMB** catalog entry modified."""
if self.owner is not None:
return self.owner.is_entry_modified(self.index)
return self._modified
@modified.setter
def modified(self, value: bool):
if self.owner is not None:
self.owner.set_entry_modified(self.index, value)
else:
self._modified = value
@property
def status_byte(self) -> int:
"""Disk status byte in **MMB** catalog, no questions asked.
:meta private:
"""
return self._dataview[MMB_STATUS_OFFSET] # type: ignore
@status_byte.setter
def status_byte(self, value: int):
if self._dataview[MMB_STATUS_OFFSET] != value: # type: ignore
self._modified = True
self._dataview[MMB_STATUS_OFFSET] = value # type: ignore
@property
def locked(self) -> bool:
"""Disk locked flag in the **MMB** catalog."""
return self.status_byte == MMB_STATUS_LOCKED
@locked.setter
def locked(self, value: bool):
if not self.initialized:
raise PermissionError("image is not initialized")
if value:
# Lock image
self.status_byte = MMB_STATUS_LOCKED
else:
# Unlock image
self.status_byte = MMB_STATUS_UNLOCKED
@property
def initialized(self) -> bool:
"""Disk initialized flag in the **MMB** catalog."""
return self.status_byte & MMB_STATUS_UNINITIALIZED_MASK != MMB_STATUS_UNINITIALIZED
@initialized.setter
def initialized(self, value: bool):
if value:
if not self.initialized:
# Activate image
self.status_byte = MMB_STATUS_UNLOCKED
else:
# Deactivate image
if self.locked:
raise PermissionError("image is locked")
self.status_byte = MMB_STATUS_UNINITIALIZED
def dkill(self) -> bool:
"""Set disk status in **MMB** catalog to uninitialized."""
# Deactivate disk in the MMB index
if not self.initialized:
warn(MMBWarning("image already uninitialized"))
return False
self.initialized = False
return True
def drestore(self) -> bool:
"""Set disk status in **MMB** catalog to initialized."""
# Activate disk in the MMB index
if self.initialized:
warn(MMBWarning("image already initialized"))
return False
self.initialized = True
return True
@property
def title(self) -> str:
"""Disk title string in **MMB** catalog."""
vbytes = bytes(self._dataview[0:12])
return bbc_to_unicode(vbytes.decode("ascii").rstrip(chr(0)))
@title.setter
def title(self, value: str) -> None:
if len(value) > 12:
raise ValueError("title too long")
vbytes = unicode_to_bbc(value).ljust(12, chr(0)).encode("ascii")
if vbytes != self._dataview[0:12]:
self.modified = True
self._dataview[0:12] = vbytes # type: ignore
def __str__(self):
"""String representation."""
return "%5i %12s %1s" % (self.index, self.title,
"U" if not self.initialized
else "P" if self.locked
else " ")
def __repr__(self):
"""Textual representation."""
return "MMBEntry %s %s" % (self.owner.filename if self.owner is not None else "",
str(self))
| StarcoderdataPython |
152872 | <gh_stars>0
from .hk_print import HKPrint, HKPrintTheme
print = HKPrint() | StarcoderdataPython |
1674608 | import gym
import tensorflow as tf
import spinup
import numpy as np
#making environment lambda function
env = lambda : gym.make("quadrotor_14d_env:DiffDriveEnv-v0")
#vpg
# spinup.vpg(
# env,
# ac_kwargs={"hidden_sizes":(64,2)},
# seed = np.random.randint(100),
# steps_per_epoch=1250,
# epochs=2500,
# pi_lr=3e-4,
# logger_kwargs = {"output_dir" : "logs/vpgrandomtest"}
# )
#ppo
spinup.ppo(
env,
ac_kwargs={"hidden_sizes":(64,2)},
seed = np.random.randint(100),
steps_per_epoch=1250,
pi_lr=3e-3,
epochs=2500,
logger_kwargs = {"output_dir" : "logs/ppo-diffdrivetest-wtf"}
)
#polynomials
# spinup.vpgpolynomial(
# env,
# ac_kwargs={"order":3},
# seed = np.random.randint(100),
# steps_per_epoch=1250,
# epochs=2500,
# pi_lr=2e-5,
# l1_scaling=0.001,
# logger_kwargs = {"output_dir" : "logs/polyrandomtest"}
# ) | StarcoderdataPython |
1765207 | <filename>test/test_warn_messages.py
#! /usr/bin/python
#-*- coding: utf-8 -*-
from __future__ import print_function
import sys
import os
import re
import argparse
import datetime
import pybern.products.bernbpe as bpe
stop = datetime.datetime.now(tz=datetime.timezone.utc)
start = stop - datetime.timedelta(days=5)
campaign_dir = '/home/bpe/data/GPSDATA/CAMPAIGN52/GREECE'
wlist = bpe.collect_warning_messages(campaign_dir, '005', start, stop)
print(wlist)
| StarcoderdataPython |
1715704 | <reponame>divyquartic/QuarticSDK<filename>tests/features/tag_list_data_flow/__init__.py
import pandas as pd
import pytest
from unittest import mock
from aloe import step, world
from quartic_sdk import APIClient
from quartic_sdk.core.entities import Tag, Asset
from quartic_sdk.core.entity_helpers.entity_list import EntityList
from quartic_sdk.core.iterators.tag_data_iterator import TagDataIterator
from quartic_sdk.utilities.test_helpers import (
APIHelperCallAPI,
ASSET_LIST_GET,
TAG_LIST_MULTI_GET,
ASSET_DATA_POST,
TAG_LIST_DATA_POST
)
import quartic_sdk.utilities.constants as Constants
@step("we have successfully set up client to test tags data flow")
def step_impl(context):
"""
For the first step we setup the APIClient, and the related tag_list
"""
world.client = APIClient(
"http://test_host",
username="username",
password="password")
world.tag_list = EntityList(Constants.TAG_ENTITY)
@step("we call the required methods to get the tags list data")
def step_impl(context):
"""
Now we call the different internal methods and save their values
internally in the world parameter
"""
with mock.patch('requests.get') as requests_get:
requests_get.return_value = APIHelperCallAPI(ASSET_LIST_GET)
world.client_assets = world.client.assets()
world.first_asset = world.client_assets.first()
with mock.patch('requests.get') as requests_get:
requests_get.return_value = APIHelperCallAPI(TAG_LIST_MULTI_GET)
world.first_asset_tags = world.first_asset.get_tags()
world.tag_list.add(world.first_asset_tags.get("id", 1))
world.tag_list.add(world.first_asset_tags.get("id", 2))
with mock.patch('requests.post') as requests_post:
requests_post.return_value = APIHelperCallAPI(TAG_LIST_DATA_POST)
world.tag_list_data_pd = world.tag_list.data(start_time=1, stop_time=2)
world.tag_list_data_json = world.tag_list.data(
start_time=1, stop_time=2, return_type=Constants.RETURN_JSON)
with mock.patch('requests.post') as requests_post:
requests_post.return_value = APIHelperCallAPI(TAG_LIST_DATA_POST)
test_transformation1 = [{
"transformation_type": "interpolation",
"column": "1",
"method": "linear"
}, {
"transformation_type": "interpolation",
"column": "2",
"method": "cubic",
"order": 2
}]
world.first_asset_data_with_correct_transformation = world.tag_list.data(
start_time=1, stop_time=2, transformations=test_transformation1)
@step("the return of tag list data works correctly for json and pandas df")
def step_impl(context):
"""
In this step we assert to ensure that the methods call the correct functions
to ensure the correct variable types and the respective data created
"""
with pytest.raises(Exception):
world.tag_list.add(world.first_asset)
assert isinstance(world.tag_list_data_pd, TagDataIterator)
with mock.patch('requests.post') as requests_post:
requests_post.return_value = APIHelperCallAPI(
TAG_LIST_DATA_POST.copy())
for tag_data in world.tag_list_data_pd:
assert isinstance(tag_data, pd.DataFrame)
assert isinstance(world.tag_list_data_json, TagDataIterator)
with mock.patch('requests.post') as requests_post:
requests_post.return_value = APIHelperCallAPI(
TAG_LIST_DATA_POST.copy())
for tag_data in world.tag_list_data_json:
assert isinstance(tag_data, dict)
assert isinstance(
world.first_asset_data_with_correct_transformation,
TagDataIterator)
with pytest.raises(Exception):
test_transformation2 = [{
"transformation_type": "interpolation",
"method": "linear"
}]
world.tag_data_with_incorrect_transformation = world.tag_list.data(
start_time=1, stop_time=2, transformations=test_transformation2)
with pytest.raises(Exception):
test_transformation3 = [{
"transformation_type": "interpolation",
"column": "1",
"method": "linear"
}, {
"transformation_type": "aggregation",
"aggregation_column": "1"
}]
world.tag_data_with_incorrect_transformation = world.tag_list.data(
start_time=1, stop_time=2, transformations=test_transformation3)
| StarcoderdataPython |
176711 | from sub_capture_tool import SubCaptureTool
import numpy
import cv2
import time
time.sleep(3)
sct = SubCaptureTool()
j = 0
for i in range(60):
time.sleep(1)
for seg in sct.capture():
gray = cv2.cvtColor(seg, cv2.COLOR_RGB2GRAY)
gray, img_bin = cv2.threshold(gray,128,255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
gray = cv2.bitwise_not(img_bin)
cv2.imwrite(f'imgs/{j}.jpg', gray)
j += 1
cv2.imshow('done', gray)
cv2.waitKey(0)
cv2.destroyAllWindows()
| StarcoderdataPython |
1612141 | from argon2 import PasswordHasher
from argon2.exceptions import VerifyMismatchError
def encrypt_password(password):
return PasswordHasher().hash(password)
def verify_password(password, hash):
try:
return PasswordHasher().verify(hash, password)
except VerifyMismatchError:
return False
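# Illustrative usage; the password string is an arbitrary example:
def _example() -> bool:
    hashed = encrypt_password("correct horse battery staple")
    assert verify_password("correct horse battery staple", hashed)
    return verify_password("wrong guess", hashed)  # False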
| StarcoderdataPython |
4815817 | import os
from models.Player import Player
def get_players(data):
players = []
for playerxml in data.iter('player'):
player = Player(playerxml.attrib)
players.append(player)
return players | StarcoderdataPython |
1741985 | import django
from django.contrib.contenttypes.models import ContentType
from fluent_contents.models import ContentItem
from fluent_contents.tests.utils import AppTestCase
class ModelTests(AppTestCase):
"""
Testing the data model.
"""
def test_stale_model_str(self):
"""
No matter what, the ContentItem.__str__() should work.
This would break the admin delete screen otherwise.
"""
c = ContentType()
if django.VERSION >= (1, 8):
c.save()
a = ContentItem(polymorphic_ctype=c)
self.assertEqual(str(a), "'(type deleted) 0' in 'None None'")
| StarcoderdataPython |
3362776 | <filename>python/packages/pybind_nisar/workflows/runconfig.py<gh_stars>10-100
'''
base class for processing and validating args
'''
import os
import journal
from ruamel.yaml import YAML
import yamale
import numpy as np
import pybind_isce3 as isce
from pybind_nisar.products.readers import SLC
from pybind_nisar.workflows import geogrid
import pybind_nisar.workflows.helpers as helpers
class RunConfig:
def __init__(self, args, workflow_name=''):
# argparse namespace
self.args = args
# workflow name in lower case
self.workflow_name = workflow_name
self.cfg = {}
self.user = {}
def load_yaml_to_dict(self):
"""
Load default runconfig, override with user input, and convert to dict
Leading namespaces can be stripped off down the line
"""
# assign default config and yamale schema
# assume defaults have already been yamale validated
try:
default_cfg = f'{helpers.WORKFLOW_SCRIPTS_DIR}/defaults/{self.workflow_name}.yaml'
schema = yamale.make_schema(f'{helpers.WORKFLOW_SCRIPTS_DIR}/schemas/{self.workflow_name}.yaml',
parser='ruamel')
except:
err_str = f'workflow {self.workflow_name} does not have a schema.'
raise ValueError(err_str)
# set run config type
run_config_is_txt = False
# if newlines then run_config is YAML string (primarily for unit test)
if self.args.run_config_path is not None:
if '\n' in self.args.run_config_path:
run_config_is_txt = True
# validate yaml file taken from command line
try:
if run_config_is_txt:
data = yamale.make_data(content=self.args.run_config_path, parser='ruamel')
else:
data = yamale.make_data(self.args.run_config_path, parser='ruamel')
except yamale.YamaleError as e:
err_str = f'Yamale unable to load {self.workflow_name} runconfig yaml {self.args.run_config_path} for validation.'
raise yamale.YamaleError(err_str) from e
try:
yamale.validate(schema, data)
except yamale.YamaleError as e:
err_str = f'Validation fail for {self.workflow_name} runconfig yaml {self.args.run_config_path}.'
raise yamale.YamaleError(err_str) from e
# load default config
parser = YAML(typ='safe')
with open(default_cfg, 'r') as f:
self.cfg = parser.load(f)
# load user config based on input type
if run_config_is_txt:
self.user = parser.load(self.args.run_config_path)
else:
with open(self.args.run_config_path) as f_yaml:
self.user = parser.load(f_yaml)
        # copy user supplied config into default config
helpers.deep_update(self.cfg, self.user)
def load_geocode_yaml_to_dict(self):
'''
Modify config dict for geocoded related workflows.
'''
self.load_yaml_to_dict()
        # remove the top 2 levels of the dict to reduce boilerplate
self.cfg = self.cfg['runconfig']['groups']
self.user = self.user['runconfig']['groups']
# attempt updating logging destination only if:
# CLI arg for log is True AND valid path provided in yaml
# otherwise indicate restart
if self.args.log_file and 'logging' in self.cfg:
log_path = self.cfg['logging']['path']
helpers.check_log_dir_writable(log_path)
# check logging write mode. default to 'a'/append if no mode specified.
if 'write_mode' in self.cfg['logging']:
write_mode = self.cfg['logging']['write_mode']
else:
write_mode = 'a'
journal.debug.journal.device = "journal.file"
journal.debug.journal.device.log = open(log_path, write_mode)
else:
self.args.restart = True
# remove default frequency(s) if not chosen by user
default_freqs = self.cfg['processing']['input_subset']['list_of_frequencies']
user_freqs = self.user['processing']['input_subset']['list_of_frequencies'].keys()
discards = [freq for freq in default_freqs if freq not in user_freqs]
for discard in discards:
del default_freqs[discard]
def prep_paths(self):
'''
Prepare input and output paths
'''
error_channel = journal.error('RunConfig.load')
# check input file value
input_path = self.cfg['InputFileGroup']['InputFilePath']
# check input HDF5(s) in cfg
if isinstance(input_path, list):
n_inputs = len(input_path)
if self.workflow_name in ['gcov', 'gslc']:
if n_inputs != 1:
err_str = f'{n_inputs} inputs provided. Only one input file is allowed.'
error_channel.log(err_str)
raise ValueError(err_str)
elif self.workflow_name == 'insar':
if n_inputs == 2:
secondary_path = input_path[1]
if not os.path.isfile(secondary_path):
err_str = f'{secondary_path} secondary RSLC not found.'
error_channel.log(err_str)
raise ValueError(err_str)
self.cfg['InputFileGroup']['SecondaryFilePath'] = secondary_path
else:
err_str = f"{n_inputs} provided. 2 input files are required for INSAR workflow."
error_channel.log(err_str)
raise FileNotFoundError(err_str)
else:
err_str = f'{self.workflow_name} unsupported'
error_channel.log(err_str)
raise ValueError(err_str)
input_path = input_path[0]
if not os.path.isfile(input_path):
err_str = f'Reference SLC not found {input_path}'
error_channel.log(err_str)
raise ValueError(err_str)
if not isinstance(input_path, str):
err_str = 'String type not provided for path to YAML.'
error_channel.log(err_str)
raise ValueError(err_str)
if not os.path.isfile(input_path):
err_str = f"{input_path} input not found."
error_channel.log(err_str)
raise FileNotFoundError(err_str)
self.cfg['InputFileGroup']['InputFilePath'] = input_path
# ensure validity of DEM inputs
helpers.check_dem(self.cfg['DynamicAncillaryFileGroup']['DEMFile'])
# check if each product type has an output
output_hdf5 = self.cfg['ProductPathGroup']['SASOutputFile']
output_dir = os.path.dirname(output_hdf5)
helpers.check_write_dir(output_dir)
helpers.check_write_dir(self.cfg['ProductPathGroup']['ScratchPath'])
def prep_frequency_and_polarizations(self):
'''
check frequency and polarizations and fix as needed
'''
error_channel = journal.error('RunConfig.prep_frequency_and_polarizations')
input_path = self.cfg['InputFileGroup']['InputFilePath']
freq_pols = self.cfg['processing']['input_subset']['list_of_frequencies']
slc = SLC(hdf5file=input_path)
for freq in freq_pols.keys():
if freq not in slc.frequencies:
err_str = f"Frequency {freq} invalid; not found in source frequencies."
error_channel.log(err_str)
raise ValueError(err_str)
# first check polarizations from source hdf5
rslc_pols = slc.polarizations[freq]
# use all RSLC polarizations if None provided
if freq_pols[freq] is None:
freq_pols[freq] = rslc_pols
continue
# use polarizations provided by user
# check if user provided polarizations match RSLC ones
for usr_pol in freq_pols[freq]:
if usr_pol not in rslc_pols:
err_str = f"{usr_pol} invalid; not found in source polarizations."
error_channel.log(err_str)
raise ValueError(err_str)
def prep_geocode_cfg(self):
'''
check geocode config and initialize as needed
'''
geocode_dict = self.cfg['processing']['geocode']
# check for user provided EPSG and grab from DEM if none provided
if geocode_dict['outputEPSG'] is None:
geocode_dict['outputEPSG'] = isce.io.Raster(self.cfg['DynamicAncillaryFileGroup']['DEMFile']).get_epsg()
# make geogrids for each frequency
geogrids = {}
# for each frequency check source RF polarization values and make geogrids
freq_pols = self.cfg['processing']['input_subset']['list_of_frequencies']
for freq in freq_pols.keys():
# build geogrids only if pols not None
geogrids[freq] = geogrid.create(self.cfg, freq)
# place geogrids in cfg for later processing
self.cfg['processing']['geocode']['geogrids'] = geogrids
def prep_cubes_geocode_cfg(self):
'''
check cubes geocode config and initialize as needed
radar_grid_cubes is an optional group. If not provided,
the geocode group should be used, but with different X and Y
spacing defaults
'''
geocode_dict = self.cfg['processing']['geocode']
# check for user provided EPSG and grab geocode group EPSG if not provided
if self.cfg['processing']['radar_grid_cubes']['outputEPSG'] is None:
cubes_epsg = geocode_dict['outputEPSG']
else:
cubes_epsg = self.cfg['processing']['radar_grid_cubes']['outputEPSG']
self.cfg['processing']['radar_grid_cubes']['outputEPSG'] = cubes_epsg
if not self.cfg['processing']['radar_grid_cubes']['heights']:
self.cfg['processing']['radar_grid_cubes']['heights'] = \
list(np.arange(-1000, 9001, 500))
if cubes_epsg == 4326:
# lat/lon
default_cube_geogrid_spacing_x = 0.005
default_cube_geogrid_spacing_y = -0.005
else:
# meters
default_cube_geogrid_spacing_x = 500
default_cube_geogrid_spacing_y = -500
radar_grid_cubes_dict = self.cfg['processing']['radar_grid_cubes']
self.cfg['processing']['radar_grid_cubes']['outputEPSG'] = cubes_epsg
# build geogrid
frequency_ref = 'A'
frequency_group = None
cubes_geogrid = geogrid.create(
self.cfg, frequency_group = frequency_group,
frequency = frequency_ref,
geocode_dict = radar_grid_cubes_dict,
default_spacing_x = default_cube_geogrid_spacing_x,
default_spacing_y = default_cube_geogrid_spacing_y)
# place geogrid in cfg for later processing
self.cfg['processing']['radar_grid_cubes']['geogrid'] = cubes_geogrid
def geocode_common_arg_load(self):
'''
Workflows needing geocoding
'''
self.prep_paths()
self.prep_frequency_and_polarizations()
self.prep_geocode_cfg()
self.prep_cubes_geocode_cfg()
| StarcoderdataPython |
1600681 | <gh_stars>0
"""
MIT License
Copyright (c) 2021-present VincentRPS
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import collections
class _Hold:
def __init__(self):
self._storage = collections.OrderedDict()
async def cache(self, hold: str, data: dict):
self._storage[hold] = data
    async def get(self, hold: str):
        # Return the cached payload for the given key.
        return self._storage.get(hold)
    async def pop(self, hold: str):
        # Return the removed payload so callers can inspect it.
        return self._storage.pop(hold)
class Cache:
"""Represents normally cached objects"""
def __init__(self, **custom_impl):
self.members = custom_impl.get("members") or _Hold()
self.groups = custom_impl.get("groups") or _Hold()
self.messages = custom_impl.get("messages") or _Hold()
self.channels = custom_impl.get("channels") or _Hold()
class ConnectionState:
def __init__(self, **customs):
self.cache = customs.get("cache") or Cache()
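# --- Illustrative usage (editor's sketch, not part of the original module). ---
# The key and payload below are arbitrary; only the classes defined above are used.
if __name__ == "__main__":
    import asyncio
    async def _demo():
        state = ConnectionState()
        await state.cache.members.cache("1234567890", {"username": "example"})
        print(await state.cache.members.get("1234567890"))
    asyncio.run(_demo())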
| StarcoderdataPython |
177376 | <gh_stars>0
import locale
import os
from enum import Enum
from typing import List, Tuple
from qtpy.QtCore import QEvent, Qt
from qtpy.QtGui import QKeyEvent, QResizeEvent
from qtpy.QtWidgets import (
QApplication,
QBoxLayout,
QCheckBox,
QComboBox,
QHBoxLayout,
QLabel,
QMessageBox,
QPushButton,
QTableWidget,
QTableWidgetItem,
QVBoxLayout,
QWidget,
)
from PartSeg.common_backend.progress_thread import ExecuteFunctionThread
from PartSegCore.analysis.measurement_calculation import MeasurementProfile, MeasurementResult
from PartSegCore.universal_const import Units
from ..common_gui.universal_gui_part import ChannelComboBox, EnumComboBox
from ..common_gui.waiting_dialog import WaitingDialog
from .partseg_settings import PartSettings
class FileNamesEnum(Enum):
No = 1
Short = 2
Full = 3
def __str__(self):
return self.name
class MeasurementsStorage:
"""class for storage measurements result"""
def __init__(self):
self.header = []
self.max_rows = 0
self.content = []
self.measurements = []
self.expand = False
def clear(self):
"""clear storage"""
self.header = []
self.max_rows = 0
self.content = []
        self.measurements: List[Tuple[MeasurementResult, bool, bool]] = []
def get_size(self, save_orientation: bool):
if save_orientation:
return self.max_rows, len(self.header)
else:
return len(self.header), self.max_rows
def change_expand(self, expand):
if self.expand != expand:
self.expand = expand
self.refresh()
def refresh(self):
self.header = []
self.content = []
self.max_rows = 0
for data, add_names, add_units in self.measurements:
if self.expand:
if add_names:
self.content.append(data.get_labels())
self.header.append("Name")
values = data.get_separated()
self.max_rows = max(self.max_rows, len(values[0]))
self.content.extend(values)
self.header.extend(["Value" for _ in range(len(values))])
if add_units:
self.content.append(data.get_units())
self.header.append("Units")
else:
if add_names:
self.content.append(list(data.keys()))
self.header.append("Name")
values, units = zip(*list(data.values()))
self.max_rows = max(self.max_rows, len(values))
self.content.append(values)
self.header.append("Value")
if add_units:
self.content.append(units)
self.header.append("Units")
def add_measurements(self, data: MeasurementResult, add_names, add_units):
self.measurements.append((data, add_names, add_units))
self.refresh()
def get_val_as_str(self, x: int, y: int, save_orientation: bool) -> str:
"""get value from given index"""
if not save_orientation:
x, y = y, x
if len(self.content) <= x:
return ""
sublist = self.content[x]
if len(sublist) <= y:
return ""
val = sublist[y]
if isinstance(val, float):
return locale.str(val)
return str(val)
def get_header(self, save_orientation: bool) -> List[str]:
if save_orientation:
return [str(i) for i in range(self.max_rows)]
else:
return self.header
def get_rows(self, save_orientation: bool) -> List[str]:
return self.get_header(not save_orientation)
class MeasurementWidget(QWidget):
"""
:type settings: Settings
:type segment: Segment
"""
def __init__(self, settings: PartSettings, segment=None):
super(MeasurementWidget, self).__init__()
self.settings = settings
self.segment = segment
self.measurements_storage = MeasurementsStorage()
self.recalculate_button = QPushButton("Recalculate and\n replace measurement", self)
self.recalculate_button.clicked.connect(self.replace_measurement_result)
self.recalculate_append_button = QPushButton("Recalculate and\n append measurement", self)
self.recalculate_append_button.clicked.connect(self.append_measurement_result)
self.copy_button = QPushButton("Copy to clipboard", self)
        self.copy_button.setToolTip("You can also copy with 'Ctrl+C'. To get a raw copy, use 'Ctrl+Shift+C'")
self.horizontal_measurement_present = QCheckBox("Horizontal view", self)
self.no_header = QCheckBox("No header", self)
self.no_units = QCheckBox("No units", self)
self.no_units.setChecked(True)
self.expand_mode = QCheckBox("Expand", self)
self.file_names = EnumComboBox(FileNamesEnum)
self.file_names_label = QLabel("Add file name:")
self.file_names.currentIndexChanged.connect(self.refresh_view)
self.horizontal_measurement_present.stateChanged.connect(self.refresh_view)
self.expand_mode.stateChanged.connect(self.refresh_view)
self.copy_button.clicked.connect(self.copy_to_clipboard)
self.measurement_type = QComboBox(self)
# noinspection PyUnresolvedReferences
self.measurement_type.currentTextChanged.connect(self.measurement_profile_selection_changed)
self.measurement_type.addItem("<none>")
self.measurement_type.addItems(list(sorted(self.settings.measurement_profiles.keys())))
self.measurement_type.setToolTip(
            'You can create a new measurement profile in the advanced window, in the "Measurement settings" tab'
)
self.channels_chose = ChannelComboBox()
self.units_choose = EnumComboBox(Units)
self.units_choose.set_value(self.settings.get("units_value", Units.nm))
self.info_field = QTableWidget(self)
self.info_field.setColumnCount(3)
self.info_field.setHorizontalHeaderLabels(["Name", "Value", "Units"])
self.measurement_add_shift = 0
layout = QVBoxLayout()
# layout.addWidget(self.recalculate_button)
v_butt_layout = QVBoxLayout()
v_butt_layout.setSpacing(1)
self.up_butt_layout = QHBoxLayout()
self.up_butt_layout.addWidget(self.recalculate_button)
self.up_butt_layout.addWidget(self.recalculate_append_button)
self.butt_layout = QHBoxLayout()
# self.butt_layout.setMargin(0)
# self.butt_layout.setSpacing(10)
self.butt_layout.addWidget(self.horizontal_measurement_present, 1)
self.butt_layout.addWidget(self.no_header, 1)
self.butt_layout.addWidget(self.no_units, 1)
self.butt_layout.addWidget(self.expand_mode, 1)
self.butt_layout.addWidget(self.file_names_label)
self.butt_layout.addWidget(self.file_names, 1)
self.butt_layout.addWidget(self.copy_button, 2)
self.butt_layout2 = QHBoxLayout()
self.butt_layout3 = QHBoxLayout()
self.butt_layout3.addWidget(QLabel("Channel:"))
self.butt_layout3.addWidget(self.channels_chose)
self.butt_layout3.addWidget(QLabel("Units:"))
self.butt_layout3.addWidget(self.units_choose)
# self.butt_layout3.addWidget(QLabel("Noise removal:"))
# self.butt_layout3.addWidget(self.noise_removal_method)
self.butt_layout3.addWidget(QLabel("Profile:"))
self.butt_layout3.addWidget(self.measurement_type, 2)
v_butt_layout.addLayout(self.up_butt_layout)
v_butt_layout.addLayout(self.butt_layout)
v_butt_layout.addLayout(self.butt_layout2)
v_butt_layout.addLayout(self.butt_layout3)
layout.addLayout(v_butt_layout)
# layout.addLayout(self.butt_layout)
layout.addWidget(self.info_field)
self.setLayout(layout)
# noinspection PyArgumentList
self.clip = QApplication.clipboard()
self.settings.image_changed[int].connect(self.image_changed)
self.previous_profile = None
def check_if_measurement_can_be_calculated(self, name):
if name == "<none>":
return "<none>"
profile: MeasurementProfile = self.settings.measurement_profiles.get(name)
if profile.is_any_mask_measurement() and self.settings.mask is None:
QMessageBox.information(
self, "Need mask", "To use this measurement set please use data with mask loaded", QMessageBox.Ok
)
self.measurement_type.setCurrentIndex(0)
return "<none>"
if self.settings.segmentation is None:
QMessageBox.information(
self,
"Need segmentation",
'Before calculating please create segmentation ("Execute" button)',
QMessageBox.Ok,
)
self.measurement_type.setCurrentIndex(0)
return "<none>"
return name
def image_changed(self, channels_num):
self.channels_chose.change_channels_num(channels_num)
def measurement_profile_selection_changed(self, text):
text = self.check_if_measurement_can_be_calculated(text)
try:
stat = self.settings.measurement_profiles[text]
is_mask = stat.is_any_mask_measurement()
disable = is_mask and (self.settings.mask is None)
except KeyError:
disable = True
self.recalculate_button.setDisabled(disable)
self.recalculate_append_button.setDisabled(disable)
if disable:
            self.recalculate_button.setToolTip("Measurement profile contains mask measurements, but no mask is loaded")
            self.recalculate_append_button.setToolTip(
                "Measurement profile contains mask measurements, but no mask is loaded"
            )
else:
self.recalculate_button.setToolTip("")
self.recalculate_append_button.setToolTip("")
def copy_to_clipboard(self):
s = ""
for r in range(self.info_field.rowCount()):
for c in range(self.info_field.columnCount()):
try:
s += str(self.info_field.item(r, c).text()) + "\t"
except AttributeError:
s += "\t"
s = s[:-1] + "\n" # eliminate last '\t'
self.clip.setText(s)
def replace_measurement_result(self):
self.measurements_storage.clear()
self.previous_profile = ""
self.append_measurement_result()
def refresh_view(self):
self.measurements_storage.change_expand(self.expand_mode.isChecked())
self.info_field.clear()
save_orientation = self.horizontal_measurement_present.isChecked()
columns, rows = self.measurements_storage.get_size(save_orientation)
if self.file_names.get_value() == FileNamesEnum.No:
rows -= 1
shift = 1
else:
shift = 0
self.info_field.setColumnCount(columns)
self.info_field.setRowCount(rows)
self.info_field.setHorizontalHeaderLabels(self.measurements_storage.get_header(save_orientation))
self.info_field.setVerticalHeaderLabels(self.measurements_storage.get_rows(save_orientation))
if self.file_names.get_value() == FileNamesEnum.Full:
for y in range(columns):
self.info_field.setItem(
0, y, QTableWidgetItem(self.measurements_storage.get_val_as_str(0, y, save_orientation))
)
elif self.file_names.get_value() == FileNamesEnum.Short:
for y in range(columns):
self.info_field.setItem(
0,
y,
QTableWidgetItem(
os.path.basename(self.measurements_storage.get_val_as_str(0, y, save_orientation))
),
)
for x in range(1, rows + shift):
for y in range(columns):
self.info_field.setItem(
x - shift, y, QTableWidgetItem(self.measurements_storage.get_val_as_str(x, y, save_orientation))
)
def append_measurement_result(self):
try:
compute_class = self.settings.measurement_profiles[self.measurement_type.currentText()]
except KeyError:
QMessageBox.warning(
self,
"Measurement profile not found",
f"Measurement profile '{self.measurement_type.currentText()}' not found'",
)
return
channel = self.settings.image.get_channel(self.channels_chose.currentIndex())
segmentation = self.settings.segmentation
if segmentation is None:
return
base_mask = self.settings.mask
units = self.units_choose.get_value()
# FIXME find which errors should be displayed as warning
# def exception_hook(exception):
# QMessageBox.warning(self, "Calculation error", f"Error during calculation: {exception}")
kwargs = {}
for num in compute_class.get_channels_num():
if num >= self.settings.image.channels:
QMessageBox.warning(
self,
"Measurement error",
"Cannot calculate this measurement because " f"image do not have channel {num+1}",
)
return
kwargs[f"channel+{num}"] = self.settings.image.get_channel(num)
thread = ExecuteFunctionThread(
compute_class.calculate, [channel, segmentation, base_mask, self.settings.image.spacing, units], kwargs,
)
dial = WaitingDialog(thread, "Measurement calculation") # , exception_hook=exception_hook)
dial.exec()
stat: MeasurementResult = thread.result
if stat is None:
return
stat.set_filename(self.settings.image_path)
self.measurements_storage.add_measurements(
stat,
(not self.no_header.isChecked()) and (self.previous_profile != compute_class.name),
not self.no_units.isChecked(),
)
self.previous_profile = compute_class.name
self.refresh_view()
def keyPressEvent(self, e: QKeyEvent):
if e.modifiers() & Qt.ControlModifier:
selected = self.info_field.selectedRanges()
if e.key() == Qt.Key_C: # copy
s = ""
for r in range(selected[0].topRow(), selected[0].bottomRow() + 1):
for c in range(selected[0].leftColumn(), selected[0].rightColumn() + 1):
try:
s += str(self.info_field.item(r, c).text()) + "\t"
except AttributeError:
s += "\t"
s = s[:-1] + "\n" # eliminate last '\t'
self.clip.setText(s)
def update_measurement_list(self):
self.measurement_type.blockSignals(True)
available = list(sorted(self.settings.measurement_profiles.keys()))
text = self.measurement_type.currentText()
try:
index = available.index(text) + 1
except ValueError:
index = 0
self.measurement_type.clear()
self.measurement_type.addItem("<none>")
self.measurement_type.addItems(available)
self.measurement_type.setCurrentIndex(index)
self.measurement_type.blockSignals(False)
def showEvent(self, _):
self.update_measurement_list()
def event(self, event: QEvent):
if event.type() == QEvent.WindowActivate:
self.update_measurement_list()
return super().event(event)
@staticmethod
def _move_widgets(widgets_list: List[Tuple[QWidget, int]], layout1: QBoxLayout, layout2: QBoxLayout):
for el in widgets_list:
layout1.removeWidget(el[0])
layout2.addWidget(el[0], el[1])
def resizeEvent(self, a0: QResizeEvent) -> None:
if self.width() < 800 and self.butt_layout2.count() == 0:
self._move_widgets(
[(self.file_names_label, 1), (self.file_names, 1), (self.copy_button, 2)],
self.butt_layout,
self.butt_layout2,
)
elif self.width() > 800 and self.butt_layout2.count() != 0:
self._move_widgets(
[(self.file_names_label, 1), (self.file_names, 1), (self.copy_button, 2)],
self.butt_layout2,
self.butt_layout,
)
| StarcoderdataPython |
27347 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
CSV related help functions
"""
from __future__ import unicode_literals
oldstr = str
from builtins import str
import csv
import io
import six
from catatom2osm.config import eol, encoding, delimiter
def dict2csv(csv_path, a_dict, sort=None):
"""
Writes a dictionary to a csv file, optinally sorted by key (sort=0) or
value (sort=1)
"""
with io.open(csv_path, 'w', encoding=encoding) as csv_file:
dictitems = list(a_dict.items())
if sort in [0, 1]:
dictitems.sort(key=lambda x:x[sort])
for (k, v) in dictitems:
csv_file.write("%s%s%s%s" % (k, delimiter, v, '\n'))
def csv2dict(csv_path, a_dict, encoding=encoding):
"""Read a dictionary from a csv file"""
with open(csv_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=oldstr(delimiter))
for row in csv_reader:
if len(row) < 2:
raise IOError(_("Failed to load CSV file '%s'") % csv_file.name)
elif six.PY2:
a_dict[row[0].decode(encoding)] = row[1].decode(encoding)
else:
a_dict[row[0]] = row[1]
return a_dict
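# --- Illustrative round trip (editor's sketch, not part of the original module). ---
# Assumes the catatom2osm package is importable (it supplies the delimiter/encoding
# used above); the temporary path and sample values are arbitrary.
if __name__ == "__main__":
    import os
    import tempfile
    tmp_path = os.path.join(tempfile.mkdtemp(), 'sample.csv')
    dict2csv(tmp_path, {'a': '1', 'b': '2'}, sort=0)
    print(csv2dict(tmp_path, {}))  # expected: {'a': '1', 'b': '2'}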
| StarcoderdataPython |
503 | <filename>tests/bugs/core_6266_test.py
#coding:utf-8
#
# id: bugs.core_6266
# title: Deleting records from MON$ATTACHMENTS using ORDER BY clause doesn't close the corresponding attachments
# description:
# Old title: Don't close attach while deleting record from MON$ATTACHMENTS using ORDER BY clause.
# Confirmed bug on 3.0.6.33271.
# Checked on 3.0.6.33272 (SS/CS) - works fine.
# 22.04.2020. Checked separately on 4.0.0.1931 SS/CS: all OK. FB 4.0 can also be tested since this build.
#
# tracker_id: CORE-6266
# min_versions: ['3.0.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
# import os
# import sys
# import time
# import fdb
#
# ATT_CNT=5
# ATT_DELAY=1
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = <PASSWORD>
#
# db_conn.close()
#
# con_list={}
# for i in range(0, ATT_CNT):
# if i > 0:
# time.sleep( ATT_DELAY )
#
# c = fdb.connect(dsn = dsn)
# a = c.attachment_id
# con_list[ i ] = (a, c)
# # print('created attachment ', (a,c) )
#
# con_admin = con_list[0][1]
#
# #print(con_admin.firebird_version)
#
# # this removes ALL connections --> should NOT be used for reproducing ticket issue:
# #con_admin.execute_immediate('delete from mon$attachments where mon$attachment_id != current_connection order by mon$timestamp')
#
# # this removes ALL connections --> should NOT be used for reproducing ticket issue:
# #con_admin.execute_immediate('delete from mon$attachments where mon$system_flag is distinct from 1 and mon$attachment_id != current_connection order by mon$timestamp')
#
# # This DOES NOT remove all attachments (only 'last' in order of timestamp), but
# # DELETE statement must NOT contain phrase 'mon$attachment_id != current_connection':
# con_admin.execute_immediate('delete from mon$attachments where mon$system_flag is distinct from 1 order by mon$timestamp')
#
# con_admin.commit()
#
# cur_admin = con_admin.cursor()
# cur_admin.execute('select mon$attachment_id,mon$user from mon$attachments where mon$system_flag is distinct from 1 and mon$attachment_id != current_connection' )
# i=0
# for r in cur_admin:
# print( '### ACHTUNG ### STILL ALIVE ATTACHMENT DETECTED: ', r[0], r[1].strip(), '###' )
# i += 1
# print('Number of attachments that remains alive: ',i)
#
# cur_admin.close()
#
# #print('Final cleanup before quit from Python.')
#
# for k,v in sorted( con_list.items() ):
# #print('attempt to close attachment ', v[0] )
# try:
# v[1].close()
# #print('done.')
# except Exception as e:
# pass
# #print('Got exception:', sys.exc_info()[0])
# #print(e[0])
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
Number of attachments that remains alive: 0
"""
@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
| StarcoderdataPython |
3255294 | <filename>main.py
import sys
import kspdatareader
TEST_FILE = 'persistent.sfs'
def main():
with open(TEST_FILE) as infile:
reader = kspdatareader.KSPDataReader()
reader.process_lines(infile.readlines())
if __name__ == '__main__':
sys.exit(main())
| StarcoderdataPython |
1673420 | <gh_stars>10-100
#import sys
# sys.path.insert(0, '/content/gdrive/MyDrive/Tese/code') # for colab
from src.classification_scripts.SupConLoss.train_supcon import FineTuneSupCon
from src.classification_scripts.ALS.train_ALSingle import FineTuneALS
from src.classification_scripts.cross_entropy.train_ce import FineTuneCE
from src.configs.setters.set_initializers import *
from src.captioning_scripts.baseline.train_baseline import TrainBaseline
from src.captioning_scripts.fusion.gpt2.train_gpt2 import TrainGPT2
from src.captioning_scripts.fusion.pegasus.train_pegasus import TrainPegasus
if TASK == 'Captioning':
if ARCHITECTURE == ARCHITECTURES.BASELINE.value:
# # initialize the class
_train = TrainBaseline(language_aux=None, pretrain = False,fine_tune_encoder=False, model_version = 'v2')
elif ARCHITECTURE == ARCHITECTURES.FUSION.value:
if AUX_LM == AUX_LMs.GPT2.value:
_train = TrainGPT2(language_aux=AUX_LM, fine_tune_encoder=False, model_version= 'v2')
elif AUX_LM == AUX_LMs.PEGASUS.value:
_train = TrainPegasus(language_aux=AUX_LM, pretrain = False, fine_tune_encoder=False, nr_inputs=1, model_version= 'v2')
# setup the vocab (size and word map)
_train._setup_vocab()
# init model
_train._init_model()
    # load checkpoint if it exists; nr_inputs is only needed for the Pegasus fusion model (multi-input)
    _train._load_weights_from_checkpoint(_train.decoder, _train.decoder_optimizer, _train.encoder, _train.encoder_optimizer, is_current_best=True,
                                         nr_inputs=_train.nr_inputs if ARCHITECTURE == ARCHITECTURES.FUSION.value and AUX_LM == AUX_LMs.PEGASUS.value else 1)
# load dataloaders (train and val)
_train._setup_dataloaders()
# setup parameters for training
_train._setup_train(_train._train_critical if SELF_CRITICAL else _train._train, _train._validate)
elif TASK == 'Classification':
# to run extra epochs with a different loss
if EXTRA_EPOCHS:
logging.basicConfig(
format='%(levelname)s: %(message)s', level=logging.INFO)
logging.info('PRETRAINING ENCODER WITH EXTRA EPOCHS ON {}...'.format(LOSS))
if LOSS == LOSSES.SupConLoss.value:
model = FineTuneSupCon(model_type=ENCODER_MODEL, device=DEVICE, file = 'classification_scripts/encoder_training_details.txt', eff_net_version = 'v2')
elif LOSS == LOSSES.Cross_Entropy.value:
model = FineTuneCE(model_type=ENCODER_MODEL, device=DEVICE, file = 'classification_scripts/encoder_training_details.txt', eff_net_version = 'v2')
elif LOSS == LOSSES.ALS.value:
model = FineTuneALS(model_type=ENCODER_MODEL, device=DEVICE, file = 'classification_scripts/encoder_training_details.txt', eff_net_version = 'v2')
model._setup_train()
model._setup_transforms()
model._setup_dataloaders()
model.train(model.train_loader, model.val_loader)
else:
if LOSS == LOSSES.Cross_Entropy.value:
logging.basicConfig(
format='%(levelname)s: %(message)s', level=logging.INFO)
logging.info('PRETRAINING ENCODER WITH CROSS-ENTROPY...')
model = FineTuneCE(model_type=ENCODER_MODEL, device=DEVICE, file = 'classification_scripts/encoder_training_details.txt', eff_net_version = 'v2')
model._setup_train()
model._setup_transforms()
model._setup_dataloaders()
model.train(model.train_loader, model.val_loader)
elif LOSS == LOSSES.SupConLoss.value:
logging.basicConfig(
format='%(levelname)s: %(message)s', level=logging.INFO)
logging.info('PRETRAINING ENCODER WITH SUPERVISED CONTRASTIVE LOSS...')
model = FineTuneSupCon(model_type=ENCODER_MODEL, device=DEVICE, file = 'classification_scripts/encoder_training_details.txt', eff_net_version = 'v2')
model._setup_train()
model._setup_transforms()
model._setup_dataloaders()
model.train(model.train_loader, model.val_loader)
| StarcoderdataPython |
1685030 | <filename>src/dwell/climate/__init__.py
from .gridding import *
| StarcoderdataPython |
1761559 | <filename>tryalgo/knuth_morris_pratt.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Find a substring by Knuth-Morris-Pratt
# <NAME> et <NAME> - 2014-2018
# inspired by a code from <NAME>
# snip{
def knuth_morris_pratt(s, t):
"""Find a substring by Knuth-Morris-Pratt
:param s: the haystack string
:param t: the needle string
:returns: index i such that s[i: i + len(t)] == t, or -1
:complexity: O(len(s) + len(t))
"""
assert t != ''
len_s = len(s)
len_t = len(t)
r = [0] * len_t
j = r[0] = -1
for i in range(1, len_t):
while j >= 0 and t[i - 1] != t[j]:
j = r[j]
j += 1
r[i] = j
j = 0
for i in range(len_s):
while j >= 0 and s[i] != t[j]:
j = r[j]
j += 1
if j == len_t:
return i - len_t + 1
return -1
# snip}
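# --- Illustrative usage (editor's sketch, not part of the original module). ---
# The sample strings below are arbitrary.
if __name__ == "__main__":
    assert knuth_morris_pratt("hello world", "world") == 6
    assert knuth_morris_pratt("hello world", "xyz") == -1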
| StarcoderdataPython |
24720 | import logging
logging.basicConfig(level=logging.INFO)
from flask import Flask
from application.config import Config
app = Flask(__name__)
app.config.from_object(Config)
from application.models.classifiers.CNNClassifier import CNNClassifier
from application.models.classifiers.MLPClassifier import MLPClassifier
from application.models.classifiers.NaiveBayesClassifier import NaiveBayesClassifier
from application.models.classifiers.SVMClassifier import SVMClassifier
from application.models.detectors.CasClasDetector import CasClasDetector
from application.models.detectors.MTCNNDetector import MTCNNDetector
from application.utils import get_urls_list
logging.info("Loading models...")
MODELS = {"mtcnn": MTCNNDetector(),
"casclas": CasClasDetector(app.config["PRETRAINED_CASCLAS"]),
"mlp": MLPClassifier(app.config["MLP_WEIGHTS"]),
"svm": SVMClassifier(app.config["SVM"]),
"cnn": CNNClassifier(app.config["CNN_WEIGHTS"]),
"nb": NaiveBayesClassifier(app.config["CATEGORICAL_NB"])}
IMG_URLS = get_urls_list(app.config["OFFLINE_IMG_URLS"])
from application import routes
| StarcoderdataPython |
1668327 | from reducer import Reducer
from bounder import Bounder
from normalizer import Normalizer
from gridifier import Gridifier
from pB_approximator import pB_Approximator
from trimmer import Trimmer
from balancer import Balancer
from squeezer import Squeezer
from corrector import Corrector
import numpy as np
import tensorflow as tf
class Pipeline():
def __init__(self, const, base_snapshots):
self._const = const
self._snapshot_cnt = len(base_snapshots)
self._bounder = Bounder(base_snapshots, self._const.outlier_cutoff)
base_snapshots = self._bounder.bound_snapshots(base_snapshots)
self._normalizer = Normalizer(base_snapshots)
base_snapshots = self._normalizer.normalize_snapshots(base_snapshots)
self._minima = np.amin(base_snapshots, axis=0)
self._maxima = np.amax(base_snapshots, axis=0)
@property
def const(self):
return self._const
@property
def lower_bound(self):
return self._bounder.lower_bound
@property
def upper_bound(self):
return self._bounder.upper_bound
@property
def mean(self):
return self._normalizer.mean
@property
def std(self):
return self._normalizer.std
@property
def minima(self):
return self._minima
@property
def maxima(self):
return self._maxima
def reduce_property(self, property_list):
return np.array([property_list[used_position]
for used_position in self._const._used_list_positions])
@property
def r_lower_bound(self):
return self.reduce_property(self.lower_bound)
@property
def r_upper_bound(self):
return self.reduce_property(self.upper_bound)
@property
def r_mean(self):
return self.reduce_property(self.mean)
@property
def r_std(self):
return self.reduce_property(self.std)
@property
def r_minima(self):
return self.reduce_property(self.minima)
@property
def r_maxima(self):
return self.reduce_property(self.maxima)
@property
def snapshot_cnt(self):
return self._snapshot_cnt
def bound_normalize(self, snapshots):
snapshots = self._bounder.bound_snapshots(snapshots)
snapshots = self._normalizer.normalize_snapshots(snapshots)
return snapshots
def reduce(self, snapshots):
reducer = Reducer(self._const)
snapshots = reducer.reduce_snapshots(snapshots)
return snapshots
def gridify(self, snapshots):
gridifier = Gridifier(snapshots, self._const.resolution)
snapshots = gridifier.gridify_snapshots(snapshots)
return snapshots
def approximate(self, snapshots, dataset):
pB_dict, pBs, pB_weights = pB_Approximator.approximate_pBs(
snapshots, dataset.labels, dataset.weights)
return pB_dict, pBs, pB_weights
def trim(self, pBs, *args):
trimmer = Trimmer(pBs)
trimmed = [trimmer.trim_snapshots(arg) for arg in args]
return trimmed
def squeeze(self, pBs):
pBs = Squeezer.squeeze_pBs(pBs, self._const)
return pBs
def normalize(self, snapshots):
normalizer = Normalizer(snapshots)
snapshots = normalizer.normalize_snapshots(snapshots)
return snapshots
def pB_balance(self, pBs):
pBb_weights = Balancer.pB_balance(pBs, self._const.balance_bins)
return pBb_weights
def hypercube_balance(self, snapshots):
hcb_weights = Balancer.hypercube_balance(
snapshots, self._const.balance_bins)
return hcb_weights
def multidim_balance(self, snapshots):
mdb_weights = Balancer.multidim_balance(
snapshots, self._const.balance_bins)
return mdb_weights
def get_1D_means(self, snapshots):
return Corrector.get_means_for_1D_row(snapshots)
def get_2D_means(self, snapshots):
return Corrector.get_means_for_2D_grid(snapshots)
def pack_tf_dataset(
self, snapshots, labels, prediction_weights,
reconstruction_weights):
return tf.data.Dataset.from_tensor_slices(
({self._const.input_name: snapshots},
{self._const.output_name_1: labels,
self._const.output_name_2: snapshots},
{self._const.output_name_1: prediction_weights,
self._const.output_name_2: reconstruction_weights})) \
.shuffle(250000).batch(self._const.batch_size)
def prepare_groundTruth(self, dataset):
snapshots = self.bound_normalize(dataset.snapshots)
snapshots = self.reduce(snapshots)
g_snapshots = self.gridify(snapshots)
return g_snapshots, dataset.labels, dataset.weights
def prepare_dataset_from_bn(self, bn_snapshots, dataset):
snapshots = self.reduce(bn_snapshots)
g_snapshots = self.gridify(snapshots)
_, pBs, _ = self.approximate(g_snapshots, dataset)
ds = self.pack_tf_dataset(
snapshots=snapshots,
labels=pBs,
prediction_weights=np.ones(len(snapshots)),
reconstruction_weights=np.ones(len(snapshots)))
return ds, g_snapshots
def prepare_prediction_plotter(self, dataset):
bn_snapshots = self.bound_normalize(dataset.snapshots)
ds, g_snapshots = self.prepare_dataset_from_bn(bn_snapshots, dataset)
means_1D = self.get_1D_means(g_snapshots)
means_2D = self.get_2D_means(g_snapshots)
return ds, means_1D, means_2D
def prepare_stepper(
self, train_bn_snapshots, train_dataset,
val_bn_snapshots, val_dataset):
train_ds, _ = \
self.prepare_dataset_from_bn(train_bn_snapshots, train_dataset)
val_ds, _ = \
self.prepare_dataset_from_bn(val_bn_snapshots, val_dataset)
return train_ds, val_ds
def prepare_dataset_pickle(self, dataset):
bn_snapshots = self.bound_normalize(dataset.snapshots)
snapshots = self.reduce(bn_snapshots)
g_snapshots = self.gridify(snapshots)
_, pBs, _ = self.approximate(g_snapshots, dataset)
return snapshots, pBs, g_snapshots
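# --- Editor's note (not part of the original module): interface summary, derived
# from the attribute accesses above.  ``const`` must provide outlier_cutoff,
# _used_list_positions, resolution, balance_bins, batch_size, input_name,
# output_name_1 and output_name_2; ``dataset`` objects must provide
# snapshots, labels and weights.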
| StarcoderdataPython |
1703274 | from . import tools
from .capacity import Capacity
from .dataset import Dataset
from .datasource import Datasource
from .report import Report
from .tenant import Tenant
from .token import Token
from .workspace import Workspace | StarcoderdataPython |
3360225 | import torch, sys, os, pdb
import numpy as np
from PIL import Image
from scipy.spatial import Delaunay
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from .aligned_reid_utils import load_state_dict
from models.yolo_models import Darknet
from .featurepointnet_model_util import rotate_pc_along_y
from .deep_sort_utils import non_max_suppression as deepsort_nms
import math
from .detection import Detection
def create_detector(config_path, weight_path, cuda):
detector = Darknet(config_path)
detector.load_weights(weight_path)
if cuda:
detector.cuda()
detector.eval()
return detector
def get_depth_patches(point_cloud, box_3d, ids_3d, rot_angles, num_point = 1024):
#print(ids_3d)
depth_patches = []
for i, box in enumerate(box_3d):
if ids_3d[i] == -1:
depth_patches.append(None)
continue
box_center = np.asarray([ [box[0], box[1], box[2]] ])
rotate_pc_along_y(box_center, np.pi/2 + np.squeeze(box[6]))
box_center = box_center[0]
rotate_pc_along_y(point_cloud, np.pi/2 + np.squeeze(box[6]))
x = point_cloud[:, 0]
y = point_cloud[:, 1]
z = point_cloud[:, 2]
idx_1 = np.logical_and(x >= float(box_center[0] - box[3]/2.0), x <= float(box_center[0] + box[3]/2.0))
idx_2 = np.logical_and(y <= (box_center[1]+0.1), y >= float(box_center[1] - box[4]))
idx_3 = np.logical_and(z >= float(box_center[2] - box[5]/2.0), z <= float(box_center[2] + box[5]/2.0))
idx = np.logical_and(idx_1, idx_2)
idx = np.logical_and(idx, idx_3)
depth_patch = point_cloud[idx, :]
rotate_pc_along_y(point_cloud, -(np.squeeze(box[6])+np.pi/2)) #unrotate to prep for next iteration
rotate_pc_along_y(depth_patch, -(np.squeeze(box[6])+np.pi/2))
if depth_patch.size == 0:
ids_3d[i] = -1
depth_patches.append(None)
else:
if depth_patch.shape[0] > num_point:
pc_in_box_fov = np.expand_dims(depth_patch[np.random.choice(range(depth_patch.shape[0]), size = (num_point), replace=False)], 0)
else:
pc_in_box_fov = np.expand_dims(
np.vstack([depth_patch,
depth_patch[np.random.choice(range(depth_patch.shape[0]), size = (num_point - depth_patch.shape[0]), replace=True)]])
, 0)
depth_patches.append( get_center_view_point_set(pc_in_box_fov, rot_angles[i])[0])
return depth_patches, ids_3d
def non_max_suppression_3D_prime(detections, boxes_3d, ids_3d, ids_2d, nms_thresh = 1, confidence = None):
x = [boxes_3d[i][0] for i in range(len(boxes_3d))]
z = [boxes_3d[i][2] for i in range(len(boxes_3d))]
l = [boxes_3d[i][5] for i in range(len(boxes_3d))] #[3]
w = [boxes_3d[i][3] for i in range(len(boxes_3d))] #[5]
indices = deepsort_nms(boxes_3d, nms_thresh, np.squeeze(confidence))
for i in range(len(ids_3d)):
if i not in indices:
ids_3d[i] = -1
ids_2d[i] = -1
boxes_3d[i] = None
detections[i] = None
return detections, boxes_3d, ids_2d, ids_3d
def non_max_suppression_3D(depth_patches, ids_3d, ids_2d, nms_thresh = 1, confidence = None):
#depth_patches list of patches
if len(depth_patches) == 0:
return []
pick = []
if confidence is not None:
idxs = np.argsort(confidence)
else:
idxs = list(range(len(depth_patches)))
while len(idxs) > 0:
last = len(idxs) - 1
i = idxs[last]
overlap = np.asarray([iou_3d(depth_patches[i], depth_patches[idxs[x]]) for x in range(last)])
if np.any(overlap == -np.inf):
idxs = np.delete(idxs, [last])
continue
pick.append(i)
idxs = np.delete(
idxs, np.concatenate(
([last], np.where(overlap > nms_thresh)[0])))
for i in range(len(depth_patches)):
if i not in pick:
if ids_3d[i]!=-1:
ids_2d[i] = -1
ids_3d[i] = -1
return depth_patches, ids_3d, ids_2d
def iou_3d(patch_1, patch_2):
    # Expecting patches of shape (N, 4) or (N, 3) as numpy arrays
if patch_2 is None:
return np.inf
elif patch_1 is None:
return -np.inf
# Unique points
patch_unique_1 = np.unique(patch_1, axis = 0)
patch_unique_2 = np.unique(patch_2, axis = 0)
intersection_points = 0
for point_1_idx in range(patch_unique_1.shape[0]):
point_distance = np.sqrt(np.sum((patch_unique_1[point_1_idx]-patch_unique_2)**2, axis = 1))
intersection_points += np.any(point_distance<0.3)
union_points = patch_unique_1.shape[0] + patch_unique_2.shape[0] - intersection_points
iou = intersection_points/union_points
return iou
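# --- Illustrative check (editor's sketch, not part of the original module). ---
# Two identical point patches overlap completely, so the IoU is 1.0; the points are
# arbitrary.  Kept as a comment because this module only imports cleanly as part of
# its package (it uses relative imports above).
#   patch = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
#   iou_3d(patch, patch.copy())  # -> 1.0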
def convert_detections(detections, features, appearance_features, detections_3d):
detection_list = []
if detections_3d is None:
detections_3d = [None] * len(detections)
for detection, feature, appearance_feature, detection_3d in zip(detections, features, appearance_features, detections_3d):
x1, y1, x2, y2, conf, _, _ = detection
box_2d = [x1, y1, x2-x1, y2-y1]
if detection_3d is not None:
x, y, z, l, w, h, theta = detection_3d
box_3d = [x, y, z, l, w, h, theta]
else:
box_3d = None
if feature is None:
detection_list.append(Detection(box_2d, None, conf, appearance_feature.cpu(), feature))
else:
detection_list.append(Detection(box_2d, box_3d, conf, appearance_feature.cpu(), feature.cpu()))
return detection_list
def combine_features(features, depth_features, ids_3d, combination_model, depth_weight = 1):
combined_features = []
appearance_features = []
for i, (appearance_feature, depth_feature) in enumerate(zip(features, depth_features)):
if ids_3d[i] == -1:
depth_feature = torch.zeros(512, device=torch.device("cuda"))
combined_features.append(torch.cat([appearance_feature, depth_feature* depth_weight]))
appearance_features.append(appearance_feature)
if combination_model is not None and len(combined_features) > 0:
combination_model.eval()
combined_feature = torch.stack(combined_features)
combined_features = combination_model(combined_feature).detach()
combined_features = list(torch.unbind(combined_features))
return combined_features, appearance_features
def filter(detections):
for i, det in enumerate(detections): #Note image is 1242 x 375
left = det[0]
top = det[1]
right = det[2]
bottom = det[3]
if (left < 10 or right > 1232) and (top < 10 or bottom > 365):
detections[i] = None
return detections | StarcoderdataPython |
1751646 | <reponame>ian-r-rose/visualization<filename>docker/src/clawpack-5.3.1/riemann/src/__init__.py<gh_stars>10-100
#!/usr/bin/env python
# encoding: utf-8
"""
Wave propagation Riemann solvers implemented in Python and Fortran.
"""
rp_solver_list_1d = []
rp_solver_list_2d = []
rp_solver_list_3d = []
# Import 1d Riemann solvers
from advection_1D_py import advection_1D
from vc_advection_1D_py import vc_advection_1D
from acoustics_1D_py import acoustics_1D
from burgers_1D_py import burgers_1D
from shallow_1D_py import shallow_roe_1D, shallow_hll_1D, shallow_exact_1D
from euler_1D_py import euler_roe_1D, euler_hll_1D, euler_exact_1D
from nonlinear_elasticity_1D_py import nonlinear_elasticity_1D
import static
import acoustics_1D_constants
import acoustics_variable_1D_constants
import advection_1D_constants
import burgers_1D_constants
import euler_with_efix_1D_constants
import nonlinear_elasticity_fwave_1D_constants
import reactive_euler_with_efix_1D_constants
import shallow_roe_with_efix_1D_constants
import traffic_1D_constants
import traffic_vc_1D_constants
import acoustics_2D_constants
import acoustics_mapped_2D_constants
import advection_2D_constants
import burgers_2D_constants
import euler_mapgrid_2D_constants
import euler_5wave_2D_constants
import euler_4wave_2D_constants
import kpp_2D_constants
import psystem_2D_constants
import shallow_roe_with_efix_2D_constants
import shallow_sphere_2D_constants
import vc_acoustics_2D_constants
import vc_advection_2D_constants
import vc_elasticity_2D_constants
import vc_acoustics_3D_constants
import euler_3D_constants
import burgers_3D_constants
import vc_advection_3D_constants
try:
import acoustics_1D
import acoustics_variable_1D
import acoustics_1D_ptwise
import advection_1D
import advection_1D_ptwise
import burgers_1D
import euler_with_efix_1D
import nonlinear_elasticity_fwave_1D
import reactive_euler_with_efix_1D
import shallow_roe_with_efix_1D
import traffic_1D
import traffic_vc_1D
import acoustics_2D
import acoustics_mapped_2D
import acoustics_2D_ptwise
import advection_2D
import burgers_2D
import euler_mapgrid_2D
import euler_5wave_2D
import euler_4wave_2D
import kpp_2D
import psystem_2D
import shallow_roe_with_efix_2D
import shallow_sphere_2D
import vc_acoustics_2D
import vc_advection_2D
import vc_elasticity_2D
import vc_acoustics_3D
import euler_3D
import burgers_3D
import vc_advection_3D
except ImportError as e:
import traceback
print "********************************************************************"
    print 'Warning: Some Riemann solvers were not able to be imported.'
print ' Did you run "pip install" in your clawpack directory?'
traceback.print_exc()
print "********************************************************************"
import os
if os.path.exists('./layered_shallow_water_1D.so'):
import layered_shallow_water_1D
| StarcoderdataPython |
132091 | import xmlrpc.client
s = xmlrpc.client.ServerProxy('http://localhost:9000')
print("Available Methods:")
print(s.system.listMethods())
s.mouseClick("mainWindow/Button_1")
s.wait(200)
s.mouseClick("mainWindow/Button_2")
s.wait(200)
s.mouseClick("mainWindow/Button_2")
s.wait(200)
s.mouseClick("mainWindow/Button_2")
s.wait(200)
s.mouseClick("mainWindow/Button_1")
s.wait(200)
s.mouseClick("mainWindow/Button_2")
s.wait(200)
s.mouseClick("mainWindow/Button_1")
s.wait(200)
s.mouseClick("mainWindow/Button_1")
s.wait(200)
s.mouseClick("mainWindow/Button_1")
s.wait(200)
s.mouseClick("mainWindow/Button_2")
s.wait(200)
resultText = s.getStringProperty("mainWindow/results", "text")
print("Result:\n{}".format(resultText))
s.quit()
| StarcoderdataPython |
3340214 | <filename>tests/accepted_test.py<gh_stars>1-10
from src.spotlight.errors import ACCEPTED_ERROR
from .validator_test import ValidatorTest
class AcceptedTest(ValidatorTest):
def test_accepted_rule_with_invalid_values_expect_error(self):
rules = {
"tos1": "accepted",
"tos2": "accepted",
"tos3": "accepted",
"tos4": "accepted",
}
data = {"tos1": "off", "tos2": 2, "tos3": False, "tos4": "no"}
expected = ACCEPTED_ERROR
errors = self.validator.validate(data, rules)
self.assertEqual(len(errors.items()), 4)
for field, errs in errors.items():
self.assertEqual(errs[0], expected.format(field=field))
def test_accepted_rule_with_valid_values_expect_no_error(self):
rules = {
"tos1": "accepted",
"tos2": "accepted",
"tos3": "accepted",
"tos4": "accepted",
}
data = {"tos1": "on", "tos2": 1, "tos3": True, "tos4": "yes"}
expected = ACCEPTED_ERROR
errors = self.validator.validate(data, rules)
self.assertEqual(len(errors.items()), 0)
for field, errs in errors.items():
self.assertEqual(errs[0], expected.format(field=field))
| StarcoderdataPython |
124790 | from django.http import HttpResponse
from django.shortcuts import render, redirect
from RoomsManagement.models import *
from .models import *
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.urls import reverse
from django.contrib import messages
from . import decoraters
# Landing Page...
@decoraters.user_not_logged_in
def landing_page(request):
# Counting total number of Customers...
number_of_customers = 0
for user in User.objects.all():
if hasattr(user, 'customers'):
number_of_customers += 1
# .....................................
# Counting total number of blocks...
number_of_blocks = 0
blocks = []
for block in BlockFloor.objects.values_list('block'):
if block not in blocks:
blocks.append(block)
number_of_blocks += 1
else:
pass
# .....................................
# Counting total number of rooms...
number_of_rooms = 0
for room in Room.objects.all():
number_of_rooms += 1
# .....................................
args = {'customers_count': number_of_customers, 'blocks_count': number_of_blocks, 'rooms_count': number_of_rooms}
return render(request, 'customers/landing_page.html', args)
@decoraters.customer_login_required
def home(request):
username = request.user.username
return render(request, 'customers/home.html', {'username': username})
# Customer Login...
@decoraters.user_not_logged_in
def customer_login(request):
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user:
user_inst = User.objects.get(username=username)
if hasattr(user_inst, 'customers'):
if user.is_active:
login(request, user)
return redirect('customers:home')
else:
return HttpResponse('account not active')
else:
messages.error(request, f'Invalid Login details')
else:
messages.error(request, f'Invalid Login details')
return redirect(reverse('customers:login'))
else:
return render(request, 'customers/login.html')
# Management Login...
@decoraters.user_not_logged_in
def management_login_page(request):
return render(request, 'customers/management_login.html')
# Customer Logout...
@decoraters.customer_login_required
def customer_logout(request):
logout(request)
return redirect('customers:landing_page')
| StarcoderdataPython |
3214957 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# ----------------------------------------------------------------------------
"""CompletionPlugin tests."""
# Standard library imports
import pkg_resources
# Third party imports
import pytest
from qtpy.QtCore import QObject, Signal, Slot
# Local imports
from spyder.plugins.completion.api import (
SpyderCompletionProvider, CompletionRequestTypes)
class DummyCompletionReceiver(QObject):
"""Dummy class that can handle LSP responses."""
sig_response = Signal(str, dict)
@Slot(str, dict)
def handle_response(self, method, params):
self.sig_response.emit(method, params)
class FakeProvider(SpyderCompletionProvider):
COMPLETION_PROVIDER_NAME = 'fake'
CONF_DEFAULTS = [
('key1', 'value1'),
('key2', 'value2'),
('key3', 'value3'),
('key4', 4)
]
CONF_VERSION = "0.1.0"
@pytest.fixture
def completion_receiver(completion_plugin_all_started):
completion_plugin, _ = completion_plugin_all_started
receiver = DummyCompletionReceiver(None)
return completion_plugin, receiver
def test_configuration_merge(completion_plugin):
first_defaults = dict(FakeProvider.CONF_DEFAULTS)
first_version = FakeProvider.CONF_VERSION
# Check that a new completion provider configuration is registered without
# changes
result = completion_plugin._merge_default_configurations(
FakeProvider, FakeProvider.COMPLETION_PROVIDER_NAME, {}
)
(conf_version, conf_values, conf_defaults) = result
assert conf_version == first_version
assert conf_values == first_defaults
assert conf_defaults == first_defaults
# Add a new value to the initial default configuration without changing the
# version
second_config = first_defaults.copy()
second_config['extra_value'] = ['value']
FakeProvider.CONF_DEFAULTS = [(k, v) for k, v in second_config.items()]
prev_config = {
FakeProvider.COMPLETION_PROVIDER_NAME: {
'version': first_version,
'values': first_defaults,
'defaults': first_defaults
}
}
result = completion_plugin._merge_default_configurations(
FakeProvider, FakeProvider.COMPLETION_PROVIDER_NAME, prev_config
)
(conf_version, conf_values, conf_defaults) = result
assert conf_version == first_version
assert conf_values == second_config
assert conf_defaults == second_config
# Assert that default values cannot be changed without a bump in the minor
# version
config = first_defaults.copy()
config['key4'] = 5
third_config = first_defaults.copy()
third_config['key4'] = -1
FakeProvider.CONF_DEFAULTS = [(k, v) for k, v in third_config.items()]
prev_config = {
FakeProvider.COMPLETION_PROVIDER_NAME: {
'version': first_version,
'values': config,
'defaults': first_defaults
}
}
result = completion_plugin._merge_default_configurations(
FakeProvider, FakeProvider.COMPLETION_PROVIDER_NAME, prev_config
)
(conf_version, conf_values, conf_defaults) = result
assert conf_version == first_version
assert conf_values == config
assert conf_defaults == first_defaults
# Assert that default values can be replaced with new ones when the
# minor version number is bumped.
config['key1'] = 'othervalue'
expected_config = config.copy()
expected_config['key4'] = -1
FakeProvider.CONF_VERSION = "0.1.1"
result = completion_plugin._merge_default_configurations(
FakeProvider, FakeProvider.COMPLETION_PROVIDER_NAME, prev_config
)
(conf_version, conf_values, conf_defaults) = result
assert conf_version == "0.1.1"
assert conf_values == expected_config
assert conf_defaults == third_config
# Ensure that default values cannot be removed if the major version is not
# bumped
fourth_config = third_config.copy()
fourth_config.pop('key2')
FakeProvider.CONF_DEFAULTS = [(k, v) for k, v in fourth_config.items()]
result = completion_plugin._merge_default_configurations(
FakeProvider, FakeProvider.COMPLETION_PROVIDER_NAME, prev_config
)
(conf_version, conf_values, conf_defaults) = result
assert conf_version == "0.1.1"
assert conf_values == expected_config
assert conf_defaults == third_config
# Remove an option when the major version is bumped.
FakeProvider.CONF_VERSION = "1.0.0"
expected_config.pop('key2')
result = completion_plugin._merge_default_configurations(
FakeProvider, FakeProvider.COMPLETION_PROVIDER_NAME, prev_config
)
(conf_version, conf_values, conf_defaults) = result
assert conf_version == "1.0.0"
assert conf_values == expected_config
assert conf_defaults == fourth_config
def test_provider_detection(completion_plugin_all):
print(completion_plugin_all.providers)
assert len(completion_plugin_all.providers) == 3
@pytest.mark.order(1)
def test_plugin_completion_gather(qtbot_module, completion_receiver):
completion, receiver = completion_receiver
# Parameters to perform a textDocument/didOpen request
params = {
'file': 'test.py',
'language': 'python',
'version': 1,
'text': "# This is some text with some classe\nimport os\n\ncla",
'response_instance': receiver,
'offset': 1,
'selection_start': 0,
'selection_end': 0,
'codeeditor': receiver,
'requires_response': False
}
with qtbot_module.waitSignal(receiver.sig_response, timeout=30000) as blocker:
completion.send_request(
'python', CompletionRequestTypes.DOCUMENT_DID_OPEN, params)
# Parameters to perform a textDocument/completion request
params = {
'file': 'test.py',
'line': 2,
'column': 3,
'offset': 50,
'selection_start': 0,
'selection_end': 0,
'current_word': 'cla',
'codeeditor': receiver,
'response_instance': receiver,
'requires_response': True
}
with qtbot_module.waitSignal(receiver.sig_response, timeout=30000) as blocker:
completion.send_request(
'python', CompletionRequestTypes.DOCUMENT_COMPLETION, params)
_, response = blocker.args
response = response['params']
provider_set = {x['provider'] for x in response}
# Assert the response contains information from all the providers
    assert provider_set == {'LSP', 'Fallback', 'Snippets'}
@pytest.mark.order(1)
def test_plugin_first_response_request(qtbot_module, completion_receiver):
completion, receiver = completion_receiver
# Parameters to perform a textDocument/didOpen request
params = {
'file': 'test2.py',
'language': 'python',
'version': 2,
'text': "# This is some text with some classe\nimport os\n\n",
'response_instance': receiver,
'offset': 1,
'diff': '',
'selection_start': 0,
'selection_end': 0,
'codeeditor': receiver,
'requires_response': False
}
with qtbot_module.waitSignal(receiver.sig_response, timeout=30000) as blocker:
completion.send_request(
'python', CompletionRequestTypes.DOCUMENT_DID_OPEN, params)
params = {
'file': 'test2.py',
'line': 1,
'column': 8,
'offset': 43,
'diff': '',
'response_instance': receiver,
'codeeditor': receiver,
'requires_response': True
}
with qtbot_module.waitSignal(receiver.sig_response, timeout=30000) as blocker:
completion.send_request(
'python', CompletionRequestTypes.DOCUMENT_HOVER, params)
_, response = blocker.args
assert len(response['params']) > 0
| StarcoderdataPython |
129275 | # -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
# parasim
# (c) 1998-2022 all rights reserved
#
# the package
import hello
# declaration
class Greet(hello.command, family='hello.cli.greet'):
"""
    This is the base class for commands that greet my friends
N.B.: This command is not directly usable since it doesn't have a valid definition of the
greeting to extend
"""
# commands
@hello.export(tip="greet Alec")
def alec(self, plexus, **kwds):
"""
Greet Alec
"""
# get the friend name
friend = hello.ext.libhello.alec()
# do it
plexus.info.log(f"{self.greeting} {friend}!")
# report success
return 0
@hello.export(tip="greet Ally")
def ally(self, plexus, **kwds):
"""
Greet Ally
"""
# get the friend name
friend = hello.ext.libhello.ally()
# do it
plexus.info.log(f"{self.greeting} {friend}!")
# report success
return 0
@hello.export(tip="greet Mac")
def mac(self, plexus, **kwds):
"""
Greet Mac
"""
# get the friend name
friend = hello.ext.libhello.mac()
# do it
plexus.info.log(f"{self.greeting} {friend}!")
# report success
return 0
@hello.export(tip="greet Mat")
def mat(self, plexus, **kwds):
"""
Greet Mat
"""
# get the friend name
friend = hello.ext.libhello.mat()
# do it
plexus.info.log(f"{self.greeting} {friend}!")
# report success
return 0
# my greeting
greeting = None
# end of file
| StarcoderdataPython |
3278792 | <reponame>lalapapauhuh/Python<filename>Turtle/folha.py
import turtle
wn = turtle.Screen()
wn.bgcolor('LightGrey')
risco = turtle.Turtle()
risco.color('DimGrey')
risco.pensize(12)
for size in range(1,31,1):
risco.forward(size)
risco.right(26)
risco.left(160)
risco.forward(50)
risco.up()
risco.goto(-60,-21)
risco.forward(5)
risco.down()
risco.right(65)
risco.forward(-50)
risco.right(38)
risco.forward(70)
wn.exitonclick() | StarcoderdataPython |
15548 | <reponame>ICT4H/dcs-web<gh_stars>1-10
from unittest import TestCase
from mock import patch, MagicMock
from mangrove.datastore.database import DatabaseManager
from datawinners.questionnaire.library import QuestionnaireLibrary
class TestQuestionnaireTemplate(TestCase):
def test_get_category_to_doc_mappings(self):
with patch('datawinners.questionnaire.library.get_db_manager') as get_db_manager:
mock_dbm = MagicMock(spec=DatabaseManager)
get_db_manager.return_value = mock_dbm
mock_dbm.load_all_rows_in_view.return_value = [
{'key': 'Health', 'value': {'name': 'one', 'id': 'health1'}},
{'key': 'Health', 'value': {'name': 'two', 'id': 'health2'}},
{'key': 'Agriculture', 'value': {'name': 'three', 'id': 'agri1'}}
]
library = QuestionnaireLibrary()
result = library.get_template_groupings('en')
expected = [
{'category': 'Agriculture', 'templates': [{'id': 'agri1', 'name': 'three'}]},
{'category': 'Health',
'templates': [{'id': 'health1', 'name': 'one'}, {'id': 'health2', 'name': 'two'}]}]
self.assertDictEqual(expected[0], result[0])
self.assertDictEqual(expected[1], result[1])
mock_dbm.load_all_rows_in_view.assert_called_with('by_template_category_en')
def test_template_details_with_french_loaded_when_language_is_french(self):
with patch('datawinners.questionnaire.library.get_db_manager') as get_db_manager:
mock_dbm = MagicMock(spec=DatabaseManager)
get_db_manager.return_value = mock_dbm
mock_dbm.load_all_rows_in_view.return_value = []
library = QuestionnaireLibrary()
library.get_template_groupings('fr')
mock_dbm.load_all_rows_in_view.assert_called_with('by_template_category_fr')
| StarcoderdataPython |
3211565 | sm.lockUI()
FANZY = 1500010
sm.removeEscapeButton()
sm.flipDialoguePlayerAsSpeaker()
sm.sendNext("#bBleh! I almost drowned!#k")
sm.setSpeakerID(FANZY)
sm.sendSay("There must be some kind of enchantment to keep people from swimming across.")
sm.flipDialoguePlayerAsSpeaker()
sm.sendSay("#bYou could have told me that in advance!#k")
sm.setSpeakerID(FANZY)
sm.sendSay("I'm not omniscient, and you make a good test subject. We'll have to find another way.")
sm.unlockUI()
sm.startQuest(32102)
sm.completeQuest(32102)
sm.warp(101070000, 0) | StarcoderdataPython |
3281776 | <filename>app/api/profile_endpoint.py
from typing import List, Optional
from fastapi import APIRouter
from fastapi import HTTPException, Depends
from fastapi.responses import Response
from tracardi.service.storage.driver import storage
from tracardi.service.storage.factory import StorageFor
from .auth.authentication import get_current_user
from tracardi.domain.profile import Profile
from ..config import server
router = APIRouter(
dependencies=[Depends(get_current_user)]
)
@router.post("/profiles/import", tags=["profile"], include_in_schema=server.expose_gui_api)
async def import_profiles(profiles: List[Profile]):
try:
return await storage.driver.profile.save_profiles(profiles)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.get("/profiles/refresh", tags=["profile"], include_in_schema=server.expose_gui_api)
async def refresh_profile():
try:
return await storage.driver.profile.refresh()
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.get("/profile/{id}", tags=["profile"], response_model=Profile, include_in_schema=server.expose_gui_api)
async def get_profile_by_id(id: str, response: Response):
try:
profile = Profile(id=id)
result = await StorageFor(profile).index().load()
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
if result is None:
response.status_code = 404
return result
@router.delete("/profile/{id}", tags=["profile"], response_model=Optional[dict], include_in_schema=server.expose_gui_api)
async def delete_profile(id: str, response: Response):
# try:
result = await storage.driver.profile.delete(id)
# except Exception as e:
# raise HTTPException(status_code=500, detail=str(e))
if result['deleted'] == 0:
response.status_code = 404
return None
return result
@router.get("/profile/logs/{id}", tags=["profile"], response_model=list, include_in_schema=server.expose_gui_api)
async def get_profile_logs(id: str):
log_records = await storage.driver.console_log.load_by_profile(id)
return list(log_records)
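# Wiring sketch (an assumption, not part of this module): the router above is
# typically mounted on the FastAPI application elsewhere in the project, e.g.
#   app.include_router(router)
# after which GET /profile/{id} and the other endpoints become reachable.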
| StarcoderdataPython |
83178 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 22 11:53:09 2022
@author: Oliver
"""
import numpy as np
from matplotlib import pyplot as plt
import cv2 as cv
from PIL import Image
from PIL.ImageOps import grayscale
from .pattern_tools import patchmaker, align_pattern, microns_into_pattern
def histogram_patches(patches, bins=100, xlim=(140, 200), output=None):
"""
Display histogram of brightness values from all entries in a collection of images
patches : DICT
Values are numpy int arrays.
"""
brightness = np.array([])
for patch in patches.values():
brightness = np.concatenate((brightness,
patch.ravel()))
plt.hist(brightness, bins=bins)
plt.xlim(xlim)
if output is None:
plt.show()
else:
plt.savefig(output)
plt.clf()
def isolate_patches(picture, pattern_file, pattern_params, offsets,
exclude=[]):
pic = Image.open(picture)
pic = np.fliplr(np.flipud(np.array(grayscale(pic)).T))
pattern = align_pattern(pattern_file,
pattern_params['px_per_mm'] / 1e4,
pattern_params['theta'],
pattern_params['pattern_offset'])
patches = {}
for i, offset in enumerate(offsets):
if not i in exclude:
point, angle = microns_into_pattern(
offset, pattern, pattern_params['px_per_mm'] * 1e-3)
patch = patchmaker(pic,
height=pattern_params['spacing'],
width=pattern_params['pitch'],
center_y=int(point[0]),
center_x=int(point[1]),
angle=angle)
patches[i] = patch
return patches
def parse_patch(patch, threshold=170, min_size=6, return_image=False):
bw = (patch >= threshold).astype("uint8")
contours, hierarchy = cv.findContours(
bw, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
if not hierarchy is None:
# Remove inclusions, small features, and features on edges
top_hier = hierarchy.squeeze(
0)[:, -1] == -1 # must not have parent
good_size = np.array([cntr.shape[0]
for cntr in contours]) >= min_size
boxes = [cv.boundingRect(cntr) for cntr in contours] # x, y, w, h
no_touch = ~np.array([box_touches_edge(box, patch.shape)
for box in boxes])
allowed = top_hier * good_size * no_touch # AND
contours = [cnt for j, cnt in enumerate(contours) if allowed[j]]
# calculate properties
count = len(contours)
area = np.sum([cv.contourArea(cnt) for cnt in contours]) # pixels
if return_image:
return area, count, cv.drawContours(patch, contours, -1, 0, 2)
return area, count
def box_touches_edge(box, imshape):
x, y, w, h = box
hh, ww = imshape
return x <= 0 or y <= 0 or x+w >= ww or y+h >= hh
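# Minimal usage sketch for parse_patch on synthetic data (values are assumptions,
# not taken from the original experiments):
#   demo = np.zeros((50, 50), dtype="uint8")
#   demo[10:20, 10:30] = 255                        # one bright rectangular feature
#   area, count = parse_patch(demo, threshold=170)  # -> one contour with a positive pixel area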
| StarcoderdataPython |
1761143 | #!/usr/bin/env python
#coding:utf-8
# Author: mozman --<<EMAIL>>
# Purpose: test hyperlink object
# Created: 09.10.2010
# Copyright (C) 2010, <NAME>
# License: GPLv3
import sys
import unittest
from svgwrite.container import Hyperlink
class TestHyperlink(unittest.TestCase):
def test_constructor(self):
link = Hyperlink("http://localhost:8080")
self.assertEqual(link.tostring(), '<a target="_blank" xlink:href="http://localhost:8080" />')
def test_errors(self):
self.assertRaises(TypeError, Hyperlink, 1)
self.assertRaises(TypeError, Hyperlink, 3.1415)
self.assertRaises(TypeError, Hyperlink, (1,2))
self.assertRaises(TypeError, Hyperlink, dict(a=1))
if __name__=='__main__':
unittest.main() | StarcoderdataPython |
3357141 | <reponame>Orig5826/Basics
# -*- coding: utf_8 -*-
"""
urllib.request
"""
import urllib.request
import re
def getHtml(url, code="utf8"):
response = urllib.request.urlopen(url)
    html = response.read().decode(code)
return html
def getInfo():
url = "http://www.juzimi.com/"
html = getHtml(url)
# print(html)
reg = r'<a href="/ju/[0-9]*" title="查看句子" class="hblinksen">(.*?)</a>'
rex = re.compile(reg)
txt = re.findall(rex, html)
ss = "\r\n".join(txt)
r = re.sub("[A-Za-z0-9\[\`\~\!\@\#\$\^\&\*\(\)\=\|\{\}\'\:\;\'\,\[\]\.\<\>\/\?\~\@\#\\\&\*\%]", "", ss)
return r
print(getInfo()) | StarcoderdataPython |
3232539 | <reponame>PiotrJTomaszewski/InternetRadioReciever
from mpd import MPDClient, base
import threading
DEBUG_MODE = True
class SeriousConnectionError(BaseException):
def __init__(self, arg):
self.strerror = arg
self.args = {arg}
def reconnect_on_failure(client):
def decorator(func):
def try_and_reconnect(self, *args, **kwargs):
number_of_tries = 0
while number_of_tries < 3:
try:
number_of_tries += 1
result = func(self, *args, **kwargs)
return result
except base.ConnectionError:
if client == 'active':
self.connect_active_client()
else:
self.connect_idling_client()
if DEBUG_MODE:
print('MPD_CLIENT: Connection to mpd lost. Reconnecting')
raise SeriousConnectionError('Error: Maximum numbers of connection to MPD tries reached!')
return try_and_reconnect
return decorator
class MPDConnection:
def __init__(self, host, port, new_player_status_callback, new_song_callback, playlist_callback):
self.host = host
self.port = port
self.active_client = MPDClient()
self.idling_client = MPDClient()
self.new_player_status_callback = new_player_status_callback
self.new_song_callback = new_song_callback
self.playlist_callback = playlist_callback
self.idling_thread = threading.Thread(target=self._start_idling_client, daemon=True)
self.idling_thread.start()
self.player_status = {}
self.current_song_metadata = {}
self.last_song_title = ""
self.current_playlist = {}
self.last_playlist_length = 0
def __del__(self):
# TODO: Kill idling thread
self.idling_client.disconnect()
self.active_client.disconnect()
def connect(self, client):
client.connect(self.host, self.port)
def connect_active_client(self):
self.active_client.connect(self.host, self.port)
def connect_idling_client(self):
self.idling_client.connect(self.host, self.port)
def get_playlist(self):
return self.current_playlist
@reconnect_on_failure(client='active')
def update_player_status(self):
self.player_status = self.active_client.status()
@reconnect_on_failure(client='active')
def update_playlist(self):
self.current_playlist = self.active_client.playlistid()
def get_player_status(self):
return self.player_status
@reconnect_on_failure(client='active')
def update_current_song_metadata(self):
self.current_song_metadata = self.active_client.currentsong()
def get_current_song_metadata(self):
return self.current_song_metadata
def player_state(self):
return self.player_status.get('state')
@reconnect_on_failure(client='idling')
def idle(self):
return self.idling_client.idle()
def _handle_player_event(self):
print('MPD_CLIENT: New player status')
self.update_player_status()
self.new_player_status_callback()
self.update_current_song_metadata()
# Check if there is a new song
if self.current_song_metadata.get('title') != self.last_song_title:
if DEBUG_MODE:
print('MPD_CLIENT: New song')
self.new_song_callback()
self.last_song_title = self.current_song_metadata.get('title')
def _handle_mixer_event(self):
print('MPD_CLIENT: New player status')
self.update_player_status()
self.new_player_status_callback()
    def _handle_playlist_event(self):
        # Refresh the cached playlist before comparing lengths; comparing the
        # stale copy against its own previous length would never detect a change.
        self.update_playlist()
        if self.last_playlist_length != len(self.get_playlist()):
            print('MPD_CLIENT: Playlist has changed')
            self.playlist_callback()
            self.last_playlist_length = len(self.get_playlist())
def _start_idling_client(self):
self.connect_idling_client()
if DEBUG_MODE:
print('MPD_CLIENT: Starting idling thread')
self.update_player_status()
self.update_current_song_metadata()
self.new_player_status_callback()
self.new_song_callback()
self.last_song_title = self.current_song_metadata.get('title')
self.update_playlist()
self.last_playlist_length = len(self.get_playlist())
while True:
# Wait for a signal from server
events = self.idle()
for event in events:
if event == 'player':
self._handle_player_event()
elif event == 'mixer':
self._handle_mixer_event()
elif event == 'playlist':
self._handle_playlist_event()
if __name__ == '__main__':
def a():
pass
# mpd_init('localhost', 6600)
# mpd_connect(mpd_idling_client)
mpd_connection = MPDConnection('localhost', 6600, a, a)
# print(mpd_connection.get_playlist())
import time
time.sleep(1)
print(mpd_connection.get_player_status())
while 1:
pass
| StarcoderdataPython |
1693674 | <gh_stars>10-100
import unittest
import pytest
from dpipe.im.axes import *
class TextBroadcastToAxes(unittest.TestCase):
def test_exceptions(self):
with self.assertRaises(ValueError):
broadcast_to_axis(None, [1], [1, 2], [1, 2, 3])
with self.assertRaises(ValueError):
broadcast_to_axis([1, 2, 3], [1], [1, 2])
with self.assertRaises(ValueError):
broadcast_to_axis(None)
def test_broadcast_none():
inputs = [
[1],
[1, 2, 3],
[[1], [2], 3],
[1, [1, 2], [3]]
]
outputs = [
[[1]],
[[1], [2], [3]],
[[1], [2], [3]],
[[1, 1], [1, 2], [3, 3]]
]
    for i, o in zip(inputs, outputs):
        np.testing.assert_array_equal(o, broadcast_to_axis(None, *i)[1:])
| StarcoderdataPython |
4835543 | <reponame>btjanaka/competitive-programming-solutions<filename>leetcode/452.py
# Author: btjanaka (<NAME>)
# Problem: (LeetCode) 452
# Title: Minimum Number of Arrows to Burst Balloons
# Link: https://leetcode.com/problems/minimum-number-of-arrows-to-burst-balloons/
# Idea: Represent the horizontal coordinates as a series of "events" telling
# when each balloon starts and ends. Sort these events by their position and go
# through them. Keep track of a list of balloons that are currently being
# covered; when we find an end event, we know we have to pop everything, so we
# mark all balloons in our list as popped and empty the list.
# Difficulty: medium
# Tags: list, sorting
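# Worked example (the classic test case for this problem): points = [[10,16],[2,8],[1,6],[7,12]]
# gives 2. Sorted by position, the first end event is x=6 for [1,6]; at that moment the covered
# list holds [1,6] and [2,8], so one arrow pops both. The next unpopped end event is x=12 for
# [7,12], where the list holds [7,12] and [10,16], so a second arrow pops those.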
class Solution:
def findMinArrowShots(self, points: List[List[int]]) -> int:
events = [] # position of event, 0 (start) or 1 (end), index
for i in range(len(points)):
events.append((points[i][0], 0, i))
events.append((points[i][1], 1, i))
events.sort()
popped = [False for _ in range(len(points))]
cur = [] # Current list of spheres that will be popped
arrows = 0
for pos, t, i in events:
if t == 0: # Start
cur.append(i)
elif t == 1: # End
if popped[i]: continue # Avoid re-popping
arrows += 1
# Mark all as popped
while len(cur) > 0:
popped[cur.pop()] = True
return arrows
| StarcoderdataPython |
1684751 | <filename>lib/galaxy/tool_util/linters/help.py
"""This module contains a linting function for a tool's help."""
from galaxy.util import (
rst_to_html,
unicodify,
)
def lint_help(tool_xml, lint_ctx):
"""Ensure tool contains exactly one valid RST help block."""
# determine node to report for general problems with help
root = tool_xml.find("./help")
if root is None:
root = tool_xml.getroot()
helps = tool_xml.findall("./help")
if len(helps) > 1:
lint_ctx.error("More than one help section found, behavior undefined.", node=helps[1])
return
if len(helps) == 0:
lint_ctx.warn("No help section found, consider adding a help section to your tool.", node=root)
return
help = helps[0].text or ""
if not help.strip():
lint_ctx.warn("Help section appears to be empty.", node=helps[0])
return
lint_ctx.valid("Tool contains help section.", node=helps[0])
invalid_rst = rst_invalid(help)
if "TODO" in help:
lint_ctx.warn("Help contains TODO text.", node=helps[0])
if invalid_rst:
lint_ctx.warn(f"Invalid reStructuredText found in help - [{invalid_rst}].", node=helps[0])
else:
lint_ctx.valid("Help contains valid reStructuredText.", node=helps[0])
def rst_invalid(text):
"""Predicate to determine if text is invalid reStructuredText.
Return False if the supplied text is valid reStructuredText or
a string indicating the problem.
"""
invalid_rst = False
try:
rst_to_html(text, error=True)
except Exception as e:
invalid_rst = unicodify(e)
return invalid_rst
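# Usage sketch (assumed inputs): rst_invalid("A *valid* block of reStructuredText.") should
# return False, while malformed markup (for example an unterminated inline construct such as
# "*oops") is expected to return the docutils error message as a string.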
| StarcoderdataPython |
182347 | <filename>public/cantusdata/management/commands/import_data.py
from django.core.management.base import BaseCommand
from django.db import transaction
from optparse import make_option
from cantusdata.models.chant import Chant
from cantusdata.models.folio import Folio
from cantusdata.models.concordance import Concordance
from cantusdata.models.manuscript import Manuscript
from cantusdata.signals.solr_sync import solr_synchronizer
from cantusdata.helpers.chant_importer import ChantImporter
import csv
class Command(BaseCommand):
"""
    Importing manuscripts will import from 'sources-export.csv'
Importing concordances will import from 'concordances'
It will import all manuscripts mentioned above
"""
# List all possible types and their model
TYPE_MAPPING = {'manuscripts': Manuscript, 'concordances': Concordance, 'chants': Chant}
# All files must be in data-dumps/
# The second item is the manuscript ID the chants are attached to
CHANT_FILE_MAPPING = {
'salzinnes': ['salzinnes-chants.csv', 133],
'st-gallen-390': ['st-gallen-390-chants.csv', 127],
'st-gallen-391': ['st-gallen-391-chants.csv', 128],
'utrecht-406': ['utrecht-406-chants.csv', 51],
'paris-12044': ['paris-12044.csv', 38]
}
MANUSCRIPT_FILE = "sources-export.csv"
CONCORDANCE_FILE = "concordances"
help = 'Usage: ./manage.py import_data {{{0}}} [chant_file [manuscript_id] ...]\n'\
'\tAdd everything you want to import as arguments. (or use --all)\n'\
"\tSelect arguments from this list: {1}\n"\
"\tTo import chants from a specific manuscript, add arguments from\n"\
"\tthis list: {2}\n" \
"\tAlternatively, put a CSV file path as argument, followed by its manuscript ID"\
.format('|'.join(TYPE_MAPPING.keys()), TYPE_MAPPING.keys(), CHANT_FILE_MAPPING.keys())
option_list = BaseCommand.option_list + (
make_option('--all',
action='store_true',
dest='all',
default=False,
help='Import all types: {0}'.format(TYPE_MAPPING.keys())),
)
# Used to specify which chant files to import
chants = []
def handle(self, *args, **options):
if options['all']:
args += tuple(self.TYPE_MAPPING.keys())
# Go through the arguments to see if some files have been specified
for index, arg in enumerate(args):
if arg in self.CHANT_FILE_MAPPING.keys():
self.chants.append(self.CHANT_FILE_MAPPING[arg])
elif arg not in self.TYPE_MAPPING.keys() and arg.endswith('.csv') and index + 1 < len(args):
self.chants.append([arg, args[index + 1]])
# If no files were specified, import all of them
if len(self.chants) == 0:
self.chants = self.CHANT_FILE_MAPPING.values()
with solr_synchronizer.get_session():
for type in self.TYPE_MAPPING.keys():
if type in args:
# Remove the trailing 's' to make the type singular
type_singular = type.rstrip('s')
# Make an array of all the manuscript IDs
manuscript_ids = [ chant[1] for chant in self.chants ]
self.stdout.write('Deleting old {0} data...'.format(type_singular))
# Special case for chants, do not delete everything and we need to delete the folios
if type == 'chants':
Chant.objects.filter(manuscript__id__in=manuscript_ids).delete()
self.stdout.write('Deleting old folio data...')
Folio.objects.filter(manuscript__id__in=manuscript_ids).delete()
else:
self.TYPE_MAPPING[type].objects.all().delete()
self.stdout.write('Importing new {0} data...'.format(type_singular))
# Call the method corresponding with the current type
getattr(self, 'import_{0}_data'.format(type_singular))(**options)
self.stdout.write("Waiting for Solr to finish...")
self.stdout.write("Done.")
@transaction.atomic
def import_manuscript_data(self, **options):
try:
csv_file = csv.DictReader(open("data_dumps/{0}".format(self.MANUSCRIPT_FILE), "rU"))
except IOError:
raise IOError("File 'data_dumps/{0}' does not exist!".format(self.MANUSCRIPT_FILE))
# Load in the csv file. This is a massive list of dictionaries.
self.stdout.write("Starting manuscript import process.")
# Create a manuscript and save it
for index, row in enumerate(csv_file):
manuscript = Manuscript()
manuscript.name = row["Title"].decode("utf-8").strip()
manuscript.siglum = row["Siglum"].decode("utf-8").strip()
manuscript.date = row["Date"].decode("utf-8").strip()
manuscript.provenance = row["Provenance"].decode("utf-8").strip()
manuscript.description = row["Description"].decode("utf-8").strip()
manuscript.save()
self.stdout.write("Successfully imported {0} manuscripts into database.".format(index))
@transaction.atomic
def import_concordance_data(self, **options):
try:
file = open("data_dumps/{0}".format(self.CONCORDANCE_FILE))
except IOError:
raise IOError("File 'data_dumps/{0}' does not exist!".format(self.CONCORDANCE_FILE))
# Every line is a new concordance
for index, line in enumerate(file.readlines()):
# This method is pretty hacky, but it seems to work
concordance = Concordance()
concordance.letter_code = line.split(" ", 1)[0].strip()
line = line.split(" ", 1)[1]
concordance.institution_city = line.split(",", 1)[0].strip()
line = line.split(",", 1)[1]
concordance.institution_name = line.split(",", 1)[0].strip()
line = line.split(",", 1)[1]
concordance.library_manuscript_name = line.split(" (", 1)[0].strip()
line = line.split(" (", 1)[1]
concordance.date = line.split(", from", 1)[0].strip()
line = line.split(", from", 1)[1]
concordance.location = line.split(")", 1)[0].strip()
line = line.split(")", 1)[1]
line = line.split(": ", 1)[1]
concordance.rism_code = line.split("]", 1)[0].strip()
concordance.save()
self.stdout.write("Successfully imported {0} concordances into database.".format(index + 1))
def import_chant_data(self, **options):
for chant in self.chants:
chant_file = chant[0]
importer = ChantImporter(self.stdout)
chant_count = importer.import_csv("data_dumps/{0}".format(chant_file))
# Save the new chants
importer.save()
self.stdout.write("Successfully imported {0} chants into database.".format(chant_count))
| StarcoderdataPython |
28634 | """
==================
welly
==================
"""
from .project import Project
from .well import Well
from .header import Header
from .curve import Curve
from .synthetic import Synthetic
from .location import Location
from .crs import CRS
from . import tools
from . import quality
def read_las(path, **kwargs):
"""
A package namespace method to be called as `welly.read_las`.
Just wraps `Project.from_las()`. Creates a `Project` from a .LAS file.
Args:
path (str): path or URL where LAS is located. `*.las` to load all files
in dir
        **kwargs: See `Project.from_las()` for additional arguments.
Returns:
welly.Project. The Project object.
"""
return Project.from_las(path, **kwargs)
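# Usage sketch (hypothetical paths, not shipped with the package):
#   p = read_las("logs/my_well.las")   # a single LAS file
#   p = read_las("logs/*.las")         # every LAS file in the directory, per the docstring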
def read_df(df, **kwargs):
"""
A package namespace method to be called as `welly.read_df`.
Just wraps `Well.from_df()`. Creates a `Well` from your pd.DataFrame.
Args:
df (pd.DataFrame): Column data and column names
Optional **kwargs:
units (dict): Optional. Units of measurement of the curves in `df`.
req (list): Optional. An alias list, giving all required curves.
uwi (str): Unique Well Identifier (UWI)
name (str): Name
Returns:
Well. The `Well` object.
"""
return Well.from_df(df, **kwargs)
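# Usage sketch (hypothetical data; assumes the caller has pandas imported as pd):
#   df = pd.DataFrame({'GR': [45.2, 50.1], 'DT': [80.0, 82.5]})
#   w = read_df(df, units={'GR': 'API', 'DT': 'us/ft'}, name='Example well')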
__all__ = [
'Project',
'Well',
'Header',
'Curve',
'Synthetic',
'Location',
'CRS',
'quality',
'tools', # Various classes in here
'read_las'
]
from pkg_resources import get_distribution, DistributionNotFound
try:
VERSION = get_distribution(__name__).version
except DistributionNotFound:
try:
from ._version import version as VERSION
except ImportError:
raise ImportError(
"Failed to find (autogenerated) _version.py. "
"This might be because you are installing from GitHub's tarballs, "
"use the PyPI ones."
)
__version__ = VERSION
| StarcoderdataPython |
198312 | <reponame>462630221/optimizer
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from typing import Sequence, Text, Any, Tuple, List, Callable, Optional, Dict, Union
import io
import unittest
import os
import numpy as np # type: ignore
try:
import torch
import torchvision as tv
has_tv = True
except:
has_tv = False
import onnx
from onnx import checker, helper, ModelProto, TensorProto, GraphProto, NodeProto, shape_inference
from onnx import numpy_helper
from onnx.numpy_helper import to_array
try:
import onnxruntime as rt
has_ort = True
except:
has_ort = False
import onnxoptimizer
TensorShape = List[int]
TensorShapes = Dict[Optional[str], TensorShape]
LATEST_STABLE_OPSET_VERSION = 13
class TestOptimizer(unittest.TestCase):
def _compare(self, model_opt: onnx.ModelProto, model_ori: onnx.ModelProto, n_times: int = 5,
input_shapes: Optional[TensorShapes] = None, verbose=True) -> bool:
"""
:param input_shapes: Shapes of generated random inputs
:param model_opt: The simplified ONNX model
:param model_ori: The original ONNX model
:param n_times: Generate n random inputs
"""
def get_shape_from_value_info_proto(v: onnx.ValueInfoProto) -> List[int]:
return [dim.dim_value for dim in v.type.tensor_type.shape.dim]
def get_value_info_all(m: onnx.ModelProto, name: str) -> Optional[onnx.ValueInfoProto]:
for v in m.graph.value_info:
if v.name == name:
return v
for v in m.graph.input:
if v.name == name:
return v
for v in m.graph.output:
if v.name == name:
return v
return None
def get_shape(m: onnx.ModelProto, name: str) -> TensorShape:
"""
Note: This method relies on onnx shape inference, which is not reliable. So only use it on input or output tensors
"""
v = get_value_info_all(m, name)
if v is not None:
return get_shape_from_value_info_proto(v)
raise RuntimeError('Cannot get shape of "{}"'.format(name))
def get_elem_type(m: onnx.ModelProto, name: str) -> Optional[int]:
v = get_value_info_all(m, name)
if v is not None:
return v.type.tensor_type.elem_type
return None
def get_np_type_from_elem_type(elem_type: int) -> int:
sizes = (None, np.float32, np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, str, np.bool,
np.float16, np.double, np.uint32, np.uint64, np.complex64, np.complex128, np.float16)
assert len(sizes) == 17
size = sizes[elem_type]
assert size is not None
return size
def get_input_names(model: onnx.ModelProto) -> List[str]:
input_names = list(set([ipt.name for ipt in model.graph.input])
- set([x.name for x in model.graph.initializer]))
return input_names
def generate_rand_input(model, input_shapes: Optional[TensorShapes] = None):
if input_shapes is None:
input_shapes = {}
input_names = get_input_names(model)
full_input_shapes = {ipt: get_shape(
model, ipt) for ipt in input_names}
assert None not in input_shapes
full_input_shapes.update(input_shapes) # type: ignore
for key in full_input_shapes:
if np.prod(full_input_shapes[key]) <= 0:
raise RuntimeError(
'The shape of input "{}" has dynamic size, '
'please set an input shape manually'.format(key))
inputs = {ipt: np.array(np.random.rand(*full_input_shapes[ipt]),
dtype=get_np_type_from_elem_type(get_elem_type(model, ipt))) for ipt in
input_names}
return inputs
def forward(model, inputs=None, input_shapes: Optional[TensorShapes] = None) -> Dict[str, np.ndarray]:
if input_shapes is None:
input_shapes = {}
sess_options = rt.SessionOptions()
sess_options.graph_optimization_level = rt.GraphOptimizationLevel(0)
sess_options.log_severity_level = 3
sess = rt.InferenceSession(model.SerializeToString(
), sess_options=sess_options, providers=['CPUExecutionProvider'])
if inputs is None:
inputs = generate_rand_input(model, input_shapes=input_shapes)
outputs = [x.name for x in sess.get_outputs()]
run_options = rt.RunOptions()
run_options.log_severity_level = 3
res = OrderedDict(zip(outputs, sess.run(
outputs, inputs, run_options=run_options)))
return res
if input_shapes is None:
input_shapes = {}
onnx.checker.check_model(model_opt)
for i in range(n_times):
rand_input = generate_rand_input(
model_opt, input_shapes=input_shapes)
res_ori = forward(model_ori, inputs=rand_input)
res_opt = forward(model_opt, inputs=rand_input)
for name in res_opt.keys():
if not np.allclose(res_opt[name], res_ori[name], rtol=1e-4, atol=1e-5):
if verbose:
print("Tensor {} changes after optimization. The max diff is {}.".format(
name, np.max(np.abs(res_opt[name] - res_ori[name]))))
print("After optimization:")
print(res_opt[name])
print("Before optimization:")
print(res_ori[name])
print("----------------")
return False
return True
# type: (Union[GraphProto, ModelProto], Sequence[Text], bool, **Any) -> ModelProto
def _optimized(self, graph_or_model, opts, fixed_point=False, compare_result=True, **kwargs):
if isinstance(graph_or_model, ModelProto):
orig_model = graph_or_model
else:
opset_imports = kwargs.pop('opset_imports', None)
if opset_imports is None:
opset_imports = [helper.make_opsetid("", LATEST_STABLE_OPSET_VERSION)]
orig_model = helper.make_model(
graph_or_model, producer_name='onnx-test', opset_imports=opset_imports, **kwargs)
checker.check_model(orig_model)
optimized_model = onnxoptimizer.optimize(orig_model, opts, fixed_point)
checker.check_model(optimized_model)
if compare_result and len(optimized_model.graph.node) > 0:
if has_ort:
assert self._compare(optimized_model, orig_model)
else:
print("Skip onnxruntime test because it is not installed.")
return optimized_model
# input_types and output_types are lists of triples of (name, type, shape)
# NOTE(daquexian): only values that change across loop iterations should be in `input_types` and `output_types`. The pseudocode showing how loop op works is:
# loop_value_inputs = graph_value_inputs
# while cond:
# loop_value_outputs = body(loop_value_inputs)
# loop_value_inputs = loop_value_outputs
# graph_value_outputs = loop_value_outputs
def _make_fake_loop_op(self,
body_nodes, # type: Sequence[NodeProto]
# type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
input_types,
# type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
output_types,
check_legality=True,
): # type: (...) -> List[NodeProto]
if check_legality:
assert len(input_types) == len(output_types)
zero = helper.make_tensor(
"trip_count_value", TensorProto.INT64, (), [1])
true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
# lcd is a dummy loop-carried dependency that only exists because
# right now the schema checker is broken and assumes a variadic
# input needs at least one value.
graph_inputs = [helper.make_tensor_value_info("i", TensorProto.INT64, ()),
helper.make_tensor_value_info("cond", TensorProto.BOOL, ())]
for type, shape, name in input_types:
graph_inputs.append(
helper.make_tensor_value_info("_" + name, type, shape))
graph_outputs = [helper.make_tensor_value_info(
"cond", TensorProto.BOOL, ())]
for type, shape, name in output_types:
graph_outputs.append(
helper.make_tensor_value_info("_" + name, type, shape))
body_graph = helper.make_graph(body_nodes, "body_graph", graph_inputs,
graph_outputs)
loop_inputs = ["trip_count", "condition"]
loop_inputs.extend([name for _, _, name in input_types])
# TODO: fix checker to accept 0-input variadic inputs
if len(loop_inputs) == 2:
loop_inputs.append("")
loop_outputs = [name for _, _, name in output_types]
retval_nodes = [
helper.make_node("Constant", [], ["trip_count"], value=zero),
helper.make_node("Constant", [], ["condition"], value=true),
helper.make_node("Loop", loop_inputs, loop_outputs, body=body_graph)
]
return retval_nodes
def _make_fake_if_op(self,
true_nodes, # type: Sequence[NodeProto]
false_nodes, # type: Sequence[NodeProto]
# type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
output_types
): # type: (...) -> List[NodeProto]
true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
true_graph = helper.make_graph(true_nodes, "true_graph", [], [])
false_graph = helper.make_graph(false_nodes, "false_graph", [], [])
if_inputs = ["condition"]
if_outputs = [name for _, _, name in output_types]
retval_nodes = [
helper.make_node("Constant", [], ["condition"], value=true),
helper.make_node("If", if_inputs, if_outputs, then_branch=true_graph,
else_branch=false_graph)
]
return retval_nodes
# fn is a function that takes a single node as argument
# type: (GraphProto, Callable[[NodeProto], None]) -> None
def _visit_all_nodes_recursive(self, graph, fn):
for node in graph.node:
fn(node)
for attr in node.attribute:
if attr.g is not None:
self._visit_all_nodes_recursive(attr.g, fn)
if len(attr.graphs):
for gr in attr.graphs:
self._visit_all_nodes_recursive(gr, fn)
def test_get_available_passes(self): # type: () -> None
# FIXME does not guarantees to be listing all
graph = helper.make_graph([], "dummy_graph", [], [])
list_of_passes = onnxoptimizer.get_available_passes()
assert isinstance(list_of_passes, (list)) and len(list_of_passes) > 0
for pass_name in list_of_passes:
# If pass_name is invalid it throws a RuntimeError
self._optimized(graph, [pass_name])
def test_eliminate_identity_single_use(self): # type: () -> None
nodes = [helper.make_node("Add", ["X", "Y"], ["A"]),
helper.make_node("Identity", ["A"], ["B"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Identity", ["_B"], ["_B2"])],
[(TensorProto.FLOAT, (5,), "B")],
[(TensorProto.FLOAT, (5,), "B2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("B2", TensorProto.FLOAT, (5,))])
optimized_model = self._optimized(graph, ["eliminate_identity"])
# All identity nodes should have been eliminated
def check_identity(node): # type: (NodeProto) -> None
assert node.op_type != "Identity"
self._visit_all_nodes_recursive(optimized_model.graph, check_identity)
# Use of the output from the Identity node in the main graph should
# have been replaced with the input to the identity node
assert len(optimized_model.graph.output) == 2
assert optimized_model.graph.output[0].name == "B"
# Use of the output from the Identity node in the loop graph should
# have been replaced with the input to that identity node
assert len(optimized_model.graph.node[3].attribute[0].g.output) == 2
assert optimized_model.graph.node[3].attribute[0].g.output[1].name == "_B2"
def test_eliminate_identity_graph_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["A"])
identity = helper.make_node("Identity", ["A"], ["B"])
graph = helper.make_graph(
[add, identity],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,))])
optimized_model = self._optimized(graph, ["eliminate_identity"])
for node in optimized_model.graph.node:
assert node.op_type != "Identity"
assert len(
optimized_model.graph.output) == 1 and optimized_model.graph.output[0].name == 'B'
assert len(optimized_model.graph.node) == 1
def test_eliminate_identity_multiple_uses(self): # type: () -> None
identity = helper.make_node("Identity", ["X"], ["Y"])
add = helper.make_node("Add", ["Z", "Y"], ["A"])
mul = helper.make_node("Mul", ["A", "Y"], ["B"])
graph = helper.make_graph(
[identity, add, mul],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,))])
optimized_model = self._optimized(graph, ["eliminate_identity"])
for node in optimized_model.graph.node:
assert node.op_type != "Identity"
assert len(optimized_model.graph.node) == 2
def test_not_fuse_non_nop_flatten(self):
flatten = helper.make_node("Flatten", ["A"], ["B"], axis=2)
graph = helper.make_graph(
[flatten],
"test",
[helper.make_tensor_value_info(
"A", TensorProto.FLOAT, (1, 10, 3, 1, 1))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (10, 3))])
optimized_model = self._optimized(graph, ["eliminate_nop_flatten"])
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == 'Flatten'
def test_nop_flatten_axis0_graph_output(self):
add = helper.make_node("Add", ["X", "Y"], ["A"])
flatten = helper.make_node("Flatten", ["A"], ["B"], axis=0)
graph = helper.make_graph(
[add, flatten],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 10)),
],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 10))],
# the tensor_value_info of "A" is necessary to this optimizer
value_info=[helper.make_tensor_value_info(
"A", TensorProto.FLOAT, (1, 10))]
)
        # The existence of shape infos of graph outputs is checked in _optimized
optimized_model = self._optimized(graph, ["eliminate_nop_flatten"])
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == 'Add'
def test_nop_flatten_axis0(self):
flatten = helper.make_node("Flatten", ["A"], ["B"], axis=0)
graph = helper.make_graph(
[flatten],
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 10))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 10))])
optimized_model = self._optimized(graph, ["eliminate_nop_flatten"])
assert len(optimized_model.graph.node) == 0
def test_nop_flatten_axis1(self):
flatten = helper.make_node("Flatten", ["A"], ["B"], axis=1)
graph = helper.make_graph(
[flatten],
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (2, 3))])
optimized_model = self._optimized(graph, ["eliminate_nop_flatten"])
assert len(optimized_model.graph.node) == 0
def test_eliminate_duplicate_initializer(self): # type: () -> None
add_1 = helper.make_node("Add", ["A", "I_0"], ["B"])
add_2 = helper.make_node("Add", ["B", "I_1"], ["C"])
i = np.random.rand(5).astype(np.float32)
graph = helper.make_graph(
[add_1, add_2],
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("I_0", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("I_1", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("C", TensorProto.FLOAT, (5,))],
[helper.make_tensor("I_0", TensorProto.FLOAT,
dims=(5,),
vals=i.tobytes(),
raw=True),
helper.make_tensor("I_1", TensorProto.FLOAT,
dims=(5,),
vals=i.tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_duplicate_initializer"])
assert len(optimized_model.graph.node) == 2
assert len(optimized_model.graph.initializer) == 1
assert len(optimized_model.graph.input) == 2
assert optimized_model.graph.node[0].input[1] == "I_0"
def test_nop_cast(self): # type: () -> None
cast = helper.make_node("Cast", ["A"], ["B"], to=TensorProto.FLOAT)
graph = helper.make_graph(
[cast],
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (2, 3))])
optimized_model = self._optimized(graph, ["eliminate_nop_cast"])
assert len(optimized_model.graph.node) == 0
def test_nop_transpose_graph_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["A"])
trans = helper.make_node("Transpose", ["A"], ["B"], perm=[0, 1])
graph = helper.make_graph(
[add, trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (2, 3))])
        # The existence of shape infos of graph outputs is checked in _optimized
optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])
def check_transpose(node): # type: (NodeProto) -> None
assert node.op_type != "Transpose"
self._visit_all_nodes_recursive(optimized_model.graph, check_transpose)
assert len(optimized_model.graph.node) == 1
def test_nop_transpose(self): # type: () -> None
nodes = [helper.make_node("Identity", ["A"], ["X"]),
helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 1])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Transpose", ["_Y"], ["_Y2"], perm=[0, 1])],
[(TensorProto.FLOAT, (2, 3), "Y")],
[(TensorProto.FLOAT, (2, 3), "Y2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (2, 3))])
optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])
def check_transpose(node): # type: (NodeProto) -> None
assert node.op_type != "Transpose"
self._visit_all_nodes_recursive(optimized_model.graph, check_transpose)
# Use of the output from the Transpose node in the main graph should
# have been replaced with the input to the identity node
assert len(optimized_model.graph.output) == 2
assert optimized_model.graph.output[0].name == "Y"
# Use of the output from the Transpose node in the loop graph should
# have been replaced with the input to that identity node
assert len(optimized_model.graph.node[3].attribute[0].g.output) == 2
assert optimized_model.graph.node[3].attribute[0].g.output[1].name == "_Y2"
def test_nop_transpose_default(self): # type: () -> None
trans = helper.make_node("Transpose", ["X"], ["Y"])
graph = helper.make_graph(
[trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2))])
optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Transpose"
def test_nop_pad_opset10(self): # type: () -> None
nodes = [helper.make_node("Pad", ["X"], ["Y"], pads=[0, 0, 0, 0])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))])
assert len(graph.node) == 1
optimized_model = self._optimized(
graph, ["eliminate_nop_pad"], False, opset_imports=[helper.make_opsetid("", 10)])
def check_pad(node): # type: (NodeProto) -> None
assert node.op_type != "Pad"
self._visit_all_nodes_recursive(optimized_model.graph, check_pad)
assert len(optimized_model.graph.output) == 1
assert optimized_model.graph.output[0].name == "Y"
assert len(optimized_model.graph.node) == 0
def test_nop_pad_graph_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["A"])
pad = helper.make_node("Pad", ["A", "Pads"], ["B"])
graph = helper.make_graph(
[add, pad],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (2,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(2,),
vals=np.array([0, 0]).astype(
np.int64).tobytes(),
raw=True)])
        # The existence of shape infos of graph outputs is checked in _optimized
optimized_model = self._optimized(graph, ["eliminate_nop_pad"])
def check_pad(node): # type: (NodeProto) -> None
assert node.op_type != "Pad"
self._visit_all_nodes_recursive(optimized_model.graph, check_pad)
assert len(optimized_model.graph.node) == 1
def test_nop_pad(self): # type: () -> None
nodes = [helper.make_node("Pad", ["X", "Pads"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (4,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(4,),
vals=np.array([0, 0, 0, 0]).astype(
np.int64).tobytes(),
raw=True)])
assert len(graph.node) == 1
optimized_model = self._optimized(graph, ["eliminate_nop_pad"])
def check_pad(node): # type: (NodeProto) -> None
assert node.op_type != "Pad"
self._visit_all_nodes_recursive(optimized_model.graph, check_pad)
assert len(optimized_model.graph.output) == 1
assert optimized_model.graph.output[0].name == "Y"
assert len(optimized_model.graph.node) == 0
def test_nop_pad_default_opset10(self): # type: () -> None
trans = helper.make_node("Pad", ["X"], ["Y"], pads=[0, 0, 1, 1])
graph = helper.make_graph(
[trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 4))])
optimized_model = self._optimized(
graph, ["eliminate_nop_pad"], False, opset_imports=[helper.make_opsetid("", 10)])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Pad"
def test_nop_pad_default(self): # type: () -> None
trans = helper.make_node("Pad", ["X", "Pads"], ["Y"])
graph = helper.make_graph(
[trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (4,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 4))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(4,),
vals=np.array([0, 1, 0, 0]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["eliminate_nop_pad"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Pad"
def test_eliminate_unused_initializer(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(2, 3),
vals=np.random.randn(2, 3).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 0
def test_eliminate_unused_initializer_input(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(2, 3),
vals=np.random.randn(2, 3).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 0
assert len(optimized_model.graph.input) == 2
# type: () -> None
def test_eliminate_unused_initializer_no_eliminate_used_default(self):
add = helper.make_node("Add", ["X", "A"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(1, 2),
vals=np.random.randn(1, 2).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 1
# type: () -> None
def test_eliminate_unused_initializer_no_eliminate_used(self):
nodes = [helper.make_node("Add", ["X", "A"], ["Z"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Add", ["_X", "A"], ["_Z2"])],
[(TensorProto.FLOAT, (1, 2), "X")],
[(TensorProto.FLOAT, (1, 2), "Z2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(1, 2),
vals=np.random.randn(1, 2).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
# Add, Constant (trip count), Constant (cond), Loop
assert len(list(optimized_model.graph.node)) == 4
assert optimized_model.graph.node[0].op_type == "Add"
assert optimized_model.graph.output[0].name == "Z"
# Add
assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
assert optimized_model.graph.node[3].attribute[0].g.node[0].op_type == 'Add'
assert optimized_model.graph.node[3].attribute[0].g.output[1].name == '_Z2'
assert len(list(optimized_model.graph.initializer)) == 1
# type: () -> None
def test_eliminate_unused_initializer_no_eliminate_output(self):
add = helper.make_node("Add", ["X", "Y"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(2, 3),
vals=np.random.randn(2, 3).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 1
assert "Z" in [o.name for o in optimized_model.graph.output]
def test_extract_constant_to_initializer(self): # type: () -> None
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
constant = helper.make_node("Constant", [], ["A"],
value=helper.make_tensor(
name="bias",
data_type=TensorProto.FLOAT,
dims=(16, 1, 1),
vals=np.random.randn(16).astype(np.float32).tolist()))
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, constant, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(
graph, ["extract_constant_to_initializer"])
self.assertEqual(len(optimized_model.graph.initializer), 1)
init = optimized_model.graph.initializer[0]
self.assertEqual(init.name, 'A')
self.assertEqual(init.dims, [16, 1, 1])
self.assertEqual(init.data_type, TensorProto.FLOAT)
self.assertEqual(
[n.op_type for n in optimized_model.graph.node], ['Conv', 'Add'])
def test_fuse_concats(self): # type: () -> None
nodes = [helper.make_node("Concat", ["A", "B", "C"], ["X"], axis=0),
helper.make_node("Concat", ["D", "E", "F"], ["Y"], axis=0),
helper.make_node("Concat", ["X", "G", "Y"], ["Z"], axis=0)]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (4, 3, 4)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("D", TensorProto.FLOAT, (4, 3, 4)),
helper.make_tensor_value_info("E", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("F", TensorProto.FLOAT, (4, 3, 4)),
helper.make_tensor_value_info("G", TensorProto.FLOAT, (4, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (22, 3, 4))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_concats"], True) # two passes are needed to simplify the graph to its simplest state.
assert len(optimized_model.graph.node) == 1
assert len(optimized_model.graph.node[0].input) == 7
assert optimized_model.graph.node[0].input == [
"A", "B", "C", "G", "D", "E", "F"]
assert optimized_model.graph.node[0].op_type == "Concat"
def test_fuse_concats_different_axis(self): # type: () -> None
nodes = [helper.make_node("Concat", ["A", "B", "C"], ["X"], axis=0),
helper.make_node("Concat", ["D", "E", "F"], ["Y"], axis=1),
helper.make_node("Concat", ["X", "Y"], ["Z"], axis=2)]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 9, 4)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (4, 9, 4)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (2, 9, 4)),
helper.make_tensor_value_info("D", TensorProto.FLOAT, (8, 3, 4)),
helper.make_tensor_value_info("E", TensorProto.FLOAT, (8, 3, 4)),
helper.make_tensor_value_info("F", TensorProto.FLOAT, (8, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (8, 9, 8))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_concats"])
assert optimized_model.graph == graph
def test_fuse_transpose(self): # type: () -> None
nodes = [helper.make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2]),
helper.make_node("Transpose", ["Y"], ["Z"], perm=[2, 0, 1]),
helper.make_node("Transpose", ["Z"], ["A"], perm=[2, 0, 1])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Transpose", ["_X"], ["_Y2"], perm=[1, 0, 2]),
helper.make_node("Transpose", ["_Y2"], ["_Y3"], perm=[2, 0, 1]),
helper.make_node("Transpose", ["_Y3"], ["_Y4"], perm=[2, 0, 1])],
[(TensorProto.FLOAT, (2, 3, 4), "X")],
[(TensorProto.FLOAT, (2, 4, 3), "Y4")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 4, 3)),
helper.make_tensor_value_info("Y4", TensorProto.FLOAT, (4, 3, 2))])
original_model = helper.make_model(graph)
shape_inference.infer_shapes(original_model)
optimized_model = self._optimized(
graph, ["fuse_consecutive_transposes"])
shape_inference.infer_shapes(optimized_model)
# Transpose, Constant (trip count), Constant (cond), Loop
assert len(list(optimized_model.graph.node)) == 4
# Transpose
assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
def test_fuse_transpose_default_graph_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["A"])
trans1 = helper.make_node("Transpose", ["A"], ["B"])
trans2 = helper.make_node("Transpose", ["B"], ["C"])
graph = helper.make_graph(
[add, trans1, trans2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("C", TensorProto.FLOAT, (2, 3))])
        # The existence of shape infos of graph outputs is checked in _optimized
optimized_model = self._optimized(
graph, ["fuse_consecutive_transposes"])
def check_transpose(node): # type: (NodeProto) -> None
assert node.op_type != "Transpose"
self._visit_all_nodes_recursive(optimized_model.graph, check_transpose)
assert len(optimized_model.graph.node) == 1
def test_fuse_transpose_default(self): # type: () -> None
trans1 = helper.make_node("Transpose", ["X"], ["Y"])
trans2 = helper.make_node("Transpose", ["Y"], ["Z"])
graph = helper.make_graph(
[trans1, trans2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (2, 3, 4))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_transposes"])
assert len(list(optimized_model.graph.node)) == 0
def test_fuse_transpose_default_no_fuse(self): # type: () -> None
trans1 = helper.make_node("Transpose", ["X"], ["Y"])
trans2 = helper.make_node("Transpose", ["Y"], ["Z"], perm=[0, 1, 2])
graph = helper.make_graph(
[trans1, trans2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (4, 3, 2))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_transposes"])
assert len(list(optimized_model.graph.node)) == 2
for node in optimized_model.graph.node:
assert node.op_type == "Transpose"
def test_fuse_transpose_into_gemm(self): # type: () -> None
nodes = [helper.make_node("Transpose", ["X"], ["A"], perm=[1, 0]),
helper.make_node("Transpose", ["Y"], ["B"], perm=[1, 0]),
helper.make_node("Gemm", ["A", "B", "C"], ["Z"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Transpose", ["_X"], ["_A"], perm=[1, 0]),
helper.make_node("Transpose", ["Y"], ["_B"], perm=[1, 0]),
helper.make_node("Gemm", ["_A", "_B", "C"], ["_Z2"])],
[(TensorProto.FLOAT, (2, 3), "X")],
[(TensorProto.FLOAT, (3, 5), "Z2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 2)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (3, 5))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (3, 5))])
optimized_model = self._optimized(graph, ["fuse_transpose_into_gemm"])
# Gemm, Constant (trip count), Constant (cond), Loop
assert len(list(optimized_model.graph.node)) == 4
assert optimized_model.graph.node[0].op_type == "Gemm"
# Gemm
assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
assert optimized_model.graph.node[3].attribute[0].g.node[0].op_type == "Gemm"
def test_fuse_add_bias_into_conv_with_scalar_bias(self): # type: () -> None
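        # A scalar bias cannot be attached to Conv directly, so it is unsqueezed to 1-D and tiled to one value per output channel, leaving four nodes.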
nodes = [helper.make_node("Conv", ["X", "Y"], ["Z"]),
helper.make_node("Add", ["Z", "A"], ["B"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, ())],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
        # Unsqueeze, Constant (tile repeats), Tile, Conv
assert len(optimized_model.graph.node) == 4
assert optimized_model.graph.node[0].op_type == 'Unsqueeze'
assert optimized_model.graph.node[1].op_type == 'Constant'
assert optimized_model.graph.node[2].op_type == 'Tile'
assert optimized_model.graph.node[3].op_type == 'Conv'
def test_fuse_add_bias_into_conv_use_weight_shape(self): # type: () -> None
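        # The (16, 1, 1) addend matches the output-channel count of the weight, so it is squeezed to 1-D and attached as the Conv bias input.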
nodes = [helper.make_node("Conv", ["X", "Y"], ["Z"]),
helper.make_node("Add", ["Z", "A"], ["B"])]
        # FIXME(daquexian): It looks like a subgraph cannot get value info from its parent graph
# nodes.extend(self._make_fake_loop_op(
# [helper.make_node("Conv", ["_X", "Y"], ["_Z"]),
# helper.make_node("Add", ["_Z", "A"], ["_B2"])],
# [(TensorProto.FLOAT, (1, 5, 3, 3), "X")],
# [(TensorProto.FLOAT, (1, 16, 1, 1), "B2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 1, 1))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
# # Squeeze, Conv, Constant (trip count), Constant (condition), Loop
# assert len(list(optimized_model.graph.node)) == 5
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Squeeze'
assert optimized_model.graph.node[1].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'B'
# # Squeeze, Conv
# assert len(optimized_model.graph.node[4].attribute[0].g.node) == 2
# assert optimized_model.graph.node[4].attribute[0].g.node[0].op_type == 'Squeeze'
# assert optimized_model.graph.node[4].attribute[0].g.node[1].op_type == 'Conv'
# # Output 1 since 0 is 'cond'
# assert optimized_model.graph.node[4].attribute[0].g.output[1].name == 'B2'
# type: () -> None
def test_fuse_add_bias_into_conv_use_weight_shape_with_tile(self):
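        # A single-element bias is broadcast up to one value per output channel via a Constant repeats tensor and a Tile before being fused as the Conv bias.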
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 3
assert len(optimized_model.graph.value_info) == 1
assert optimized_model.graph.value_info[0].type.tensor_type.elem_type == TensorProto.INT64
assert len(
optimized_model.graph.value_info[0].type.tensor_type.shape.dim) == 1
assert optimized_model.graph.node[0].op_type == 'Constant'
assert optimized_model.graph.node[1].op_type == 'Tile'
assert optimized_model.graph.node[2].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'B'
def test_fuse_add_bias_into_conv_use_conv_shape(self): # type: () -> None
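        # The weight Y is produced at runtime by Sub, so the pass relies on the recorded value_info of the Conv output Z to validate the bias shape before squeezing it.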
sub = helper.make_node("Sub", ["M", "N"], ["Y"])
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[sub, conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"M", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info(
"N", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
value_info=[
helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))
],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(optimized_model.graph.node) == 3
assert optimized_model.graph.node[0].op_type == 'Sub'
assert optimized_model.graph.node[1].op_type == 'Squeeze'
assert optimized_model.graph.node[2].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'B'
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert len(
optimized_model.graph.output[0].type.tensor_type.shape.dim) == 4
# type: () -> None
def test_fuse_add_bias_into_conv_use_move_constant(self):
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
constant = helper.make_node("Constant", [], ["A"],
value=helper.make_tensor(
name="bias",
data_type=TensorProto.FLOAT,
dims=(16, 1, 1),
vals=np.random.randn(16).astype(np.float32).tolist()))
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, constant, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
value_info=[
helper.make_tensor_value_info(
"A", TensorProto.FLOAT, (16, 1, 1)),
]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(optimized_model.graph.node) == 3
assert optimized_model.graph.node[0].op_type == 'Constant'
assert optimized_model.graph.node[1].op_type == 'Squeeze'
assert optimized_model.graph.node[2].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'B'
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert len(
optimized_model.graph.output[0].type.tensor_type.shape.dim) == 4
# type: () -> None
def test_fuse_add_bias_into_conv_squeeze_1d_bias_no_fuse(self):
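        # A (3,)-shaped addend broadcasts along the width axis rather than per output channel, so it must not be folded into the Conv bias.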
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (3,))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 3))],
value_info=[
helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1)),
]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Conv'
assert optimized_model.graph.node[1].op_type == 'Add'
# type: () -> None
def test_fuse_add_bias_into_conv_squeeze_3d_bias_no_fuse(self):
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 3, 3))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 3, 3))],
value_info=[
helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1)),
]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Conv'
assert optimized_model.graph.node[1].op_type == 'Add'
# type: () -> None
def test_fuse_add_bias_into_conv_squeeze_4d_bias_no_fuse(self):
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 16, 3, 3))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 3, 3))]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Conv'
assert optimized_model.graph.node[1].op_type == 'Add'
def test_fuse_matmul_add_bias_into_gemm(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (16,))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Gemm"
def test_fuse_matmul_add_bias_into_gemm_2d_bias(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Gemm"
# type: () -> None
def test_fuse_matmul_add_bias_into_gemm_2d_bias_same_shape(self):
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (32, 16))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Gemm"
# type: () -> None
def test_fuse_matmul_add_bias_into_gemm_2d_bias_bcast_no_fuse(self):
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (16, 16))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 16))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert optimized_model.graph == graph
# type: () -> None
def test_fuse_matmul_add_bias_into_gemm_3d_matmul_no_fuse(self):
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 4, 3)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (3, 3))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3, 3))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert optimized_model.graph == graph
# type: () -> None
def test_fuse_matmul_add_bias_into_gemm_3d_bias_no_fuse(self):
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (4, 1, 16))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (32, 16))]
)
# 3d bias for 2d matmul is not legal. So disable onnxruntime checking
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"], compare_result=False)
assert optimized_model.graph == graph
# type: () -> None
def test_fuse_matmul_add_bias_into_gemm_multiple_use_no_fuse(self):
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
identity = helper.make_node("Identity", ["Z"], ["A1"])
add = helper.make_node("Add", ["Z", "B"], ["A2"])
graph = helper.make_graph(
[matmul, add, identity],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16))],
[helper.make_tensor_value_info("A1", TensorProto.FLOAT, (32, 16)),
helper.make_tensor_value_info("A2", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert optimized_model.graph == graph
# type: () -> None
def test_fuse_pad_into_conv_no_optional_value_opset10(self):
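        # Under opset 10 the Pad node carries its pads as an attribute; the spatial entries should end up in the Conv 'pads' attribute as [0, 0, 1, 1].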
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 0, 0, 0, 0, 0, 1, 1]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
0, 0, 1, 1]
def test_fuse_pad_into_conv_no_optional_value(self): # type: () -> None
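        # From opset 11 onward Pad takes its pads as a second input; supplying them as an initializer lets the pass read the values and fold them into Conv.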
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
0, 0, 1, 1]
def test_fuse_pad_into_conv_with_optional_value(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads", "Constant_value"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info(
"Constant_value", TensorProto.FLOAT, ()),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(
np.int64).tobytes(),
raw=True),
helper.make_tensor("Constant_value", TensorProto.FLOAT,
dims=(),
vals=np.array([0]).astype(np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
0, 0, 1, 1]
# type: () -> None
def test_fuse_pad_into_conv_with_nonzero_optional_value(self):
pad = helper.make_node(
"Pad",
["X", "Pads", "Constant_value"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info(
"Constant_value", TensorProto.FLOAT, ()),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(
np.int64).tobytes(),
raw=True),
helper.make_tensor("Constant_value", TensorProto.FLOAT,
dims=(),
# non-zero Constant_value -> so no pad
vals=np.array([25]).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_1d_opset10(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 0, 1, 0, 0, 1]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 30)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 32))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [1, 1]
def test_fuse_pad_into_conv_1d(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 30)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (6,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 32))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(6,),
vals=np.array([0, 0, 1, 0, 0, 1]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [1, 1]
# type: () -> None
def test_fuse_pad_into_conv_existing_conv_pad_opset10(self):
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 0, 0, 0, 0, 0, 1, 1]
)
conv = helper.make_node(
"Conv",
["P", "Y"],
["Z"],
pads=[1, 1, 0, 0]
)
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 4, 4))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
1, 1, 1, 1]
def test_fuse_pad_into_conv_existing_conv_pad(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node(
"Conv",
["P", "Y"],
["Z"],
pads=[1, 1, 0, 0]
)
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 4, 4))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
1, 1, 1, 1]
# type: () -> None
def test_fuse_pad_into_conv_pad_feature_no_fuse_opset10(self):
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 1, 0, 0, 0, 0, 0, 0]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 4, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_pad_feature_no_fuse(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 4, 3, 3)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 1, 0, 0, 0, 0, 0, 0]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert optimized_model.graph == graph
# type: () -> None
def test_fuse_pad_into_conv_negative_pad_no_fuse_opset10(self):
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 0, 0, 0, 0, 0, -1, -1]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 4, 4)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_negative_pad_no_fuse(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 4, 4)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array(
[0, 0, 0, 0, 0, 0, -1, -1]).astype(np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert optimized_model.graph == graph
# type: () -> None
def test_fuse_pad_into_conv_reflection_pad_no_fuse_opset10(self):
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="reflect",
pads=[0, 0, 0, 0, 0, 0, 1, 1]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert optimized_model.graph == graph
# type: () -> None
def test_fuse_pad_into_conv_reflection_pad_no_fuse(self):
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="reflect"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert optimized_model.graph == graph
def test_fuse_consecutive_squeezes(self): # type: () -> None
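        # Consecutive Squeezes collapse into one; the merged axes are re-expressed against the original input shape, as checked against the initializer below.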
nodes = [helper.make_node("Squeeze", ["X", "X_axes"], ["Y"]),
helper.make_node("Squeeze", ["Y", "Y_axes"], ["Z"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Squeeze", ["_X", "X_axes"], ["_Y"]),
helper.make_node("Squeeze", ["_Y", "Y_axes"], ["_Z2"])],
[(TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9), "X")],
[(TensorProto.FLOAT, (2, 3, 1, 8, 9), "Z2")]))
initializers = [
helper.make_tensor(name, TensorProto.INT64,
npa.shape, npa.tobytes(), raw=True)
for name, npa in [('X_axes', np.array([0, 4, 5], dtype=np.int64)),
('Y_axes', np.array([0, 3], dtype=np.int64))]
]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9)),
helper.make_tensor_value_info("X_axes", TensorProto.INT64, [3]),
helper.make_tensor_value_info("Y_axes", TensorProto.INT64, [2])],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (2, 3, 1, 8, 9))],
initializer=initializers)
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
# Squeeze, Constant (trip count), Constant (cond), Loop
assert optimized_model.graph.node[0].op_type == "Squeeze"
for init in optimized_model.graph.initializer:
if init.name == optimized_model.graph.node[0].input[1]:
assert list(to_array(init)) == [0, 1, 4, 5, 6]
assert len(list(optimized_model.graph.node)) == 4
def test_fuse_consecutive_squeezes_default(self): # type: () -> None
squeeze1 = helper.make_node("Squeeze", ["X", "X_axes"], ["Y"])
squeeze2 = helper.make_node("Squeeze", ["Y", "Y_axes"], ["Z"])
squeeze3 = helper.make_node("Squeeze", ["Z", "Z_axes"], ["A"])
nodes = [squeeze1, squeeze2, squeeze3]
initializers = [
helper.make_tensor(name, TensorProto.INT64,
npa.shape, npa.tobytes(), raw=True)
for name, npa in [('X_axes', np.array([0, 4, 5], dtype=np.int64)),
('Y_axes', np.array([0, 3], dtype=np.int64)),
('Z_axes', np.array([2], dtype=np.int64))]
]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9)),
helper.make_tensor_value_info("X_axes", TensorProto.INT64, [3]),
helper.make_tensor_value_info("Y_axes", TensorProto.INT64, [2]),
helper.make_tensor_value_info("Z_axes", TensorProto.INT64, [1])],
[helper.make_tensor_value_info(
"A", TensorProto.FLOAT, (2, 3, 8, 9))],
initializer=initializers)
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
assert optimized_model.graph.node[0].op_type == "Squeeze"
for init in optimized_model.graph.initializer:
if init.name == optimized_model.graph.node[0].input[1]:
assert list(to_array(init)) == [0, 1, 4, 5, 6, 7]
assert len(list(optimized_model.graph.node)) == 1
def test_fuse_consecutive_squeezes_random(self): # type: () -> None
x_shape = [1, 1, 1, 3, 4, 1, 6, 1, 1, 9]
s1_one_indices = [i for i, a in enumerate(x_shape) if a == 1]
s1_axes = np.random.choice(s1_one_indices,
size=np.random.randint(
low=1, high=len(s1_one_indices) - 1),
replace=False).astype(np.int64)
s2_x_shape = [a for i, a in enumerate(x_shape) if i not in s1_axes]
s2_one_indices = [i for i, a in enumerate(s2_x_shape) if a == 1]
s2_axes = np.array(s2_one_indices).astype(np.int64)
squeeze1 = helper.make_node("Squeeze", ["X", "X_axes"], ["Y"])
squeeze2 = helper.make_node("Squeeze", ["Y", "Y_axes"], ["Z"])
initializers = [
helper.make_tensor(name, TensorProto.INT64,
npa.shape, npa.tobytes(), raw=True)
for name, npa in [('X_axes', s1_axes),
('Y_axes', s2_axes)]
]
nodes = [squeeze1, squeeze2]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, x_shape),
helper.make_tensor_value_info(
"X_axes", TensorProto.INT64, s1_axes.shape),
helper.make_tensor_value_info("Y_axes", TensorProto.INT64, s2_axes.shape)],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (3, 4, 6, 9))],
initializer=initializers
)
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
assert optimized_model.graph.node[0].op_type == "Squeeze"
for init in optimized_model.graph.initializer:
if init.name == optimized_model.graph.node[0].input[1]:
assert list(to_array(init)) == [0, 1, 2, 5, 7, 8]
assert len(list(optimized_model.graph.node)) == 1
def test_fuse_consecutive_squeezes_multi_uses(self): # type: () -> None
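        # Y is also consumed by the Add node, so the first Squeeze must survive; only the second one is redirected to take X with the merged axes.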
squeeze1 = helper.make_node("Squeeze", ["X", "X_axes"], ["Y"])
add = helper.make_node("Add", ["Y", "A"], ["Z2"])
squeeze2 = helper.make_node("Squeeze", ["Y", "Y_axes"], ["Z"])
initializers = [
helper.make_tensor(name, TensorProto.INT64,
npa.shape, npa.tobytes(), raw=True)
for name, npa in [('X_axes', np.array([0, 4, 5], dtype=np.int64)),
('Y_axes', np.array([0, 3], dtype=np.int64)), ]
]
graph = helper.make_graph(
[squeeze1, add, squeeze2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X_axes", TensorProto.INT64, [3]),
helper.make_tensor_value_info("Y_axes", TensorProto.INT64, [2]),
],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (2, 3, 1, 8, 9)),
helper.make_tensor_value_info("Z2", TensorProto.FLOAT, (1, 2, 3, 1, 1, 8, 9))],
initializer=initializers
)
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
assert optimized_model.graph.node[0].op_type == "Squeeze"
assert optimized_model.graph.node[2].op_type == "Squeeze"
assert optimized_model.graph.node[2].input[0] == "X"
assert len(list(optimized_model.graph.node)) == 3
for init in optimized_model.graph.initializer:
if init.name == optimized_model.graph.node[0].input[1]:
assert list(to_array(init)) == [
0, 4, 5]
if init.name == optimized_model.graph.node[2].input[1]:
assert list(to_array(init)) == [
0, 1, 4, 5, 6]
def test_fuse_consecutive_softmax_log_axis(self): # type: () -> None
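        # Softmax followed by Log fuses into a single LogSoftmax that keeps the original softmax axis.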
for axis in range(3):
softmax = helper.make_node("Softmax", ["X"], ["Y"], axis=axis)
log = helper.make_node("Log", ["Y"], ["Z"])
graph = helper.make_graph(
[softmax, log],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_log_softmax"])
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == "LogSoftmax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == axis
def test_fuse_consecutive_softmax_log_side_effect(self): # type: () -> None
softmax = helper.make_node("Softmax", ["X"], ["Y"], axis=2)
log = helper.make_node("Log", ["Y"], ["Z"])
graph = helper.make_graph(
[softmax, log],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_log_softmax"])
assert graph == optimized_model.graph
# type: () -> None
def test_fuse_consecutive_softmax_log_multiple_out(self):
softmax = helper.make_node("Softmax", ["X"], ["Y"], axis=2)
log = helper.make_node("Log", ["Y"], ["Z"])
exp = helper.make_node("Exp", ["Z"], ["Z1"])
graph = helper.make_graph(
[softmax, log, exp],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11)),
helper.make_tensor_value_info("Z1", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_log_softmax"])
assert len(optimized_model.graph.output) == 2
assert len(optimized_model.graph.node) == 2
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.output[1].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.node[0].op_type == "LogSoftmax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == 2
assert optimized_model.graph.node[1].op_type == "Exp"
def test_preserve_value_info(self): # type: () -> None
trans1 = helper.make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])
trans2 = helper.make_node("Transpose", ["Y"], ["Z"], perm=[2, 0, 1])
trans3 = helper.make_node("Transpose", ["Z"], ["A"], perm=[2, 0, 1])
graph = helper.make_graph(
[trans1, trans2, trans3],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 4, 3))])
vi = helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2, 4))
graph.value_info.extend([vi])
optimized_model = self._optimized(graph, ["nop"])
assert list(optimized_model.graph.value_info) == [vi]
assert len(list(optimized_model.graph.node)) == 3
def test_split(self): # type: () -> None
node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['X'],
value=onnx.helper.make_tensor(
name='X',
data_type=TensorProto.FLOAT,
dims=[1],
vals=[5],
),
)
graph = helper.make_graph(
[node],
'test-optimize-split',
[],
[helper.make_tensor_value_info('X', TensorProto.FLOAT, (1,))])
init_model = self._optimized(graph, ['split_init'])
self.assertEqual(len(init_model.graph.node), 1)
self.assertEqual(len(init_model.graph.output), 1)
self.assertEqual(init_model.graph.node[0].op_type, 'Constant')
predict_model = self._optimized(graph, ['split_predict'])
self.assertEqual(len(predict_model.graph.node), 0)
self.assertEqual(len(predict_model.graph.input), 1)
self.assertEqual(predict_model.graph.input[0].name, 'X')
def test_lift_lex_loop(self): # type: () -> None
nodes = [helper.make_node("Identity", ["X"], ["Y"])]
# 'lift_lexical_references' is legacy code and I don't know how it works.
        # More errors occur if I make this loop op legal.
# So don't check legality here
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Identity", ["X"], ["_Y2"]),
helper.make_node("Identity", ["Y"], ["_Y3"])],
[],
[(TensorProto.FLOAT, (5,), "Y2"),
(TensorProto.FLOAT, (5,), "Y3")],
check_legality=False))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
# "lift_lexical_references" pass produces a graph that does not conform to
# the ONNX spec. Disable checking.
optimized_model = self._optimized(
graph, ["lift_lexical_references"], compare_result=False)
assert len(optimized_model.graph.node) == 4
# body_graph, __control_inputs
assert len(optimized_model.graph.node[3].attribute) == 2
assert optimized_model.graph.node[3].attribute[1].name == "__control_inputs"
assert optimized_model.graph.node[3].attribute[1].strings[0] == b"X"
assert optimized_model.graph.node[3].attribute[1].strings[1] == b"Y"
def test_lift_lex_if(self): # type: () -> None
nodes = [helper.make_node("Identity", ["X"], ["Y"])]
nodes.extend(self._make_fake_if_op(
[helper.make_node("Identity", ["X"], ["_Y2"]),
helper.make_node("Identity", ["Y"], ["_Y3"])],
[helper.make_node("Identity", ["X"], ["_Y2"]),
helper.make_node("Identity", ["X"], ["_Y3"])],
[(TensorProto.FLOAT, (5,), "Y2"),
(TensorProto.FLOAT, (5,), "Y3")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
# "If" node now diverges from ONNX schema. Disable checking.
optimized_model = self._optimized(
graph, ["lift_lexical_references"], compare_result=False)
# Identity, Constant (condition), If
assert len(optimized_model.graph.node) == 3
# else_branch, then_branch, __control_inputs
assert len(optimized_model.graph.node[2].attribute) == 3
assert optimized_model.graph.node[2].attribute[2].name == "__control_inputs"
assert optimized_model.graph.node[2].attribute[2].strings[0] == b"X"
assert optimized_model.graph.node[2].attribute[2].strings[1] == b"Y"
def test_fuse_bn_into_conv_simple(self): # type: () -> None
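        # Folding BatchNormalization into Conv multiplies W per channel by f = scale / sqrt(var + eps) and maps the bias to (B - mean) * f + b; the numpy check below recomputes both.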
for (tensor_type, np_type) in [(TensorProto.FLOAT, np.float32)]:
conv = helper.make_node("Conv", ["X", "W", "B"], ["Y"])
bn = helper.make_node("BatchNormalization", [
"Y", "scale", "b", "mean", "var"], ["Z"])
W = np.random.randn(3, 2, 5, 5).astype(np_type) + 2
B = np.random.randn(3,).astype(np_type) + 2
scale = np.random.randn(3,).astype(np_type) + 2
b = np.random.randn(3,).astype(np_type) + 2
mean = np.random.randn(3,).astype(np_type) + 2
var = np.abs(np.random.randn(3,).astype(np_type)) + 2
initializers = [
helper.make_tensor(name, tensor_type,
npa.shape, npa.tobytes(), raw=True)
for name, npa in [('W', W), ('B', B), ('scale', scale), ('b', b), ('mean', mean), ('var', var)]
]
graph = helper.make_graph(
[conv, bn],
"test",
[helper.make_tensor_value_info("X", tensor_type, (5, 2, 28, 28))],
[helper.make_tensor_value_info(
"Z", tensor_type, (5, 3, 24, 24))],
initializer=initializers,
value_info=[
helper.make_tensor_value_info(
"Y", tensor_type, (5, 3, 24, 24))
]
)
optimized_model = self._optimized(graph, ["fuse_bn_into_conv"])
self.assertEqual(len(optimized_model.graph.node), 1)
self.assertEqual(optimized_model.graph.node[0].op_type, 'Conv')
self.assertEqual(len(optimized_model.graph.initializer), 2)
new_W = numpy_helper.to_array(optimized_model.graph.initializer[0])
new_b = numpy_helper.to_array(optimized_model.graph.initializer[1])
f = scale / np.sqrt(var + 1e-5)
np.testing.assert_almost_equal((B - mean) * f + b, new_b)
np.testing.assert_almost_equal(
W * f[:, np.newaxis, np.newaxis, np.newaxis], new_W)
def _internal_test_deadend_elimination(self, fixed): # type: (bool) -> None
softmax = helper.make_node("Softmax", ["X"], ["Y"], axis=2)
log = helper.make_node("Log", ["Y"], ["Z"])
exp = helper.make_node("Exp", ["Z"], ["Z1"])
exp1 = helper.make_node("Log", ["Z"], ["Z2"])
exp2 = helper.make_node("Sqrt", ["Z1"], ["Z3"])
graph = helper.make_graph(
[softmax, log, exp, exp1, exp2],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["eliminate_deadend"], fixed)
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 2
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.node[0].op_type == "Softmax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == 2
assert optimized_model.graph.node[1].op_type == "Log"
def test_deadend_elimination_simple(self): # type: () -> None
self._internal_test_deadend_elimination(False)
def test_deadend_elimination_simple_fixed(self): # type: () -> None
self._internal_test_deadend_elimination(True)
def _get_argmax_output_shape(self, input_shape, axis, keepdims):
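        # With keepdims set, ArgMax keeps the reduced axis as a dimension of size 1.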
assert keepdims
output_shape = list(input_shape[:])
output_shape[axis] = 1
output_shape = tuple(output_shape)
return output_shape
# type: () -> None
def test_eliminate_nop_monotone_argmax_basic_no_node_axis(self):
input_shape = (5, 7, 11)
for node_name in ["Exp"]:
for axis in range(3):
node = helper.make_node(node_name, ["X"], ["Y"])
argmax = helper.make_node("ArgMax", ["Y"], ["Z"], axis=axis)
output_shape = self._get_argmax_output_shape(
input_shape, axis, True)
graph = helper.make_graph(
[node, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, output_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"])
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.INT64
assert optimized_model.graph.node[0].op_type == "ArgMax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == axis
# type: () -> None
def test_eliminate_nop_monotone_argmax_basic_with_node_axis(self):
input_shape = (5, 7, 11)
for node_name in ["Softmax", "LogSoftmax"]:
for axis_n in range(3):
for axis_max in range(3):
node = helper.make_node(
node_name, ["X"], ["Y"], axis=axis_n)
argmax = helper.make_node(
"ArgMax", ["Y"], ["Z"], axis=axis_max)
output_shape = self._get_argmax_output_shape(
input_shape, axis_max, True)
graph = helper.make_graph(
[node, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, output_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"])
if axis_max == axis_n:
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.INT64
assert optimized_model.graph.node[0].op_type == "ArgMax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == axis_max
else:
assert optimized_model.graph == graph
# type: () -> None
def test_eliminate_nop_monotone_argmax_multiple_out(self):
input_shape = (5, 7, 11)
for node_name in ["Exp"]:
for axis in range(3):
node = helper.make_node(node_name, ["X"], ["Y"])
node2 = helper.make_node(node_name, ["Y"], ["Z1"])
argmax = helper.make_node("ArgMax", ["Y"], ["Z"], axis=axis)
argmax_output_shape = self._get_argmax_output_shape(
input_shape, axis, True)
graph = helper.make_graph(
[node, node2, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, argmax_output_shape),
helper.make_tensor_value_info("Z1", TensorProto.FLOAT, input_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"])
assert optimized_model.graph == graph
# type: () -> None
def test_eliminate_nop_monotone_argmax_consecutive(self):
        input_shape = (5, 7, 11)
        # type: (GraphProto, ModelProto, bool, int) -> None
        def _assertion(graph, optimized_model, axis_aligned, true_axis):
if axis_aligned:
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.INT64
assert optimized_model.graph.node[0].op_type == "ArgMax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == true_axis
else:
assert optimized_model.graph == graph
# no axis X no axis test
for node_name_0 in ["Exp"]:
for node_name_1 in ["Exp"]:
for axis in range(3):
node = helper.make_node(node_name_0, ["X"], ["Y"])
node2 = helper.make_node(node_name_1, ["Y"], ["Y1"])
argmax = helper.make_node(
"ArgMax", ["Y1"], ["Z"], axis=axis)
output_shape = self._get_argmax_output_shape(
input_shape, axis, True)
graph = helper.make_graph(
[node, node2, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, output_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"], True)
_assertion(graph, optimized_model, True, axis)
# no axis X axis test
for node_name_0 in ["Exp"]:
for node_name_1 in ["Softmax", "LogSoftmax"]:
for axis_0 in range(3):
for axis_1 in range(3):
node = helper.make_node(node_name_0, ["X"], ["Y"])
node2 = helper.make_node(
node_name_1, ["Y"], ["Y1"], axis=axis_0)
argmax = helper.make_node(
"ArgMax", ["Y1"], ["Z"], axis=axis_1)
output_shape = self._get_argmax_output_shape(
input_shape, axis_1, True)
graph = helper.make_graph(
[node, node2, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.INT64, output_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"], True)
_assertion(graph, optimized_model,
axis_0 == axis_1, axis_1)
# axis X axis test
for node_name_0 in ["Softmax", "LogSoftmax"]:
for node_name_1 in ["Softmax", "LogSoftmax"]:
for axis_0 in range(3):
for axis_1 in range(3):
for axis_2 in range(3):
node = helper.make_node(
node_name_0, ["X"], ["Y"], axis=axis_0)
node2 = helper.make_node(
node_name_1, ["Y"], ["Y1"], axis=axis_1)
argmax = helper.make_node(
"ArgMax", ["Y1"], ["Z"], axis=axis_2)
output_shape = self._get_argmax_output_shape(
input_shape, axis_2, True)
graph = helper.make_graph(
[node, node2, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, output_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"], True)
if axis_0 == axis_1: # we can reduce both of the monotonic ops
_assertion(graph, optimized_model,
axis_1 == axis_2, axis_2)
elif axis_1 == axis_2: # we can reduce one of the monotonic ops
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 2
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.INT64
assert optimized_model.graph.node[-1].op_type == "ArgMax"
assert optimized_model.graph.node[-1].attribute[0].name == "axis"
assert optimized_model.graph.node[-1].attribute[0].i == axis_2
else: # we can't reduce anything
assert optimized_model.graph == graph
def test_eliminate_nop_dropout(self): # type: () -> None
node = helper.make_node("Dropout", ["X"], ["Y"])
node1 = helper.make_node("Log", ["Y"], ["Z"])
graph = helper.make_graph(
[node, node1],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7))])
optimized_model = self._optimized(
graph, ["eliminate_nop_dropout"], False)
        # we don't want to eliminate the dropout in opset 12,
        # even when it's an optional parameter (defaults to 0)
assert optimized_model.graph == graph
# type: () -> None
def test_eliminate_nop_dropout_opset11_graph_output(self):
node = helper.make_node("Log", ["X"], ["Y"])
node1 = helper.make_node("Dropout", ["Y"], ["Z"], ratio=0.0)
graph = helper.make_graph(
[node, node1],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7))])
optimized_model = self._optimized(
graph, ["eliminate_nop_dropout"], False, opset_imports=[helper.make_opsetid("", 11)])
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == "Log"
assert optimized_model.graph.output[0].name == 'Z'
def test_eliminate_nop_dropout_opset11(self): # type: () -> None
for ratio in [0.0, 0.5]:
node = helper.make_node("Dropout", ["X"], ["Y"], ratio=ratio)
node1 = helper.make_node("Log", ["Y"], ["Z"])
graph = helper.make_graph(
[node, node1],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7))])
optimized_model = self._optimized(
graph, ["eliminate_nop_dropout"], False, opset_imports=[helper.make_opsetid("", 11)])
if ratio > 0.0:
assert optimized_model.graph == graph
else:
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == "Log"
def test_fuse_reduction_unsqueeze(self): # type: () -> None
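        # Reduce with keepdims=0 followed by an Unsqueeze on the same axes is equivalent to the Reduce with keepdims=1, so the pair is fused only when keepdim is False and the axes match.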
# type: (Tuple[int, ...], List[int], List[int], bool) -> Tuple[int, ...]
def _calculate_post_transform_shape(input_shape, reduction_axes, unsqueeze_axes, keepdim):
post_reduce_shape = None
if keepdim:
post_reduce_shape = tuple(
[(x if i not in reduction_axes else 1) for i, x in enumerate(input_shape)])
else:
post_reduce_shape = tuple(
[x for i, x in enumerate(input_shape) if i not in reduction_axes])
post_unsqueeze_shape = list(post_reduce_shape)
for ax in unsqueeze_axes:
post_unsqueeze_shape.insert(ax, 1)
return tuple(post_unsqueeze_shape)
for reduction in ["ReduceL1", "ReduceL2", "ReduceLogSum",
"ReduceLogSumExp", "ReduceMax", "ReduceMean",
"ReduceMin", "ReduceProd", "ReduceSum", "ReduceSumSquare"]:
for axes1 in [[1], [1, 2], [2]]:
for axes2 in [[0], [0, 1], [1]]:
for keepdim in [False, True]:
input_shape = (5, 7, 9)
output_shape = _calculate_post_transform_shape(
input_shape, axes1, axes2, keepdim) # type: Tuple[int, ...]
axes2_arr = np.array(axes2, dtype=np.int64)
graph_input = [helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape),
helper.make_tensor_value_info("Y_axes", TensorProto.INT64, axes2_arr.shape)]
graph_initializer = [
helper.make_tensor("Y_axes", TensorProto.INT64,
axes2_arr.shape, axes2_arr.tobytes(), raw=True)
]
if reduction in ("ReduceSum"):
axes1_arr = np.array(axes1, dtype=np.int64)
node = helper.make_node(
reduction, ["X", "X_axes"], ["Y"], keepdims=keepdim)
graph_input.append(
helper.make_tensor_value_info("X_axes", TensorProto.INT64, axes1_arr.shape))
graph_initializer.append(helper.make_tensor("X_axes", TensorProto.INT64,
axes1_arr.shape, axes1_arr.tobytes(), raw=True))
else:
node = helper.make_node(
reduction, ["X"], ["Y"], axes=axes1, keepdims=keepdim)
node1 = helper.make_node(
"Unsqueeze", ["Y", "Y_axes"], ["Z"])
graph = helper.make_graph(
[node, node1],
"test",
graph_input,
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, output_shape)],
initializer=graph_initializer
)
optimized_model = self._optimized(
graph, ["fuse_consecutive_reduce_unsqueeze"], False)
if keepdim or axes1 != axes2:
assert optimized_model.graph == graph
else:
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.node[-1].op_type == reduction
if reduction in ("ReduceSum"):
for init in optimized_model.graph.initializer:
if init.name == optimized_model.graph.node[-1].input[1]:
assert list(to_array(init)) == axes1
else:
assert optimized_model.graph.node[-1].attribute[0].name == "axes"
assert optimized_model.graph.node[-1].attribute[0].ints == axes1
optimized_output_shape = tuple(
x.dim_value for x in optimized_model.graph.output[0].type.tensor_type.shape.dim)
assert optimized_output_shape == output_shape
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_fasterrcnn_fpn(self): # type: () -> None
model = tv.models.detection.fasterrcnn_resnet50_fpn(pretrained=False)
x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
with io.BytesIO() as f:
torch.onnx.export(model, x, f, opset_version=11)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
# maskrcnn is only supported in opset 11 and higher
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_maskrcnn_fpn_opset11(self): # type: () -> None
model = tv.models.detection.maskrcnn_resnet50_fpn(pretrained=False)
x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
with io.BytesIO() as f:
torch.onnx.export(model, x, f, opset_version=11)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
# keypointrcnn is only supported in opset 11 and higher
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_keypointrcnn_fpn(self): # type: () -> None
model = tv.models.detection.keypointrcnn_resnet50_fpn(pretrained=False)
x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
with io.BytesIO() as f:
torch.onnx.export(model, x, f, opset_version=11)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_shufflenet_v2(self): # type: () -> None
model = tv.models.shufflenet_v2_x1_0(pretrained=False)
x = torch.rand(1, 3, 224, 224)
with io.BytesIO() as f:
torch.onnx.export(model, x, f)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_mnasnet(self): # type: () -> None
model = tv.models.mnasnet1_0(pretrained=False)
x = torch.rand(1, 3, 224, 224)
with io.BytesIO() as f:
torch.onnx.export(model, x, f)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_deeplabv3(self): # type: () -> None
model = tv.models.segmentation.deeplabv3_resnet50(pretrained=False)
x = torch.rand(1, 3, 224, 224)
with io.BytesIO() as f:
torch.onnx.export(model, x, f)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3207575 | <reponame>StevenCollins/pro-ve-pro<gh_stars>0
#!/usr/bin/env python3
# To run this you probably need to:
# pip install pyvesync
# pip install python-dotenv
import os
import json
from http.server import BaseHTTPRequestHandler, HTTPServer
from pyvesync import VeSync
from dotenv import load_dotenv
load_dotenv()
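# load_dotenv() pulls VESYNC_USERNAME and VESYNC_PASSWORD from a local .env file into the environment for the os.getenv calls below.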
# Setup VeSync, login, and get initial device info
vesync = VeSync(os.getenv("VESYNC_USERNAME"), os.getenv("VESYNC_PASSWORD"))
vesync.login()
vesync.update()
humidifier = json.loads(vesync.fans[0].displayJSON())
# Setup server response
class MyServer(BaseHTTPRequestHandler):
def do_GET(self):
if (self.path == "/metrics"):
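            # Refresh the device state, then emit the Prometheus text exposition format: a HELP line, a TYPE line, and a sample labelled with the device CID for each metric.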
vesync.update()
humidifier = json.loads(vesync.fans[0].displayJSON())
cid = humidifier["CID"]
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(bytes("# HELP vesync_humidity_ratio The current humidity.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_humidity_ratio gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_humidity_ratio{{CID=\"{cid}\"}} {int(humidifier['Humidity']) / 100}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_target_humidity_ratio The target humidity.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_target_humidity_ratio gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_target_humidity_ratio{{CID=\"{cid}\"}} {int(humidifier['Auto Target Humidity']) / 100}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_mist_level The current mist level.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_mist_level gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_mist_level{{CID=\"{cid}\"}} {humidifier['Mist Level']}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_mist_virtual_level The current mist virtual level.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_mist_virtual_level gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_mist_virtual_level{{CID=\"{cid}\"}} {humidifier['Mist Virtual Level']}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_night_light_brightness The night light brightness.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_night_light_brightness gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_night_light_brightness{{CID=\"{cid}\"}} {humidifier['Night Light Brightness']}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_status Device is on.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_status gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_status{{CID=\"{cid}\"}} {1 if humidifier['Status'] == 'on' else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_online Device is online.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_online gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_online{{CID=\"{cid}\"}} {1 if humidifier['Online'] == 'online' else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_mode_auto Auto mode enabled.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_mode_auto gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_mode_auto{{CID=\"{cid}\"}} {1 if humidifier['Mode'] == 'auto' else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_water_lacks Water level low.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_water_lacks gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_water_lacks{{CID=\"{cid}\"}} {1 if humidifier['Water Lacks'] == True else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_humidity_high Humidity too high.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_humidity_high gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_humidity_high{{CID=\"{cid}\"}} {1 if humidifier['Humidity High'] == True else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_water_tank_lifted Water tank missing.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_water_tank_lifted gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_water_tank_lifted{{CID=\"{cid}\"}} {1 if humidifier['Water Tank Lifted'] == True else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_display_enabled Display is enabled.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_display_enabled gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_display_enabled{{CID=\"{cid}\"}} {1 if humidifier['Display'] == True else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_automatic_stop_reach_target Automatic stop reach target?\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_automatic_stop_reach_target gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_automatic_stop_reach_target{{CID=\"{cid}\"}} {1 if humidifier['Automatic Stop Reach Target'] == True else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_automatic_stop Automatic stop?\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_automatic_stop gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_automatic_stop{{CID=\"{cid}\"}} {1 if humidifier['Automatic Stop'] == True else 0}\n", "utf-8"))
else:
self.send_response(501)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(bytes("501 Not Implemented", "utf-8"))
# Start server
server = HTTPServer((os.getenv("HOSTNAME"), int(os.getenv("PORT"))), MyServer)
print("Server started http://%s:%s" % (os.getenv("HOSTNAME"), os.getenv("PORT")))
try:
server.serve_forever()
except KeyboardInterrupt:
pass
server.server_close()
print("Server stopped.") | StarcoderdataPython |
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import argparse, time, sys, os, cv2
import numpy as np
from dataloader import AlzhDataset
import tensorboard_logger as tb_logger
from PIL import Image
from utils import AverageMeter, accuracy, adjust_learning_rate
from network.resnet import SupConResNet
from network.custom import Custom_CNN, Linear_cls
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, roc_curve, auc, classification_report
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def parse_option():
parser = argparse.ArgumentParser('argument for training')
parser.add_argument('--print_freq', type=int, default=10,
help='print frequency')
parser.add_argument('--batch_size', type=int, default=128,
help='batch_size')
parser.add_argument('--epochs', type=int, default=100,
help='number of training epochs')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--learning_rate', type=float, default=0.001,
help='learning rate')
parser.add_argument('--lr_decay_epochs', type=str, default=[60,80],
help='where to decay lr, can be a list')
parser.add_argument('--lr_decay_rate', type=float, default=0.1,
help='decay rate for learning rate')
parser.add_argument('--weight_decay', type=float, default=1e-4,
help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum')
parser.add_argument('--class_type', type=str, default='AD_CN', choices=['AD_CN', 'MCI_CN', 'AD_MCI', '3class'])
parser.add_argument('--pretrained_model', type=str, default='./save_models/SimCLR_pretrained.pth')
parser.add_argument('--dataset_path', type=str, default='/data/tm/alzh/data_PGGAN')
opt = parser.parse_args()
opt.tb_path = './logs'
opt.model_path = './save_models'
opt.tb_folder = os.path.join(opt.tb_path, time.strftime('%Y%m%d%H%M', time.localtime(time.time())) + '-' + opt.class_type + '-' + str(opt.seed))
opt.model_folder = os.path.join(opt.model_path, time.strftime('%Y%m%d%H%M', time.localtime(time.time())) + '-' + opt.class_type + '-' + str(opt.seed))
if not os.path.isdir(opt.tb_folder):
os.makedirs(opt.tb_folder)
if not os.path.isdir(opt.model_folder):
os.makedirs(opt.model_folder)
return opt
def set_loader(opt):
transform_train = transforms.Compose([
transforms.Resize(224),
# transforms.CenterCrop(224),
transforms.ToTensor(),
])
train_dataset = AlzhDataset(type=opt.class_type, root=os.path.join(opt.dataset_path, 'train'), transform=transform_train)
valid_dataset = AlzhDataset(type=opt.class_type, root=os.path.join(opt.dataset_path, 'validation'), transform=transform_train)
train_loader = torch.utils.data.DataLoader(train_dataset, opt.batch_size, num_workers=0, shuffle=True, drop_last=False)
valid_loader = torch.utils.data.DataLoader(valid_dataset, opt.batch_size, num_workers=0, shuffle=False, drop_last=False)
return train_loader, valid_loader
def set_model(opt):
model = SupConResNet(name='resnet18')
criterion = nn.CrossEntropyLoss()
if torch.cuda.is_available():
model = model.cuda()
return model, criterion
def train(train_loader, model, criterion, optimizer, epoch, opt, model2):
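    # One training epoch: the SupCon encoder (model) extracts features and the
    # linear classifier (model2) maps them to logits trained with cross-entropy.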
model.train()
model2.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
end = time.time()
for idx, (image, labels) in enumerate(train_loader):
data_time.update(time.time() - end)
if torch.cuda.is_available():
image = image.cuda()
labels = labels.cuda()
bsz = labels.shape[0]
logits = model.encoder(image)
logits = model2(logits)
loss = criterion(logits, labels)
losses.update(loss.item(), bsz)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
# print info
if (idx + 1) % opt.print_freq == 0:
print('Train: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})'.format(
epoch, idx + 1, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses))
sys.stdout.flush()
return losses.avg
def validation(val_loader, model, model2):
model.eval()
model2.eval()
top1 = AverageMeter()
with torch.no_grad():
for i, (image, label) in enumerate(val_loader):
if torch.cuda.is_available():
image = image.cuda()
label = label.cuda()
bsz = label.shape[0]
output = model.encoder(image)
output = model2(output, softmax=True)
acc1 = accuracy(output, label)
top1.update(acc1[0].item(), bsz)
print(' * Acc@1 {top1.avg:.3f}'.format(top1=top1))
return top1.avg
def test(opt, model, model2, root, transform=None):
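    # Subject-level evaluation: every slice in a subject directory is scored in
    # one batch, and the subject counts as correct when slice accuracy >= 50%.
    # Labels and scores are also accumulated for precision, recall, F1 and AUC.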
model.eval()
model2.eval()
if opt.class_type == 'AD_CN':
type_list = ['AD_0', 'CN_1']
elif opt.class_type == 'AD_MCI':
type_list = ['AD_0', 'MCI_1']
elif opt.class_type == 'MCI_CN':
type_list = ['MCI_0', 'CN_1']
elif opt.class_type == '3class':
type_list = ['AD_0', 'MCI_1', 'CN_2']
y_true = []
y_pred = []
y_pred_label = []
output_list = []
num_correct = 0
num = 0
with torch.no_grad():
for types in type_list:
correct = 0
total = 0
path = os.path.join(root, types.split('_')[0])
for dirname in os.listdir(path):
new_path = os.path.join(path, dirname)
for i in range(len(os.listdir(new_path))):
img = Image.open(os.path.join(new_path, os.listdir(new_path)[i])).convert('RGB')
if transform is not None:
img = transform(img)
if i == 0:
img_concat = img.unsqueeze(0)
else:
img_concat = torch.cat([img_concat, img.unsqueeze(0)], dim=0)
label = torch.empty(i + 1)
class_type = int(types.split('_')[1])
label.fill_(class_type)
if torch.cuda.is_available():
img_concat = img_concat.cuda()
label = label.cuda()
bsz = label.shape[0]
output = model.encoder(img_concat)
output = model2(output, softmax=True)
acc1 = accuracy(output, label)
if acc1[0].item() >= 50:
correct += 1
total += 1
num_correct += bsz * acc1[0].item() / 100
num += bsz
y_true = y_true + label.cpu().tolist()
y_pred = y_pred + output[:, 0].tolist()
y_pred_label = y_pred_label + torch.argmax(output.cpu(), dim=1).tolist()
output_list.append([types.split('_')[0], total, correct])
precision = precision_score(y_true, y_pred_label, pos_label=0)
recall = recall_score(y_true, y_pred_label, pos_label=0)
f1 = f1_score(y_true, y_pred_label, pos_label=0)
fpr, tpr, thresholds = roc_curve(y_true, y_pred, pos_label=0)
auc_score = auc(fpr, tpr)
    # Keep this order in sync with main(), which unpacks [auc, precision, recall, f1].
    return output_list, 100 * num_correct/num, [auc_score, precision, recall, f1]
def main():
opt = parse_option()
torch.manual_seed(opt.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
train_loader, valid_loader = set_loader(opt)
model, criterion = set_model(opt)
    if opt.pretrained_model != '':
checkpoint = torch.load(opt.pretrained_model)
model.load_state_dict(checkpoint)
if opt.class_type == 'AD_CN' or opt.class_type == 'AD_MCI' or opt.class_type == 'MCI_CN':
model2 = Linear_cls(512, 2)
else:
model2 = Linear_cls(512, 3)
model2 = model2.cuda()
optimizer = torch.optim.SGD(list(model.parameters()) +list(model2.parameters()),
lr=opt.learning_rate,
momentum=opt.momentum,
weight_decay=opt.weight_decay)
logger = tb_logger.Logger(logdir=opt.tb_folder, flush_secs=2)
valid_best = 0
for epoch in range(1, opt.epochs + 1):
lr = adjust_learning_rate(opt, optimizer, epoch)
time1 = time.time()
loss = train(train_loader, model, criterion, optimizer, epoch, opt, model2)
time2 = time.time()
print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))
top1 = validation(valid_loader, model, model2)
output_list, test_acc, metric = test(opt, model, model2, transform=transforms.Compose([transforms.Resize(224), transforms.ToTensor()]), root=os.path.join(opt.dataset_path, 'test'))
if top1 > valid_best:
best_epoch = epoch
valid_best = top1
torch.save(model.state_dict(), os.path.join(opt.model_folder, 'best_model.pth'))
final_test_acc = test_acc
final_test_auc = metric[0]
final_test_precision = metric[1]
final_test_recall = metric[2]
final_test_f1 = metric[3]
print('test acc {:.2f} auc {:.2f} precision {:.2f} recall {:.2f} f1 score {:.2f}'.format(test_acc, metric[0], metric[1], metric[2], metric[3]))
for test_list in output_list:
print('{0}: {1}/{2}'.format(test_list[0], test_list[2], test_list[1]))
logger.log_value(test_list[0], test_list[2]/test_list[1], epoch)
logger.log_value('loss', loss, epoch)
logger.log_value('valid acc', top1, epoch)
logger.log_value('test/acc', test_acc, epoch)
logger.log_value('test/auc', metric[0], epoch)
logger.log_value('test/precision', metric[1], epoch)
logger.log_value('test/recall', metric[2], epoch)
logger.log_value('test/f1 score', metric[3], epoch)
print('Best Epoch {}'.format(best_epoch))
print('Validation acc {:.2f}'.format(valid_best))
print('test acc {:.2f} auc {:.2f} precision {:.2f} recall {:.2f} f1 score {:.2f}'.format(final_test_acc, final_test_auc,
final_test_precision, final_test_recall,
final_test_f1))
if __name__ == '__main__':
main()
"""
File: tools.py
Authors: <NAME> & <NAME>
Copyright (c) 2020 <NAME> & <NAME>
The MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import os
import io
import pygame as pg
import math
import random
import threading
import json
song_names = [
"<NAME> - D a y",
"<NAME> - Evade",
"Unfound - Dawn",
"FRACTAL MAN - Glimpses of Starlight",
"A.L.I.S.O.N - Golden Dust",
"Stratford Ct. - HOME - Still Life",
"Unfound - Intercept",
    # "loosegoose - SPRINGFIELD '96",
"l o o s e g o o s e - S P R I N G F I E L D ' 9 6",
"Color Index - Intervals (Open Spectrum)",
"Nowtro - Still Human (Teaser)",
"Syntax - Stratus (f. HOME)",
"oDDling - Ascend"
]
class SoundManager:
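    """Plays short sound effects with pygame.mixer; the sfx volume is persisted in a JSON file under src/data."""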
def __init__(self, filename):
pg.mixer.init()
self.sounds = []
self.filenames = [filename]
self.setup_data()
self.volume = self.get_volume()
def get_volume(self):
data = self.get_data(self.filenames[0])
if not "sfx_volume" in data:
data['sfx_volume'] = 0.5
self.set_data('prefs.json', data)
return (data['sfx_volume'])
def setup_data(self, force=False):
basedir = os.path.dirname(os.path.join(os.getcwd(), f"src/data/"))
if not os.path.exists(basedir):
os.makedirs(basedir)
for filename in self.filenames:
path = os.path.join(os.getcwd(), f"src/data/{filename}")
exists = os.path.exists(path)
if not exists or force:
                f = open(path, 'w')
f.write('{}')
f.close()
def get_data(self, filename):
try:
with open(os.path.join(os.getcwd(), f"src/data/{filename}"), "r") as dataFile:
return json.load(dataFile)
except (TypeError, json.JSONDecodeError):
self.setup_data(force=True)
return self.get_data(filename)
def set_data(self, filename, object):
try:
with open(os.path.join(os.getcwd(), f"src/data/{filename}"), "w") as dataFile:
json.dump(object, dataFile)
except (TypeError, json.JSONDecodeError):
self.setup_data(force=True)
return self.set_data(filename, object)
def playSound(self, filename, duration, volumeFactor=1.0):
sound = pg.mixer.Sound(file=(os.path.join(
os.getcwd(), f"src/soundeffects/{filename}")))
        volume = max(self.volume * volumeFactor, 0.0)
        volume = min(volume, 1.0)
sound.set_volume(volume)
sound.play(maxtime=duration)
self.sounds.append({"name": filename, "sound": sound})
def stopSound(self, filename):
for soundData in self.sounds:
            if soundData["name"] == filename:
soundData["sound"].stop()
self.sounds.remove(soundData)
class Vector:
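    """A 2D vector described by a magnitude and a direction in radians (expected in [-pi, pi])."""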
def __init__(self, magnitude, direction):
self.magnitude = magnitude
self.direction = direction
def updateVector(self, magnitude, direction):
self.magnitude = magnitude
self.direction = direction
@staticmethod
def getQuadrant(direction):
pi = math.pi
boundaries = [-pi, -pi/2, 0, pi/2, pi]
quadrants = [3, 4, 1, 2]
        for i in range(len(quadrants)):
if direction == boundaries[i]:
return quadrants[i]
elif direction == boundaries[i+1]:
return quadrants[i]
elif direction > boundaries[i] and direction < boundaries[i+1]:
return quadrants[i]
return None
def getTrigRatios(self):
direction = self.getDirection()
quadrant = self.getQuadrant(direction)
if quadrant == 1:
xfactor = math.cos(direction)
yfactor = math.sin(direction)
elif quadrant == 2:
direction -= math.pi/2
xfactor = -math.sin(direction)
yfactor = math.cos(direction)
elif quadrant == 3:
direction = abs(direction)
direction -= math.pi/2
xfactor = -math.sin(direction)
yfactor = -math.cos(direction)
else:
direction = abs(direction)
xfactor = math.cos(direction)
yfactor = -math.sin(direction)
return (xfactor, yfactor)
def getMagnitude(self):
return self.magnitude
def getDirection(self):
return self.direction
def getXComponent(self):
return self.getTrigRatios()[0] * self.getMagnitude()
def getYComponent(self):
return self.getTrigRatios()[1] * self.getMagnitude()
def getComponents(self):
return (self.getXComponent(), self.getYComponent())
@staticmethod
def getReverseDirection(direction):
quadrant = Vector.getQuadrant(direction)
if quadrant == 1 or quadrant == 2:
return -math.pi + direction
else:
return math.pi + direction
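# Example: a magnitude-10 vector at 45 degrees (pi/4) decomposes into roughly
# equal x/y components:
#   v = Vector(10, math.pi / 4)
#   v.getComponents()  # -> (~7.07, ~7.07)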
class Game(object):
def __init__(self, screen, caption, states, start_state):
"""
Initialise the Game object, and save some important variables.
"""
self.done = False
self.screen = screen
self.clock = pg.time.Clock()
self.fps = 60
self.fps_visible = False
self.caption = caption
self.states = states
self.state_name = start_state
self.state = self.states[self.state_name]
self.filenames = ['prefs.json', 'highscore.json']
self.setup_data()
self.music_vol = self.get_music_volume()
self.sfx_vol = self.get_sfx_volume()
self.music_pos = [0, 149, 358, 610, 928,
1197, 1389, 1606, 1775, 1900, 2277, 2488, 2670]
self.music_index = random.randint(0, 11)
self.music_start = self.music_pos[self.music_index]
self.music_end = self.music_pos[self.music_index + 1]
self.music_current_seek = self.music_start
self.game_music("music.ogg")
self.highscore = self.get_highscore()
self.intro_done = self.get_intro_done()
# Initialise first state and call startup!
self.set_state()
self.state.startup({})
# The following functions are for data saving...
def setup_data(self, force=False):
basedir = os.path.dirname(os.path.join(os.getcwd(), f"src/data/"))
if not os.path.exists(basedir):
os.makedirs(basedir)
for filename in self.filenames:
path = os.path.join(os.getcwd(), f"src/data/{filename}")
exists = os.path.exists(path)
if not exists or force:
f = open(path, 'w')
f.write('{}')
f.close()
def get_intro_done(self):
data = self.get_data('prefs.json')
try:
if not "intro_done" in data:
data['intro_done'] = False
self.set_data('prefs.json', data)
return (data['intro_done'])
except TypeError:
self.setup_data(force=True)
return False
def set_intro_done(self, newValue):
data = self.get_data('prefs.json')
data['intro_done'] = newValue
self.set_data('prefs.json', data)
def get_data(self, filename):
try:
with open(os.path.join(os.getcwd(), f"src/data/{filename}"), "r") as dataFile:
return json.load(dataFile)
except (TypeError, json.JSONDecodeError):
self.setup_data(force=True)
return {}
def set_data(self, filename, objectVar):
try:
with open(os.path.join(os.getcwd(), f"src/data/{filename}"), "w") as dataFile:
json.dump(objectVar, dataFile)
except (TypeError, json.JSONDecodeError):
self.setup_data(force=True)
return {}
def game_music(self, filename):
# Game music
pg.mixer.music.load(os.path.join(
os.getcwd(), f"src/soundeffects/{filename}"))
pg.mixer.music.set_volume(self.music_vol)
pg.mixer.music.play(start=self.music_start)
self.set_state()
def pause_music(self, duration):
pg.mixer.music.pause()
threading.Timer(duration, pg.mixer.music.unpause).start()
def get_music_volume(self):
data = self.get_data('prefs.json')
try:
if not "music_volume" in data:
data['music_volume'] = 0.08
self.set_data('prefs.json', data)
return (data['music_volume'])
except TypeError:
self.setup_data(force=True)
def get_highscore(self):
data = self.get_data('highscore.json')
try:
if not "highscore" in data:
data['highscore'] = 0
self.set_data('highscore.json', data)
return (data['highscore'])
except TypeError:
self.setup_data(force=True)
def set_highscore(self, newHighscore):
self.highscore = newHighscore
self.save_highscore()
def save_highscore(self):
data = self.get_data('highscore.json')
data['highscore'] = self.highscore
self.set_data('highscore.json', data)
def set_music_volume(self, newVolume):
self.music_vol = newVolume
pg.mixer.music.set_volume(self.music_vol)
def get_sfx_volume(self):
data = self.get_data('prefs.json')
try:
if not "sfx_volume" in data:
data['sfx_volume'] = 0.2
self.set_data('prefs.json', data)
return (data['sfx_volume'])
except TypeError:
self.setup_data(force=True)
def set_sfx_volume(self, newVolume):
self.sfx_vol = newVolume
def get_min_and_secs(self, time_s):
mins = time_s // 60
secs = time_s % 60
return (mins, secs)
def save_music_volume(self):
data = self.get_data('prefs.json')
data['music_volume'] = self.music_vol
data['sfx_volume'] = self.sfx_vol
self.set_data('prefs.json', data)
def event_loop(self):
"""Events are passed to current state"""
for event in pg.event.get():
if event.type == pg.KEYDOWN and event.key == pg.K_F5:
self.toggle_show_fps(event.key)
self.state.handle_event(event)
def switch_state(self):
"""Switch to the next state."""
current_state = self.state_name
next_state = self.state.next_state
self.state.done = False
self.state_name = next_state
game_data = self.state.game_data # Persistent data
self.state = self.states[self.state_name]
self.set_state()
self.state.startup(game_data)
def set_state(self):
"Set state variables."
self.state.bgmusic = {
"song_name": song_names[self.music_index], "pause_music": self.pause_music,
"get_volume": self.get_music_volume, "set_volume": self.set_music_volume,
"save_volume": self.save_music_volume, "get_sfx_volume": self.get_sfx_volume,
"set_sfx_volume": self.set_sfx_volume, "current_position": self.get_min_and_secs(self.music_current_seek),
"song_length": self.get_min_and_secs(self.music_end-self.music_start)
}
self.state.highscore = {
"get_highscore": self.get_highscore, "set_highscore": self.set_highscore
}
self.state.intro = {
"get_done": self.get_intro_done, "set_done": self.set_intro_done
}
def toggle_show_fps(self, key):
"""Press f5 to turn on/off displaying the framerate in the caption."""
if key == pg.K_F5:
self.fps_visible = not self.fps_visible
if not self.fps_visible:
pg.display.set_caption(self.caption)
def update(self, dt):
"""Check for state switch and update state if needed"""
self.music_current_seek = pg.mixer.music.get_pos()//1000
self.state.bgmusic["current_position"] = self.get_min_and_secs(
self.music_current_seek)
if self.music_current_seek >= (self.music_end - self.music_start):
self.music_index = random.randint(0, 11)
self.music_start = self.music_pos[self.music_index]
self.music_end = self.music_pos[self.music_index + 1]
self.state.bgmusic["song_name"] = song_names[self.music_index]
self.state.bgmusic["current_position"] = (0, 0)
self.state.bgmusic["song_length"] = self.get_min_and_secs(
self.music_end-self.music_start)
pg.mixer.music.play(start=self.music_start)
if self.state.quit:
self.done = True
elif self.state.done:
self.switch_state()
self.state.update(dt)
def draw(self):
"""Pass surface to state for drawing"""
self.state.draw(self.screen)
self.show_fps()
def show_fps(self):
"""
Display the current FPS in the window handle if fps_visible is True.
"""
if self.fps_visible:
fps = self.clock.get_fps()
with_fps = "{} - {:.2f} FPS".format(self.caption, fps)
pg.display.set_caption(with_fps)
def run(self):
"""Game loop will run in the while loop here"""
while not self.done:
dt = self.clock.tick(self.fps)
self.event_loop()
self.update(dt)
self.draw()
pg.display.update()
class State(object):
"""Base class for game states"""
def __init__(self):
self.done = False
self.quit = False
self.next_state = None
self.screen_rect = pg.display.get_surface().get_rect()
self.game_data = {}
self.font = pg.font.Font(None, 24)
def startup(self, game_data):
"""Called when state is about to become active or resumes being active."""
self.game_data = game_data
def handle_event(self, event):
"""Handle events passed by Game"""
pass
def update(self, dt):
"""Update the state."""
pass
def draw(self, surface):
"""Draw scene to screen"""
pass
class _BaseSprite(pg.sprite.Sprite):
"""The base class for all types of sprites"""
def __init__(self, pos, size, *groups):
pg.sprite.Sprite.__init__(self, *groups)
self.rect = pg.Rect(pos, size)
self.exact_pos = list(self.rect.topleft)
self.old_pos = self.exact_pos[:]
def reset_position(self, value):
"""
Set sprite location to new point. By default `value`
specifies a position in the topleft corner.
"""
setattr(self.rect, "topleft", value)
self.exact_pos = list(self.rect.topleft)
self.old_pos = self.exact_pos[:]
def draw(self, surface):
surface.blit(self.image, self.rect)
def rotateImage(surf, image, pos, originPos, angle):
"""Method that rotates objects (sprites) by center by an angle."""
    # calculate the axis-aligned bounding box of the rotated image
w, h = image.get_size()
box = [pg.math.Vector2(p) for p in [(0, 0), (w, 0), (w, -h), (0, -h)]]
box_rotate = [p.rotate(angle) for p in box]
min_box = (min(box_rotate, key=lambda p: p[0])[
0], min(box_rotate, key=lambda p: p[1])[1])
max_box = (max(box_rotate, key=lambda p: p[0])[
0], max(box_rotate, key=lambda p: p[1])[1])
# calculate the translation of the pivot
pivot = pg.math.Vector2(originPos[0], -originPos[1])
pivot_rotate = pivot.rotate(angle)
pivot_move = pivot_rotate - pivot
# calculate the upper left origin of the rotated image
origin = (pos[0] - originPos[0] + min_box[0] - pivot_move[0],
pos[1] - originPos[1] - max_box[1] + pivot_move[1])
# get a rotated image
rotated_image = pg.transform.rotate(image, angle)
return rotated_image, origin
class Slider(_BaseSprite):
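    """A horizontal slider; dragging it calls `when_update` with the new value in [0.0, 1.0]."""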
def __init__(self, starting_value, position, when_update):
super().__init__(position, (300, 50))
self.value = starting_value
self.position = position
self.when_update = when_update
self.max = 200
self.setup()
def setup(self):
self.image = pg.Surface((204, 20), pg.SRCALPHA).convert_alpha()
# Outer rect
pg.draw.rect(self.image, pg.color.Color(
'yellow'), (0, 0, 204, 20), True)
# Inner rect
val = 200 * self.value
pg.draw.rect(self.image, pg.color.Color('red'), (2, 2, val, 16))
def draw(self, surface):
self.setup()
surface.blit(self.image, self.rect)
def set_value(self, posx):
posx = min(posx, self.max)
posx = max(posx, 0)
self.value = (posx/self.max)
self.when_update(self.value)
def handle_mouse(self, pos):
self.set_value(pos[0] - self.rect.x)
#!/usr/bin/env python
import cv2
import threading
# import Queue as que
import time
import numpy as np
# import roslib
import sys
# import rospy
import importlib
# import cPickle
# import genpy.message
# from rospy import ROSException
# import sensor_msgs.msg
# import actionlib
# import rostopic
# import rosservice
# from rosservice import ROSServiceException
from slidewindow import SlideWindow
from warper import Warper
from pidcal import PidCal
warper = Warper()
slidewindow = SlideWindow()
pidcal = PidCal()
cv_image = None
ack_publisher = None
car_run_speed = 0.5
def auto_drive(pid):
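    # Converts the PID output into throttle/steering values. It relies on a ROS
    # drive_values message and drive_values_pub publisher that are not defined
    # in this standalone script, so the call in main() stays commented out.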
global car_run_speed
w = 0
if -0.065 < pid and pid < 0.065:
w = 1
else:
w = 0.3
if car_run_speed < 1.0 * w:
car_run_speed += 0.002 * 10
else:
car_run_speed -= 0.003 * 10
drive_value = drive_values()
drive_value.throttle = int(3)
drive_value.steering = (pid/0.074)
drive_values_pub.publish(drive_value)
print('steer: ', drive_value.steering)
print('speed: ', car_run_speed)
def main():
# cap = cv2.VideoCapture(0);
# cap = cv2.VideoCapture(1)
cap = cv2.VideoCapture("TEST14.avi")
# cap.set(CV_CAP_PROP_FRAME_WIDTH,800)
# cap.set(CV_CAP_PROP_FRAME_HEIGHT,448)
cap.set(3,800)
cap.set(4,448)
while True:
        ret, img = cap.read()
        if not ret:
            break
        img1, x_location = process_image(img)
if x_location != None:
pid = round(pidcal.pid_control(int(x_location)), 6)
# auto_drive(pid)
cv2.imshow('result', img1)
# print (pid)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
#out.write(img1)
#out2.write(cv_image)
def light_calc(frame):
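    # Min-max contrast stretch of a single channel to the 0-255 range.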
arr = np.array(frame)
max_val = np.max(arr)
min_val = np.min(arr)
dst = (frame - min_val)*(255/(max_val - min_val))
# print('min_val : ' ,min_val)
# print('max_val : ', max_val )
# cv2.imshow("dst",dst)
return dst
def stop_line(frame):
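    # Search a fixed ROI of the bird's-eye edge image for near-horizontal Hough
    # lines (theta ~ 90 deg) and return their averaged endpoints, or
    # [-1, -1, -1, -1] if nothing is found.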
out_img = np.dstack((frame, frame, frame)) * 255
height = frame.shape[0]
width = frame.shape[1]
# inputImage = gray[y:y+h, x:x+w]
x = 180
w = 440
y = 320
h = 80
x1 = 0
y1 = 0
x2 = 0
y2 = 0
cnt = 0
roi_img = frame[y:y+h, x:x+w]
lines = cv2.HoughLines(roi_img,1,np.pi/180,100)
print(lines)
if lines is None:
return [-1, -1, -1, -1]
else :
for i in range(len(lines)):
for rho, theta in lines[i]:
tempTheta = theta/np.pi *180
if(tempTheta > 88 and tempTheta < 92):
cnt = cnt + 1
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 += int(x0 + 1000*(-b))
y1 += int(y0 + 1000*(a))
x2 += int(x0 - 1000*(-b))
y2 += int(y0 -1000*(a))
if cnt != 0:
return [int(x1/cnt), int(y1/cnt + y), int(x2/cnt), int(y2/cnt + y)]
else :
return [-1, -1, -1, -1]
def process_image(frame):
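    # Lane pipeline: heavy Gaussian blur -> saturation/lightness thresholds for
    # yellow/white markings -> Canny edges -> bird's-eye warp -> sliding-window
    # search, plus stop-line detection on the warped edges.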
# blur
kernel_size = 3
    # Smooth the frame with repeated 3x3 Gaussian blurs (13 passes in total)
    blur = frame
    for _ in range(13):
        blur = cv2.GaussianBlur(blur, (kernel_size, kernel_size), 1)
cv2.imshow("blue", blur)
# img_bird = warper.warp(frame)
# cv2.imshow("img_bird",img_bird)
hsv = cv2.cvtColor(blur,cv2.COLOR_BGR2HSV)
h,s,v = cv2.split(hsv)
hls = cv2.cvtColor(blur,cv2.COLOR_BGR2HLS)
h,l,s = cv2.split(hls)
# cv2.imshow("h",h)
yellow_process_img = light_calc(s)
white_process_img = light_calc(l)
# cv2.imshow("enhance", yellow_process_img)
# cv2.imshow("white_mask",white_process_img)
# grayscle
# gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
# # blur
# kernel_size = 5
# blur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size), 0)
# blur_gray1 = cv2.GaussianBlur(yellow_process_img,(kernel_size, kernel_size), 0)
# blur_gray2 = cv2.GaussianBlur(white_process_img,(kernel_size, kernel_size), 0)
ret,binary_img = cv2.threshold(yellow_process_img, 70, 255, cv2.THRESH_BINARY)
# cv2.imshow("bi",binary_img)
ret1,binary_img1 = cv2.threshold(white_process_img, 150, 255, cv2.THRESH_BINARY)
# cv2.imshow("bi1",binary_img1)
img_mask = cv2.bitwise_or(binary_img,binary_img1)
# img_result = cv2.bitwise_and(binary_img,binary_img,mask = img_mask)
cv2.imshow("img_result",img_mask)
# canny edge
low_threshold = 60#60
high_threshold = 70# 70
# edges_img = cv2.Canny(np.uint8(blur_gray), low_threshold, high_threshold)
# edges_img1 = cv2.Canny(np.uint8(binary_img), low_threshold, high_threshold)
# edges_img2 = cv2.Canny(np.uint8(binary_img1), low_threshold, high_threshold)
edges_img3 = cv2.Canny(np.uint8(blur), low_threshold, high_threshold)
# cv2.imshow("edges_img1",edges_img3)
# warper
# img = warper.warp(edges_img)
# bird = warper.warp(edges_img1)
# bird1 = warper.warp(edges_img2)
bird2 = warper.warp(edges_img3)
# stop_line
    # stop_line returns [-1, -1, -1, -1] when no stop line is detected, else [x1, y1, x2, y2]
stop_line_array = stop_line(bird2)
# print(stop_line_array)
# cv2.imshow("bird",bird2)
# img1, x_location1 = slidewindow.slidewindow(img)
# img2, x_location2 = slidewindow.slidewindow(bird)
img3, x_location3 = slidewindow.slidewindow(bird2)
    # Stop-line detection region (ROI)
cv2.rectangle(img3, (180, 320), (180+440, 320+80), (0, 0, 255), 4)
    # Draw the detected stop line
if(stop_line_array[0] != -1):
font = cv2.FONT_HERSHEY_COMPLEX # normal size serif font
fontScale = 1.2
cv2.putText(img3, 'stop!!!!!!', (10, 80), font, fontScale, (0, 0, 255), 4)
cv2.line(img3,(stop_line_array[0], stop_line_array[1]) \
,(stop_line_array[2], stop_line_array[3]),(0,0,255),10)
# print(x_location1)
return img3, x_location3
if __name__ == '__main__':
main()
from setuptools import setup
setup(
name = "financemgr",
version = "1.0.0",
packages = ['financemgr'],
install_requires = ["sqlalchemy"]
)
# Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ctypes import ArgumentError
import copy
import logging
import re
import socket
import threading
import time
import base64
from lib.collectinfo_analyzer.collectinfo_handler.collectinfo_parser import conf_parser
from lib.collectinfo_analyzer.collectinfo_handler.collectinfo_parser import full_parser
from lib.utils import common, constants, util, version
from .assocket import ASSocket
from .config_handler import JsonDynamicConfigHandler
from . import client_util
#### Remote Server connection module
PXSSH_NO_MODULE = 0 # Non-linux
PXSSH_NEW_MODULE = 1
try:
from pexpect import pxssh
PEXPECT_VERSION = PXSSH_NEW_MODULE
except ImportError:
PEXPECT_VERSION = PXSSH_NO_MODULE
def get_fully_qualified_domain_name(address, timeout=0.5):
# note: cannot use timeout lib because signal must be run from the
# main thread
result = [address]
def helper():
result[0] = socket.getfqdn(address)
t = threading.Thread(target=helper)
t.daemon = True
t.start()
t.join(timeout)
return result[0]
def return_exceptions(func):
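    # Decorator: any exception raised by the wrapped call marks the node as not
    # alive and is returned (not raised) so callers can inspect it.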
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
args[0].alive = False
return e
return wrapper
ASINFO_RESPONSE_OK = "ok"
class ASInfoError(Exception):
generic_error = "Unknown error occurred"
def __init__(self, message, response):
self.message = message
# Success can either be "ok", "OK", or "" :(
if response.lower() in {ASINFO_RESPONSE_OK, ""}:
raise ValueError('info() returned value "ok" which is not an error.')
try:
# sometimes there is a message with 'error' and sometimes not. i.e. set-config, udf-put
if response.startswith("error") or response.startswith("ERROR"):
try:
response = response.split("=")[1]
except IndexError:
response = response.split(":")[2]
elif response.startswith("fail") or response.startswith("FAIL"):
response = response.split(":")[2]
self.response = response.strip(" .")
except IndexError:
self.response = self.generic_error
def __str__(self):
return "{} : {}.".format(self.message, self.response)
class ASInfoConfigError(ASInfoError):
def __init__(self, message, resp, node, context, param, value):
self.message = message
self.response = super().generic_error
self.logger = logging.getLogger("asadm")
is_valid_context, invalid_context = self._check_context(node, context[:])
if not is_valid_context:
self.response = "Invalid subcontext {}".format(invalid_context)
return
config_type = node.config_type(context[:], param)
self.logger.debug("Found config type %s for param %s", str(config_type), param)
if config_type is None:
self.response = "Invalid parameter"
return
if not config_type.dynamic:
self.response = "Parameter is not dynamically configurable"
return
if not config_type.validate(value):
self.response = "Invalid value for {}".format(str(config_type))
return
super().__init__(message, resp)
def _check_context(self, node, subcontexts):
current_context = []
while subcontexts:
next_subcontext = subcontexts.pop(0)
valid_subcontexts = node.config_subcontext(current_context[:])
if next_subcontext not in valid_subcontexts:
return False, next_subcontext
current_context.append(next_subcontext)
return True, ""
class Node(object):
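    """A single Aerospike node: manages its info-protocol socket pool, its
    identity (node id, service addresses) and the info_* helpers used by asadm."""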
dns_cache = {}
pool_lock = threading.Lock()
def __init__(
self,
address,
port=3000,
tls_name=None,
timeout=5,
user=None,
        password=None,
auth_mode=constants.AuthMode.INTERNAL,
ssl_context=None,
consider_alumni=False,
use_services_alt=False,
):
"""
address -- ip or fqdn for this node
port -- info port for this node
timeout -- number of seconds to wait before giving up on the node
If address is ip then get fqdn else get ip
store ip in self.ip
store fqdn in self.fqdn
store port in self.port
NOTE: would be nice if the port could either be the service or telnet
access port. Can we detect from the socket?
ALSO NOTE: May be better to just use telnet instead?
"""
self.logger = logging.getLogger("asadm")
self.remote_system_command_prompt = "[#$] "
self.ip = address
self.port = port
self._timeout = timeout
self.user = user
self.password = password
self.auth_mode = auth_mode
self.tls_name = tls_name
self.ssl_context = ssl_context
if ssl_context:
self.enable_tls = True
else:
self.enable_tls = False
self.consider_alumni = consider_alumni
self.use_services_alt = use_services_alt
# session token
self.session_token = None
self.session_expiration = 0
self.perform_login = True
# System Details
self.sys_ssh_port = None
self.sys_user_id = None
self.sys_pwd = None
self.sys_ssh_key = None
self.sys_credential_file = None
self.sys_default_ssh_port = None
self.sys_default_user_id = None
        self.sys_default_pwd = None
self.sys_default_ssh_key = None
self.sys_cmds = [
# format: (command name as in parser, ignore error, command list)
("hostname", False, ["hostname -I", "hostname"]),
("top", False, ["top -n1 -b", "top -l 1"]),
(
"lsb",
False,
["lsb_release -a", "ls /etc|grep release|xargs -I f cat /etc/f"],
),
("meminfo", False, ["cat /proc/meminfo", "vmstat -s"]),
("interrupts", False, ["cat /proc/interrupts", ""]),
("iostat", False, ["iostat -y -x 5 1", ""]),
("dmesg", False, ["dmesg -T", "dmesg"]),
(
"limits",
False,
['sudo pgrep asd | xargs -I f sh -c "sudo cat /proc/f/limits"', ""],
),
("lscpu", False, ["lscpu", ""]),
("sysctlall", False, ["sudo sysctl vm fs", ""]),
("iptables", False, ["sudo iptables -S", ""]),
(
"hdparm",
False,
[
'sudo fdisk -l |grep Disk |grep dev | cut -d " " -f 2 | cut -d ":" -f 1 | xargs sudo hdparm -I 2>/dev/null',
"",
],
),
("df", False, ["df -h", ""]),
("free-m", False, ["free -m", ""]),
("uname", False, ["uname -a", ""]),
(
"scheduler",
True,
[
'ls /sys/block/{sd*,xvd*,nvme*}/queue/scheduler |xargs -I f sh -c "echo f; cat f;"',
"",
],
),
# Todo: Add more commands for other cloud platform detection
(
"environment",
False,
["curl -m 1 -s http://169.254.169.254/1.0/", "uname"],
),
]
        # hack: _key needs to be defined before info calls... but may have
# wrong (localhost) address before info_service is called. Will set
# again after that call.
self._key = hash(self.create_key(address, self.port))
self.peers_generation = -1
self.service_addresses = []
self._initialize_socket_pool()
self.connect(address, port)
self.localhost = False
try:
if address.lower() == "localhost":
self.localhost = True
else:
o, e = util.shell_command(["hostname -I"])
self.localhost = self._is_any_my_ip(o.split())
except Exception:
pass
# configurations from conf file
self.as_conf_data = {}
# TODO: Put json files in a submodule
self.conf_schema_handler = JsonDynamicConfigHandler(
constants.CONFIG_SCHEMAS_HOME, self.info_build()
)
def _initialize_socket_pool(self):
self.socket_pool = {}
self.socket_pool[self.port] = set()
self.socket_pool_max_size = 3
def _is_any_my_ip(self, ips):
if not ips:
return False
s_a = [a[0] for a in self.service_addresses]
if set(ips).intersection(set(s_a)):
return True
return False
def connect(self, address, port):
try:
if not self.login():
raise IOError("Login Error")
service_addresses = util.Future(self.info_service_list).start()
node_id = util.Future(self.info_node).start()
features = util.Future(self.info, "features").start()
update_ip = util.Future(self._update_IP, address, port).start()
changed = util.Future(self.has_peers_changed).start()
peers = util.Future(self.info_peers_list).start()
self.node_id = node_id.result()
if isinstance(self.node_id, Exception):
# Not able to connect this address
raise self.node_id
service_addresses = service_addresses.result()
self.features = features.result()
# Original address may not be the service address, the
# following will ensure we have the service address
if not isinstance(service_addresses, Exception):
self.service_addresses = service_addresses
            # else: its IP might not be available, so the node should try all
            # old service addresses
update_ip.result()
self.close()
self._initialize_socket_pool()
current_host = (self.ip, self.port, self.tls_name)
if not self.service_addresses or current_host not in self.service_addresses:
# if asd >= 3.10 and node has only IPv6 address
self.service_addresses.append(current_host)
for i, s in enumerate(self.service_addresses):
try:
# calling update ip again because info_service may have provided a
# different IP than what was seeded.
self.ip = s[0]
self.port = s[1]
# Most common case
if s[0] == current_host[0] and s[1] == current_host[1] and i == 0:
# The following info requests were already made
# no need to do again
break
# IP address have changed. Not common.
node_id = util.Future(self.info_node).start()
update_ip = util.Future(self._update_IP, self.ip, self.port).start()
peers = util.Future(self.info_peers_list).start()
self.node_id = node_id.result()
if not isinstance(self.node_id, Exception):
break
except Exception:
                # An unavailable address can appear in the service list, e.g.
                # the address of a down NIC (server < 3.10). In that case, try
                # every address in the list until a reachable one is found.
pass
if isinstance(self.node_id, Exception):
raise self.node_id
self._service_IP_port = self.create_key(self.ip, self.port)
self._key = hash(self._service_IP_port)
self.new_histogram_version = self._is_new_histogram_version()
self.alive = True
self.peers = peers.result()
except Exception:
# Node is offline... fake a node
self.ip = address
self.fqdn = address
self.port = port
self._service_IP_port = self.create_key(self.ip, self.port)
self._key = hash(self._service_IP_port)
self.node_id = "000000000000000"
self.service_addresses = [(self.ip, self.port, self.tls_name)]
self.features = ""
self.peers = []
            self.new_histogram_version = False
self.alive = False
def refresh_connection(self):
self.connect(self.ip, self.port)
def login(self):
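        """
        Log in to the node if authentication is required, caching the session
        token and its expiration so pooled connections can reuse it.
        """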
if self.auth_mode != constants.AuthMode.PKI and self.user is None:
return True
if not self.perform_login and (
self.session_expiration == 0 or self.session_expiration > time.time()
):
return True
sock = ASSocket(
self.ip,
self.port,
self.tls_name,
self.user,
self.password,
self.auth_mode,
self.ssl_context,
timeout=self._timeout,
)
if not sock.connect():
sock.close()
return False
if not sock.login():
sock.close()
return False
self.session_token, self.session_expiration = sock.get_session_info()
self.perform_login = False
return True
@property
def key(self):
"""Get the value of service_IP_port"""
return self._service_IP_port
@staticmethod
def create_key(address, port):
if address and ":" in address:
# IPv6 format
return "[%s]:%s" % (address, port)
return "%s:%s" % (address, port)
def __hash__(self):
return hash(self._key)
def __eq__(self, other):
return self._key == other._key
def _update_IP(self, address, port):
if address not in self.dns_cache:
self.dns_cache[address] = (
socket.getaddrinfo(address, port, socket.AF_UNSPEC, socket.SOCK_STREAM)[
0
][4][0],
get_fully_qualified_domain_name(address),
)
self.ip, self.fqdn = self.dns_cache[address]
def sock_name(self, use_fqdn=False):
if use_fqdn:
address = self.fqdn
else:
address = self.ip
return self.create_key(address, self.port)
def __str__(self):
return self.sock_name()
def is_XDR_enabled(self):
config = self.info_get_config("xdr")
if isinstance(config, Exception):
return False
# 'enable-xdr' was removed in XDR5.0, so check that get-config:context=xdr does not return an error.
if client_util.info_valid(config):
try:
xdr_enabled = config["enable-xdr"]
return xdr_enabled == "true"
except Exception:
pass
return True
return False
def is_feature_present(self, feature):
if not self.features or isinstance(self.features, Exception):
return False
return feature in self.features
def has_peers_changed(self):
try:
new_generation = self.info("peers-generation")
if self.peers_generation != new_generation:
self.peers_generation = new_generation
return True
else:
return False
except Exception:
return True
def _is_new_histogram_version(self):
as_version = self.info_build()
if isinstance(as_version, Exception):
return False
return common.is_new_histogram_version(as_version)
def _get_connection(self, ip, port):
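        # Reuse a pooled, still-connected socket if one is available; otherwise
        # open a new ASSocket and authenticate it with the cached session token.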
sock = None
with Node.pool_lock:
try:
while True:
sock = self.socket_pool[port].pop()
if sock.is_connected():
if not self.ssl_context:
sock.settimeout(self._timeout)
break
sock.close()
sock = None
except Exception:
pass
if sock:
return sock
sock = ASSocket(
ip,
port,
self.tls_name,
self.user,
self.password,
self.auth_mode,
self.ssl_context,
timeout=self._timeout,
)
if sock.connect():
if sock.authenticate(self.session_token):
return sock
elif self.session_token is not None:
# login enabled.... might be session_token expired, need to perform login again
self.perform_login = True
return None
def close(self):
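        """Close every pooled socket for this node and drop the pool."""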
try:
while True:
sock = self.socket_pool[self.port].pop()
sock.close()
except Exception:
pass
self.socket_pool = None
############################################################################
#
# Info Protocol API
#
############################################################################
    # The ip is passed to _info_cinfo so that it becomes part of the cache key.
    # When the cluster runs in a VM and asadm runs on the host, "services"
    # returns every endpoint of the server, but some of them may not be
    # reachable over the host/VM connection. Without the IP in the key, the
    # cache could return a result cached for an IP that asadm cannot reach;
    # if that happened while the service IP was being set up, the node would
    # end up with an unreachable IP and cause problems later.
@return_exceptions
@client_util.cached
def _info_cinfo(self, command, ip=None, port=None):
# TODO: citrusleaf.py does not support passing a timeout default is
# 0.5s
if ip is None:
ip = self.ip
if port is None:
port = self.port
result = None
sock = self._get_connection(ip, port)
if not sock:
raise IOError("Error: Could not connect to node %s" % ip)
try:
if sock:
result = sock.info(command)
try:
if len(self.socket_pool[port]) < self.socket_pool_max_size:
sock.settimeout(None)
self.socket_pool[port].add(sock)
else:
sock.close()
except Exception:
sock.close()
if result != -1 and result is not None:
return result
else:
raise ASInfoError("Error", "Invalid command '%s'" % command)
except Exception as ex:
if sock:
sock.close()
raise ex
@return_exceptions
@util.logthis
def info(self, command):
"""
asinfo function equivalent
Arguments:
command -- the info command to execute on this node
"""
return self._info_cinfo(command, self.ip)
@return_exceptions
def info_node(self):
"""
Get this nodes id. asinfo -v "node"
Returns:
string -- this node's id.
"""
return self.info("node")
@return_exceptions
def info_ip_port(self):
"""
Get this nodes ip:port.
Returns:
string -- this node's ip:port.
"""
return self.create_key(self.ip, self.port)
###### Services ######
# post 3.10 services
@return_exceptions
def info_peers(self):
"""
Get peers this node knows of that are active
Returns:
list -- [(p1_ip,p1_port,p1_tls_name),((p2_ip1,p2_port1,p2_tls_name),(p2_ip2,p2_port2,p2_tls_name))...]
"""
if self.enable_tls:
return self._info_peers_helper(self.info("peers-tls-std"))
return self._info_peers_helper(self.info("peers-clear-std"))
@return_exceptions
def info_peers_alumni(self):
"""
Get peers this node has ever know of
Returns:
list -- [(p1_ip,p1_port,p1_tls_name),((p2_ip1,p2_port1,p2_tls_name),(p2_ip2,p2_port2,p2_tls_name))...]
"""
if self.enable_tls:
return self._info_peers_helper(self.info("alumni-tls-std"))
return self._info_peers_helper(self.info("alumni-clear-std"))
@return_exceptions
def info_peers_alt(self):
"""
Get peers this node knows of that are active alternative addresses
Returns:
list -- [(p1_ip,p1_port,p1_tls_name),((p2_ip1,p2_port1,p2_tls_name),(p2_ip2,p2_port2,p2_tls_name))...]
"""
if self.enable_tls:
return self._info_peers_helper(self.info("peers-tls-alt"))
return self._info_peers_helper(self.info("peers-clear-alt"))
@return_exceptions
def _info_peers_helper(self, peers):
"""
Takes an info peers list response and returns a list.
"""
gen_port_peers = client_util.parse_peers_string(peers)
if not gen_port_peers or len(gen_port_peers) < 3:
return []
default_port = 3000
# TODO not used generation = gen_port_peers[0]
if gen_port_peers[1]:
default_port = int(gen_port_peers[1])
peers_list = client_util.parse_peers_string(gen_port_peers[2])
if not peers_list or len(peers_list) < 1:
return []
p_list = []
for p in peers_list:
p_data = client_util.parse_peers_string(p)
if not p_data or len(p_data) < 3:
continue
# TODO - not used node_name = p_data[0]
tls_name = None
if p_data[1] and len(p_data[1]) > 0:
tls_name = p_data[1]
endpoints = client_util.parse_peers_string(p_data[2])
if not endpoints or len(endpoints) < 1:
continue
if not tls_name:
tls_name = client_util.find_dns(endpoints)
endpoint_list = []
for e in endpoints:
if "[" in e and "]:" not in e:
addr_port = client_util.parse_peers_string(e, delim=",")
else:
addr_port = client_util.parse_peers_string(e, delim=":")
addr = addr_port[0]
if addr.startswith("["):
addr = addr[1:]
if addr.endswith("]"):
addr = addr[:-1].strip()
if len(addr_port) > 1 and addr_port[1] and len(addr_port[1]) > 0:
port = addr_port[1]
else:
port = default_port
try:
port = int(port)
except Exception:
port = default_port
endpoint_list.append((addr, port, tls_name))
p_list.append(tuple(endpoint_list))
return p_list
@return_exceptions
def get_alumni_peers(self):
# info_peers_alumni for server version prior to 4.3.1 gives
# only old nodes (which are not part of current cluster), so to get full list we need to
# add info_peers
alumni_peers = util.Future(self.get_peers).start()
peers_alumni = util.Future(self.info_peers_alumni).start()
return list(set(alumni_peers.result() + peers_alumni.result()))
@return_exceptions
def get_peers(self, all=False):
if all:
alt = util.Future(self.info_peers_alt).start()
std = util.Future(self.info_peers).start()
return alt.result() + std.result()
if self.use_services_alt:
return self.info_peers_alt()
return self.info_peers()
@return_exceptions
def info_peers_list(self):
if self.consider_alumni:
return self.get_alumni_peers()
else:
return self.get_peers()
@return_exceptions
def info_peers_flat_list(self):
return client_util.flatten(self.info_peers_list())
###### Services End ######
###### Service ######
# post 3.10 services
@return_exceptions
def _info_service_helper(self, service, delimiter=";"):
if not service or isinstance(service, Exception):
return []
s = [
client_util.parse_peers_string(v, ":")
for v in client_util.info_to_list(service, delimiter=delimiter)
]
return [
(
v[0].strip("[]"),
int(v[1]) if len(v) > 1 and v[1] else int(self.port),
self.tls_name,
)
for v in s
]
@return_exceptions
def info_service_alt(self):
"""
Get service alternate endpoints of this node
Returns:
list -- [(ip,port,tls_name),...]
"""
try:
if self.enable_tls:
return self._info_service_helper(self.info("service-tls-alt"), ",")
return self._info_service_helper(self.info("service-clear-alt"), ",")
except Exception:
return []
@return_exceptions
def info_service(self):
"""
Get service endpoints of this node
Returns:
list -- [(ip,port,tls_name),...]
"""
try:
if self.enable_tls:
return self._info_service_helper(self.info("service-tls-std"), ",")
return self._info_service_helper(self.info("service-clear-std"), ",")
except Exception:
return []
@return_exceptions
def info_service_list(self):
if self.use_services_alt:
return self.info_service_alt()
return self.info_service()
###### Service End ######
@return_exceptions
def info_statistics(self):
"""
Get statistics for this node. asinfo -v "statistics"
Returns:
dictionary -- statistic name -> value
"""
return client_util.info_to_dict(self.info("statistics"))
@return_exceptions
def info_namespaces(self):
"""
Get a list of namespaces for this node. asinfo -v "namespaces"
Returns:
list -- list of namespaces
"""
return client_util.info_to_list(self.info("namespaces"))
@return_exceptions
def info_namespace_statistics(self, namespace):
"""
Get statistics for a namespace.
Returns:
dict -- {stat_name : stat_value, ...}
"""
ns_stat = client_util.info_to_dict(self.info("namespace/%s" % namespace))
# Due to new server feature namespace add/remove with rolling restart,
# there is possibility that different nodes will have different namespaces.
# type = unknown means namespace is not available on this node, so just return empty map.
if (
ns_stat
and not isinstance(ns_stat, Exception)
and "type" in ns_stat
and ns_stat["type"] == "unknown"
):
ns_stat = {}
return ns_stat
@return_exceptions
def info_all_namespace_statistics(self):
namespaces = self.info_namespaces()
if isinstance(namespaces, Exception):
return namespaces
stats = {}
for ns in namespaces:
stats[ns] = self.info_namespace_statistics(ns)
return stats
@return_exceptions
def info_set_statistics(self, namespace, set_):
set_stat = self.info("sets/{}/{}".format(namespace, set_))
if set_stat[-1] == ";":
set_stat = client_util.info_colon_to_dict(set_stat[0:-1])
else:
set_stat = client_util.info_colon_to_dict(set_stat)
return set_stat
@return_exceptions
def info_all_set_statistics(self):
stats = self.info("sets")
stats = client_util.info_to_list(stats)
if not stats:
return {}
stats.pop()
stats = [client_util.info_colon_to_dict(stat) for stat in stats]
sets = {}
for stat in stats:
ns_name = util.get_value_from_dict(
d=stat, keys=("ns_name", "namespace", "ns")
)
set_name = util.get_value_from_dict(d=stat, keys=("set_name", "set"))
key = (ns_name, set_name)
if key not in sets:
sets[key] = {}
set_dict = sets[key]
set_dict.update(stat)
return sets
@return_exceptions
def info_health_outliers(self):
stats = self.info("health-outliers")
stats = client_util.info_to_list(stats)
if not stats:
return {}
stats = [client_util.info_colon_to_dict(stat) for stat in stats]
health_dict = {}
for i, stat in enumerate(stats):
key = "outlier" + str(i)
health_dict[key] = stat
return health_dict
@return_exceptions
def info_best_practices(self):
failed_practices = []
resp = self.info("best-practices")
if isinstance(resp, ASInfoError):
return resp
resp_dict = client_util.info_to_dict(resp)
if (
"failed_best_practices" in resp_dict
and resp_dict["failed_best_practices"] != "none"
):
failed_practices = client_util.info_to_list(
resp_dict["failed_best_practices"], delimiter=","
)
return failed_practices
@return_exceptions
def info_bin_statistics(self):
stats = client_util.info_to_list(self.info("bins"))
if not stats:
return {}
stats.pop()
stats = [value.split(":") for value in stats]
stat_dict = {}
for stat in stats:
values = client_util.info_to_list(stat[1], ",")
values = ";".join([v for v in values if "=" in v])
values = client_util.info_to_dict(values)
stat_dict[stat[0]] = values
return stat_dict
@return_exceptions
def info_XDR_statistics(self):
"""
Get statistics for XDR
Returns:
dict -- {stat_name : stat_value, ...}
"""
build = self.info_build()
# for new aerospike version (>=3.8) with
# xdr-in-asd stats available on service port
if version.LooseVersion(build) < version.LooseVersion(
constants.SERVER_NEW_XDR5_VERSION
):
return client_util.info_to_dict(self.info("statistics/xdr"))
return self.info_all_dc_statistics()
@return_exceptions
def info_set_config_xdr_create_dc(self, dc):
dcs = self.info_dcs()
error_message = "Failed to create XDR datacenter"
if dc in dcs:
raise ASInfoError(error_message, "DC already exists")
build = self.info_build()
req = "set-config:context=xdr;dc={};action=create"
if version.LooseVersion(build) < version.LooseVersion(
constants.SERVER_NEW_XDR5_VERSION
):
req = req.replace("dc", "datacenter")
req = req.format(dc)
resp = self.info(req)
if resp != ASINFO_RESPONSE_OK:
raise ASInfoError(error_message, resp)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_set_config_xdr_delete_dc(self, dc):
dcs = self.info_dcs()
error_message = "Failed to delete XDR datacenter"
self.logger.debug("Found dcs: %s", dcs)
if dc not in dcs:
raise ASInfoError(error_message, "DC does not exist")
build = self.info_build()
req = "set-config:context=xdr;dc={};action=delete"
if version.LooseVersion(build) < version.LooseVersion(
constants.SERVER_NEW_XDR5_VERSION
):
req = req.replace("dc", "datacenter")
req = req.format(dc)
resp = self.info(req)
if resp != ASINFO_RESPONSE_OK:
raise ASInfoError(error_message, resp)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_set_config_xdr_add_namespace(self, dc, namespace, rewind=None):
error_message = "Failed to add namespace to XDR datacenter"
build = self.info_build()
req = "set-config:context=xdr;dc={};namespace={};action=add"
if version.LooseVersion(build) < version.LooseVersion(
constants.SERVER_NEW_XDR5_VERSION
):
req = req.replace("dc", "datacenter")
req = req.format(dc, namespace)
if rewind:
if rewind != "all":
try:
int(rewind)
except ValueError:
raise ASInfoError(
error_message,
'Invalid rewind. Must be int or "all"',
)
req += ";rewind={}".format(rewind)
resp = self.info(req)
if resp != ASINFO_RESPONSE_OK:
raise ASInfoError(error_message, resp)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_set_config_xdr_remove_namespace(self, dc, namespace):
build = self.info_build()
req = "set-config:context=xdr;dc={};namespace={};action=remove"
if version.LooseVersion(build) < version.LooseVersion(
constants.SERVER_NEW_XDR5_VERSION
):
req = req.replace("dc", "datacenter")
req = req.format(dc, namespace)
resp = self.info(req)
if resp != ASINFO_RESPONSE_OK:
raise ASInfoError("Failed to remove namespace from XDR datacenter", resp)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_set_config_xdr_add_node(self, dc, node):
build = self.info_build()
req = "set-config:context=xdr;dc={};node-address-port={};action=add"
if version.LooseVersion(build) < version.LooseVersion(
constants.SERVER_NEW_XDR5_VERSION
):
req = req.replace("dc", "datacenter")
req = req.format(dc, node)
resp = self.info(req)
if resp != ASINFO_RESPONSE_OK:
raise ASInfoError("Failed to add node to XDR datacenter", resp)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_set_config_xdr_remove_node(self, dc, node):
build = self.info_build()
req = "set-config:context=xdr;dc={};node-address-port={};action=remove"
if version.LooseVersion(build) < version.LooseVersion(
constants.SERVER_NEW_XDR5_VERSION
):
req = req.replace("dc", "datacenter")
req = req.format(dc, node)
resp = self.info(req)
if resp != ASINFO_RESPONSE_OK:
raise ASInfoError("Failed to remove node from XDR datacenter", resp)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_set_config_xdr(self, param, value, dc=None, namespace=None):
if namespace and not dc:
raise ArgumentError("Namespace must be accompanied by a dc.")
req = "set-config:context=xdr;{}={}".format(param, value)
if dc:
build = self.info_build()
if version.LooseVersion(build) < version.LooseVersion(
constants.SERVER_NEW_XDR5_VERSION
):
req += ";datacenter={}".format(dc)
else:
req += ";dc={}".format(dc)
if namespace:
req += ";namespace={}".format(namespace)
resp = self.info(req)
if resp != ASINFO_RESPONSE_OK:
context = ["xdr"]
if dc is not None:
context.append("dc")
if namespace is not None:
context.append("namespace")
raise ASInfoConfigError(
"Failed to set XDR configuration parameter {} to {}".format(
param, value
),
resp,
self,
context,
param,
value,
)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_logs(self):
id_file_dict = {}
ls = client_util.info_to_list(self.info("logs"))
for pair in ls:
id, file = pair.split(":")
id_file_dict[file] = id
return id_file_dict
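    # Example (hypothetical response): "0:/var/log/aerospike/aerospike.log"
    # would be parsed into {"/var/log/aerospike/aerospike.log": "0"}.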
@return_exceptions
def info_set_config_logging(self, file, param, value):
logs = self.info_logs()
error_message = "Failed to set logging configuration parameter {} to {}"
if file not in logs:
raise ASInfoError(
error_message.format(param, value),
"{} does not exist".format(file),
)
resp = self.info("log-set:id={};{}={}".format(logs[file], param, value))
if resp != ASINFO_RESPONSE_OK:
raise ASInfoConfigError(
error_message.format(param, value),
resp,
self,
["logging"],
param,
value,
)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_set_config_service(self, param, value):
resp = self.info("set-config:context=service;{}={}".format(param, value))
if resp != ASINFO_RESPONSE_OK:
raise ASInfoConfigError(
"Failed to set service configuration parameter {} to {}".format(
param, value
),
resp,
self,
["service"],
param,
value,
)
return ASINFO_RESPONSE_OK
@return_exceptions
@util.logthis
def info_set_config_namespace(
self, param, value, namespace, set_=None, subcontext=None
):
new_param = param
if subcontext and subcontext != "storage-engine":
delimiter = "."
if subcontext == "geo2dsphere-within":
delimiter = "-"
new_param = delimiter.join([subcontext, param])
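        # e.g. subcontext "geo2dsphere-within" with param "max-cells" yields
        # "geo2dsphere-within-max-cells"; other subcontexts use a "." join
        # (and "storage-engine" leaves the param name unchanged).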
req = "set-config:context=namespace;id={};{}={}".format(
namespace, new_param, value
)
if set_:
req += ";set={}".format(set_)
resp = self.info(req)
if resp != ASINFO_RESPONSE_OK:
context = ["namespace"]
if set_ is not None:
context.append("set")
if subcontext is not None:
context.append(subcontext)
raise ASInfoConfigError(
"Failed to set namespace configuration parameter {} to {}".format(
param, value
),
resp,
self,
context,
param,
value,
)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_set_config_network(self, param, value, subcontext):
new_param = ".".join([subcontext, param])
resp = self.info("set-config:context=network;{}={}".format(new_param, value))
if resp != ASINFO_RESPONSE_OK:
context = ["network"]
if subcontext is not None:
context.append(subcontext)
raise ASInfoConfigError(
"Failed to set network configuration parameter {} to {}".format(
param, value
),
resp,
self,
context,
param,
value,
)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_set_config_security(self, param, value, subcontext=None):
new_param = param
if subcontext:
new_param = ".".join([subcontext, param])
resp = self.info("set-config:context=security;{}={}".format(new_param, value))
if resp != ASINFO_RESPONSE_OK:
context = ["security"]
if subcontext is not None:
context.append(subcontext)
raise ASInfoConfigError(
"Failed to set security configuration parameter {} to {}".format(
param, value
),
resp,
self,
context,
param,
value,
)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_get_config(self, stanza="", namespace=""):
"""
        Get the complete config for a node. This should include the following
        stanzas: Service, Network, XDR, and Namespace.
        Sadly, it seems Service and Network are not separable.
Returns:
dict -- stanza --> [namespace] --> param --> value
"""
build = util.Future(self.info_build).start()
config = {}
if stanza == "namespace":
if namespace != "":
config = {
namespace: client_util.info_to_dict(
self.info("get-config:context=namespace;id=%s" % namespace)
)
}
else:
namespace_configs = {}
namespaces = self.info_namespaces()
for namespace in namespaces:
namespace_config = self.info_get_config("namespace", namespace)
namespace_config = namespace_config[namespace]
namespace_configs[namespace] = namespace_config
config = namespace_configs
elif stanza == "" or stanza == "service":
config = client_util.info_to_dict(self.info("get-config:"))
elif stanza == "xdr" and version.LooseVersion(
build.result()
) >= version.LooseVersion(constants.SERVER_NEW_XDR5_VERSION):
xdr_config = {}
xdr_config["dc_configs"] = {}
xdr_config["ns_configs"] = {}
xdr_config["xdr_configs"] = client_util.info_to_dict(
self.info("get-config:context=xdr")
)
for dc in xdr_config["xdr_configs"]["dcs"].split(","):
dc_config = self.info("get-config:context=xdr;dc=%s" % dc)
xdr_config["ns_configs"][dc] = {}
xdr_config["dc_configs"][dc] = client_util.info_to_dict(dc_config)
start_namespaces = dc_config.find("namespaces=") + len("namespaces=")
end_namespaces = dc_config.find(";", start_namespaces)
namespaces = (
ns for ns in dc_config[start_namespaces:end_namespaces].split(",")
)
for namespace in namespaces:
namespace_config = self.info(
"get-config:context=xdr;dc=%s;namespace=%s" % (dc, namespace)
)
xdr_config["ns_configs"][dc][namespace] = client_util.info_to_dict(
namespace_config
)
config = xdr_config
elif stanza != "all":
config = client_util.info_to_dict(
self.info("get-config:context=%s" % stanza)
)
elif stanza == "all":
config["namespace"] = self.info_get_config("namespace")
config["service"] = self.info_get_config("service")
# Server lumps this with service
# config["network"] = self.info_get_config("network")
return config
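    # Usage sketch (assuming a connected Node instance `node`):
    #   node.info_get_config(stanza="namespace", namespace="test")
    #   -> {"test": {<param>: <value>, ...}}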
@return_exceptions
def info_get_originalconfig(self, stanza=""):
"""
Get the original config (from conf file) for a node. This should include the following
stanzas: Service, Network, XDR, DC, and Namespace
Returns:
dict -- stanza --> [namespace] --> param --> value
"""
config = {}
if not self.localhost:
return config
if not self.as_conf_data:
conf_path = "/etc/aerospike/aerospike.conf"
self.as_conf_data = conf_parser.parse_file(conf_path)
if "namespace" in self.as_conf_data:
for ns in self.as_conf_data["namespace"].keys():
if "service" in self.as_conf_data["namespace"][ns]:
self.as_conf_data["namespace"][ns] = self.as_conf_data[
"namespace"
][ns]["service"]
try:
config = self.as_conf_data[stanza]
except Exception:
pass
return config
def _update_total_latency(self, total_rows, row, has_time_range_col=True):
"""
        Take latency information for a single histogram and integrate it into
        total_rows. Since most of the values are percentages, there is some
        math involved.

        row -- a single histogram's values. These values correspond to ops/sec
               and a specified number of latency buckets, i.e. 1ms, 8ms, 64ms . . .
        total_rows -- the accumulated latency information before the current row
               is integrated.
"""
if not row or not isinstance(row, list):
return total_rows
if not total_rows:
total_rows = []
total_rows.append(row)
return total_rows
has_time_range_col = int(has_time_range_col)
time_range = row[0]
updated = False
for total_row in total_rows:
if not has_time_range_col or total_row[0] == time_range:
new_sum = float(row[has_time_range_col])
if new_sum > 0:
old_sum = float(total_row[has_time_range_col])
for i, transaction_percent in enumerate(
total_row[1 + has_time_range_col :]
):
row_idx = i + 1 + has_time_range_col
old_transactions = float(
(old_sum * transaction_percent) / 100.00
)
new_transactions = float((new_sum * row[row_idx]) / 100.00)
total_row[row_idx] = round(
float(
((old_transactions + new_transactions) * 100)
/ (old_sum + new_sum)
),
2,
)
total_row[has_time_range_col] = round(old_sum + new_sum, 2)
updated = True
break
if not updated:
total_rows.append(copy.deepcopy(row))
return total_rows
@return_exceptions
def info_latency(self, back=None, duration=None, slice_tm=None, ns_set=None):
cmd = "latency:"
try:
if back or back == 0:
cmd += "back=%d" % (back) + ";"
except Exception:
pass
try:
if duration or duration == 0:
cmd += "duration=%d" % (duration) + ";"
except Exception:
pass
try:
if slice_tm or slice_tm == 0:
cmd += "slice=%d" % (slice_tm) + ";"
except Exception:
pass
data = {}
try:
hist_info = self.info(cmd)
except Exception:
return data
tdata = hist_info.split(";")
hist_name = None
ns = None
start_time = None
columns = []
ns_hist_pattern = r"{([A-Za-z_\d-]+)}-([A-Za-z_-]+)"
total_key = "total"
while tdata != []:
row = tdata.pop(0)
if not row:
continue
row = row.split(",")
# neglect if error string
if len(row) < 2:
continue
s1, s2 = row[0].split(":", 1)
if not s1.isdigit():
m = re.search(ns_hist_pattern, s1)
if m:
ns = m.group(1)
hist_name = m.group(2)
else:
ns = None
hist_name = s1
if ns_set and (not ns or ns not in ns_set):
hist_name = None
continue
columns = [col.replace("u", u"\u03bc") for col in row[1:]]
start_time = s2
start_time = client_util.remove_suffix(start_time, "-GMT")
columns.insert(0, "Time Span")
continue
if not hist_name or not start_time:
continue
try:
end_time = row.pop(0)
end_time = client_util.remove_suffix(end_time, "-GMT")
row = [float(r) for r in row]
row.insert(0, "%s->%s" % (start_time, end_time))
if hist_name not in data:
data[hist_name] = {}
if ns:
ns_key = "namespace"
if ns_key not in data[hist_name]:
data[hist_name][ns_key] = {}
if ns not in data[hist_name][ns_key]:
data[hist_name][ns_key][ns] = {}
data[hist_name][ns_key][ns]["columns"] = columns
data[hist_name][ns_key][ns]["values"] = []
data[hist_name][ns_key][ns]["values"].append(copy.deepcopy(row))
if total_key not in data[hist_name]:
data[hist_name][total_key] = {}
data[hist_name][total_key]["columns"] = columns
data[hist_name][total_key]["values"] = []
data[hist_name][total_key]["values"] = self._update_total_latency(
data[hist_name][total_key]["values"], row
)
start_time = end_time
except Exception:
pass
return data
@return_exceptions
def info_latencies(
self, buckets=3, exponent_increment=3, verbose=False, ns_set=None
):
"""
Get latencies metrics from this node. asinfo -v "latencies:" -p 3004
Returns:
dict -- {'host_address:port': {'histogram_name': {'namespace/total':
{'namespace_name': {'columns': ['column1', 'column2', . . .], 'values':
[[val1, val2, . . .]]}}, . . .}}}}
"""
# If ns_set is set filter through all default latencies with ns_set
# If optional_benchmark is set make additional queries for the
# optional_benchmark
cmd_latencies = ["latencies:"]
data = {}
if verbose:
namespaces = []
if ns_set:
namespaces = ns_set
else:
try:
namespaces = self.info("namespaces").split(";")
except Exception:
return data
optional_benchmarks = [
"proxy",
"benchmark-fabric",
"benchmarks-ops-sub",
"benchmarks-read",
"benchmarks-write",
"benchmarks-udf",
"benchmarks-udf-sub",
"benchmarks-batch-sub",
]
cmd_latencies += [
"latencies:hist={%s}-%s" % (ns, optional)
for ns in namespaces
for optional in optional_benchmarks
]
hist_info = []
for cmd in cmd_latencies:
try:
hist_info.append(self.info(cmd))
except Exception:
return data
if hist_info[-1].startswith("error"):
hist_info.pop()
continue
# example hist info after join:
# batch-index:;{test}-read:msec,0.0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00, /
# 0.00,0.00,0.00,0.00,0.00,0.00,0.00;{test}-write:msec,0.0,0.00,0.00,0.00,0.00,0.00,0.00, /
# 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00;{test}-udf:;{test}-query:;{bar}-read:; /
# {bar}-write:;{bar}-udf:;{bar}-query: /
hist_info = ";".join(hist_info)
tdata = hist_info.split(";")
hist_name = None
ns = None
unit_mapping = {"msec": "ms", "usec": u"\u03bcs"}
time_units = None
columns = [
">1",
">2",
">4",
">8",
">16",
">32",
">64",
">128",
">256",
">512",
">1024",
">2048",
">4096",
">8192",
">16384",
">32768",
">65536",
][::exponent_increment][:buckets]
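        # With the default exponent_increment=3 and buckets=3, the slicing above
        # keeps every 3rd threshold and truncates the list to [">1", ">8", ">64"].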
ns_hist_pattern = r"{([A-Za-z_\d-]+)}-([A-Za-z_-]+)"
total_key = "total"
for hist in tdata:
if not hist:
continue
hist_name, hist_data = hist.split(":")
hist_data = hist_data.split(",")
m = re.search(ns_hist_pattern, hist_name)
# Remove empty histograms, len 2 just to be safe
if len(hist_data) <= 2:
continue
if m:
ns = m.group(1)
hist_name = m.group(2)
# Is batch histogram w/o namespace
else:
ns = None
if ns_set and (not ns or ns not in ns_set):
hist_name = None
continue
if time_units is None:
time_units = hist_data.pop(0)
columns = ["ops/sec"] + [
col + unit_mapping[time_units] for col in list(columns)
]
else:
hist_data.pop(0)
latency_data = [float(r) for r in hist_data]
# Remove ops/sec and then add it back in after getting correct latency buckets.
latency_data = [latency_data[0]] + latency_data[1:][::exponent_increment][
:buckets
]
try:
if hist_name not in data:
data[hist_name] = {}
if ns:
ns_key = "namespace"
if ns_key not in data[hist_name]:
data[hist_name][ns_key] = {}
if ns not in data[hist_name][ns_key]:
data[hist_name][ns_key][ns] = {}
data[hist_name][ns_key][ns]["columns"] = columns
data[hist_name][ns_key][ns]["values"] = []
data[hist_name][ns_key][ns]["values"].append(
copy.deepcopy(latency_data)
)
if total_key not in data[hist_name]:
data[hist_name][total_key] = {}
data[hist_name][total_key]["columns"] = columns
data[hist_name][total_key]["values"] = []
data[hist_name][total_key]["values"] = self._update_total_latency(
data[hist_name][total_key]["values"],
latency_data,
has_time_range_col=False,
)
except Exception:
# Missing histogram
pass
return data
@return_exceptions
def info_dcs(self):
"""
Get a list of datacenters for this node. asinfo -v "dcs" -p 3004
Returns:
list -- list of dcs
"""
xdr_major_version = int(self.info_build()[0])
# for server versions >= 5 using XDR5.0
if xdr_major_version >= 5:
xdr_data = client_util.info_to_dict(self.info("get-config:context=xdr"))
if xdr_data is None:
return []
dcs = xdr_data.get("dcs", "")
if dcs == "":
return []
return dcs.split(",")
return client_util.info_to_list(self.info("dcs"))
@return_exceptions
def info_dc_statistics(self, dc):
"""
Get statistics for a datacenter.
Returns:
dict -- {stat_name : stat_value, ...}
"""
xdr_major_version = int(self.info_build()[0])
# If xdr version is < XDR5.0 return output of old asinfo command.
if xdr_major_version < 5:
return client_util.info_to_dict(self.info("dc/%s" % dc))
else:
return client_util.info_to_dict(
self.info("get-stats:context=xdr;dc=%s" % dc)
)
@return_exceptions
def info_all_dc_statistics(self):
dcs = self.info_dcs()
if isinstance(dcs, Exception):
return {}
stats = {}
for dc in dcs:
stat = self.info_dc_statistics(dc)
if not stat or isinstance(stat, Exception):
stat = {}
stats[dc] = stat
return stats
@return_exceptions
@util.logthis
def info_udf_list(self):
"""
Get list of UDFs stored on the node.
Returns:
dict -- {<file-name>: {"filename": <file-name>, "hash": <hash>, "type": 'LUA'}, . . .}
"""
udf_data = self.info("udf-list")
if not udf_data:
return {}
return client_util.info_to_dict_multi_level(
udf_data, "filename", delimiter2=","
)
@return_exceptions
def info_udf_put(self, udf_file_name, udf_str, udf_type="LUA"):
content = base64.b64encode(udf_str.encode("ascii"))
content = content.decode("ascii")
content_len = len(content)
command = (
"udf-put:filename="
+ udf_file_name
+ ";udf-type="
+ udf_type
+ ";content-len="
+ str(content_len)
+ ";content="
+ content
)
resp = self.info(command)
if resp.lower() not in {ASINFO_RESPONSE_OK, ""}:
raise ASInfoError("Failed to add UDF", resp)
return ASINFO_RESPONSE_OK
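    # Usage sketch (hypothetical file name, assuming a connected Node `node`):
    #   with open("example.lua") as f:
    #       node.info_udf_put("example.lua", f.read())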
@return_exceptions
def info_udf_remove(self, udf_file_name):
existing_udfs = self.info_udf_list()
existing_names = existing_udfs.keys()
# Server does not check if udf exists
if udf_file_name not in existing_names:
raise ASInfoError(
"Failed to remove UDF {}".format(udf_file_name), "UDF does not exist"
)
command = "udf-remove:filename=" + udf_file_name + ";"
resp = self.info(command)
if resp.lower() not in {ASINFO_RESPONSE_OK, ""}:
raise ASInfoError("Failed to remove UDF {}".format(udf_file_name), resp)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_roster(self):
"""
Get roster info.
Returns:
dict -- {ns1:{key_name : key_value, ...}, ns2:{key_name : key_value, ...}}
"""
roster_data = self.info("roster:")
if not roster_data:
return {}
roster_data = client_util.info_to_dict_multi_level(roster_data, "ns")
list_fields = ["roster", "pending_roster", "observed_nodes"]
for ns, ns_roster_data in roster_data.items():
for k, v in ns_roster_data.items():
if k not in list_fields:
continue
try:
ns_roster_data[k] = v.split(",")
except Exception:
ns_roster_data[k] = v
return roster_data
@return_exceptions
def info_racks(self):
"""
Get rack info.
Returns:
dict -- {ns1:{rack-id: {'rack-id': rack-id, 'nodes': [node1, node2, ...]}, ns2:{...}, ...}
"""
rack_data = self.info("racks:")
if not rack_data:
return {}
rack_data = client_util.info_to_dict_multi_level(rack_data, "ns")
rack_dict = {}
for ns, ns_rack_data in rack_data.items():
rack_dict[ns] = {}
for k, v in ns_rack_data.items():
if k == "ns":
continue
try:
rack_id = k.split("_")[1]
nodes = v.split(",")
rack_dict[ns][rack_id] = {}
rack_dict[ns][rack_id]["rack-id"] = rack_id
rack_dict[ns][rack_id]["nodes"] = nodes
except Exception:
continue
return rack_dict
@return_exceptions
def info_rack_ids(self):
"""
        Get this node's rack id for each namespace.
        Returns:
        dict -- {ns1: rack_id, ns2: rack_id, ...}
"""
resp = self.info("rack-ids")
rack_data = {}
if not resp:
return {}
resp = client_util.info_to_list(resp)
for ns_id in resp:
ns, id_ = client_util.info_to_tuple(ns_id)
if id_ != "":
rack_data[ns] = id_
return rack_data
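    # Example (assuming the usual ";"-separated "ns:id" response format):
    # "test:1;bar:2" is parsed into {"test": "1", "bar": "2"}; namespaces with
    # an empty id are skipped.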
@return_exceptions
def info_dc_get_config(self):
"""
Get config for a datacenter.
Returns:
dict -- {dc_name1:{config_name : config_value, ...}, dc_name2:{config_name : config_value, ...}}
"""
configs = self.info("get-dc-config")
if not configs or isinstance(configs, Exception):
configs = self.info("get-dc-config:")
if not configs or isinstance(configs, Exception):
return {}
return client_util.info_to_dict_multi_level(
configs,
["dc-name", "DC_Name"],
ignore_field_without_key_value_delimiter=False,
)
@return_exceptions
def info_XDR_get_config(self):
return self.info_get_config(stanza="xdr")
def _collect_histogram_data(
self, histogram, command, logarithmic=False, raw_output=False
):
namespaces = self.info_namespaces()
data = {}
for namespace in namespaces:
try:
datum = self.info(command % (namespace, histogram))
if not datum or isinstance(datum, Exception):
continue
if raw_output:
data[namespace] = datum
else:
d = common.parse_raw_histogram(
histogram, datum, logarithmic, self.new_histogram_version
)
if d and not isinstance(d, Exception):
data[namespace] = d
except Exception:
pass
return data
@return_exceptions
def info_histogram(self, histogram, logarithmic=False, raw_output=False):
if not self.new_histogram_version:
return self._collect_histogram_data(
histogram, command="hist-dump:ns=%s;hist=%s", raw_output=raw_output
)
command = "histogram:namespace=%s;type=%s"
if logarithmic:
if histogram == "objsz":
histogram = "object-size"
return self._collect_histogram_data(
histogram,
command=command,
logarithmic=logarithmic,
raw_output=raw_output,
)
if histogram == "objsz":
histogram = "object-size-linear"
return self._collect_histogram_data(
histogram, command=command, logarithmic=logarithmic, raw_output=raw_output
)
@return_exceptions
def info_sindex(self):
return [
client_util.info_to_dict(v, ":")
for v in client_util.info_to_list(self.info("sindex"))
if v != ""
]
@return_exceptions
def info_sindex_statistics(self, namespace, indexname):
"""
Get statistics for a sindex.
Returns:
dict -- {stat_name : stat_value, ...}
"""
return client_util.info_to_dict(
self.info("sindex/%s/%s" % (namespace, indexname))
)
@return_exceptions
def info_sindex_create(
self, index_name, namespace, bin_name, bin_type, index_type=None, set_=None
):
command = "sindex-create:indexname={};".format(index_name)
if index_type:
command += "indextype={};".format(index_type)
command += "ns={};".format(namespace)
if set_:
command += "set={};".format(set_)
command += "indexdata={},{}".format(bin_name, bin_type)
resp = self.info(command)
if resp.lower() != ASINFO_RESPONSE_OK:
raise ASInfoError("Failed to create sindex {}".format(index_name), resp)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_sindex_delete(self, index_name, namespace, set_=None):
command = ""
if set_ is None:
command = "sindex-delete:ns={};indexname={}".format(namespace, index_name)
else:
command = "sindex-delete:ns={};set={};indexname={}".format(
namespace, set_, index_name
)
resp = self.info(command)
if resp.lower() != ASINFO_RESPONSE_OK:
raise ASInfoError("Failed to delete sindex {}".format(index_name), resp)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_build(self):
"""
Get Build Version
Returns:
string -- build version
"""
return self.info("build")
def _use_new_truncate_command(self):
"""
        The truncate-namespace and truncate-namespace-undo commands were added
        to some 4.3.x, 4.4.x, and 4.5.x releases, but not all.
"""
build = self.info_build()
for version_ in constants.SERVER_TRUNCATE_NAMESPACE_CMD_FIRST_VERSIONS:
if version_[1] is not None:
if version.LooseVersion(version_[0]) <= version.LooseVersion(
build
) and version.LooseVersion(build) < version.LooseVersion(version_[1]):
return True
else:
if version.LooseVersion(version_[0]) <= version.LooseVersion(build):
return True
return False
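    # Illustration (hypothetical version tuples): with ranges
    # [("4.3.1", "4.4"), ("4.5.1", None)], build "4.3.2" falls inside the first
    # bounded range and build "5.0.0" matches the open-ended one (both True),
    # while build "4.4.0" matches neither (False).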
@return_exceptions
def info_truncate(self, namespace, set_=None, lut=None):
"""
Truncate a namespace or set. If namespace and set are provided a set will be
truncated. Deletes every record in the namespace/set whose last update time (lut)
is older than the given time.
Returns: ASINFO_RESPONSE_OK on success and ASInfoError on failure
"""
req = None
error_message = None
if set_ is not None:
req = "truncate:namespace={};set={}".format(namespace, set_)
error_message = "Failed to truncate namespace {} set {}".format(
namespace, set_
)
else:
error_message = "Failed to truncate namespace {}".format(namespace)
if self._use_new_truncate_command():
req = "truncate-namespace:namespace={}".format(namespace)
else:
req = "truncate:namespace={}".format(namespace)
if lut is not None:
req += ";lut={}".format(lut)
resp = self.info(req)
if resp.lower() != ASINFO_RESPONSE_OK:
raise ASInfoError(error_message, resp)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_truncate_undo(self, namespace, set_=None):
"""
Undo truncation of a namespace or set.
Returns: ASINFO_RESPONSE_OK on success and ASInfoError on failure
"""
req = None
error_message = None
if set_ is not None:
req = "truncate-undo:namespace={};set={}".format(namespace, set_)
error_message = "Failed to undo truncation of namespace {} set {}".format(
namespace, set_
)
else:
error_message = "Failed to undo truncation of namespace {}".format(
namespace
)
if self._use_new_truncate_command():
req = "truncate-namespace-undo:namespace={}".format(namespace)
else:
req = "truncate-undo:namespace={}".format(namespace)
resp = self.info(req)
if resp.lower() != ASINFO_RESPONSE_OK:
raise ASInfoError(error_message, resp)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_recluster(self):
"""
Force the cluster to advance the cluster key and rebalance.
Returns: ASINFO_RESPONSE_OK on success and ASInfoError on failure
"""
resp = self.info("recluster:")
if resp.lower() != ASINFO_RESPONSE_OK:
raise ASInfoError("Failed to recluster", resp)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_quiesce(self):
"""
Cause a node to avoid participating as a replica after the next recluster event.
Quiescing and reclustering before removing a node from the cluster prevents
client timeouts that may otherwise happen when a node drops from the cluster.
Returns: ASINFO_RESPONSE_OK on success and ASInfoError on failure
"""
resp = self.info("quiesce:")
if resp.lower() != ASINFO_RESPONSE_OK:
raise ASInfoError("Failed to quiesce", resp)
return ASINFO_RESPONSE_OK
@return_exceptions
def info_quiesce_undo(self):
"""
Revert the effects of the quiesce on the next recluster event.
Returns: ASINFO_RESPONSE_OK on success and ASInfoError on failure
"""
resp = self.info("quiesce-undo:")
if resp.lower() != ASINFO_RESPONSE_OK:
raise ASInfoError("Failed to undo quiesce", resp)
return ASINFO_RESPONSE_OK
############################################################################
#
# Admin (Security Protocol) API
#
############################################################################
@util.logthis
def _admin_cadmin(self, admin_func, args, ip, port=None):
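        # Run `admin_func` (an ASSocket security method) over a pooled socket to
        # ip:port and return its result; the socket is returned to the pool on
        # success and closed on error or when the pool is full.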
if port is None:
port = self.port
result = None
sock = self._get_connection(ip, port)
if not sock:
raise IOError("Error: Could not connect to node %s" % ip)
try:
result = admin_func(sock, *args)
# Either restore the socket in the pool or close it if it is full.
if len(self.socket_pool[port]) < self.socket_pool_max_size:
sock.settimeout(None)
self.socket_pool[port].add(sock)
else:
sock.close()
except Exception:
if sock:
sock.close()
# Re-raise the last exception
raise
return result
@return_exceptions
def admin_create_user(self, user, password, roles):
"""
Create user.
user: string
password: string (un-hashed)
roles: list[string]
Returns: 0 (ASResponse.OK) on success, ASProtocolError on fail
"""
return self._admin_cadmin(
ASSocket.create_user, (user, password, roles), self.ip
)
@return_exceptions
def admin_delete_user(self, user):
"""
Delete user.
user: string
Returns: 0 (ASResponse.OK) on success, ASProtocolError on fail
"""
return self._admin_cadmin(ASSocket.delete_user, [user], self.ip)
@return_exceptions
def admin_set_password(self, user, password):
"""
Set user password.
user: string
password: string (un-hashed)
Returns: 0 (ASResponse.OK) on success, ASProtocolError on fail
"""
return self._admin_cadmin(ASSocket.set_password, (user, password), self.ip)
@return_exceptions
def admin_change_password(self, user, old_password, new_password):
"""
Change user password.
user: string
old_password: string (un-hashed)
new_password: string (un-hashed)
Returns: 0 (ASResponse.OK) on success, ASProtocolError on fail
"""
return self._admin_cadmin(
ASSocket.change_password, (user, old_password, new_password), self.ip
)
@return_exceptions
def admin_grant_roles(self, user, roles):
"""
Grant roles to user.
user: string
roles: list[string]
Returns: 0 (ASResponse.OK) on success, ASProtocolError on fail
"""
return self._admin_cadmin(ASSocket.grant_roles, (user, roles), self.ip)
@return_exceptions
def admin_revoke_roles(self, user, roles):
"""
Remove roles from user.
user: string
roles: list[string]
Returns: 0 (ASResponse.OK) on success, ASProtocolError on fail
"""
return self._admin_cadmin(ASSocket.revoke_roles, (user, roles), self.ip)
@return_exceptions
def admin_query_users(self):
"""
Query users.
Returns: {username1: [role1, role2, . . .], username2: [. . .], . . .},
ASProtocolError on fail
"""
return self._admin_cadmin(ASSocket.query_users, (), self.ip)
@return_exceptions
def admin_query_user(self, user):
"""
Query a user.
user: string
Returns: {username: [role1, role2, . . .]},
ASProtocolError on fail
"""
return self._admin_cadmin(ASSocket.query_user, [user], self.ip)
@return_exceptions
def admin_create_role(
self, role, privileges, whitelist=None, read_quota=None, write_quota=None
):
"""
Create role with privileges and whitelist.
role: string
privileges: list[string]
whitelist: list[string] (optional)
read_quota: (optional)
write_quota: (optional)
Returns: None on success, ASProtocolError on fail
"""
self._admin_cadmin(
ASSocket.create_role,
(role, privileges, whitelist, read_quota, write_quota),
self.ip,
)
@return_exceptions
def admin_delete_role(self, role):
"""
Delete role.
role: string
Returns: 0 on success, ASProtocolError on fail
"""
return self._admin_cadmin(ASSocket.delete_role, [role], self.ip)
@return_exceptions
def admin_add_privileges(self, role, privileges):
"""
Add privileges to role.
role: string
privileges: list[string]
Returns: 0 (ASResponse.OK) on success, ASProtocolError on fail
"""
return self._admin_cadmin(ASSocket.add_privileges, (role, privileges), self.ip)
@return_exceptions
def admin_delete_privileges(self, role, privileges):
"""
Delete privileges from role.
role: string
privileges: list[string]
Returns: 0 (ASResponse.OK) on success, ASProtocolError on fail
"""
return self._admin_cadmin(
ASSocket.delete_privileges, (role, privileges), self.ip
)
@return_exceptions
def admin_set_whitelist(self, role, whitelist):
"""
Set whitelist for a role.
role: string
whitelist: list[string]
Returns: 0 (ASResponse.OK) on success, ASProtocolError on fail
"""
return self._admin_cadmin(ASSocket.set_whitelist, (role, whitelist), self.ip)
@return_exceptions
def admin_delete_whitelist(self, role):
"""
Delete whitelist for a role.
role: string
Returns: 0 (ASResponse.OK) on success, ASProtocolError on fail
"""
return self._admin_cadmin(ASSocket.delete_whitelist, [role], self.ip)
@return_exceptions
def admin_set_quotas(self, role, read_quota=None, write_quota=None):
"""
        Set rate limits for a role. At least one of read_quota or write_quota
        should be provided; this requirement is enforced elsewhere.
        role: string
        read_quota: int or string that represents an int
        write_quota: int or string that represents an int
Returns: None on success, ASProtocolError on fail
"""
self._admin_cadmin(
ASSocket.set_quotas, (role, read_quota, write_quota), self.ip
)
@return_exceptions
def admin_delete_quotas(self, role, read_quota=False, write_quota=False):
"""
NOT IN USE
        Delete rate limits for a role. At least one of read_quota or write_quota
        should be provided; this requirement is enforced elsewhere.
role: string
read_quota: True to delete, False to leave alone
write_quota: True to delete, False to leave alone
Returns: None on success, ASProtocolError on fail
"""
self._admin_cadmin(
ASSocket.delete_quotas, (role, read_quota, write_quota), self.ip
)
@return_exceptions
def admin_query_roles(self):
"""
Query all roles.
        Returns: {role1: {'privileges': [privilege1, ...],
                          'whitelist': [addr1, addr2, ...]},
                  role2: {'privileges': [...], 'whitelist': [...]},
                  ...},
ASProtocolError on fail
"""
return self._admin_cadmin(ASSocket.query_roles, (), self.ip)
@return_exceptions
def admin_query_role(self, role):
"""
Query a role.
role: string
        Returns: {role: {'privileges': [privilege1, ...],
                         'whitelist': [addr1, addr2, ...]}},
ASProtocolError on fail
"""
return self._admin_cadmin(ASSocket.query_role, [role], self.ip)
############################################################################
#
# System Commands
#
############################################################################
def _set_default_system_credentials(
self,
default_user=None,
        default_pwd=None,
default_ssh_key=None,
default_ssh_port=None,
credential_file=None,
):
if default_user:
self.sys_default_user_id = default_user
if default_pwd:
            self.sys_default_pwd = default_pwd
if default_ssh_key:
self.sys_default_ssh_key = default_ssh_key
self.sys_credential_file = None
if credential_file:
self.sys_credential_file = credential_file
if default_ssh_port:
try:
self.sys_default_ssh_port = int(default_ssh_port)
except Exception:
pass
def _set_system_credentials_from_file(self):
if not self.sys_credential_file:
return False
result = False
f = None
try:
try:
f = open(self.sys_credential_file, "r")
except IOError as e:
self.logger.warning(
"Ignoring credential file. cannot open credential file. \n%s."
% (str(e))
)
return result
for line in f.readlines():
if not line or not line.strip():
continue
try:
line = line.strip().replace("\n", " ").strip().split(",")
if len(line) < 2:
continue
ip = None
port = None
ip_port = line[0].strip()
if not ip_port:
continue
if "]" in ip_port:
# IPv6
try:
ip_port = ip_port[1:].split("]")
ip = ip_port[0].strip()
if len(ip_port) > 1:
# Removing ':' from port
port = int(ip_port[1].strip()[1:])
except Exception:
pass
else:
# IPv4
try:
ip_port = ip_port.split(":")
ip = ip_port[0]
if len(ip_port) > 1:
port = int(ip_port[1].strip())
except Exception:
pass
if ip and self._is_any_my_ip([ip]):
self.sys_user_id = line[1].strip()
try:
self.sys_pwd = line[2].strip()
self.sys_ssh_key = line[3].strip()
except Exception:
pass
self.sys_ssh_port = port
result = True
break
except Exception:
pass
except Exception as e:
self.logger.warning("Ignoring credential file.\n%s." % (str(e)))
finally:
if f:
f.close()
return result
def _clear_sys_credentials(self):
self.sys_ssh_port = None
self.sys_user_id = None
self.sys_pwd = None
self.sys_ssh_key = None
def _set_system_credentials(self):
self._clear_sys_credentials()
        credentials_set = self._set_system_credentials_from_file()
        if credentials_set:
return
self.sys_user_id = self.sys_default_user_id
        self.sys_pwd = self.sys_default_pwd
self.sys_ssh_key = self.sys_default_ssh_key
self.sys_ssh_port = self.sys_default_ssh_port
@return_exceptions
def _get_localhost_system_statistics(self, commands):
sys_stats = {}
self.logger.debug(
("%s._get_localhost_system_statistics cmds=%s"),
(
self.ip,
commands,
),
stackinfo=True,
)
for _key, ignore_error, cmds in self.sys_cmds:
if _key not in commands:
continue
for cmd in cmds:
self.logger.debug(
("%s._get_localhost_system_statistics running cmd=%s"),
(
self.ip,
cmd,
),
stackinfo=True,
)
o, e = util.shell_command([cmd])
if (e and not ignore_error) or not o:
continue
try:
full_parser.parse_system_live_command(_key, o, sys_stats)
except Exception:
pass
break
return sys_stats
@return_exceptions
def _login_remote_system(self, ip, user, pwd, ssh_key=None, port=None):
s = pxssh.pxssh()
s.force_password = True
s.SSH_OPTS = "-o 'NumberOfPasswordPrompts=1'"
s.login(ip, user, pwd, ssh_key=ssh_key, port=port)
return s
@return_exceptions
def _create_ssh_connection(self, ip, user, pwd, ssh_key=None, port=None):
if user is None and pwd is None and ssh_key is None:
raise Exception("Insufficient credentials to connect.")
if PEXPECT_VERSION == PXSSH_NEW_MODULE:
return self._login_remote_system(ip, user, pwd, ssh_key, port)
return None
@return_exceptions
def _execute_remote_system_command(self, conn, cmd):
if not conn or not cmd or PEXPECT_VERSION == PXSSH_NO_MODULE:
return None
conn.sendline(cmd)
if PEXPECT_VERSION == PXSSH_NEW_MODULE:
conn.prompt()
else:
return None
return conn.before
@return_exceptions
def _execute_system_command(self, conn, cmd):
out = self._execute_remote_system_command(conn, cmd)
status = self._execute_remote_system_command(conn, "echo $?")
status = status.split("\r\n")
status = status[1].strip() if len(status) > 1 else status[0].strip()
try:
status = int(status)
except Exception:
status = 1
return status, out
@return_exceptions
def _stop_ssh_connection(self, conn):
if not conn or PEXPECT_VERSION == PXSSH_NO_MODULE:
return
if PEXPECT_VERSION == PXSSH_NEW_MODULE:
conn.logout()
if conn:
conn.close()
self.remote_system_command_prompt = "[#$] "
@return_exceptions
def _get_remote_host_system_statistics(self, commands):
sys_stats = {}
if PEXPECT_VERSION == PXSSH_NO_MODULE:
self.logger.warning(
"Ignoring system statistics collection from node %s. No module named pexpect."
% (str(self.ip))
)
return sys_stats
sys_stats_collected = False
self._set_system_credentials()
max_tries = 1
tries = 0
while tries < max_tries and not sys_stats_collected:
tries += 1
s = None
try:
s = self._create_ssh_connection(
self.ip,
self.sys_user_id,
self.sys_pwd,
self.sys_ssh_key,
self.sys_ssh_port,
)
if not s:
raise Exception("Wrong credentials to connect.")
if isinstance(s, Exception):
raise s
except Exception as e:
if tries >= max_tries:
self.logger.warning(
"Ignoring system statistics collection. Couldn't make SSH login to remote server %s:%s. \n%s"
% (
str(self.ip),
"22"
if self.sys_ssh_port is None
else str(self.sys_ssh_port),
str(e),
)
)
continue
try:
for _key, _, cmds in self.sys_cmds:
if _key not in commands:
continue
for cmd in cmds:
try:
status, o = self._execute_system_command(s, cmd)
if status or not o or isinstance(o, Exception):
continue
full_parser.parse_system_live_command(_key, o, sys_stats)
break
except Exception:
pass
sys_stats_collected = True
self._stop_ssh_connection(s)
except Exception as e:
if tries >= max_tries:
self.logger.error(
"Ignoring system statistics collection. Couldn't get or parse remote system stats for remote server %s:%s. \n%s"
% (
str(self.ip),
"22"
if self.sys_ssh_port is None
else str(self.sys_ssh_port),
str(e),
)
)
finally:
if s and not isinstance(s, Exception):
s.close()
return sys_stats
@return_exceptions
def info_system_statistics(
self,
default_user=None,
        default_pwd=None,
default_ssh_key=None,
default_ssh_port=None,
credential_file=None,
commands=[],
collect_remote_data=False,
):
"""
Get statistics for a system.
Returns:
dict -- {stat_name : stat_value, ...}
"""
self.logger.debug(
(
"%s.info_system_statistics default_user=%s default_pws=%s"
"default_ssh_key=%s default_ssh_port=%s credential_file=%s"
"commands=%s collect_remote_data=%s"
),
(
self.ip,
default_user,
default_pwd,
default_ssh_key,
default_ssh_port,
credential_file,
commands,
collect_remote_data,
),
stackinfo=True,
)
if commands:
cmd_list = copy.deepcopy(commands)
else:
cmd_list = [_key for _key, _, _ in self.sys_cmds]
if self.localhost:
return self._get_localhost_system_statistics(cmd_list)
if collect_remote_data:
self._set_default_system_credentials(
default_user,
default_pwd,
default_ssh_key,
default_ssh_port,
credential_file,
)
return self._get_remote_host_system_statistics(cmd_list)
return {}
############################################################################
#
# Configuration
#
############################################################################
@return_exceptions
def config_subcontext(self, context, dynamic=True):
return self.conf_schema_handler.get_subcontext(context)
@return_exceptions
def config_params(self, context, dynamic=True):
return self.conf_schema_handler.get_params(context, dynamic=dynamic)
@return_exceptions
def config_types(self, context, params):
return self.conf_schema_handler.get_types(context, params)
@return_exceptions
def config_type(self, context, param):
param_dict = self.conf_schema_handler.get_types(context, param)
return param_dict[param]
| StarcoderdataPython |
3300363 | import os, csv
# Read a .csv file from the given path and return a flat list of its values
# together with the list of column headers.
def RCSV(address):
    with open(address, 'r') as csv_file:
        csv_reader = csv.DictReader(csv_file, delimiter=',', quotechar='"')
        headers = csv_reader.fieldnames
        values = []
        for line in csv_reader:
            for field in headers:
                values.append(line[field])
    return values, headers
# Write a .csv file to the given path: a header row followed by the rows in
# `output` (to be improved).
def WCSV(address, output, headers):
    with open(address, 'w') as csv_file:
        writer = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
        writer.writerow(headers)
        writer.writerows(output)
output, headers = RCSV(r"Input file.csv")
WCSV(r"Output file.csv", output, headers) | StarcoderdataPython |
1790059 | # -*- coding:UTF-8 -*-
##
# | file : main.py
# | version : V1.0
# | date : 2017-12-08
# | function : 1.5inch OLED
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documnetation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS OR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import DEV_Config
import OLED_Driver
import Image
import ImageDraw
import ImageFont
import ImageColor
#try:
def main():
OLED = OLED_Driver.OLED()
print "**********Init OLED**********"
OLED_ScanDir = OLED_Driver.SCAN_DIR_DFT #SCAN_DIR_DFT = D2U_L2R
OLED.OLED_Init(OLED_ScanDir)
#OLED.OLED_Clear()
DEV_Config.Driver_Delay_ms(2000)
image = Image.new("L", (OLED.OLED_Dis_Column, OLED.OLED_Dis_Page), 0)# grayscale (luminance)
draw = ImageDraw.Draw(image)
#font = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf', "White")
print "***draw line"
draw.line([(0,0),(127,0)], fill = "White",width = 1)
draw.line([(127,0),(127,60)], fill = "White",width = 1)
draw.line([(127,60),(0,60)], fill = "White",width = 1)
draw.line([(0,60),(0,0)], fill = "White",width = 1)
print "***draw rectangle"
draw.rectangle([(18,10),(110,20)],fill = "White")
print "***draw text"
draw.text((33, 22), 'WaveShare ', fill = "White")
draw.text((32, 36), 'Electronic ', fill = "White")
draw.text((28, 48), '1.44inch OLED ', fill = "White")
OLED.OLED_ShowImage(image,0,0)
DEV_Config.Driver_Delay_ms(500)
    image = Image.open('flower.bmp')  # this picture is small; it may trigger an exception, but it will still display
OLED.OLED_ShowImage(image,0,70)
#while (True):
if __name__ == '__main__':
main()
#except:
# print("except")
# GPIO.cleanup() | StarcoderdataPython |
1725478 | from .visualize import (
hyperopt_viz,
compare_performance_viz,
learning_curves_viz,
)
| StarcoderdataPython |
1735293 | <reponame>MarcinStachowiak/CIFAR-10_challange
import cifar10_manager
import dim_reduction_service
import image_service
import inceptionv3_manager
import metrics_service
from classification_service import EnsembleVotingModel
from classification_service import NeuralNetworkModel
from feature_service import FearureImageExtractor
__author__ = "<NAME>"
__version__ = "1.0"
__email__ = "<EMAIL>"
# If True, features will be calculated with the Inception v3 model and transfer learning.
# If False, features will be calculated with the Haralick, Zernike and Local Binary Patterns methods.
transfer_values_features = True
# If True, the classification model will be an ensemble of SVM and LogisticRegression with boosting and voting.
# If False, the classification model will be a Multilayer Perceptron (Neural Network).
use_ensemble = True
# Downloading or loading CIFAR 10 dataset.
data = cifar10_manager.download_or_load_CIFAR10('data')
# Plotting random images for each class.
dict = cifar10_manager.build_dictionary_with_images_per_class(data.train_x, data.train_y_cls, 10)
image_service.plot_images_per_class(dict, cifar10_manager.get_class_names())
# Feature extraction
feature_extractor = FearureImageExtractor(data.train_x, data.test_x)
if transfer_values_features:
# Using transfer leatning and Inception v3 model
model = inceptionv3_manager.download_or_load_Inceptionv3('inception')
(features_train_x, features_test_x) = feature_extractor.perform_transfer_values_extraction(model)
else:
    # Using texture features: Haralick, Zernike and Local Binary Patterns
(features_train_x, features_test_x) = feature_extractor.perform_texture_features_extraction()
# Plotting features on a two-dimensional chart after applying the PCA and TSNE reduction methods
dim_reduction_service.reduce_dim_PCA(features_train_x, data.train_y_cls, cifar10_manager.get_class_names(),
visualise=True)
dim_reduction_service.reduce_dim_TSNE(features_train_x, data.train_y_cls, cifar10_manager.get_class_names(),
visualise=True)
# Classification
if use_ensemble:
# Using ensemble: Boosting and Voting
voting_model = EnsembleVotingModel(features_train_x, data.train_y_cls) \
.with_SVM_model() \
.with_RandomForest_AdaBoost_model(5)\
.with_LogisticRegression_AdaBoost_model(5)\
.train()
predicted_cls = voting_model.predict(features_test_x)
else:
    # Using a Multilayer Perceptron (Neural Network)
model=NeuralNetworkModel(features_train_x,data.train_y).train()
predicted_cls = model.predict(features_test_x)
# Metrics calculation
metrics_service.print_full_metrics(data.test_y_cls, predicted_cls)
| StarcoderdataPython |
3393730 | """
This module produces the strain versus strain rate populations, with bivariate
histograms.
Example:
> cd ~/sibl/cli/process/exodus
> conda activate siblenv
> python visualization.py
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
# import pandas as pd
import seaborn as sns
np.random.seed(0)
sns.set(style="white", color_codes=True)
EXEMPLAR = 0 # turn on or off the exemplar problem
TEST = 0 # turn on or off Bob test with small data set
TRANSLATION = (
1 # turns on or off translational case (Bob-063f), else does rotation (Bob-066b)
)
INJURY_0 = 0 # turn on or off cellular injury curve, original
INJURY_1 = 1 # updated Summey injury curves
FIG_NAME = os.path.basename(__file__).split(".")[0] # remove the .py extension
FIG_FORMAT = "png" # "pdf" or "png", but "tiff" doesn't look good
DPI = 600
LATEX = 1
SERIALIZE = 1 # turn on or off write figure to disk
# sns.axes_style("darkgrid")
sns.set(style="darkgrid")
bbox_props = dict(boxstyle="square, pad=0.2", fc="white", ec="black", lw=1)
if LATEX:
# rc('font', **{'family': 'serif', 'serif': ['Computer Modern Roman']})
rc("text", usetex=True)
rc("font", family="serif")
# matplotlib.rcParams.update({'font.size': 22})
# rcParams.update({"font.size": 16})
# 2021-05-10: Increase base font size. Process for smaller pdf files:
# 1. Generate original pdf file (about 19 MB).
# 2. Open original pdf file in Preview, save as tiff, at 600 dpi (about 56 MB)
# 3. Open tiff, export as pdf (results in 1.7 MB)
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIG_SIZE = 14
plt.rc("font", size=BIG_SIZE) # controls default text sizes
# plt.rc("axes", titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc("axes", labelsize=BIG_SIZE) # fontsize of the x and y labels
plt.rc("xtick", labelsize=BIG_SIZE) # fontsize of the tick labels
plt.rc("ytick", labelsize=BIG_SIZE) # fontsize of the tick labels
plt.rc("legend", fontsize=BIG_SIZE) # legend fontsize
# plt.rc("figure", titlesize=BIGGER_SIZE) # fontsize of the figure title
def cell_death_strain_rate_to_strain(x):
# The Summey cell death curve used for production-ready figures
y_cell_death = 0.128 * x ** (-0.156)
return y_cell_death
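# Example: at the translational 95th-percentile strain rate of ~26.6 1/s, the
# cell-death threshold strain is 0.128 * 26.6 ** (-0.156), approximately 0.0767.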
# Exemplar joint distribution plot - begin
if EXEMPLAR:
tips = sns.load_dataset("tips")
tip_data = np.array(tips["tip"])
bill_data = np.array(tips["total_bill"])
# legend_txt = 'hello'
# legend_properties = {'weight': 'bold', 'size': 12}
g = sns.JointGrid(x=bill_data, y=tip_data)
# g = g.plot_joint(plt.scatter, s=10, linewidths=0.05, edgecolors='blue', marker='o', alpha=0.3, label=legend_txt)
g = g.plot_joint(
plt.scatter, s=10, linewidths=0.05, edgecolors="blue", marker="o", alpha=0.3
)
_ = g.ax_marg_x.hist(bill_data, color="b", bins=np.arange(0, 60, 5))
_ = g.ax_marg_y.hist(
tip_data, color="g", orientation="horizontal", bins=np.arange(0, 12, 1)
)
# _ = g.ax_joint.legend(prop=legend_properties, loc='upper left')
_ = g.ax_joint.text(20, 10, "hello", ha="left", va="bottom", bbox=bbox_props)
axis_txt = f"exemplar"
plt.xlabel("total bill")
plt.ylabel("tip")
plt.show()
# Exemplar joint distribution plot - end
else:
# -------------------------------- ##
    # Client application initialization - begin
    script_pth = os.getcwd()
    # Client application initialization - end
# -------------------------------- ##
if TEST:
simulation_path = "." # here, in same location as visualization.py
idx = 0 # index for the probes
probes = {
"steps": [0],
"time": [0.00],
"strain_p95": [0.015],
"strain_rate_p95": [30],
}
axis_txt = f'time = {probes["time"][idx]*1000:.3f} ms (Bob-TEST-1000-pts)'
blocks = [7]
labels = ["white matter"]
colors = ["C1"] # white plotted as orange, gray -> green
strain_files = [["test_ebe_max_principal_log_strain_51_small.txt"]]
strain_rate_files = [
["test_ebe_max_principal_rate_of_deformation_51_small.txt"]
]
marker_dict = {"linestyle": "", "marker": ".", "markersize": 10, "alpha": 0.2}
else:
# not Bob TEST data subset, is the actual full data set, either translation or rotation
# block 7 is white matter is 504,505 data points
# block 8 is gray matter is 790,102 data points
# combined white + gray = 1,294,607 data points
# markers are very small and light to cope with the large data set
marker_dict = {"linestyle": "", "marker": ",", "markersize": 0.7, "alpha": 0.2}
blocks = [7, 8]
labels = ["white matter", "gray matter"]
colors = ["C1", "C2"] # white plotted as orange, gray -> green
if TRANSLATION:
# relative to this script, location of the particular simulation
simulation_path = (
"../../../../casco_sim/bob-1mm-5kg-helmet2-0305-hemi-063f/"
)
idx = 0 # index for the probes
probes = {
"steps": [30, 51, 57],
"time": [
0.00580000428262166,
0.010000030740917116,
0.011200009903610695,
],
"strain_p95": [
0.013038920686082887,
0.007864328738051788,
0.009356105757136385,
],
"strain_rate_p95": [
26.62451150429535,
45.64035758617126,
47.167653798895905,
],
}
# axis_txt = f'time = {probes["time"][idx]*1000:.3f} ms (Bob-063f)'
axis_txt = f'time = {probes["time"][idx]*1000:.2f} ms'
strain_files = [
[
"ts_30_block_7_max_principal_green_lagrange_strain.txt",
"ts_30_block_8_max_principal_green_lagrange_strain.txt",
],
[
"ts_51_block_7_max_principal_green_lagrange_strain.txt",
"ts_51_block_8_max_principal_green_lagrange_strain.txt",
],
[
"ts_57_block_7_max_principal_green_lagrange_strain.txt",
"ts_57_block_8_max_principal_green_lagrange_strain.txt",
],
]
strain_rate_files = [
[
"ts_30_block_7_max_principal_green_lagrange_strain_rate.txt",
"ts_30_block_8_max_principal_green_lagrange_strain_rate.txt",
],
[
"ts_51_block_7_max_principal_green_lagrange_strain_rate.txt",
"ts_51_block_8_max_principal_green_lagrange_strain_rate.txt",
],
[
"ts_57_block_7_max_principal_green_lagrange_strain_rate.txt",
"ts_57_block_8_max_principal_green_lagrange_strain_rate.txt",
],
]
else: # not a TRANSLATION, then the rotation case
simulation_path = (
"../../../../casco_sim/bob-1mm-5kg-helmet2-0305-hemi-066b/"
)
idx = 1 # index for the probes
probes = {
"steps": [43, 69],
"time": [0.00840000000000000, 0.013600000000000000],
"strain_p95": [0.021800000000000000, 0.056370000000000000],
"strain_rate_p95": [10.60000000000000, 5.190000000000000],
}
# axis_txt = f'time = {probes["time"][idx]*1000:.3f} ms (Bob-066b)'
axis_txt = f'time = {probes["time"][idx]*1000:.1f} ms'
strain_files = [
["max_principal_green_lagrange_strain_ts_43.csv"],
["max_principal_green_lagrange_strain_ts_69.csv"],
]
strain_rate_files = [
["max_principal_green_lagrange_strain_rate_ts_43.csv"],
["max_principal_green_lagrange_strain_rate_ts_69.csv"],
]
# User Input Deck, simulation-specific input - end
# -------------------------------- ##
# fig, ax = plt.subplots(figsize=(8,8))
# ax.set_aspect("equal")
strain = np.array([])
strain_rate = np.array([])
# for i, (s, sr) in enumerate(zip(strain_files, strain_rate_files)):
for s, sr in zip(
strain_files[idx], strain_rate_files[idx]
): # collect over all blocks
block_strain = np.genfromtxt(os.path.join(simulation_path, s))
block_strain_rate = np.genfromtxt(os.path.join(simulation_path, sr))
strain = np.concatenate((strain, block_strain))
strain_rate = np.concatenate((strain_rate, block_strain_rate))
g = sns.JointGrid(x=strain_rate, y=strain)
# g = g.plot_joint(plt.plot, linestyle='', marker=',', markersize=0.7, alpha=0.2)
g = g.plot_joint(plt.plot, **marker_dict)
exp_min = -1 # x-domain minimum 10^exp_min
exp_max = 3 # x-domain maximum 10^exp_max
npts = 24 # number of points
strain_rate_095th = np.percentile(strain_rate, 95.0) # 95th percentile strain rate
x_bins = np.logspace(exp_min, exp_max, 2 * npts)
_ = g.ax_marg_x.hist(strain_rate, bins=x_bins)
strain_095th = np.percentile(strain, 95.0) # 95th percentile strain
strain_min = np.amin(strain)
strain_max = np.amax(strain)
y_bins = np.linspace(strain_min, strain_max, npts)
_ = g.ax_marg_y.hist(strain, orientation="horizontal", bins=y_bins)
g.ax_joint.set_xscale("log")
g.ax_marg_x.set_xscale("log")
g.ax_joint.set_xlim([0.01, 10000])
# g.ax_joint.set_xlim([10**exp_min, 10**exp_max])
g.ax_joint.set_ylim([-0.02, 0.10])
# g.ax_joint.text(0.02, 0.09, axis_txt, ha='left', va='bottom', bbox=bbox_props)
time_label_x = 0.02 # strain rate
time_label_y = -0.015 # strain
g.ax_joint.text(
time_label_x, time_label_y, axis_txt, ha="left", va="bottom", bbox=bbox_props
)
# draw 95th percentile boundaries
line_prop = dict(color="orange", linewidth=1)
# vertical line on joint plot
g.ax_joint.plot(
[strain_rate_095th, strain_rate_095th], g.ax_joint.get_ylim(), **line_prop
)
# horizontal line on the joint plot
g.ax_joint.plot(g.ax_joint.get_xlim(), [strain_095th, strain_095th], **line_prop)
# vertical line across marginal strain rate plot
y0_log_sr, y1_log_sr = g.ax_marg_x.get_ylim()
g.ax_marg_x.plot(
[strain_rate_095th, strain_rate_095th], [y0_log_sr, y1_log_sr], **line_prop
)
# marginal strain rate text
if TRANSLATION:
# strain_rate_txt = r" 95\% = " + str(round(strain_rate_095th, 1)) # 26.6
strain_rate_txt = "{:.{}f}".format(strain_rate_095th, 1) # 26.6
else: # then rotation
# strain_rate_txt = r" 95\% = " + str(round(strain_rate_095th, 2)) # 5.2, not 5.20 as desired
strain_rate_txt = "{:.{}f}".format(strain_rate_095th, 2) # 5.20
# g.ax_marg_x.text(strain_rate_095th, (y0_log_sr + y1_log_sr) / 2.0, ' 95% = ' + str(round(strain_rate_095th, 1)), ha='left', va='bottom')
g.ax_marg_x.text(
strain_rate_095th,
(y0_log_sr + y1_log_sr) / 2.0,
r" 95\% = " + strain_rate_txt,
ha="left",
va="bottom",
)
# horizontal line on the marginal strain plot
x0_strain, x1_strain = g.ax_marg_y.get_xlim()
g.ax_marg_y.plot([x0_strain, x1_strain], [strain_095th, strain_095th], **line_prop)
# marginal strain text
if TRANSLATION:
# strain_txt = r"95\% = " + str(round(strain_095th, 4)) # 0.0130
strain_txt = "{:.{}f}".format(strain_095th, 4) # 0.0130
else: # then rotation
# strain_txt = r"95\% = " + str(round(strain_095th, 4)) # 0.0564
strain_txt = "{:.{}f}".format(strain_095th, 4) # 0.0564
g.ax_marg_y.text(
(x0_strain + x1_strain) / 2.0,
strain_095th,
# strain_txt,
r" 95\% = " + strain_txt,
ha="center",
va="bottom",
)
# 2021-05-10: These seem not to work with new library, so just accept defaults.
# g.ax_joint.grid(color="gray")
# # g.ax_joint.grid(color="red")
# # g.ax_joint(grid_color="red")
# g.ax_marg_x.grid(color="green", axis="x")
# g.ax_marg_y.grid(color="gray", axis="y")
# plt.xlabel("max(eig(GL strain rate)) (1/s)")
plt.xlabel("maximum principal strain rate (1/s)")
# plt.ylabel("max(eig(GL strain)) (cm/cm)")
plt.ylabel("maximum principal strain (cm/cm)")
if INJURY_0 or INJURY_1:
exp_min = -2 # x-domain minimum 10^exp_min
exp_max = 4 # x-domain maximum 10^exp_max
npts = 100 # number of points
# x = np.linspace(-4, 4, npts)
x = np.logspace(exp_min, exp_max, npts)
# injury curves
if INJURY_0:
# pathway-induced injury
# y_pathway = 0.2589 * np.arctan(-0.5789 * np.log(10**x) - 1.83) + 0.4192
y_pathway = 0.2589 * np.arctan(-0.5789 * np.log(x) - 1.83) + 0.4192
# mechanical injury
# y_mechanical = 0.345 * np.arctan(-0.2923 * np.log(10**x) - 0.1617) + 0.5033
y_mechanical = 0.345 * np.arctan(-0.2923 * np.log(x) - 0.1617) + 0.5033
g.ax_joint.plot(
x,
y_pathway,
linestyle="--",
color="green",
linewidth=2,
alpha=0.8,
label="pathway induced injury",
)
# g.ax_joint.legend()
g.ax_joint.legend(loc="upper right")
if INJURY_1:
# y_cell_death = 0.128 * x ** (-0.156)
y_cell_death = cell_death_strain_rate_to_strain(x)
g.ax_joint.plot(
x,
y_cell_death,
linestyle="--",
color="black",
linewidth=2,
alpha=0.8,
label="cell death",
)
g.ax_joint.legend(loc="upper right")
x_intercept = probes["strain_rate_p95"][idx] # strain rate 10^x
y_intercept = cell_death_strain_rate_to_strain(x_intercept) # strain
y_intercept_txt = "{:.{}f}".format(
y_intercept, 4
) # 0.0767 trans, 0.0990 for rot
x_offset = 0 # strain rate
y_offset = 0.005 # strain
# strain_rate_txt = r" 95\% = " + str(round(strain_rate_095th, 1))
# intercept_txt = f"( {x_intercept}, {y_intercept})"
intercept_txt = "(" + strain_rate_txt + ", " + y_intercept_txt + ")"
# intercept_txt = "(x, y)"
# g.ax_joint.text(0.02, -0.015, axis_txt, ha="left", va="bottom", bbox=bbox_props)
# g.ax_joint.annotate(
# intercept_txt,
# xy=(x_intercept, y_intercept),
# xycoords="data",
# xytext=(x_intercept + x_offset, y_intercept + y_offset),
# )
intercept_label_x = time_label_x # strain rate
intercept_label_y = 0.08 # strain
g.ax_joint.annotate(
intercept_txt,
xy=(x_intercept, y_intercept),
xycoords="data",
xytext=(intercept_label_x + x_offset, intercept_label_y + y_offset),
# arrowprops=dict(facecolor="black", arrowstyle="->"),
arrowprops=dict(facecolor="black", arrowstyle="simple"),
horizontalalignment="left",
verticalalignment="top",
)
plt.show()
if SERIALIZE:
title_string = FIG_NAME + "_" + axis_txt + "." + FIG_FORMAT
g.savefig(title_string, dpi=DPI, bbox_inches="tight") # avoid cutoff of labels
print("Figure was saved to: " + os.path.join(os.getcwd(), title_string))
| StarcoderdataPython |
3259395 | <reponame>xjh093/LearnPythonTheHardWay<filename>pdf/examples/ex4.py
#! /usr/bin/python3
# p25
cars = 100
space_in_a_car = 4.0
drivers = 30
passengers = 90
cars_not_driver = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_a_car
average_passengers_per_car = passengers / cars_driven
print("There are",cars,"cars available.")
print("There are only",drivers,"drivers available.")
print("There will be",cars_not_driver,"empty cars today.")
print("We can transport",carpool_capacity,"people today.")
print("We have",passengers,"to carpoll today.")
print("We need to put about",average_passengers_per_car,"in each car.")
| StarcoderdataPython |
3228575 | <filename>nlp_utils/Augmentation/SequenceAugmentation.py
from typing import List
from itertools import compress
from .BaseAugmentation import MixinAugmentation
class _RandomTruncate(MixinAugmentation):
def __init__(self, min_length: int = 128, max_length: int = 256, random_seed: int = 42, threshold: float = .5):
super().__init__(random_seed=random_seed, threshold=threshold)
self.min_length: int = min_length
self.max_length: int = max_length
def _do_truncate(self, x, length: int):
raise NotImplementedError
def transform(self, x: List[str]) -> List[str]:
len_x: int = len(x)
if len_x <= self.min_length:
return x
if self._active_augmentation:
seq = (self.min_length, min(len_x, self.max_length))
length = self.rng.randint(min(seq), max(seq))
return self._do_truncate(x, length)
return x
class RandomTruncateHead(_RandomTruncate):
def __init__(self, min_length: int = 128, max_length: int = 256, random_seed: int = 42, threshold: float = .5):
super().__init__(min_length=min_length, max_length=max_length, random_seed=random_seed, threshold=threshold)
def _do_truncate(self, x, length: int):
return x[-length:]
class RandomTruncateTail(_RandomTruncate):
def __init__(self, min_length: int = 128, max_length: int = 256, random_seed: int = 42, threshold: float = .5):
super().__init__(min_length=min_length, max_length=max_length, random_seed=random_seed, threshold=threshold)
def _do_truncate(self, x, length: int):
return x[:length]
class RandomDropWords(MixinAugmentation):
def __init__(
self, min_length: int = 1, max_drop: int = 5, drop_rate: float = .1, random_seed: int = 42,
threshold: float = .5):
super().__init__(random_seed=random_seed, threshold=threshold)
self.min_length: int = min_length
self.max_drop: int = max_drop
self.drop_rate: float = drop_rate
def transform(self, x: List[str]) -> List[str]:
len_x: int = len(x)
if len_x < self.min_length:
return x
if self._active_augmentation:
max_drop = min(max(0, len_x - self.min_length), max(int(self.drop_rate * len_x), self.max_drop))
if max_drop < 1:
return x
mask = self._get_mask(len_x, max_drop)
x = list(compress(x, mask))
return x
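# Rough usage sketch (illustrative only; the RNG / threshold behaviour lives in MixinAugmentation,
# which is not shown here):
#
#   truncate = RandomTruncateHead(min_length=128, max_length=256, threshold=0.5)
#   dropper = RandomDropWords(max_drop=5, drop_rate=0.1, threshold=0.5)
#   tokens = "an already tokenised sentence".split()
#   tokens = dropper.transform(truncate.transform(tokens))
#
# Each transform only mutates the token list when the inherited _active_augmentation check fires,
# so augmentation is applied stochastically according to the configured threshold.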
| StarcoderdataPython |
3289699 | import asgineer
@asgineer.to_asgi
async def app(request):
if request.method == "GET":
if request.path == "/":
return ""
if request.path.startswith("/user/"):
return await output_second_param(request.path)
elif request.method == "POST":
return ""
else:
return 404, {}, f"404 not found {request.path}"
async def output_second_param(path):
params = path.split("/")
return params[2]
| StarcoderdataPython |
61759 | <reponame>anyboby/ConstrainedMBPO
import numpy as np
from softlearning.policies.safe_utils.mpi_tools import mpi_statistics_scalar
from softlearning.policies.safe_utils.utils import *
from softlearning.replay_pools.cpobuffer import CPOBuffer
import scipy.signal
class ModelBuffer(CPOBuffer):
def __init__(self, batch_size, env, max_path_length, ensemble_size,
rollout_mode=False,
cares_about_cost = False,
max_uncertainty_r = 5.5,
max_uncertainty_c = 5.5,
*args,
**kwargs,
):
self.max_path_length = max_path_length
self.batch_size = batch_size
self.env = env
self.obs_shape = self.env.observation_space.shape
self.act_shape = self.env.action_space.shape
self.pi_info_shapes = None
self.ensemble_size = ensemble_size
self.model_ind = np.random.randint(ensemble_size)
self.rollout_mode = rollout_mode
self.reset()
self.cares_about_cost = cares_about_cost
self.max_uncertainty_r = max_uncertainty_r
self.max_uncertainty_c = max_uncertainty_c
''' initialize policy dependendant pi_info shapes, gamma, lam etc.'''
def initialize(self, pi_info_shapes,
gamma=0.99, lam = 0.95,
cost_gamma = 0.99, cost_lam = 0.95,
):
self.pi_info_shapes = pi_info_shapes
self.pi_info_bufs = {k: np.zeros(shape=[self.ensemble_size, self.batch_size]+[self.max_path_length] + list(v), dtype=np.float32)
for k,v in pi_info_shapes.items()}
self.sorted_pi_info_keys = keys_as_sorted_list(self.pi_info_bufs)
self.gamma, self.lam = gamma, lam
self.cost_gamma, self.cost_lam = cost_gamma, cost_lam
def reset(self, batch_size=None , dynamics_normalization=1):
if batch_size is not None:
self.batch_size = batch_size
if self.rollout_mode=='iv_gae':
obs_buf_shape = combined_shape(self.ensemble_size, combined_shape(self.batch_size, combined_shape(self.max_path_length, self.obs_shape)))
act_buf_shape = combined_shape(self.ensemble_size, combined_shape(self.batch_size, combined_shape(self.max_path_length, self.act_shape)))
ens_scalar_shape = (self.ensemble_size, self.batch_size, self.max_path_length)
else:
obs_buf_shape = combined_shape(self.batch_size, combined_shape(self.max_path_length, self.obs_shape))
act_buf_shape = combined_shape(self.batch_size, combined_shape(self.max_path_length, self.act_shape))
ens_scalar_shape = (self.batch_size, self.max_path_length)
single_scalar_shape = (self.batch_size, self.max_path_length)
self.obs_buf = np.zeros(obs_buf_shape, dtype=np.float32)
self.act_buf = np.zeros(act_buf_shape, dtype=np.float32)
self.dyn_error_buf = np.zeros(single_scalar_shape, dtype=np.float32)
self.nextobs_buf = np.zeros(obs_buf_shape, dtype=np.float32)
self.adv_buf = np.zeros(single_scalar_shape, dtype=np.float32)
        self.ret_var_buf = np.zeros(single_scalar_shape, dtype=np.float32) ## epistemic value variance
self.roll_lengths_buf = np.zeros(single_scalar_shape, dtype=np.float32)
self.rew_buf = np.zeros(ens_scalar_shape, dtype=np.float32)
self.rew_path_var_buf = np.zeros(ens_scalar_shape, dtype=np.float32)
self.ret_buf = np.zeros(single_scalar_shape, dtype=np.float32)
self.val_buf = np.zeros(ens_scalar_shape, dtype=np.float32)
self.val_var_buf = np.zeros(ens_scalar_shape, dtype=np.float32)
#self.val_ep_var_buf = np.zeros(ens_scalar_shape, dtype=np.float32) ## epistemic value variance
self.cadv_buf = np.zeros(single_scalar_shape, dtype=np.float32)
        self.cret_var_buf = np.zeros(single_scalar_shape, dtype=np.float32) ## epistemic cost return variance
self.croll_lengths_buf = np.zeros(single_scalar_shape, dtype=np.float32)
self.cost_buf = np.zeros(ens_scalar_shape, dtype=np.float32)
self.cost_path_var_buf = np.zeros(ens_scalar_shape, dtype=np.float32)
self.cret_buf = np.zeros(single_scalar_shape, dtype=np.float32)
self.cval_buf = np.zeros(ens_scalar_shape, dtype=np.float32)
self.cval_var_buf = np.zeros(ens_scalar_shape, dtype=np.float32) # cost value
#self.cval_ep_var_buf = np.zeros(ens_scalar_shape, dtype=np.float32) # epistemic cost value variance
self.logp_buf = np.zeros(ens_scalar_shape, dtype=np.float32)
self.term_buf = np.zeros(ens_scalar_shape, dtype=np.bool_)
if self.pi_info_shapes:
if self.rollout_mode == 'iv_gae':
self.pi_info_bufs = {k: np.zeros(shape=[self.ensemble_size, self.batch_size]+[self.max_path_length] + list(v), dtype=np.float32)
for k,v in self.pi_info_shapes.items()}
else:
self.pi_info_bufs = {k: np.zeros(shape=[self.batch_size]+[self.max_path_length] + list(v), dtype=np.float32)
for k,v in self.pi_info_shapes.items()}
self.cutoff_horizons_mean = 0
self.dyn_normalization = dynamics_normalization
# ptr is a scalar to the current position in all paths. You are expected to store at the same timestep
# in all parallel paths
# path_start_idx is the path starting index, which will actually always be 0, since paths are parallel
# and always start at 0, may be removed
# max_size is actually also the same for all parallel paths, but a batch sized vector is more convenient
# for masked assertion
# populated_mask shows us which entries in the buffer are valid, meaning they had a value stored in them
# and aren't terminated.
# terminated_paths_mask essentially notes the same thing as populated_mask but is one_dimensional for
# convenience
self.ptr, self.path_start_idx, self.max_size, self.populated_mask, self.populated_indices, self.terminated_paths_mask = \
0, \
0, \
np.ones(self.batch_size)*self.max_path_length, \
np.zeros((self.batch_size, self.max_path_length), dtype=np.bool), \
np.repeat(np.arange(self.max_path_length)[None], axis=0, repeats=self.batch_size), \
np.zeros(self.batch_size, dtype=np.bool)
@property
def size(self):
return self.populated_mask.sum()
@property
def has_room(self):
room_mask = self.ptr < self.max_size
return room_mask.all()
@property
def alive_paths(self):
return np.logical_not(self.terminated_paths_mask)
def store_multiple(self, obs, act, next_obs, rew, val, val_var, cost, cval, cval_var, dyn_error, logp, pi_info, term):
assert (self.ptr < self.max_size).all()
alive_paths = self.alive_paths
if self.rollout_mode=='iv_gae':
self.obs_buf[:, alive_paths, self.ptr] = obs
self.act_buf[:, alive_paths, self.ptr] = act
self.nextobs_buf[:, alive_paths, self.ptr] = next_obs
self.rew_buf[:, alive_paths, self.ptr] = rew
#self.rew_path_var_buf[:, alive_paths, self.ptr] = rew_var
self.val_buf[:, alive_paths, self.ptr] = val
self.val_var_buf[:, alive_paths, self.ptr] = val_var
self.cost_buf[:, alive_paths, self.ptr] = cost
#self.cost_path_var_buf[:, alive_paths, self.ptr] = cost_var
self.cval_buf[:, alive_paths, self.ptr] = cval
self.cval_var_buf[:, alive_paths, self.ptr] = cval_var
self.logp_buf[:, alive_paths, self.ptr] = logp
self.term_buf[:, alive_paths, self.ptr] = term
for k in self.sorted_pi_info_keys:
self.pi_info_bufs[k][:, alive_paths, self.ptr] = pi_info[k]
else:
self.obs_buf[alive_paths, self.ptr] = obs
self.act_buf[alive_paths, self.ptr] = act
self.nextobs_buf[alive_paths, self.ptr] = next_obs
self.rew_buf[alive_paths, self.ptr] = rew
#self.rew_path_var_buf[:, alive_paths, self.ptr] = rew_var
self.val_buf[alive_paths, self.ptr] = val
self.val_var_buf[alive_paths, self.ptr] = val_var
self.cost_buf[alive_paths, self.ptr] = cost
#self.cost_path_var_buf[:, alive_paths, self.ptr] = cost_var
self.cval_buf[alive_paths, self.ptr] = cval
self.cval_var_buf[alive_paths, self.ptr] = cval_var
self.logp_buf[alive_paths, self.ptr] = logp
self.term_buf[alive_paths, self.ptr] = term
for k in self.sorted_pi_info_keys:
self.pi_info_bufs[k][alive_paths, self.ptr] = pi_info[k]
self.dyn_error_buf[alive_paths, self.ptr] = dyn_error
self.populated_mask[alive_paths, self.ptr] = True
self.ptr += 1
def finish_path_multiple(self, term_mask, last_val=0, last_cval=0):
"""
finishes multiple paths according to term_mask.
Note: if the term_mask indicates to terminate a path that has not yet been populated,
it will terminate, but samples won't be marked as terminated (they won't be included
in get())
Args:
term_mask: a bool mask that indicates which paths should be terminated.
has to be of same length as currently alive paths.
last_val: value of the last state in the paths that are to be finished.
has to be of same length as the number of paths to be terminated (term_mask.sum())
last_cval: cost value of the last state in the paths that are to be finished.
has to be of same length as the number of paths to be terminated (term_mask.sum())
"""
if not term_mask.any(): return ### skip if not terminating anything
assert self.alive_paths.sum() == len(term_mask) ### terminating a non-alive path!
alive_paths = self.alive_paths
## concat masks for fancy indexing. (expand term_mask to buf dim)
finish_mask = np.zeros(len(self.alive_paths), dtype=np.bool)
finish_mask[tuple([alive[term_mask] for alive in np.where(alive_paths)])] = True
if self.ptr>0:
path_slice = slice(self.path_start_idx, self.ptr)
rews = np.append(self.rew_buf[..., finish_mask, path_slice], last_val[..., None], axis=-1)
vals = np.append(self.val_buf[..., finish_mask, path_slice], last_val[..., None], axis=-1)
if self.rollout_mode=='iv_gae':
#=====================================================================#
# Inverse Variance Weighted Advantages #
#=====================================================================#
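                # Idea: for each start index t, candidate h-step returns of different lengths are
                # combined with weights proportional to the inverse of their epistemic variance across
                # the model ensemble, so horizons the ensemble disagrees on contribute less. With equal
                # variances this reduces to ordinary GAE (see the lambda-vector comment below).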
#### only choose single trajectory for deltas
deltas = rews[self.model_ind][...,:-1] + self.gamma * vals[self.model_ind][..., 1:] - vals[self.model_ind][..., :-1]
### define some utility indices
t, t_p_h, h = triu_indices_t_h(size=deltas.shape[-1])
Ht, HH = np.diag_indices(deltas.shape[-1])
HH = np.flip(HH) ### H is last value in each t-row
### define some utility vectors for lambda and gamma
seed = np.zeros(shape=deltas.shape[-1]+1)
seed[0] = 1
disc_vec = scipy.signal.lfilter([1], [1, float(-self.gamma)], seed) ### create vector of discounts
disc_vec_sq = scipy.signal.lfilter([1], [1, float(-(self.gamma**2))], seed) ### create vector of squared discounts
lam_vec = scipy.signal.lfilter([1], [1, float(-self.lam*self.gamma**2)], seed) ### create vector of lambdas
### (divide by gamma to get GAE for equal variances)
### calculate empirial epistemic variance per trajectory and rollout length
## @anyboby: for now without discount since we want the weighting to equal GAE for equal variances
rew_t_h = disc_cumsum_matrix(self.rew_buf[:, finish_mask, path_slice], discount=self.gamma) #self.gamma)
rew_t_h[..., t, h] += vals[..., t_p_h+1]*disc_vec[..., h+1]
rew_var_t_h = np.var(rew_t_h, axis=0) ### epistemic variances per timestep and rollout-length
                ### create inverse (epistemic) variance matrix in t and rollout length h
weight_mat = np.zeros_like(rew_var_t_h)
weight_mat[...,t, h] = 1/(rew_var_t_h[..., t, h] + EPS) #* disc_vec[..., h+1]+EPS)
### add lambda weighting
weight_mat[...,t, h] *= lam_vec[..., h]
weight_mat[...,Ht, HH] *= 1/(1-self.lam*self.gamma**2+EPS)
### create weight matrix for deltas
d_weight_mat = discount_cumsum(weight_mat, 1.0, 1.0, axis=-1) #### sum from l to H to get the delta-weight-matrix
weight_norm = 1/d_weight_mat[..., 0] #### normalize:
                d_weight_mat[...,t,h] = d_weight_mat[...,t,h]*weight_norm[..., t]  #### first entry for every t contains sum of all weights
#### this is a bit peculiar: variances reduce squared per definition in a weighted average, but does that make sense here ?
#### should the uncertainty really be much lower only because there are more elements counted into the weighted average ?
ep_var_weight_mat = np.zeros(shape=weight_mat.shape)
ep_var_weight_mat[...,t,h] = (weight_mat[...,t, h]*weight_norm[..., t]) * rew_var_t_h[...,t,h]
# ep_var_weight_mat[...,t,h] = (weight_mat[...,t, h]*weight_norm[..., t])**2 * rew_var_t_h[...,t,h]
### calculate (epistemic) iv-weighted advantages
self.adv_buf[finish_mask, path_slice] = discount_cumsum_weighted(deltas, self.gamma, d_weight_mat)
self.ret_var_buf[finish_mask, path_slice] = \
discount_cumsum_weighted(np.ones_like(deltas), 1.0, ep_var_weight_mat)
self.roll_lengths_buf[finish_mask, path_slice] = \
discount_cumsum_weighted(np.arange(self.ptr), 1.0, weight_mat)*weight_norm - np.arange(self.ptr)
#### R_t = A_GAE,t^iv + V_t
self.ret_buf[finish_mask, path_slice] = self.adv_buf[finish_mask, path_slice] + self.val_buf[self.model_ind, finish_mask, path_slice]
else:
deltas = rews[...,:-1] + self.gamma * vals[..., 1:] - vals[..., :-1]
### calculate (epistemic) iv-weighted advantages
self.adv_buf[finish_mask, path_slice] = discount_cumsum(deltas, self.gamma, self.lam, axis=-1)
#### R_t = A_GAE,t^iv + V_t
self.ret_buf[finish_mask, path_slice] = self.adv_buf[finish_mask, path_slice] + self.val_buf[finish_mask, path_slice]
costs = np.append(self.cost_buf[..., finish_mask, path_slice], last_cval[..., None], axis=-1)
cvals = np.append(self.cval_buf[..., finish_mask, path_slice], last_cval[..., None], axis=-1)
if self.rollout_mode=='iv_gae':
#=====================================================================#
# Inverse Variance Weighted Cost Advantages #
#=====================================================================#
#### only choose single trajectory for deltas
cdeltas = costs[self.model_ind][...,:-1] + self.cost_gamma * cvals[self.model_ind][..., 1:] - cvals[self.model_ind][..., :-1]
### define some utility vectors for lambda and gamma
c_disc_vec = scipy.signal.lfilter([1], [1, float(-self.cost_gamma)], seed) ### create vector of discounts
c_disc_vec_sq = scipy.signal.lfilter([1], [1, float(-(self.cost_gamma**2))], seed) ### create vector of squared discounts
c_lam_vec = scipy.signal.lfilter([1], [1, float(-self.cost_lam*self.cost_gamma**2)], seed) ### create vector of lambdas
                ### calculate empirical epistemic variance per trajectory and rollout length
c_t_h = disc_cumsum_matrix(self.cost_buf[:, finish_mask, path_slice], discount=self.gamma)
c_t_h[..., t, h] += cvals[..., t_p_h+1]*c_disc_vec[..., h+1]
c_var_t_h = np.var(c_t_h, axis=0) ### epistemic variances per timestep and rollout-length
                ### create inverse (epistemic) variance matrix in t and rollout length h
c_weight_mat = np.zeros_like(c_var_t_h)
c_weight_mat[...,t, h] = 1/(c_var_t_h[..., t, h] + EPS) #* disc_vec[..., h+1]+EPS)
### add lambda weighting
c_weight_mat[...,t, h] *= c_lam_vec[..., h]
c_weight_mat[...,Ht, HH] *= 1/(1-self.cost_lam*self.cost_gamma**2+EPS)
### create weight matrix for deltas
cd_weight_mat = discount_cumsum(c_weight_mat, 1.0, 1.0, axis=-1) #### sum from l to H to get the delta-weight-matrix
c_weight_norm = 1/cd_weight_mat[..., 0] #### normalize:
                cd_weight_mat[...,t,h] = cd_weight_mat[...,t,h]*c_weight_norm[..., t] #### first entry for every t contains sum of all weights
#### this is a bit peculiar: variances reduce squared per definition in a weighted average, but does that make sense here ?
#### should the uncertainty really be much lower only because there are more elements counted into the weighted average ?
c_ep_var_weight_mat = np.zeros(shape=c_weight_mat.shape)
c_ep_var_weight_mat[...,t,h] = (c_weight_mat[...,t,h]*c_weight_norm[..., t]) * c_var_t_h[...,t,h]
# c_ep_var_weight_mat[...,t,h] = (c_weight_mat[...,t,h]*c_weight_norm[..., t])**2 * c_var_t_h[...,t,h]
### calculate (epistemic) iv-weighted advantages
self.cadv_buf[finish_mask, path_slice] = discount_cumsum_weighted(cdeltas, self.cost_gamma, cd_weight_mat)
self.cret_var_buf[finish_mask, path_slice] = \
discount_cumsum_weighted(np.ones_like(deltas), 1.0, c_ep_var_weight_mat)
self.croll_lengths_buf[finish_mask, path_slice] = \
discount_cumsum_weighted(np.arange(self.ptr), 1.0, c_weight_mat)*c_weight_norm - np.arange(self.ptr)
#### R_t = A_GAE,t^iv + V_t
self.cret_buf[finish_mask, path_slice] = self.cadv_buf[finish_mask, path_slice] + self.cval_buf[self.model_ind, finish_mask, path_slice]
else:
cdeltas = costs[...,:-1] + self.cost_gamma * cvals[..., 1:] - cvals[..., :-1]
### calculate (epistemic) iv-weighted advantages
self.cadv_buf[finish_mask, path_slice] = discount_cumsum(cdeltas, self.cost_gamma, self.cost_lam, axis=-1)
#### R_t = A_GAE,t^iv + V_t
self.cret_buf[finish_mask, path_slice] = self.cadv_buf[finish_mask, path_slice] + self.cval_buf[finish_mask, path_slice]
#=====================================================================#
# Determine Rollout Lengths #
#=====================================================================#
if self.rollout_mode == 'iv_gae':
### alternative b: normalize return variances by first entry
norm_cret_vars = self.cret_var_buf[finish_mask, path_slice]/(self.cret_var_buf[finish_mask, path_slice][...,0:1]+EPS)
norm_ret_vars = self.ret_var_buf[finish_mask, path_slice]/(self.ret_var_buf[finish_mask, path_slice][...,0:1]+EPS)
if self.cares_about_cost:
too_uncertain_mask = np.logical_or(
norm_cret_vars>self.max_uncertainty_c,
norm_ret_vars>self.max_uncertainty_r
)
else:
too_uncertain_mask = norm_ret_vars>self.max_uncertainty_r
horizons = np.argmax(too_uncertain_mask, axis=-1)[...,None]
self.populated_mask[finish_mask,:] *= self.populated_indices[finish_mask,:]<horizons
# mark terminated paths
self.terminated_paths_mask += finish_mask
def get(self):
"""
Returns a list of predetermined values in the buffer.
Returns:
list: [self.obs_buf, self.act_buf, self.adv_buf,
self.cadv_buf, self.ret_buf, self.cret_buf,
self.logp_buf] + values_as_sorted_list(self.pi_info_bufs)
"""
assert self.terminated_paths_mask.all() ### all paths have to be finished
if self.size>0:
# Advantage normalizing trick for policy gradient
adv_mean, adv_std = mpi_statistics_scalar(self.adv_buf[self.populated_mask].flatten()) # mpi can only handle 1d data
adv_var = np.var(self.adv_buf[self.populated_mask])
self.adv_buf[self.populated_mask] = (self.adv_buf[self.populated_mask] - adv_mean) / (adv_std + EPS)
# Center, but do NOT rescale advantages for cost gradient
# (since we're not just minimizing but aiming for a specific c)
cadv_mean, _ = mpi_statistics_scalar(self.cadv_buf[self.populated_mask].flatten())
cadv_var = np.var(self.cadv_buf[self.populated_mask])
self.cadv_buf[self.populated_mask] -= cadv_mean
ret_mean = self.ret_buf[self.populated_mask].mean()
cret_mean = self.cret_buf[self.populated_mask].mean()
val_var_mean = self.val_var_buf[..., self.populated_mask].mean()
cval_var_mean = self.cval_var_buf[..., self.populated_mask].mean()
else:
ret_mean = 0
cret_mean = 0
val_var_mean = 0
cval_var_mean = 0
if self.rollout_mode=='iv_gae':
res = [self.obs_buf[self.model_ind], self.act_buf[self.model_ind], self.adv_buf, self.ret_var_buf,
self.cadv_buf, self.cret_var_buf, self.ret_buf, self.cret_buf,
self.logp_buf[self.model_ind], self.val_buf[self.model_ind], self.val_var_buf[self.model_ind],
self.cval_buf[self.model_ind], self.cval_var_buf[self.model_ind], self.cost_buf[self.model_ind]] \
+ [v[self.model_ind] for v in values_as_sorted_list(self.pi_info_bufs)]
else:
res = [self.obs_buf, self.act_buf, self.adv_buf, self.ret_var_buf,
self.cadv_buf, self.cret_var_buf, self.ret_buf, self.cret_buf,
self.logp_buf, self.val_buf, self.val_var_buf,
self.cval_buf, self.cval_var_buf, self.cost_buf] \
+ [v for v in values_as_sorted_list(self.pi_info_bufs)]
# filter out unpopulated entries / finished paths
res = [buf[self.populated_mask] for buf in res]
diagnostics = dict( poolm_batch_size = self.populated_mask.sum(),
poolm_ret_mean=ret_mean,
poolm_cret_mean=cret_mean,
poolm_val_var_mean = val_var_mean,
poolm_cval_var_mean = cval_var_mean,
)
# reset
self.reset()
return res, diagnostics
| StarcoderdataPython |
3292810 | from mountequist.responses.httpis import HttpIs
from mountequist.responses.proxy import Proxy
| StarcoderdataPython |
3208153 | <filename>watson/analyze_tone.py
import requests
import json
def analyze_tone(text):
usern = '<PASSWORD>'
passw = '<PASSWORD>'
#watsonUrl = 'https://gateway.watsonplatform.net/tone-analyzer-beta/api/v3/tone?version=2016-05-18'
watsonUrl='https://gateway.watsonplatform.net/tone-analyzer/api/v3/tone?version=2016-05-19'
headers = {"content-type": "text/plain"}
print(text)
data = text
try:
r = requests.post(watsonUrl, auth=(usern,passw),headers = headers,
data=data)
return r.text
except:
return False
def display_results(data, syl_sec):
array=[]
data = json.loads(str(data))
#print(data)
array.append(syl_sec)
for i in data['document_tone']['tone_categories']:
for j in i['tones']:
array.append({j['tone_name']:(str(round(j['score'] * 100,1)))})
return array
def analyze(data, sec):
#welcome()
#data = input("Enter some text to be analyzed for tone analysis by IBM Watson (Q to quit):\n")
if len(data) >= 1:
        if data.lower() == 'q':
            return
num_syl=0
test=data.split()
for word in test:
num_syl+=syllables(word)
syl_sec=(num_syl*60)/sec
results = analyze_tone(data)
if results != False:
#display_results(results)
return display_results(results, syl_sec)
#exit
else:
print("Something went wrong")
else: print("No data was recieved")
#return
#main()
def syllables(word):
count = 0
vowels = 'aeiouy'
word = word.lower().strip(".:;?!")
if word[0] in vowels:
count +=1
for index in range(1,len(word)):
if word[index] in vowels and word[index-1] not in vowels:
count +=1
if word.endswith('e'):
count -= 1
if word.endswith('le'):
count+=1
if count == 0:
count +=1
return count
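# Quick sanity check of the heuristic above (hand-traced, made-up words):
#   syllables("orange") -> 'o', 'a' and the trailing 'e' are counted, then 1 is subtracted for the
#                          final 'e', giving 2
#   syllables("table")  -> 'a' + 'e' = 2, minus 1 for the trailing 'e', plus 1 for the 'le' ending = 2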
| StarcoderdataPython |
119515 | import pandas as pd
from pathlib import Path
def process_files(input_dir, output_dir, record_name):
img_dir = output_dir / 'images'
labels_dir = output_dir / 'labels'
record_path = output_dir / record_name
class_path = output_dir / 'classes.names'
img_dir.mkdir(exist_ok=True)
labels_dir.mkdir(exist_ok=True)
copy_images(input_dir, img_dir)
copy_labels(input_dir, labels_dir, record_path, class_path)
def copy_images(input_dir, img_dir):
for input_img_path in input_dir.glob('*png'):
img_path = img_dir / input_img_path.name
print('Writing', img_path)
img_path.write_bytes(input_img_path.read_bytes())
def copy_labels(input_dir, labels_dir, record_path, class_path):
input_labels_path = input_dir / 'labels.csv'
df = pd.read_csv(input_labels_path)
class_names = df['class'].unique()
class_ids = dict(zip(class_names, range(len(class_names))))
df['class_id'] = df['class'].map(class_ids)
# write class ids to file
with open(class_path, 'w') as class_file:
for class_name in class_ids.keys():
class_file.write(f'{class_name}\n')
# write box coordinates to files
with open(record_path, 'w') as record_file:
print('Writing', record_path)
for input_filename, dfg in df.groupby('filename'):
labels_path = labels_dir / Path(input_filename).with_suffix('.txt')
# write all boxes to a single file
with open(labels_path, 'w') as labels_file:
print('Writing', labels_path)
for _, row in dfg.iterrows():
labels_file.write(convert_boxes(row))
# add image filename to record
record_file.write(f'data/images/{input_filename}\n')
def convert_boxes(row):
''' Extract box coordinates from dataframe row '''
class_id = row['class_id']
x_center = (row['xmax'] + row['xmin']) * 0.5 / row['width']
y_center = (row['ymax'] + row['ymin']) * 0.5 / row['height']
width = (row['xmax'] - row['xmin']) / row['width']
height = (row['ymax'] - row['ymin']) / row['height']
return f'{class_id} {x_center} {y_center} {width} {height}\n'
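# Worked example (made-up numbers): a box with xmin=10, xmax=50, ymin=20, ymax=60 inside a
# 200x100 image and class_id=0 becomes "0 0.15 0.4 0.2 0.4\n", i.e. the normalised
# centre-x / centre-y / width / height line format expected by YOLO-style label files.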
if __name__ == '__main__':
process_files(
input_dir=Path('tensorflow/data/train'),
output_dir=Path('pytorch/data'),
record_name='train.txt'
)
process_files(
input_dir=Path('tensorflow/data/test'),
output_dir=Path('pytorch/data'),
record_name='test.txt'
)
| StarcoderdataPython |
30007 | """Tools for working with Cryptopunk NFTs; this includes utilities for data analysis and image preparation for training machine learning models using Cryptopunks as training data.
Functions:
get_punk(id)
pixel_to_img(pixel_str, dim)
flatten(img)
unflatten(img)
sort_dict_by_function_of_value(d, f)
add_index_to_colors(colors)
"""
import os
import time
import requests
from collections import OrderedDict
from bs4 import BeautifulSoup
from re import sub
import numpy as np
import pandas as pd
from matplotlib.colors import rgb2hex
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
__ROOT_DIR__ = os.path.dirname(os.path.abspath(__file__))
__PUNK_DIR__ = f"{__ROOT_DIR__}/images/training";
def camel_case(string):
'''
Convert string to camelCase
'''
string = string.strip("\n")
string = sub(r"(_|-)+", " ", string).title().replace(" ", "")
return string[0].lower() + string[1:]
def color_str_to_hex(s):
'''
Convert string representation of numpy pixel array
to a string hex value
'''
return rgb2hex([float(x) for x in s[1:-1].split(' ') if x != ''])
def get_punk(id):
'''
Returns a ndarray with loaded image
'''
return mpimg.imread(f'''{__PUNK_DIR__}/punk{"%04d" % id}.png''')
def pixel_to_img(pixel_str, dim = (24,24)):
'''
Take pixel of format "[r,g,b,b]"
and return an image of size `dim` containing
only the pixel's color.
'''
(x,y) = dim
c = np.fromstring(pixel_str[1:-1], float, sep=' ')
return np.full((x, y, 4), c)
def pixel_to_ximg(pixel_strs, dim = (24,24), n=3 ):
'''
Take pixel of format "[r,g,b,b]"
and return an image of size `dim` containing
a matrix of size n*n
'''
(x,y) = (dim[0]//n, dim[1]//n)
m = []
for i in range(0,n):
l=[]
for j in range(0,n):
img = np.full((x, y, 4),
np.fromstring(pixel_strs[i*n + j][1:-1], float, sep=' '))
l.append(img)
m.append(np.concatenate(l, axis=1))
return np.concatenate(m, axis=0)
def flatten(img):
'''
Convert (x,y,z) array containing a pixel in z-dimension
to an (x,y) array with str values for each (i,j)
the intention is to make this easier to work with in ML
training.
'''
return np.array([[str(c) for c in row]
for row in img])
def unflatten(img):
'''
    Return a flattened image to valid .png format for display
'''
return np.array([[np.fromstring(c[1:-1], float, sep=' ')
for c in row] for row in img])
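# Note: flatten() stores every pixel as numpy's string form (e.g. "[1. 0. 0. 1.]") and unflatten()
# parses that string back with np.fromstring, so the two functions are inverses (up to the float
# precision lost by str()) and unflatten(flatten(img)) reproduces a displayable 24x24x4 punk image.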
def sort_dict_by_function_of_value(d, f = len):
sorted_tuples = sorted(d.items(),
                           key=lambda item: f(item[1]))
return {k: v for k, v in sorted_tuples}
def add_index_to_colors(colors):
'''
Add a unique, sequential index to the entry for
each color. returned dictionary will be of form
{`color_string`: { `"id": `int`, "punkIds" : `list[int`}}
'''
i=0
d={}
for k in colors.keys():
d[k] = {
'id' : i,
'punkIds' : colors[k]
}
i=i+1
return d
def get_attr_dict():
'''
Read the attr csv and populate a default dict
'''
d=OrderedDict()
with open(f"{__ROOT_DIR__}/data/list_attr_punx.csv") as f:
for attr in f.read().strip('\n').split(','):
d[attr]=-1
return d
def get_punk_attrs(id):
'''
Retrieve `id` cryptopunk from larvalabs.com,
parse HTML to extract type and attribute list
to return list of attributes
'''
typeClass="col-md-10 col-md-offset-1 col-xs-12"
punk_page=requests.get(f"https://www.larvalabs.com/cryptopunks/details/{id}")
if(punk_page.status_code != 200):
print(punk_page.status_code)
return {}
punk_html=punk_page.text
soup = BeautifulSoup(punk_html, 'html.parser')
details = soup.find(id="punkDetails")
punkType = camel_case(details.find(class_=typeClass).find('a').contents[0])
attrs=[punkType]
attrTags = details.find(class_ = "row detail-row")
for attrTag in attrTags.find_all('a'):
attrs.append(camel_case(attrTag.contents[0]))
return attrs
def get_punk_dict(id):
'''
Retrieve a punk page, pull type and attributes
from HTML and return a dictionary of attribute to
(-1,1) mapping where 1 is truthy for existence of
attribute
'''
od = {k:__ATTR_DICT__[k] for k in __ATTR_DICT__}
attrs = get_punk_attrs(id)
for attr in attrs:
od[attr]=1
return od
def get_punks(start, end):
'''
Retrieve punks in range `start` to `end`
'''
punks={}
for id in range(start, end):
print(id)
time.sleep(3.3)
punks[id] = get_punk_dict(id)
return punks
def plot_in_grid(n, images, predictions, labels):
'''
Plot `images` in an n*n grid with
prediction and labels as header
'''
(x,y) = (n,n)
fig = plt.figure(figsize=(9,14))
i=0
for i in range(1,(x*y)+1):
fig.add_subplot(x, y, i)
plt.imshow(images[i])
plt.title(f"{predictions[i][0]},{labels[i][0]}")
plt.axis('off')
i=i+1
return fig
| StarcoderdataPython |
3332124 | <filename>AdventOfCode2021/Day14/Day14.py<gh_stars>0
def part1():
code = open("input.txt").read().split("\n\n")[0]
keys = {i.split(" -> ")[0]:i.split(" -> ")[1] for i in open("input.txt").read().split("\n\n")[1].split("\n")}
string = {}
for i in range(0,len(code)-1):
string[code[i]+code[i+1]] = 1 if code[i]+code[i+1] not in string else string[code[i]+code[i+1]]+1
for i in range(11):
copy = dict(string)
for key,value in copy.items():
if (value > 0 and key in keys):
string[key[0]+keys[key]] = value if key[0]+keys[key] not in string else string[key[0]+keys[key]] + value
string[keys[key]+key[1]] = value if keys[key]+key[1] not in string else string[keys[key]+key[1]] + value
string[key] -= value
occurences = {}
for key, value in copy.items():
occurences[key[0]] = value if key[0] not in occurences else occurences[key[0]]+value
occurences[key[1]] = value if key[1] not in occurences else occurences[key[1]]+value
occurences[code[0]] += 1
occurences[code[-1]] += 1
for key in occurences.keys():
occurences[key] //= 2
return(max(occurences.values()) - min(occurences.values()))
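# Both parts use the same trick: instead of growing the polymer string explicitly, only counts of
# adjacent letter pairs are kept. An insertion rule AB -> C turns every "AB" pair into "AC" and "CB",
# and per-letter totals are recovered at the end from the pair counts (each letter is shared by two
# pairs, hence the // 2, with the template's first and last characters added back once).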
def part2():
code = open("input.txt").read().split("\n\n")[0]
keys = {i.split(" -> ")[0]:i.split(" -> ")[1] for i in open("input.txt").read().split("\n\n")[1].split("\n")}
string = {}
for i in range(0,len(code)-1):
string[code[i]+code[i+1]] = 1 if code[i]+code[i+1] not in string else string[code[i]+code[i+1]]+1
for i in range(41):
copy = dict(string)
for key,value in copy.items():
if (value > 0 and key in keys):
string[key[0]+keys[key]] = value if key[0]+keys[key] not in string else string[key[0]+keys[key]] + value
string[keys[key]+key[1]] = value if keys[key]+key[1] not in string else string[keys[key]+key[1]] + value
string[key] -= value
occurences = {}
for key, value in copy.items():
occurences[key[0]] = value if key[0] not in occurences else occurences[key[0]]+value
occurences[key[1]] = value if key[1] not in occurences else occurences[key[1]]+value
occurences[code[0]] += 1
occurences[code[-1]] += 1
for key in occurences.keys():
occurences[key] //= 2
return(max(occurences.values()) - min(occurences.values()))
print(f"answer to part1: {part1()}")
print(f"answer to part2: {part2()}")
| StarcoderdataPython |
3247929 | <gh_stars>10-100
import os
from pkg_resources import resource_filename
MISSING_LIBRARY_ERROR = """
Could not find the kaldi-io shared object (libtf_kaldi_io.so) on the system
search paths, on paths specified in the LD_LIBRARY_PATH environment variable,
or by importing package data from the 'tf_kaldi_io' package.
Please ensure that libtf_kaldi_io.so is built and available, or that the
'tf_kaldi_io' package is installed and available.
"""
# Functions and values included when you run `from tf_kaldi_io import *`
__all__ = ["KaldiReaderDataset", "KaldiDataset"]
def find_shared_library(library_name):
library_paths = {}
    # Traverse LD_LIBRARY_PATH and collect candidate shared-object paths.
for directory in os.environ.get("LD_LIBRARY_PATH", "").split(":"):
if os.path.isdir(directory):
files = os.listdir(directory)
for filename in files:
# Rely on filename to search for shared objects
if ".so." in filename or filename.endswith(".so"):
library_paths[filename] = os.path.join(directory, filename)
# Filter output to library we're looking for
object_name = "lib{}.so".format(library_name)
paths = {name: path for (name, path) in library_paths.items()
if name.startswith(object_name)}
# Return None if the list of paths for this library is empty
if paths:
return paths
else:
return None
def find_kaldi_io_library():
"""Check that libtf_kaldi_io.so can be found. If it can, ensure that
Tensorflow's tf.load_op_library() can find it by potentially adding it to
the LD_LIBRARY_PATH as necessary.
If it is not found, raise a helpful and informative error."""
try:
libtf_kaldi_io = resource_filename(__package__, "libtf_kaldi_io.so")
found = os.path.isfile(libtf_kaldi_io)
except ImportError:
# If we can't import tf_kaldi_io, definitely can't get its resources.
found = False
if found:
# If we have a libtf_kaldi_io.so from the tf_kaldi_io Python package,
# then ensure it gets on the path. We stick it on the front of the
# path, because it would be confusing if a tf_kaldi_io package used a
# libtf_kaldi_io.so that didn't correspond to it, just because the user
# happened to have a custom LD_LIBRARY_PATH set.
old_ld_library_path = os.environ.get("LD_LIBRARY_PATH", "")
lib_dir = os.path.dirname(libtf_kaldi_io)
os.environ["LD_LIBRARY_PATH"] = lib_dir + ":" + old_ld_library_path
# Ensure that at this point, no matter what, Tensorflow should be able to
# load libtf_kaldi_io.so as an op library.
kaldi_io_lib_paths = find_shared_library("tf_kaldi_io")
if kaldi_io_lib_paths:
return kaldi_io_lib_paths["libtf_kaldi_io.so"]
else:
raise RuntimeError(MISSING_LIBRARY_ERROR)
# Find the path to the KaldiIO shared library.
libtf_kaldi_io_path = find_kaldi_io_library()
# Load KaldiIO shared library. There is no documentation on how
# tf.load_op_library finds its shared libraries, but looking at the source
# indicates that the string passed to it is simply forwarded to dlopen(), so we
# can pass it an absolute path to libtf_kaldi_io.so.
import tensorflow as tf
kaldi_reader_dataset_module = tf.load_op_library(libtf_kaldi_io_path)
from . import tf_kaldi_io
# aliases for backwards compatibility
KaldiReaderDataset = tf_kaldi_io.KaldiReaderDataset
from . import tf_kaldi_dataset
KaldiDataset = tf_kaldi_dataset.KaldiDataset
| StarcoderdataPython |
986 | <reponame>HaidongHe/rqalpha
# -*- coding: utf-8 -*-
# Copyright 2019 Shenzhen Ricequant Technology Co., Ltd. (hereinafter "Ricequant")
#
# This software may not be used except in compliance with the current license.
#
# * Non-commercial use (i.e. individuals using this software for non-commercial purposes, or universities, research institutes and other non-profit institutions using it for education, research and similar purposes):
#   follow the Apache License 2.0 (hereinafter the "Apache 2.0 license"); a copy of the Apache 2.0 license is available at: http://www.apache.org/licenses/LICENSE-2.0.
#   Unless required by law or agreed to in writing, this software is distributed under the current license "as is", and no additional conditions may be attached.
#
# * Commercial use (i.e. individuals using this software for any commercial purpose, or legal persons or other organizations using it for any purpose):
#   without authorization from Ricequant, no individual may use this software for any commercial purpose (including but not limited to providing, selling, renting, lending or transferring to third parties this software, derivatives of this software, or products or services that reference or borrow from its functionality or source code), and no legal person or other organization may use this software for any purpose; otherwise Ricequant is entitled to pursue liability for the corresponding intellectual-property infringement.
#   On this premise, use of this software must also comply with the Apache 2.0 license; where the Apache 2.0 license conflicts with this license, this license prevails.
#   For the detailed authorization process, please contact <EMAIL>.
from datetime import datetime
import logbook
from logbook import Logger, StderrHandler
from rqalpha.utils.py2 import to_utf8
logbook.set_datetime_format("local")
# patch warn
logbook.base._level_names[logbook.base.WARNING] = 'WARN'
__all__ = [
"user_log",
"system_log",
"user_system_log",
]
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
def user_std_handler_log_formatter(record, handler):
from rqalpha.environment import Environment
try:
dt = Environment.get_instance().calendar_dt.strftime(DATETIME_FORMAT)
except Exception:
dt = datetime.now().strftime(DATETIME_FORMAT)
log = "{dt} {level} {msg}".format(
dt=dt,
level=record.level_name,
msg=to_utf8(record.message),
)
return log
user_std_handler = StderrHandler(bubble=True)
user_std_handler.formatter = user_std_handler_log_formatter
def formatter_builder(tag):
def formatter(record, handler):
log = "[{formatter_tag}] [{time}] {level}: {msg}".format(
formatter_tag=tag,
level=record.level_name,
msg=to_utf8(record.message),
time=record.time,
)
if record.formatted_exception:
log += "\n" + record.formatted_exception
return log
return formatter
# loggers
# logger for user-code log output
user_log = Logger("user_log")
# system log shown to the user
user_system_log = Logger("user_system_log")
# detailed log output for user exceptions
user_detail_log = Logger("user_detail_log")
# user_detail_log.handlers.append(StderrHandler(bubble=True))
# system log
system_log = Logger("system_log")
basic_system_log = Logger("basic_system_log")
# standard output log
std_log = Logger("std_log")
def init_logger():
system_log.handlers = [StderrHandler(bubble=True)]
basic_system_log.handlers = [StderrHandler(bubble=True)]
std_log.handlers = [StderrHandler(bubble=True)]
user_log.handlers = []
user_system_log.handlers = []
def user_print(*args, **kwargs):
sep = kwargs.get("sep", " ")
end = kwargs.get("end", "")
message = sep.join(map(str, args)) + end
user_log.info(message)
init_logger()
| StarcoderdataPython |
3327042 | from time import time
def bouquets(narcissus_price, tulip_price, rose_price, summ):
count = 0
args = sorted([narcissus_price, tulip_price, rose_price])
amounts = {
'cheap': [None, args[0]],
'medium': [None, args[1]],
'expensive': [None, args[2]]
}
del args
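    # Enumerate bouquets by total flower count i (odd counts only, stepping by 2 up to what the
    # cheapest flower allows); for each i the inner loop walks through all (i+1)(i+2)/2 ways of
    # splitting i flowers across the three price tiers and counts those whose cost fits in `summ`.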
for i in range(1, int(summ // amounts['cheap'][1] + 1), 2):
amounts['cheap'][0] = i
amounts['medium'][0] = 0
amounts['expensive'][0] = 0
for j in range(int((i + 2) / 2.0 * (i + 1))):
cost = amounts['cheap'][0] * amounts['cheap'][1] + amounts['medium'][0] * amounts['medium'][1] + amounts['expensive'][0] * amounts['expensive'][1]
if cost <= summ:
count += 1
elif not (amounts['expensive'][0] and amounts['cheap'][0]):
break
else:
amounts['medium'][0] += amounts['expensive'][0] + 1
amounts['expensive'][0] = 0
amounts['cheap'][0] -= 1
continue
if amounts['medium'][0]:
amounts['medium'][0] -= 1
amounts['expensive'][0] += 1
elif amounts['cheap'][0]:
amounts['medium'][0] = amounts['expensive'][0] + 1
amounts['expensive'][0] = 0
amounts['cheap'][0] -= 1
return count
while True:
try:
num1 = float(input('Num1: '))
num2 = float(input('Num2: '))
num3 = float(input('Num3: '))
num4 = float(input('Num4: '))
start = time()
print(bouquets(num1, num2, num3, num4))
print(time() - start)
except ValueError:
break | StarcoderdataPython |
3275866 | <reponame>hust201010701/OneKeyUploadImage
from qiniu import *
import qiniu.config
from win32con import *
import win32clipboard
import ctypes
from ctypes.wintypes import *
import sys
import time
import os
from PIL import Image
from qiniu.services.storage.upload_progress_recorder import UploadProgressRecorder
class BITMAPFILEHEADER(ctypes.Structure):
_pack_ = 1 # structure field byte alignment
_fields_ = [
('bfType', WORD), # file type ("BM")
('bfSize', DWORD), # file size in bytes
('bfReserved1', WORD), # must be zero
('bfReserved2', WORD), # must be zero
('bfOffBits', DWORD), # byte offset to the pixel array
]
SIZEOF_BITMAPFILEHEADER = ctypes.sizeof(BITMAPFILEHEADER)
class BITMAPINFOHEADER(ctypes.Structure):
_pack_ = 1 # structure field byte alignment
_fields_ = [
('biSize', DWORD),
('biWidth', LONG),
('biHeight', LONG),
('biPLanes', WORD),
('biBitCount', WORD),
('biCompression', DWORD),
('biSizeImage', DWORD),
('biXPelsPerMeter', LONG),
('biYPelsPerMeter', LONG),
('biClrUsed', DWORD),
('biClrImportant', DWORD)
]
SIZEOF_BITMAPINFOHEADER = ctypes.sizeof(BITMAPINFOHEADER)
def GetNowTime():
return time.strftime("%Y-%m-%d_%H_%M_%S",time.localtime(time.time()))
def uploadImage(localfile,access_key,secret_key,bucket_name):
q = Auth(access_key, secret_key)
    # file name to use on the server
key = localfile
token = q.upload_token(bucket_name, key, 3600)
    # local path of the file to upload
put_file(token, key, localfile)
ret, info = put_file(token, key, localfile,upload_progress_recorder= UploadProgressRecorder())
print(info)
return key
def set_clipboard(text):
win32clipboard.OpenClipboard()
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardText(text.encode('utf-8'),
win32clipboard.CF_TEXT)
win32clipboard.CloseClipboard()
def upload():
win32clipboard.OpenClipboard()
try:
if win32clipboard.IsClipboardFormatAvailable(win32clipboard.CF_DIB):
data = win32clipboard.GetClipboardData(win32clipboard.CF_DIB)
else:
print('clipboard does not contain an image in DIB format')
sys.exit(1)
finally:
win32clipboard.CloseClipboard()
bmih = BITMAPINFOHEADER()
ctypes.memmove(ctypes.pointer(bmih), data, SIZEOF_BITMAPINFOHEADER)
    # this check is flawed and rejects valid images
#if bmih.biCompression != BI_BITFIELDS: # RGBA?
#print('insupported compression type {}'.format(bmih.biCompression))
#sys.exit(1)
bmfh = BITMAPFILEHEADER()
ctypes.memset(ctypes.pointer(bmfh), 0, SIZEOF_BITMAPFILEHEADER) # zero structure
bmfh.bfType = ord('B') | (ord('M') << 8)
bmfh.bfSize = SIZEOF_BITMAPFILEHEADER + len(data) # file size
SIZEOF_COLORTABLE = 0
bmfh.bfOffBits = SIZEOF_BITMAPFILEHEADER + SIZEOF_BITMAPINFOHEADER + SIZEOF_COLORTABLE
    # webp loads faster, so convert the image to webp
bmp_filename = '%s.png'%GetNowTime()
with open(bmp_filename, 'wb') as bmp_file:
bmp_file.write(bmfh)
bmp_file.write(data)
bmp_file.close()
    # convert the format; in testing the display looks identical and webp takes only ~1% of the png's size, screenshot here:
im = Image.open(bmp_filename)
f,ext = os.path.splitext(bmp_filename)
webp_path = "%s.webp"%f
im.save(webp_path, "WEBP")
access_key = "<KEY>"
secret_key = "<KEY>"
bucket_name = "xxxxxxxx"
domain_name = "xxxxxxxxxxxxxxxxxxxxxxx"
try:
key = uploadImage(webp_path,access_key,secret_key,bucket_name)
except Exception as e:
print("发生错误:%s"%e)
else:
#print(os.getcwd())
os.remove("%s\%s"%(os.getcwd(),bmp_filename))
os.remove("%s\%s"%(os.getcwd(),webp_path))
    # 800 means: if the image is longer than 800 px it is scaled down to 800, with the other dimension scaled automatically
    # NOTE: the original format string was stripped in this copy; a markdown image link using qiniu's
    # imageView2 width-limit parameter is assumed below so the code matches the comment above
    set_clipboard("" % (domain_name, key))
if __name__ == "__main__":
upload()
| StarcoderdataPython |
158030 | '''
Exercise 1:
1. Write a recursive function print_all(numbers) that prints all the
elements of list of integers, one per line (use no while loops or for loops).
The parameters numbers to the function is a list of int.
2. Same problem as the last one but prints out the elements in reverse order.
'''
#printing all in same order
def print_all(number):
if len(number) <= 1:
print (number[0])
else:
print (number[0])
return print_all(number[1:])
print_all([1,2,3,4,5,6,7,8])
##############################################
print()
##############################################
#printing all in reverse order
def print_all_reverse(number):
if len(number) <= 1:
print (number[-1])
else:
print (number[-1])
        return print_all_reverse(number[:-1])
print_all_reverse([1,2,3,4,5,6,7,8]) | StarcoderdataPython |
1737955 | <reponame>yellowb/ml-sample
import heapq
""" self encapsulated priority queue """
class PriorityQueue:
def __init__(self):
self._queue = []
self._count = 0
def _push(self, priority, item):
heapq.heappush(self._queue,
(-priority, self._count, item)) # the first two elements in tuple are to be compared.
self._count = self._count + 1
def push(self, item):
self._push(item.price, item)
def pop(self):
return heapq.heappop(self._queue)[2]
def size(self):
return len(self._queue)
class Item:
def __init__(self, name, price):
self.name = name
self.price = price
def __str__(self) -> str:
return 'name = %s, price = %s' % (self.name, self.price)
item_1 = Item('Apple', 10)
item_2 = Item('Orange', 6)
item_3 = Item('Banana', 3)
item_4 = Item('Pig', 30)
item_5 = Item('Chicken', 25)
# order by price from high to low
pq = PriorityQueue()
pq.push(item_1)
pq.push(item_2)
pq.push(item_3)
pq.push(item_4)
pq.push(item_5)
for i in range(pq.size()):
print(pq.pop())
| StarcoderdataPython |
1789364 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import uuid
import six
from datetime import timedelta, datetime
import json
import adal
import dateutil.parser
import requests
from Kqlmagic.my_aad_helper import _MyAadHelper, ConnKeysKCSB
from Kqlmagic.kql_client import KqlQueryResponse, KqlError
from Kqlmagic.constants import Constants, ConnStrKeys
from Kqlmagic.version import VERSION
class Kusto_Client(object):
"""
Kusto client wrapper for Python.
KustoClient works with both 2.x and 3.x flavors of Python. All primitive types are supported.
KustoClient takes care of ADAL authentication, parsing response and giving you typed result set,
and offers familiar Python DB API.
    Tests are run using nose.
Examples
--------
    To use KustoClient, you can choose between two ways of authentication.
For the first option, you'll need to have your own AAD application and know your client credentials (client_id and client_secret).
>>> kusto_cluster = 'https://help.kusto.windows.net'
>>> kusto_client = KustoClient(kusto_cluster, client_id, client_secret='your_app_secret')
For the second option, you can use KustoClient's client id and authenticate using your username and password.
>>> kusto_cluster = 'https://help.kusto.windows.net'
>>> client_id = 'e07cf1fb-c6a6-4668-b21a-f74731afa19a'
>>> kusto_client = KustoClient(kusto_cluster, client_id, username='your_username', password='<PASSWORD>')"""
_DEFAULT_CLIENTID = "db662dc1-0cfe-4e1c-a843-19a68e65be58" # kusto client app, (didn't find app name ?)
# _DEFAULT_CLIENTID = "8430759c-5626-4577-b151-d0755f5355d8" # kusto client app, don't know app name
_MGMT_ENDPOINT_VERSION = "v1"
_QUERY_ENDPOINT_VERSION = "v2"
_MGMT_ENDPOINT_TEMPLATE = "{0}/{1}/rest/mgmt"
_QUERY_ENDPOINT_TEMPLATE = "{0}/{1}/rest/query"
_DATA_SOURCE_TEMPLATE = "https://{0}.kusto.windows.net"
_WEB_CLIENT_VERSION = VERSION
def __init__(self, conn_kv:dict):
"""
Kusto Client constructor.
Parameters
----------
kusto_cluster : str
Kusto cluster endpoint. Example: https://help.kusto.windows.net
client_id : str
The AAD application ID of the application making the request to Kusto
client_secret : str
The AAD application key of the application making the request to Kusto.
if this is given, then username/password should not be.
username : str
The username of the user making the request to Kusto.
if this is given, then password must follow and the client_secret should not be given.
password : str
The password matching the username of the user making the request to Kusto
authority : 'microsoft.com', optional
In case your tenant is not microsoft please use this param.
"""
cluster_name = conn_kv[ConnStrKeys.CLUSTER]
data_source = cluster_name if cluster_name.find("://") >= 0 else self._DATA_SOURCE_TEMPLATE.format(cluster_name)
self._mgmt_endpoint = self._MGMT_ENDPOINT_TEMPLATE.format(data_source, self._MGMT_ENDPOINT_VERSION)
self._query_endpoint = self._QUERY_ENDPOINT_TEMPLATE.format(data_source, self._QUERY_ENDPOINT_VERSION)
self._aad_helper = _MyAadHelper(ConnKeysKCSB(conn_kv, data_source), self._DEFAULT_CLIENTID) if conn_kv.get(ConnStrKeys.ANONYMOUS) is None else None
def execute(self, kusto_database, kusto_query, accept_partial_results=False, **options):
""" Execute a simple query or management command
Parameters
----------
kusto_database : str
Database against query will be executed.
query : str
Query to be executed
accept_partial_results : bool
Optional parameter. If query fails, but we receive some results, we consider results as partial.
If this is True, results are returned to client, even if there are exceptions.
If this is False, exception is raised. Default is False.
options["timeout"] : float, optional
Optional parameter. Network timeout in seconds. Default is no timeout.
"""
if kusto_query.startswith("."):
endpoint_version = self._MGMT_ENDPOINT_VERSION
endpoint = self._mgmt_endpoint
else:
endpoint_version = self._QUERY_ENDPOINT_VERSION
endpoint = self._query_endpoint
request_payload = {
"db": kusto_database,
"csl": kusto_query,
}
request_headers = {
"Accept": "application/json",
"Accept-Encoding": "gzip,deflate",
"Content-Type": "application/json; charset=utf-8",
"x-ms-client-version": "{0}.Python.Client:{1}".format(Constants.MAGIC_CLASS_NAME, self._WEB_CLIENT_VERSION),
"x-ms-client-request-id": "{0}.execute;{1}".format(Constants.MAGIC_CLASS_NAME, str(uuid.uuid4())),
}
if self._aad_helper is not None:
request_headers["Authorization"] = self._aad_helper.acquire_token(**options)
request_headers["Fed"] = "True"
response = requests.post(endpoint, headers=request_headers, json=request_payload, timeout=options.get("timeout"))
if response.status_code != requests.codes.ok: # pylint: disable=E1101
raise KqlError([response.text], response)
kql_response = KqlQueryResponse(response.json(), endpoint_version)
if kql_response.has_exceptions() and not accept_partial_results:
raise KqlError(kql_response.get_exceptions(), response, kql_response)
return kql_response
| StarcoderdataPython |
3221346 | from time import time
from multiprocessing.pool import Pool
NUM_LIMIT = 1000000
start_time = time()
def cal_uv(i):
print(i)
result_list =[]
for u in range(1, i):
v = i/u
if v < 1:
continue
if not (u+v) % 4:
z_4 = 3*v-u
if z_4> 0 and not z_4 % 4:
result_list.append(1)
if sum(result_list)>10:
return 11
return sum(result_list)
def check(i):
if cal_uv(i) == 10:
return 1
return 0
# test
assert cal_uv(1155) == 10
assert cal_uv(20) == 1
p = Pool(processes=8)
num_range = range(1, NUM_LIMIT+1)
result = p.map(check, num_range)
p.close()
p.join()
print(sum(result))
print('time:'+str(time()-start_time))
# slow but correct maybe
| StarcoderdataPython |
40987 | <reponame>mampilly/backend-global<gh_stars>0
'''Rate limiting via Redis'''
import logging
from datetime import timedelta
from redis import Redis
from app.core.database.cache import get_redis_connection
from app.exceptions.application_exception import exception
def rate_request(key, limit, period):
"""Rate request wrapping redis connection"""
return request_is_limited(get_redis_connection(), key, limit, period)
def request_is_limited(r: Redis, key: str, limit: int, period: timedelta):
"""Rate Limiting"""
if r.setnx(key, limit):
r.expire(key, int(period.total_seconds()))
bucket_val = r.get(key)
if bucket_val and int(bucket_val) > 0:
r.decrby(key, 1)
return False
return True
def rate_limit(auth, limit, period):
"""Rate limit main function"""
if rate_request(auth, limit, timedelta(seconds=period)):
raise exception.too_many_rquests()
logging.info('✅ Request is allowed')
return True
| StarcoderdataPython |
82929 | <filename>python/controller.py<gh_stars>0
from distutils.log import debug
from threading import Thread
from flask import Flask, jsonify, send_file
from time import sleep
from dataclasses import dataclass, field
from datetime import datetime, timedelta
import serial
import uuid
from collections import deque
import sys
import enum
import struct
import time
from obus import Message, ModuleAddress
INFO_ROUND_DURATION = timedelta(seconds=3)
GAMESTATE_UPDATE_INTERVAL = timedelta(seconds=0.5)
@enum.unique
class Gamestate(enum.Enum):
INACTIVE = 0
INFO = 1
DISCOVER = 2
GAME = 3
GAMEOVER = 4
@dataclass
class PuzzleState:
'''State keeping object for puzzle and needy modules'''
strike_amount: int = 0
solved: bool = False
@dataclass
class SharedWebToSerial:
game_duration: timedelta = timedelta(seconds=60)
max_allowed_strikes: int = 3
seed: int = 1
blocked_modules: list[ModuleAddress] = field(default_factory=list)
start_game: bool = False
restart_game: bool = False
@dataclass
class SharedSerialToWeb:
gamestate: Gamestate = Gamestate.INACTIVE
info_round_start: datetime = None
discover_round_start: datetime = None
game_start: datetime = None
game_stop: datetime = None
last_state_update: datetime = None
registered_modules: dict[ModuleAddress, PuzzleState] = field(default_factory=dict)
@dataclass
class DebugShared:
messages: deque
last_message_index: int
# Keep this the same as max_messages on the client!
max_message_cache = 200
debug_shared = DebugShared(deque(maxlen=max_message_cache), -1)
app = Flask(__name__)
server_id = uuid.uuid4()
print("Server ID: ", server_id)
web_to_serial = SharedWebToSerial()
serial_to_web = SharedSerialToWeb()
def parse_can_line(ser, debug_shared) -> Message:
if not ser.in_waiting:
return None
line = ser.read(12)
if len(line) == 12:
if line == b'BEGIN START\n' or line[0] > 0b111:
return None
sender = (int(line[0]) << 8) + int(line[1])
size = int(line[2])
message = line[3:3+size]
obj = Message(message, sender, datetime.now())
debug_shared.messages.append(obj)
debug_shared.last_message_index += 1
return obj
return None
def send_message(ser, msg, debug_shared) -> None:
debug_shared.messages.append(msg)
debug_shared.last_message_index += 1
# we send the payload padded with null-bytes, but these don't actually get sent
packed = struct.pack('>HB8s', msg.module_address().as_binary(), len(msg.payload), msg.payload)
ser.write(packed + b'\n')
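# Frame layout note: struct.pack('>HB8s', ...) always yields 11 bytes -- a
# big-endian 2-byte module address, a 1-byte payload length, and the payload
# right-padded with NUL bytes to 8 bytes -- so together with the trailing b'\n'
# each write is exactly the 12 bytes that parse_can_line() reads back (the CAN
# bridge presumably strips the padding downstream, as the comment above suggests).
# Example with illustrative values only:
#   struct.pack('>HB8s', 0x0123, 3, b'abc') == b'\x01\x23\x03abc\x00\x00\x00\x00\x00'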
def calculate_puzzle_modules_left(serial_to_web) -> int:
return sum(address.is_puzzle() and not state.solved for address, state in serial_to_web.registered_modules.items())
def calculate_strikes(serial_to_web) -> int:
return sum(state.strike_amount for state in serial_to_web.registered_modules.values())
def serial_controller(serialport, web_to_serial, serial_to_web, debug_shared):
with serial.Serial(serialport, 115200, timeout=0.05) as ser:
serial_to_web.gamestate = Gamestate.INACTIVE
# TODO send message here to get all modules to stop talking and reset
ser.reset_input_buffer()
time.sleep(5)
while True:
if serial_to_web.gamestate == Gamestate.INACTIVE:
send_message(ser, Message.create_controller_infostart(web_to_serial.seed),debug_shared)
serial_to_web.gamestate = Gamestate.INFO
serial_to_web.info_round_start = datetime.now()
serial_to_web.registered_modules = {}
elif serial_to_web.gamestate == Gamestate.INFO:
parse_can_line(ser, debug_shared) # throw away, TODO keep this and display it
if datetime.now() - serial_to_web.info_round_start > INFO_ROUND_DURATION:
serial_to_web.gamestate = Gamestate.DISCOVER
send_message(ser, Message.create_controller_hello(),debug_shared)
elif serial_to_web.gamestate == Gamestate.DISCOVER:
if web_to_serial.start_game:
web_to_serial.start_game = False
serial_to_web.game_start = datetime.now()
serial_to_web.last_state_update = datetime.now()
serial_to_web.gamestate = Gamestate.GAME
send_message(ser, Message.create_controller_gamestart(web_to_serial.game_duration, 0, web_to_serial.max_allowed_strikes, len(serial_to_web.registered_modules)),debug_shared)
msg = parse_can_line(ser, debug_shared)
if msg is None:
continue
puzzle_address = msg.get_puzzle_register()
if puzzle_address is None:
continue
if puzzle_address in web_to_serial.blocked_modules:
# this is blocked puzzle module, don't ack it
continue
serial_to_web.registered_modules[puzzle_address] = PuzzleState()
send_message(ser, Message.create_controller_ack(msg.module_address()),debug_shared)
elif serial_to_web.gamestate == Gamestate.GAME:
# React to puzzle strike / solve
msg = parse_can_line(ser, debug_shared)
if msg is None:
pass
elif (strike_details := msg.get_puzzle_strike_details()):
strike_address, strike_amount = strike_details
serial_to_web.registered_modules[strike_address].strike_amount = strike_amount
elif (solved_puzzle_address := msg.get_puzzle_solved()):
serial_to_web.registered_modules[solved_puzzle_address].solved = True
# Handle strikeout / timeout / solve
time_left = web_to_serial.game_duration - (datetime.now() - serial_to_web.game_start)
puzzle_modules_left = calculate_puzzle_modules_left(serial_to_web)
total_strikes = calculate_strikes(serial_to_web)
if time_left.total_seconds() <= 0:
# Pass zero timedelta, because time left can't be negative in the CAN protocol
# Timeout case is also handled first, so that in other cases we know there's time left
send_message(ser, Message.create_controller_timeout(timedelta(), puzzle_modules_left, web_to_serial.max_allowed_strikes, puzzle_modules_left),debug_shared)
serial_to_web.gamestate = Gamestate.GAMEOVER
elif total_strikes > web_to_serial.max_allowed_strikes:
send_message(ser, Message.create_controller_strikeout(time_left, puzzle_modules_left, web_to_serial.max_allowed_strikes, puzzle_modules_left),debug_shared)
serial_to_web.gamestate = Gamestate.GAMEOVER
elif puzzle_modules_left == 0:
send_message(ser, Message.create_controller_solved(time_left, puzzle_modules_left, web_to_serial.max_allowed_strikes, puzzle_modules_left),debug_shared)
serial_to_web.gamestate = Gamestate.GAMEOVER
if serial_to_web.gamestate == Gamestate.GAMEOVER:
serial_to_web.game_stop = datetime.now()
continue
if datetime.now() - serial_to_web.last_state_update > GAMESTATE_UPDATE_INTERVAL:
serial_to_web.last_state_update = datetime.now()
# Send state update with known-good checked values
send_message(ser, Message.create_controller_state(time_left, total_strikes, web_to_serial.max_allowed_strikes, puzzle_modules_left),debug_shared)
elif serial_to_web.gamestate == Gamestate.GAMEOVER:
if web_to_serial.restart_game:
web_to_serial.restart_game = False
serial_to_web.gamestate = Gamestate.INACTIVE
@app.route('/status.json')
def status():
status_dict = {
'gamestate': serial_to_web.gamestate.name,
'server_id': server_id
}
if serial_to_web.gamestate == Gamestate.GAME:
        # Send the time left to avoid time synchronisation issues between server and client
# Client can then extrapolate if it wants to
status_dict['timeleft'] = (web_to_serial.game_duration - (datetime.now() - serial_to_web.game_start)).total_seconds()
elif serial_to_web.gamestate == Gamestate.GAMEOVER:
status_dict['timeleft'] = max(0, (web_to_serial.game_duration - (serial_to_web.game_stop - serial_to_web.game_start)).total_seconds())
if serial_to_web.gamestate in (Gamestate.DISCOVER, Gamestate.GAME, Gamestate.GAMEOVER):
status_dict['puzzles'] = [
{'address': address.as_binary(), 'solved': state.solved if address.is_puzzle() else None, 'strikes': state.strike_amount}
for address, state
in sorted(serial_to_web.registered_modules.items(), key=(lambda kv: kv[0].as_binary()))
]
status_dict['max_allowed_strikes'] = web_to_serial.max_allowed_strikes
status_dict['game_duration'] = web_to_serial.game_duration.total_seconds()
print(status_dict)
return jsonify(status_dict)
@app.route('/start')
def start():
if serial_to_web.gamestate == Gamestate.DISCOVER:
web_to_serial.start_game = True
return 'OK'
    return 'Wrong gamestate'
@app.route('/restart')
def restart():
if serial_to_web.gamestate == Gamestate.GAMEOVER:
web_to_serial.restart_game = True
return 'OK'
    return 'Wrong gamestate'
@app.route('/<last_received>/api.json')
def api(last_received):
last_received = int(last_received)
messages = list(m.serialize() for m in debug_shared.messages)
if last_received >= debug_shared.last_message_index - len(debug_shared.messages):
messages = messages[len(debug_shared.messages) - (debug_shared.last_message_index - last_received):]
return jsonify({"server_id": server_id, "newest_msg": debug_shared.last_message_index, "messages": messages})
@app.route('/')
def index():
return send_file('static/controller.html')
@app.route('/debugger')
def debugger():
return send_file('static/debugger.html')
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: python3 controller.py [serial port]")
sys.exit()
if sys.argv[1] != 'mock':
thread = Thread(target=serial_controller, args=(sys.argv[1], web_to_serial, serial_to_web, debug_shared))
thread.start()
app.run(debug=False, host='0.0.0.0', port=8080)
| StarcoderdataPython |
181601 | <filename>weasel/widgets/image_sliders.py
__all__ = ['ImageSliders']
import pandas as pd
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtWidgets import (
QWidget, QHBoxLayout, QPushButton,
)
from PyQt5.QtGui import QIcon
from .. import widgets as widgets
class ImageSliders(QWidget):
"""Widget with sliders to navigate through a DICOM series."""
valueChanged = pyqtSignal()
def __init__(self, series=None, image=None, dimensions=[]):
super().__init__()
if dimensions == []:
self.sliderTags = ["AcquisitionTime", "SliceLocation"]
else:
self.sliderTags = dimensions
self._setWidgets()
self._setLayout()
if series is not None:
self.setData(series, image)
def _setWidgets(self):
self.slidersButton = QPushButton()
self.slidersButton.setToolTip("Display Multiple Sliders")
self.slidersButton.setCheckable(True)
self.slidersButton.setIcon(QIcon(widgets.icons.slider_icon))
self.slidersButton.clicked.connect(self._slidersButtonClicked)
self.instanceSlider = widgets.LabelSlider("", range(1))
self.instanceSlider.valueChanged.connect(self._mainSliderValueChanged)
self.sliders = [self.instanceSlider]
def _setLayout(self):
self.layout = QHBoxLayout()
self.layout.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
self.layout.setSpacing(2)
self.layout.setContentsMargins(0,0,0,0)
self.layout.addWidget(self.slidersButton)
self.layout.addWidget(self.instanceSlider)
self.setStyleSheet("background-color: white")
self.setLayout(self.layout)
def setData(self, series=None, image=None):
self.series = series
self._readDataFrame()
self._setSliderValueLists()
self.image = image
if image is None:
if self.series is not None:
self.image = self.series.children(0)
self._setSliderValues()
self._sliderValueChanged()
def setSeries(self, series): # Obsolete?
self.series = series
self._readDataFrame()
self._setSliderValueLists()
self.image = self.series.children(0)
self.setImage(self.image)
def setImage(self, image): # Obsolete?
self.image = image
self._setSliderValues()
self._sliderValueChanged()
def getSeries(self):
return self.series
def getImage(self):
return self.image
def _setSliderValueLists(self):
for slider in self._activeSliders:
values = self.dataFrame[slider.label].unique().tolist()
values.sort()
slider.setValues(values)
def _readDataFrame(self):
"""Read the dataframe for the series.
Drop tags that are not present in every instance.
Drop tags that appear only once.
"""
# Add all default tags in the registry and get values
tags = self.sliderTags.copy()
tags = list(set(tags + list(self.series.folder.dataframe)))
if self.series is None:
self.dataFrame = pd.DataFrame([], index=[], columns=tags)
else:
# If all required tags are in the register,
# then just extract the register for the series;
# else read the data from disk.
if set(tags) == set(self.series.folder.dataframe):
self.dataFrame = self.series.data()
else:
self.dataFrame = self.series.read_dataframe(tags)
self.dataFrame.sort_values("InstanceNumber", inplace=True)
self.dataFrame.dropna(axis=1, inplace=True)
self.dataFrame.reset_index()
# remove tags with one unique value
for tag in self.sliderTags:
if tag in self.dataFrame:
values = self.dataFrame[tag].unique().tolist()
if len(values) == 1:
self.dataFrame.drop(tag, axis=1, inplace=True)
# update list of slider Tags
for tag in self.sliderTags.copy():
if tag not in self.dataFrame:
self.sliderTags.remove(tag)
def _slidersButtonClicked(self):
"""Show or hide the other sliders that can be added."""
if self.slidersButton.isChecked():
# Build Checkbox sliders
self.slidersButton.setStyleSheet("background-color: red")
for tag in self.sliderTags:
tagValues = self.dataFrame[tag].unique().tolist()
tagValues.sort()
slider = widgets.CheckBoxSlider(tag, tagValues)
slider.valueChanged.connect(self._sliderValueChanged)
slider.stateChanged.connect(self._sliderStateChanged)
self.layout.addWidget(slider)
self.sliders.append(slider)
else:
# Delete CheckBox sliders
self.slidersButton.setStyleSheet(
"background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #CCCCBB, stop: 1 #FFFFFF)"
)
for slider in self.sliders[1:]:
slider.deleteLater()
self.sliders = self.sliders[:1]
self.sliders[0].show()
def _sliderStateChanged(self):
if self.image is None:
self._sliderValueChanged()
else:
self._setActiveSliderValues()
self._setMainSliderValue()
def _setSliderValues(self):
if self.image is None: return
self._setActiveSliderValues()
self._setMainSliderValue()
def _setActiveSliderValues(self):
if self.image is None: return
find = self.dataFrame.SOPInstanceUID == self.image.UID[-1]
row = self.dataFrame.loc[find]
for slider in self._activeSliders:
value = row[slider.label].values[0]
slider.setValue(value)
def _setMainSliderValue(self):
if self.image is None: return
imageUIDs = self._getAllSelectedImages()
if len(imageUIDs) <= 1:
self.sliders[0].hide()
else:
index = imageUIDs.index(self.image.UID[-1])
self.sliders[0].setValue(index)
self.sliders[0].show()
def _mainSliderValueChanged(self):
"""Change the selected image"""
imageUIDs = self._getAllSelectedImages()
if imageUIDs == []:
self.image = None
self.sliders[0].hide()
elif len(imageUIDs) == 1:
self._set_image(imageUIDs[0])
self.sliders[0].hide()
else:
index = self.sliders[0].value()
self._set_image(imageUIDs[index])
self.valueChanged.emit()
def _sliderValueChanged(self):
"""Change the selected image"""
imageUIDs = self._getAllSelectedImages()
if imageUIDs == []:
self.image = None
self.sliders[0].hide()
elif len(imageUIDs) == 1:
#self.image = self.series.children(SOPInstanceUID = imageUIDs[0])[0]
self._set_image(imageUIDs[0])
self.sliders[0].hide()
else:
self.sliders[0].setValues(range(len(imageUIDs)))
index = self.sliders[0].value()
# self.image = self.series.children(SOPInstanceUID = imageUIDs[index])[0]
self._set_image(imageUIDs[index])
self.sliders[0].show()
self.valueChanged.emit()
def _set_image(self, SOPInstanceUID):
"""
Set image based on its UID
"""
df = self.dataFrame[self.dataFrame.SOPInstanceUID == SOPInstanceUID]
self.image = self.series.dicm.object(self.series.folder, df.iloc[0], 4)
# self.image = self.series.children(SOPInstanceUID = imageUIDs[index])[0]
def _getAllSelectedImages(self):
"""Get the list of all image files selected by the optional sliders"""
selection = pd.Series(
index = self.dataFrame.index,
data = self.dataFrame.shape[0] * [True]
)
for slider in self._activeSliders:
sliderSelection = self.dataFrame[slider.label] == slider.value()
selection = selection & sliderSelection
if not selection.any():
return []
else:
return self.dataFrame.SOPInstanceUID[selection].values.tolist()
@property
def _activeSliders(self):
"""Create a list of all active sliders"""
activeSliders = []
for slider in self.sliders[1:]:
if slider.checkBox.isChecked():
activeSliders.append(slider)
return activeSliders | StarcoderdataPython |
1633092 | import itertools
from typing import List, Iterable
from bispy.utilities.graph_entities import _Vertex, _QBlock
# check if the given partition is stable with respect to the given block, or if
# it's stable if the block isn't given
def is_stable_vertexes_partition(partition: List[List[_Vertex]]) -> bool:
"""Checks the stability of the given partition. The input must be a
partition of Vertex instances, and the relation which we consider for the
stability is a->b, where a,b are two vertexes.
Args:
partition (list[list[_Vertex]]): A partition of Vertex instances.
Returns:
bool: True if the partition is stable. False otherwise.
"""
for couple in itertools.product(partition, repeat=2):
if not (
check_vertexes_stability(couple[0], couple[1])
and check_vertexes_stability(couple[1], couple[0])
):
return False
return True
def is_stable_partition(partition: List[_QBlock]) -> bool:
return is_stable_vertexes_partition(
[list(block.vertexes) for block in partition]
)
# return True if A_block \subseteq R^{-1}(B_block) or
# A_block cap R^{-1}(B_block) = \emptyset
def check_vertexes_stability(
A_block_vertexes: Iterable[_Vertex], B_block_vertexes: Iterable[_Vertex]
) -> bool:
"""Checks the stability of the first block with respect to the second one.
    The two inputs must be iterables of Vertex instances, and the relation which
    we consider for the stability is a->b, where a,b are two vertexes.
    Args:
        A_block_vertexes (Iterable[_Vertex]): The checked block.
        B_block_vertexes (Iterable[_Vertex]): The block against which we check
        the stability of A.
Returns:
bool: True if A is stable with respect to B. False otherwise.
"""
# if there's a vertex y in B_qblock_vertexes such that for the i-th vertex
# we have i->y, then is_inside_B[i] = True
is_inside_B = []
for vertex in A_block_vertexes:
is_inside_B.append(False)
for edge in vertex.image:
if edge.destination in B_block_vertexes:
is_inside_B[-1] = True
    # all == True if for each vertex x in A there's a vertex y such that
    # y \in E({x}) AND y \in B.
    # not any == True if the sets "image of A" and B are disjoint.
return all(is_inside_B) or not any(is_inside_B)
# return True if A_block \subseteq R^{-1}(B_block) or
# A_block cap R^{-1}(B_block) = \emptyset
def check_block_stability(A_block: _QBlock, B_block: _QBlock) -> bool:
"""Checks the stability of the first block with respect to the second one.
    The two inputs must be _QBlock instances, and the relation which we
    consider for the stability is a->b, where a,b are two vertexes.
    Args:
        A_block (_QBlock): The checked block.
        B_block (_QBlock): The block against which we check the
        stability of A.
Returns:
bool: True if A is stable with respect to B. False otherwise.
"""
A_block_vertexes = [vertex for vertex in A_block.vertexes]
B_block_vertexes = [vertex for vertex in B_block.vertexes]
return check_vertexes_stability(A_block_vertexes, B_block_vertexes)
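# Illustration: a minimal sketch of the same stability predicate on a plain
# adjacency dict instead of _Vertex/_QBlock objects -- block A is stable w.r.t.
# block B iff either every vertex of A has an edge into B, or none does. The
# graph and block contents below are made-up values for illustration only.
if __name__ == "__main__":
    edges = {1: {2}, 2: {3}, 3: set(), 4: {3}}  # vertex -> set of successors

    def stable(A, B):
        hits = [bool(edges[x] & B) for x in A]
        return all(hits) or not any(hits)

    assert stable({2, 4}, {3})        # both 2 and 4 reach {3}
    assert stable({1}, {4})           # 1 has no edge into {4}
    assert not stable({1, 2}, {3})    # only 2 reaches {3} -> not stable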
| StarcoderdataPython |
1788800 | #!/usr/bin/env python3
import os
import sys
import MySQLdb
def database_check():
dbname = os.environ.get('MYSQL_DATABASE')
user = os.environ.get('MYSQL_USER')
password = os.environ.get('MYSQL_PASSWORD')
host = "db"
port = 3306
print("HOST: {host}:{port}, DB: {dbname}, USER: {user}".format(
dbname=dbname,
user=user,
host=host,
port=port))
try:
MySQLdb.connect(
db=dbname,
user=user,
passwd=password,
host=host,
port=port)
    except Exception:
sys.exit(1)
sys.exit(0)
if __name__ == "__main__":
database_check()
| StarcoderdataPython |
1785351 | <filename>dominion/tasks.py
# Copyright 2020 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import time
from celery.utils.log import get_task_logger
from django.core.mail import send_mail
from django.template.loader import render_to_string
from images.models import Image
from dominion import base
from dominion.app import APP
from dominion.engine import EXIT_STATUS, PiemanDocker
from dominion.exceptions import DoesNotExist, Failed, Interrupted, UnknownStatus
from dominion.settings import (
CHANNEL_NAME,
CONTAINER_NAME,
POLLING_FREQUENCY,
QUEUE_BUILD_NAME,
QUEUE_WATCH_NAME,
TIMEOUT,
)
from dominion.util import connect_to_redis
LOGGER = get_task_logger(__name__)
@APP.task(bind=True, base=base.BaseBuildTask)
def build(self, image_id):
"""Builds an image. """
image = Image.objects.get(image_id=image_id)
env = {
'COMPRESS_WITH_GZIP': 'true',
'DEVICE': image.device_name,
'OS': image.distro_name,
'PROJECT_NAME': image_id,
}
for prop_key, prop_value in image.props.items():
if prop_key.startswith('PIEMAN_'):
env_name = prop_key.replace('PIEMAN_', '')
env[env_name] = prop_value
container_name = CONTAINER_NAME.format(image_id=image_id)
LOGGER.info(f'Running {container_name}')
self.request.kwargs['image_id'] = image_id
self.request.kwargs['pieman'] = pieman = PiemanDocker(container_name)
pieman.run(env=env)
watch.apply_async((image_id, ), queue=QUEUE_WATCH_NAME)
channel_name = CHANNEL_NAME.format(image_id=image_id)
conn = connect_to_redis()
for line in pieman.logs(stream=True):
conn.publish(channel_name, line)
try:
pieman.wait()
except Interrupted as exc:
conn.publish(channel_name, str(exc))
raise exc
@APP.task
def email(image_id, status):
"""Sends an email to the user when their image is successful, failed or interrupted. """
image = Image.objects.get(image_id=image_id)
if status == Image.SUCCEEDED:
template = 'successful.html'
elif status == Image.FAILED:
template = 'failed.html'
elif status == Image.INTERRUPTED:
template = 'interrupted.html'
else:
raise UnknownStatus
message = render_to_string(template, context={'username': image.user.username})
send_mail('CusDeb', message, None, [image.user.email])
@APP.task
def spawn():
"""Spawns the 'build' tasks. """
image = Image.objects.get_any()
if image:
image.set_started_at()
build.apply_async((image.image_id, ), queue=QUEUE_BUILD_NAME)
@APP.task
def watch(image_id):
"""Watches the corresponding Pieman container associated with an image id. The primary goal of
the task is to kill the container if it exceeds its time limit specified via TIMEOUT.
"""
container_name = CONTAINER_NAME.format(image_id=image_id)
LOGGER.info(f'Watching {container_name}')
try:
pieman = PiemanDocker(container_name, must_exist=True)
except DoesNotExist:
LOGGER.info(f'{container_name} does not exist, so finishing the task')
return
retries_number = TIMEOUT // POLLING_FREQUENCY
for _ in range(retries_number):
try:
status = pieman.get_status()
except DoesNotExist:
status = EXIT_STATUS
if status == EXIT_STATUS:
LOGGER.info(f'{container_name} finished in time')
break
time.sleep(POLLING_FREQUENCY)
else:
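        # for/else: this branch runs only when the loop above completed without
        # hitting `break`, i.e. the container never exited within its time limit.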
LOGGER.info(f'Killing {container_name} because it exceeded its time limit.')
pieman.kill()
| StarcoderdataPython |
1717078 | #!/usr/bin/env python
def bubble_sort(iterable):
"""Sorts the iterable.
Bubble sort is a simple, comparison sorting algorithm that iterates through a
collection, compares adjacent items, and swaps them if they are in the wrong
order. Iterations through the collection are repeated until no swaps are
needed.
* O(n^2) time complexity
* O(1) space complexity
* stable
* adaptive: O(n) when nearly sorted
Args:
iterable: A collection that is iterable.
Raises:
TypeError: If iterable is not iterable.
"""
try:
_ = iter(iterable)
except TypeError:
raise TypeError('\'{}\' object is not iterable'.format(
type(iterable).__name__))
length = len(iterable)
unsorted = True
j = 1
while unsorted:
unsorted = False
for i in range(length - j):
if iterable[i] > iterable[i + 1]:
iterable[i], iterable[i + 1] = iterable[i + 1], iterable[i]
unsorted = True
j += 1
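# Usage sketch: a small demonstration of bubble_sort(); the sample data is
# arbitrary. The function sorts the list in place and returns None.
if __name__ == '__main__':
    data = [5, 1, 4, 2, 8]
    bubble_sort(data)
    print(data)  # expected output: [1, 2, 4, 5, 8]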
| StarcoderdataPython |
3250314 | #! /usr/bin/env python
#
# The file 'command_line_options.py' in $QUEX_PATH/doc generates documentation
# for the command line options. This file double-checks whether the content
# of the generated files is consistent with the current setup.
#
# (C) <NAME>
#______________________________________________________________________________
import os
import sys
sys.path.insert(0, os.environ["QUEX_PATH"])
import re
from quex.input.setup import SETUP_INFO
if "--hwut-info" in sys.argv:
print "Command Line Option Documentation;"
print "CHOICES: man, sphinx;"
print "SAME;"
sys.exit()
def get_option_db():
result = {}
for name, value in SETUP_INFO.iteritems():
if name.find("XX_") == 0: continue # DEPRECATED
elif type(value) != list: continue # derived setup option
option_list, default = value
result.update((option, default) for option in option_list)
return result
option_db = get_option_db()
file_name = os.environ["QUEX_PATH"] + {
"sphinx": "/doc/source/appendix/command-line/intro.rst",
"man": "/doc/manpage/quex.1",
}[sys.argv[1]]
marker = {
"sphinx": "cmdoption::",
"man": ".B"
}[sys.argv[1]]
print "## Consider File:"
print "## %s" % file_name
# Verify that every option is documented.
print "(*) Options which are not documented (no output is good output)"
command_line_doc = open(file_name).read()
count_n = 0
for option in option_db:
if command_line_doc.find(option) == -1:
print "error: %s is not documented" % option
else:
count_n += 1
print "Documented options ((%i)) out of ((%i)) existing options." % (count_n, len(option_db))
# Find things which are documented but do not exist
print "(*) Options which are reported, but are not available in application (no output is good output)"
option_re = re.compile(" \-[_a-zA-Z\-0-9]+", re.UNICODE)
for line_i, line in enumerate(command_line_doc.splitlines()):
if line.find(marker) == -1: continue
for match in option_re.finditer(line):
lexeme = match.group().strip()
if lexeme in option_db: continue
# Tolerate the '-bullet' marker in man pages
if lexeme == "-bullet" and "man" in sys.argv: continue
print "%s:%i:error: %s reported but does not exist" % \
(file_name, line_i + 1, lexeme)
print lexeme in option_db
| StarcoderdataPython |
1606370 | # standard
from threading import Event
# internal
from src import settings
from .base import BaseWidget
from src.apps import InvoiceApp, CustomerApp, CallApp
# pyqt
from PyQt5.QtWidgets import QHBoxLayout, QPushButton, QLabel
from PyQt5.QtCore import Qt, QObject, QThread, pyqtSignal
##########
# Engine #
##########
class EngineSignals(QObject):
"""Engine Signals"""
error = pyqtSignal(object)
notification = pyqtSignal(str, str)
class Engine(QThread):
"""Engine"""
def __init__(self, apps, *args, **kwargs):
super().__init__(*args, **kwargs)
# event
self.stopEvent = Event()
self.resumeEvent = Event()
# signals
self.signals = EngineSignals()
# apps
self._apps = apps
def start(self, *args, **kwargs):
self.resumeEvent.set()
self.stopEvent.clear()
super().start(*args, **kwargs)
def stop(self):
self.stopEvent.set()
self.quit()
self.resumeEvent.set()
def resume(self):
self.resumeEvent.set()
def pause(self):
self.resumeEvent.clear()
def _do(self):
for app in self._apps:
report = app.run()
if report:
self.signals.notification.emit(report['title'], report['message'])
def run(self):
# do-while(stopEvent is not set)
while True:
try:
self._do()
except Exception as e:
self.pause()
self.signals.error.emit(e)
# wait for resume
self.resumeEvent.wait()
# check for stop event
if self.stopEvent.wait(10):
break
#################
# Engine Widget #
#################
class EngineWidget(BaseWidget):
"""Engine Widget"""
def _bootstrap(self):
super()._bootstrap()
# check for auto start
if settings.g('engine_auto_start'):
self.start()
def _initialize(self):
apps = [
InvoiceApp(
settings.g('invoice_interval', 10),
settings.g('invoice_sheet', [])
),
CustomerApp(
settings.g('customer_interval', 10),
settings.g('customer_sheet', [])
),
CallApp(
settings.g('call_interval', 10),
settings.g('call_sheet', []),
blacklist=settings.g('call_blacklist', [])
)
]
self.engine = Engine(apps)
def _createWidget(self):
# display
        self.display = QLabel()
        self.display.setObjectName('Display')
        self.display.setAlignment(Qt.AlignCenter)
        self.generalLayout.addWidget(self.display)
# control
self.controlLayout = QHBoxLayout()
self.generalLayout.addLayout(self.controlLayout)
# - start
self.btnStart = QPushButton('Start')
self.controlLayout.addWidget(self.btnStart)
# - stop
self.btnStop = QPushButton('Stop')
self.controlLayout.addWidget(self.btnStop)
# default state
self.engineFinishedHandler()
def _connectSignals(self):
self.btnStart.clicked.connect(self.start)
self.btnStop.clicked.connect(self.stop)
# engine
self.engine.started.connect(self.engineStartedHandler)
self.engine.finished.connect(self.engineFinishedHandler)
self.engine.signals.error.connect(self.engineErrorHandler)
self.engine.signals.notification.connect(self.engineNotificationHandler)
def _setStyles(self):
self.setStyleSheet("""
#Display{
font-size: 20px;
font-weight: bold;
}
""")
def start(self):
self.engine.start()
def stop(self):
self.engine.stop()
def engineStartedHandler(self):
self.btnStart.setDisabled(True)
self.btnStop.setEnabled(True)
        self.display.setText('Running...')
def engineFinishedHandler(self):
self.btnStart.setEnabled(True)
self.btnStop.setDisabled(True)
        self.display.setText('Stopped.')
def engineConnectingHandler(self):
self.btnStart.setDisabled(True)
self.btnStop.setDisabled(True)
        self.display.setText('Connecting...')
def engineErrorHandler(self, error):
# stop engine
self.engine.stop()
# show notification
self.ui.showNotification('ERROR', str(error))
def engineNotificationHandler(self, title, message):
self.ui.showNotification(title, message)
def networkCheckerTikHandler(self, tik):
pass
def networkCheckerConnectedHandler(self):
pass
| StarcoderdataPython |
3383436 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def go_back(apps, schema_editor):
Provider = apps.get_model("core", "Provider")
ProviderTrait = apps.get_model("core", "Trait")
for provider in Provider.objects.all():
if provider.dns_server_ips.filter(ip_address="8.8.8.8"):
trait, _ = ProviderTrait.objects.get_or_create(
name="Google DNS", description="Google DNS")
provider.traits.add(trait)
elif provider.dns_server_ips.filter(ip_address="172.16.17.32"):
trait, _ = ProviderTrait.objects.get_or_create(
name="iPlant DNS", description="iPlant DNS")
provider.traits.add(trait)
elif provider.auto_imaging:
trait, _ = ProviderTrait.objects.get_or_create(
name="Auto-Imaging", description="Auto-Imaging")
provider.traits.add(trait)
return
def merge_applications(merged_app, app):
for pm in app.providermachine_set.all():
merged_app.providermachine_set.add(pm)
if merged_app.start_date > app.start_date:
merged_app.start_date = app.start_date
if merged_app.end_date:
if app.end_date:
merged_app.end_date = app.end_date
else:
merged_app.end_date = None
elif app.end_date:
merged_app.end_date = app.end_date
if len(merged_app.description) < len(app.description):
merged_app.description = app.description
if not merged_app.icon and app.icon:
merged_app.icon = app.icon
if not merged_app.private and app.private:
merged_app.private = app.private
if not (
merged_app.created_by_identity and merged_app.created_by_identity.provider.active) and (
app.created_by_identity and app.created_by_identity.provider.active):
merged_app.created_by = app.created_by
merged_app.created_by_identity = app.created_by_identity
return merged_app
def copy_data_to_new_models(apps, schema_editor):
Provider = apps.get_model("core", "Provider")
ProviderDNSServerIP = apps.get_model("core", "ProviderDNSServerIP")
InstanceAction = apps.get_model("core", "InstanceAction")
ProviderInstanceAction = apps.get_model("core", "ProviderInstanceAction")
add_instance_actions(Provider, InstanceAction, ProviderInstanceAction)
for provider in Provider.objects.all():
for trait in provider.traits.all():
if trait.name == "Google DNS":
get_or_create_google_dns(ProviderDNSServerIP, provider)
elif trait.name == "iPlant DNS":
get_or_create_iplant_dns(ProviderDNSServerIP, provider)
elif trait.name == "Auto-Imaging":
add_auto_imaging(provider)
return
def add_instance_actions(Provider, InstanceAction, ProviderInstanceAction):
InstanceAction.objects.get_or_create(
name="Start",
description="""Starts an instance when it is in the 'stopped' State""")
InstanceAction.objects.get_or_create(
name="Stop",
description="""Stops an instance when it is in the 'active' State""")
InstanceAction.objects.get_or_create(
name="Resume",
description="""Resumes an instance when it is in the 'suspended' State""")
InstanceAction.objects.get_or_create(
name="Suspend",
description="""Suspends an instance when it is in the 'active' State""")
InstanceAction.objects.get_or_create(
name="Terminate",
description="""Destroys an in any non-error state. This is an irreversable action!""")
InstanceAction.objects.get_or_create(
name="Shelve Offload",
description="""Shelves an instance for long-term storage when it is in the 'active/shelved' State""")
InstanceAction.objects.get_or_create(
name="Shelve",
description="""Shelves an instance when it is in the 'active' State""")
InstanceAction.objects.get_or_create(
name="Unshelve",
description="""UnShelves an instance when it is in the 'shelved' State""")
InstanceAction.objects.get_or_create(
name="Reboot",
description="""Reboots an instance when it is in ANY State""")
InstanceAction.objects.get_or_create(
name="Hard Reboot",
description="""Hard Reboots an instance when it is in ANY State""")
InstanceAction.objects.get_or_create(
name="Resize",
description="""Represents the Resize/Confirm_Resize/Revert_Resize operations""")
InstanceAction.objects.get_or_create(
name="Imaging",
description="""Represents the ability to Image/Snapshot an instance""")
InstanceAction.objects.get_or_create(
name="Terminate",
description="""Represents the ability to Shutdown an instance""")
instance_actions = InstanceAction.objects.all()
for provider in Provider.objects.all():
for action in instance_actions:
# It's a feature not a bug. sure.
ProviderInstanceAction.objects.get_or_create(
provider_id=provider.id,
instance_action_id=action.id)
def add_auto_imaging(provider):
provider.auto_imaging = True
provider.save()
def get_or_create_google_dns(ProviderDNSServerIP, provider):
ProviderDNSServerIP.objects.get_or_create(
provider=provider,
ip_address="8.8.8.8",
order=1)
ProviderDNSServerIP.objects.get_or_create(
provider=provider,
ip_address="8.8.4.4",
order=2)
def get_or_create_iplant_dns(ProviderDNSServerIP, provider):
ProviderDNSServerIP.objects.get_or_create(
provider=provider,
ip_address="172.16.17.32",
order=1)
ProviderDNSServerIP.objects.get_or_create(
provider=provider,
ip_address="172.16.17.32",
order=2)
ProviderDNSServerIP.objects.get_or_create(
provider=provider,
ip_address="192.168.3.11",
order=3)
class Migration(migrations.Migration):
dependencies = [
('core', '0007_create_allocation_strategy_and_behaviors'),
]
    operations = [
        migrations.CreateModel(
            name='InstanceAction',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False,
                                        auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=256)),
                ('description', models.TextField(null=True, blank=True)),
            ],
            options={},
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ProviderDNSServerIP',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False,
                                        auto_created=True, primary_key=True)),
                ('ip_address', models.GenericIPAddressField(null=True, unpack_ipv4=True)),
                ('order', models.IntegerField()),
                ('provider', models.ForeignKey(related_name='dns_server_ips', to='core.Provider')),
            ],
            options={
                'db_table': 'provider_dns_server_ip',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ProviderInstanceAction',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False,
                                        auto_created=True, primary_key=True)),
                ('enabled', models.BooleanField(default=True)),
                ('instance_action', models.ForeignKey(to='core.InstanceAction')),
                ('provider', models.ForeignKey(to='core.Provider')),
            ],
            options={
                'db_table': 'provider_instance_action',
            },
            bases=(models.Model,),
        ),
        migrations.AlterUniqueTogether(
            name='providerdnsserverip',
            unique_together=set([('provider', 'ip_address'), ('provider', 'order')]),
        ),
        migrations.AddField(
            model_name='provider',
            name='auto_imaging',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
        migrations.RunPython(copy_data_to_new_models, go_back),
        migrations.RemoveField(
            model_name='provider',
            name='traits',
        ),
        migrations.DeleteModel(
            name='Trait',
        ),
    ]
| StarcoderdataPython |
99650 | """
Experiments to uncover the true nature of coroutines.
One goal is to be able to operate coroutines without an event loop (or some kind of stub of an event
loop)
Other goals are to be able to serialize coroutines, move them between processes and threads, implement
advanced error handling (could we back one up a step? could we implement a transaction processing monitor?)
"""
import types
from asyncio import iscoroutine
from pytest import raises
values = []
async def aprint(that: str):
values.append(that)
async def hello_world():
await aprint("Hello")
await aprint("World")
@types.coroutine
def schedule():
"""
    Strange abusive code modeled after asyncio.tasks.__sleep0() that injects a yield point. It does
that by embedding a generator into a coroutine with types.coroutine.
:return:
"""
yield
async def think_about_it():
i = 0
for j in range(0,4):
i += j
await aprint(i)
async def with_a_pause():
await aprint("Hello")
await schedule()
await aprint("World")
def test_abuse():
"""
Note when we run a coroutine that does nothing but await on other coroutines
that do nothing asynchronous (never yield) the `send` method runs once and
all of the side effects happen.
:return:
"""
a = hello_world()
assert(iscoroutine(a))
values.clear()
with raises(StopIteration):
a.send(None)
assert(values == ["Hello", "World"])
def test_more_complex():
a = think_about_it()
assert(iscoroutine(a))
values.clear()
with raises(StopIteration):
a.send(None)
assert(values == [0, 1, 3, 6])
def test_with_pause():
"""
This code is modeled after the asyncio.tasks.__sleep0() method. It shows that yield is the operation that
causes the execution to get frozen.
    :return:
"""
a = with_a_pause()
values.clear()
assert(not a.cr_running)
a.send(None)
#
    # Note that cr_running did not get set by the things that we did; it probably
    # only gets set if we invoke the coroutine the normal way, through await.
#
assert(not a.cr_running)
assert(values == ["Hello"])
assert(not a.cr_running)
with raises(StopIteration):
a.send(None)
assert(not a.cr_running)
assert(values == ["Hello", "World"])
| StarcoderdataPython |
114727 | <gh_stars>1-10
import matplotlib.pyplot as plt
import numpy as np
from comb_step_ramp import comb_step_ramp
t = np.arange(-10, 10, 0.01)

# TIME SCALING BY t/2
x = []
comb_step_ramp(t/2, x)
plt.subplot(2, 2, 1)
plt.step(t, x)
plt.axhline(0, color='black')
plt.axvline(0, color='black')
plt.xlabel('time')
plt.ylabel('function value')
plt.title('time scaling by 1/2 function')

# AMPLITUDE SCALING BY 4
y = []
comb_step_ramp(t, y)
y[:] = [value*4 for value in y]
plt.subplot(2, 2, 2)
plt.step(t, y)
plt.axhline(0, color='black')
plt.axvline(0, color='black')
plt.xlabel('time')
plt.ylabel('function value')
plt.title('amplitude scaling by 4 function')

# AMPLITUDE SCALING BY -4
z = []
comb_step_ramp(t, z)
z[:] = [value*-4 for value in z]
plt.subplot(2, 2, 3)
plt.step(t, z)
plt.axhline(0, color='black')
plt.axvline(0, color='black')
plt.xlabel('time')
plt.ylabel('function value')
plt.title('amplitude scaling by -4 function')

# Show all three panels in a single 2x2 figure
plt.show()
| StarcoderdataPython |