Dataset schema (one table row per source file):

| Column | Type | Observed range / distinct values |
|---|---|---|
| blob_id | string | length 40..40 |
| directory_id | string | length 40..40 |
| path | string | length 3..616 |
| content_id | string | length 40..40 |
| detected_licenses | list | length 0..112 |
| license_type | string | 2 classes |
| repo_name | string | length 5..115 |
| snapshot_id | string | length 40..40 |
| revision_id | string | length 40..40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 .. 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 .. 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 .. 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k .. 681M |
| star_events_count | int64 | 0 .. 209k |
| fork_events_count | int64 | 0 .. 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 .. 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 .. 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 .. 10.2M |
| extension | string | 188 classes |
| content | string | length 3 .. 10.2M |
| authors | list | length 1..1 |
| author_id | string | length 1..132 |
13fe0385f21e9b197be0b5372f8b164fe95f2a6a | a184444cce71e15a7d6789bdf9850e85a6c0e655 | /setup.py | 63d6745bb4e773a32ee654a8cd808b03b8d569fb | [] | no_license | huiwenzhang/gym-rearrangement | 916a3b0a4d3841e3b692be8258bfe0a942b85f36 | f9fa5036966fc56ad0b8c96f92ad81adaa77c875 | refs/heads/master | 2020-05-22T04:49:45.513917 | 2019-07-25T14:57:58 | 2019-07-25T14:57:58 | 186,222,793 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | from setuptools import setup
setup(name='gym_rearrangement',
version='0.0.1',
install_requires=['gym', 'mujoco_py', 'numpy', 'interval']
) | [
"[email protected]"
] | |
3810c8f20221af86b7b4f992411c4e1d29134305 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /rbeuWab36FAiLj65m_22.py | be8d1be7052d2551f6d8fb8766a11133bad6bc76 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py |
import re

def grouping(words):
    """Group words by their number of uppercase letters; each group
    is kept sorted case-insensitively."""
    group = {}
    for word in words:
        n = len(re.findall(r'[A-Z]', word))
        group.setdefault(n, []).append(word)
    for bucket in group.values():
        bucket.sort(key=lambda x: x.lower())
    return group
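
# Example (hypothetical call, not part of the original file):
#   grouping(["Hello", "world", "PYTHON", "Go"])
#   -> {1: ['Go', 'Hello'], 0: ['world'], 6: ['PYTHON']}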
| [
"[email protected]"
] | |
0daf4a67ce292dff1e3479d4e45974a795c2608a | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17r_1_01a/routing_system/route_map/content/set_/dampening/__init__.py | 9d8c2e0fff06dac57bb887b46f6d6c9f4c318100 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,582 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class dampening(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /routing-system/route-map/content/set/dampening. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: BGP route flap damping
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__half_life','__reuse','__suppress','__max_suppress_time',)
_yang_name = 'dampening'
_rest_name = 'dampening'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__half_life = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 45']}), is_leaf=True, yang_name="half-life", rest_name="half-life", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)
self.__reuse = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 20000']}), is_leaf=True, yang_name="reuse", rest_name="reuse", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)
self.__max_suppress_time = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 255']}), is_leaf=True, yang_name="max-suppress-time", rest_name="max-suppress-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)
self.__suppress = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 20000']}), is_leaf=True, yang_name="suppress", rest_name="suppress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'routing-system', u'route-map', u'content', u'set', u'dampening']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'route-map', u'set', u'dampening']
def _get_half_life(self):
"""
Getter method for half_life, mapped from YANG variable /routing_system/route_map/content/set/dampening/half_life (uint32)
"""
return self.__half_life
def _set_half_life(self, v, load=False):
"""
Setter method for half_life, mapped from YANG variable /routing_system/route_map/content/set/dampening/half_life (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_half_life is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_half_life() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 45']}), is_leaf=True, yang_name="half-life", rest_name="half-life", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """half_life must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 45']}), is_leaf=True, yang_name="half-life", rest_name="half-life", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)""",
})
self.__half_life = t
if hasattr(self, '_set'):
self._set()
def _unset_half_life(self):
self.__half_life = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 45']}), is_leaf=True, yang_name="half-life", rest_name="half-life", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)
def _get_reuse(self):
"""
Getter method for reuse, mapped from YANG variable /routing_system/route_map/content/set/dampening/reuse (uint32)
"""
return self.__reuse
def _set_reuse(self, v, load=False):
"""
Setter method for reuse, mapped from YANG variable /routing_system/route_map/content/set/dampening/reuse (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_reuse is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_reuse() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 20000']}), is_leaf=True, yang_name="reuse", rest_name="reuse", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """reuse must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 20000']}), is_leaf=True, yang_name="reuse", rest_name="reuse", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)""",
})
self.__reuse = t
if hasattr(self, '_set'):
self._set()
def _unset_reuse(self):
self.__reuse = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 20000']}), is_leaf=True, yang_name="reuse", rest_name="reuse", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)
def _get_suppress(self):
"""
Getter method for suppress, mapped from YANG variable /routing_system/route_map/content/set/dampening/suppress (uint32)
"""
return self.__suppress
def _set_suppress(self, v, load=False):
"""
Setter method for suppress, mapped from YANG variable /routing_system/route_map/content/set/dampening/suppress (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_suppress is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_suppress() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 20000']}), is_leaf=True, yang_name="suppress", rest_name="suppress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """suppress must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 20000']}), is_leaf=True, yang_name="suppress", rest_name="suppress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)""",
})
self.__suppress = t
if hasattr(self, '_set'):
self._set()
def _unset_suppress(self):
self.__suppress = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 20000']}), is_leaf=True, yang_name="suppress", rest_name="suppress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)
def _get_max_suppress_time(self):
"""
Getter method for max_suppress_time, mapped from YANG variable /routing_system/route_map/content/set/dampening/max_suppress_time (uint32)
"""
return self.__max_suppress_time
def _set_max_suppress_time(self, v, load=False):
"""
Setter method for max_suppress_time, mapped from YANG variable /routing_system/route_map/content/set/dampening/max_suppress_time (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_max_suppress_time is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_max_suppress_time() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 255']}), is_leaf=True, yang_name="max-suppress-time", rest_name="max-suppress-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """max_suppress_time must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 255']}), is_leaf=True, yang_name="max-suppress-time", rest_name="max-suppress-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)""",
})
self.__max_suppress_time = t
if hasattr(self, '_set'):
self._set()
def _unset_max_suppress_time(self):
self.__max_suppress_time = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 255']}), is_leaf=True, yang_name="max-suppress-time", rest_name="max-suppress-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)
half_life = __builtin__.property(_get_half_life, _set_half_life)
reuse = __builtin__.property(_get_reuse, _set_reuse)
suppress = __builtin__.property(_get_suppress, _set_suppress)
max_suppress_time = __builtin__.property(_get_max_suppress_time, _set_max_suppress_time)
_pyangbind_elements = {'half_life': half_life, 'reuse': reuse, 'suppress': suppress, 'max_suppress_time': max_suppress_time, }
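
# Usage sketch (hypothetical; assumes pyangbind and these generated bindings
# import cleanly under Python 2):
#   d = dampening()
#   d.half_life = 15      # minutes; the YANG range restricts this to 1..45
#   d.reuse = 750         # 1..20000
#   print d.half_life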
| [
"[email protected]"
] | |
e06f22ec429ba76478781d135e74ba716c72403b | cc72013ede1b3bb02c32a3d0d199be4f7986c173 | /ch6/pizzacost2.py | 862eddfec218d0d2f770fe5b76b9d079962df5ba | [] | no_license | alextickle/zelle-exercises | b87d2a1476189954565f5cc97ee1448200eb00d4 | b784ff9ed9b2cb1c56e31c1c63f3e2b52fa37875 | refs/heads/master | 2021-01-19T00:33:19.132238 | 2017-09-14T23:35:35 | 2017-09-14T23:35:35 | 87,182,609 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | # pizzacost2.py
import math
def pizzaarea(d):
    # Area of a circular pizza with diameter d, in square inches.
    return math.pi * (d / 2.0) ** 2

def costperinch(area, p):
    # Price per square inch: the total price divided by the area.
    return p / area
def main():
price = input("Please enter the price of the pizza, in dollars: ")
diameter = input("Please enter the diameter, in inches: ")
print "The price per square inch is %0.2f." % (costperinch(pizzaarea(diameter), price))
main()
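
# Worked example (hypothetical values): a $10.00 pizza with a 12-inch diameter
# has area pi * 6**2 ~= 113.10 sq in, so the program prints a price per square
# inch of 10.0 / 113.10 ~= 0.09.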
| [
"[email protected]"
] | |
3794e581b9ba71b6eb2afda6bdefddaf2ecc7217 | 072f8bffbfef6e149ad1934ea9183a79864c1acd | /venv/Lib/site-packages/ironic_inspector_client/test/test_shell.py | 8890ffd5519e9bc31021673fca72328a43fc0252 | [] | no_license | numvc/LuxoftBot | 77d9bf8f5f63aee63350f1ec82f4b940afe203d2 | 29d7ca8868ab86bc076509d103f7596039333417 | refs/heads/master | 2020-09-21T21:37:12.527546 | 2019-12-04T23:24:35 | 2019-12-04T23:24:35 | 224,939,956 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,708 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import sys
import tempfile
import mock
from osc_lib import exceptions
from osc_lib.tests import utils
import six
from ironic_inspector_client import shell
from ironic_inspector_client import v1
class BaseTest(utils.TestCommand):
def setUp(self):
super(BaseTest, self).setUp()
self.client = mock.Mock(spec=v1.ClientV1)
self.rules_api = mock.Mock(spec=v1.RulesAPI)
self.client.rules = self.rules_api
self.app.client_manager.baremetal_introspection = self.client
class TestIntrospect(BaseTest):
def test_introspect_one(self):
arglist = ['uuid1']
verifylist = [('node', arglist)]
cmd = shell.StartCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
result = cmd.take_action(parsed_args)
self.assertEqual((shell.StartCommand.COLUMNS, []), result)
self.client.introspect.assert_called_once_with('uuid1')
def test_introspect_many(self):
arglist = ['uuid1', 'uuid2', 'uuid3']
verifylist = [('node', arglist)]
cmd = shell.StartCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
cmd.take_action(parsed_args)
calls = [mock.call(node) for node in arglist]
self.assertEqual(calls, self.client.introspect.call_args_list)
def test_introspect_many_fails(self):
arglist = ['uuid1', 'uuid2', 'uuid3']
verifylist = [('node', arglist)]
self.client.introspect.side_effect = (None, RuntimeError())
cmd = shell.StartCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
self.assertRaises(RuntimeError, cmd.take_action, parsed_args)
calls = [mock.call(node) for node in arglist[:2]]
self.assertEqual(calls, self.client.introspect.call_args_list)
def test_reprocess(self):
node = 'uuid1'
arglist = [node]
verifylist = [('node', node)]
response_mock = mock.Mock(status_code=202, content=b'')
self.client.reprocess.return_value = response_mock
cmd = shell.ReprocessCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
result = cmd.take_action(parsed_args)
self.client.reprocess.assert_called_once_with(node)
self.assertIsNone(result)
def test_wait(self):
nodes = ['uuid1', 'uuid2', 'uuid3']
arglist = ['--wait'] + nodes
verifylist = [('node', nodes), ('wait', True)]
self.client.wait_for_finish.return_value = {
'uuid1': {'finished': True, 'error': None},
'uuid2': {'finished': True, 'error': 'boom'},
'uuid3': {'finished': True, 'error': None},
}
cmd = shell.StartCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
_c, values = cmd.take_action(parsed_args)
calls = [mock.call(node) for node in nodes]
self.assertEqual(calls, self.client.introspect.call_args_list)
self.assertEqual([('uuid1', None), ('uuid2', 'boom'), ('uuid3', None)],
sorted(values))
def test_wait_with_check_errors_no_raise_exception(self):
nodes = ['uuid1', 'uuid2', 'uuid3']
arglist = ['--wait'] + ['--check-errors'] + nodes
verifylist = [('node', nodes), ('wait', True), ('check_errors', True)]
self.client.wait_for_finish.return_value = {
'uuid1': {'finished': True, 'error': None},
'uuid2': {'finished': True, 'error': None},
'uuid3': {'finished': True, 'error': None},
}
cmd = shell.StartCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
_c, values = cmd.take_action(parsed_args)
calls = [mock.call(node) for node in nodes]
self.assertEqual(calls, self.client.introspect.call_args_list)
self.assertEqual([('uuid1', None), ('uuid2', None), ('uuid3', None)],
sorted(values))
def test_wait_with_check_errors(self):
nodes = ['uuid1', 'uuid2', 'uuid3']
arglist = ['--wait'] + ['--check-errors'] + nodes
verifylist = [('node', nodes), ('wait', True), ('check_errors', True)]
self.client.wait_for_finish.return_value = {
'uuid1': {'finished': True, 'error': None},
'uuid2': {'finished': True, 'error': 'boom'},
'uuid3': {'finished': True, 'error': None},
}
cmd = shell.StartCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
msg = "Introspection failed for"
self.assertRaisesRegex(Exception, msg, cmd.take_action, parsed_args)
def test_check_errors_alone(self):
nodes = ['uuid1', 'uuid2', 'uuid3']
arglist = ['--check-errors'] + nodes
verifylist = [('node', nodes), ('check_errors', True)]
self.client.wait_for_finish.return_value = {
'uuid1': {'finished': True, 'error': None},
'uuid2': {'finished': True, 'error': 'boom'},
'uuid3': {'finished': True, 'error': None},
}
cmd = shell.StartCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
msg = "--check-errors can only be used with --wait"
self.assertRaisesRegex(exceptions.CommandError, msg, cmd.take_action,
parsed_args)
def test_abort(self):
node = 'uuid1'
arglist = [node]
verifylist = [('node', node)]
response_mock = mock.Mock(status_code=202, content=b'')
self.client.abort.return_value = response_mock
cmd = shell.AbortCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
result = cmd.take_action(parsed_args)
self.client.abort.assert_called_once_with(node)
self.assertIsNone(result)
class TestGetStatus(BaseTest):
def test_get_status(self):
arglist = ['uuid1']
verifylist = [('node', 'uuid1')]
self.client.get_status.return_value = {'finished': True,
'error': 'boom'}
cmd = shell.StatusCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
result = cmd.take_action(parsed_args)
self.assertEqual([('error', 'finished'), ('boom', True)], list(result))
self.client.get_status.assert_called_once_with('uuid1')
class TestStatusList(BaseTest):
def setUp(self):
super(TestStatusList, self).setUp()
self.COLUMNS = ('UUID', 'Started at', 'Finished at', 'Error')
self.status1 = {
'error': None,
'finished': True,
'finished_at': '1970-01-01T00:10',
'links': None,
'started_at': '1970-01-01T00:00',
'uuid': 'uuid1'
}
self.status2 = {
'error': None,
'finished': False,
'finished_at': None,
'links': None,
'started_at': '1970-01-01T00:01',
'uuid': 'uuid2'
}
def status_row(self, status):
status = dict(item for item in status.items()
if item[0] != 'links')
return (status['uuid'], status['started_at'], status['finished_at'],
status['error'])
def test_list_statuses(self):
status_list = [self.status1, self.status2]
self.client.list_statuses.return_value = status_list
arglist = []
verifylist = []
cmd = shell.StatusListCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
result = cmd.take_action(parsed_args)
self.assertEqual((self.COLUMNS, [self.status_row(status)
for status in status_list]),
result)
self.client.list_statuses.assert_called_once_with(limit=None,
marker=None)
def test_list_statuses_marker_limit(self):
self.client.list_statuses.return_value = []
arglist = ['--marker', 'uuid1', '--limit', '42']
verifylist = [('marker', 'uuid1'), ('limit', 42)]
cmd = shell.StatusListCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
result = cmd.take_action(parsed_args)
self.assertEqual((self.COLUMNS, []), result)
self.client.list_statuses.assert_called_once_with(limit=42,
marker='uuid1')
class TestRules(BaseTest):
def test_import_single(self):
f = tempfile.NamedTemporaryFile()
self.addCleanup(f.close)
f.write(b'{"foo": "bar"}')
f.flush()
arglist = [f.name]
verifylist = [('file', f.name)]
self.rules_api.from_json.return_value = {
'uuid': '1', 'description': 'd', 'links': []}
cmd = shell.RuleImportCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
cols, values = cmd.take_action(parsed_args)
self.assertEqual(('UUID', 'Description'), cols)
self.assertEqual([('1', 'd')], values)
self.rules_api.from_json.assert_called_once_with({'foo': 'bar'})
def test_import_multiple(self):
f = tempfile.NamedTemporaryFile()
self.addCleanup(f.close)
f.write(b'[{"foo": "bar"}, {"answer": 42}]')
f.flush()
arglist = [f.name]
verifylist = [('file', f.name)]
self.rules_api.from_json.side_effect = iter([
{'uuid': '1', 'description': 'd1', 'links': []},
{'uuid': '2', 'description': 'd2', 'links': []}
])
cmd = shell.RuleImportCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
cols, values = cmd.take_action(parsed_args)
self.assertEqual(('UUID', 'Description'), cols)
self.assertEqual([('1', 'd1'), ('2', 'd2')], values)
self.rules_api.from_json.assert_any_call({'foo': 'bar'})
self.rules_api.from_json.assert_any_call({'answer': 42})
def test_import_yaml(self):
f = tempfile.NamedTemporaryFile()
self.addCleanup(f.close)
f.write(b"""---
- foo: bar
- answer: 42
""")
f.flush()
arglist = [f.name]
verifylist = [('file', f.name)]
self.rules_api.from_json.side_effect = iter([
{'uuid': '1', 'description': 'd1', 'links': []},
{'uuid': '2', 'description': 'd2', 'links': []}
])
cmd = shell.RuleImportCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
cols, values = cmd.take_action(parsed_args)
self.assertEqual(('UUID', 'Description'), cols)
self.assertEqual([('1', 'd1'), ('2', 'd2')], values)
self.rules_api.from_json.assert_any_call({'foo': 'bar'})
self.rules_api.from_json.assert_any_call({'answer': 42})
def test_list(self):
self.rules_api.get_all.return_value = [
{'uuid': '1', 'description': 'd1', 'links': []},
{'uuid': '2', 'description': 'd2', 'links': []}
]
cmd = shell.RuleListCommand(self.app, None)
parsed_args = self.check_parser(cmd, [], [])
cols, values = cmd.take_action(parsed_args)
self.assertEqual(('UUID', 'Description'), cols)
self.assertEqual([('1', 'd1'), ('2', 'd2')], values)
self.rules_api.get_all.assert_called_once_with()
def test_show(self):
self.rules_api.get.return_value = {
'uuid': 'uuid1',
'links': [],
'description': 'd',
'conditions': [{}],
'actions': [{}]
}
arglist = ['uuid1']
verifylist = [('uuid', 'uuid1')]
cmd = shell.RuleShowCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
cols, values = cmd.take_action(parsed_args)
self.assertEqual(('actions', 'conditions', 'description', 'uuid'),
cols)
self.assertEqual(([{}], [{}], 'd', 'uuid1'), values)
self.rules_api.get.assert_called_once_with('uuid1')
def test_delete(self):
arglist = ['uuid1']
verifylist = [('uuid', 'uuid1')]
cmd = shell.RuleDeleteCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
cmd.take_action(parsed_args)
self.rules_api.delete.assert_called_once_with('uuid1')
def test_purge(self):
cmd = shell.RulePurgeCommand(self.app, None)
parsed_args = self.check_parser(cmd, [], [])
cmd.take_action(parsed_args)
self.rules_api.delete_all.assert_called_once_with()
class TestDataSave(BaseTest):
def test_stdout(self):
self.client.get_data.return_value = {'answer': 42}
buf = six.StringIO()
arglist = ['uuid1']
verifylist = [('node', 'uuid1')]
cmd = shell.DataSaveCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
with mock.patch.object(sys, 'stdout', buf):
cmd.take_action(parsed_args)
self.assertEqual('{"answer": 42}', buf.getvalue())
self.client.get_data.assert_called_once_with('uuid1', raw=False)
def test_file(self):
self.client.get_data.return_value = b'{"answer": 42}'
with tempfile.NamedTemporaryFile() as fp:
arglist = ['--file', fp.name, 'uuid1']
verifylist = [('node', 'uuid1'), ('file', fp.name)]
cmd = shell.DataSaveCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
cmd.take_action(parsed_args)
content = fp.read()
self.assertEqual(b'{"answer": 42}', content)
self.client.get_data.assert_called_once_with('uuid1', raw=True)
class TestInterfaceCmds(BaseTest):
def setUp(self):
super(TestInterfaceCmds, self).setUp()
self.inspector_db = {
"all_interfaces":
{
'em1': {'mac': "00:11:22:33:44:55", 'ip': "10.10.1.1",
"lldp_processed": {
"switch_chassis_id": "99:aa:bb:cc:dd:ff",
"switch_port_id": "555",
"switch_port_vlans":
[{"id": 101, "name": "vlan101"},
{"id": 102, "name": "vlan102"},
{"id": 104, "name": "vlan104"},
{"id": 201, "name": "vlan201"},
{"id": 203, "name": "vlan203"}],
"switch_port_mtu": 1514
}
}
}
}
def test_list(self):
self.client.get_all_interface_data.return_value = [
["em1", "00:11:22:33:44:55", [101, 102, 104, 201, 203],
"99:aa:bb:cc:dd:ff", "555"],
["em2", "00:11:22:66:77:88", [201, 203],
"99:aa:bb:cc:dd:ff", "777"],
["em3", "00:11:22:aa:bb:cc", '', '', '']]
arglist = ['uuid1']
verifylist = [('node_ident', 'uuid1')]
cmd = shell.InterfaceListCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
cols, values = cmd.take_action(parsed_args)
expected_cols = ("Interface", "MAC Address", "Switch Port VLAN IDs",
"Switch Chassis ID", "Switch Port ID")
# Note that em3 has no lldp data
expected_rows = [["em1", "00:11:22:33:44:55",
[101, 102, 104, 201, 203],
"99:aa:bb:cc:dd:ff",
"555"],
["em2", "00:11:22:66:77:88",
[201, 203],
"99:aa:bb:cc:dd:ff",
"777"],
["em3", "00:11:22:aa:bb:cc", '', '', '']]
self.assertEqual(expected_cols, cols)
self.assertEqual(expected_rows, values)
def test_list_field(self):
self.client.get_all_interface_data.return_value = [
["em1", 1514],
["em2", 9216],
["em3", '']]
arglist = ['uuid1', '--fields', 'interface',
"switch_port_mtu"]
verifylist = [('node_ident', 'uuid1'),
('fields', ["interface", "switch_port_mtu"])]
cmd = shell.InterfaceListCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
cols, values = cmd.take_action(parsed_args)
expected_cols = ("Interface", "Switch Port MTU")
expected_rows = [["em1", 1514],
["em2", 9216],
["em3", '']]
self.assertEqual(expected_cols, cols)
self.assertEqual(expected_rows, values)
def test_list_filtered(self):
self.client.get_all_interface_data.return_value = [
["em1",
"00:11:22:33:44:55",
[101, 102, 104, 201, 203],
"99:aa:bb:cc:dd:ff",
"555"]]
arglist = ['uuid1', '--vlan', '104']
verifylist = [('node_ident', 'uuid1'),
('vlan', [104])]
cmd = shell.InterfaceListCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
cols, values = cmd.take_action(parsed_args)
expected_cols = ("Interface", "MAC Address", "Switch Port VLAN IDs",
"Switch Chassis ID", "Switch Port ID")
expected_rows = [["em1", "00:11:22:33:44:55",
[101, 102, 104, 201, 203],
"99:aa:bb:cc:dd:ff",
"555"]]
self.assertEqual(expected_cols, cols)
self.assertEqual(expected_rows, values)
def test_list_no_data(self):
self.client.get_all_interface_data.return_value = [[]]
arglist = ['uuid1']
verifylist = [('node_ident', 'uuid1')]
cmd = shell.InterfaceListCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
cols, values = cmd.take_action(parsed_args)
expected_cols = ("Interface", "MAC Address", "Switch Port VLAN IDs",
"Switch Chassis ID", "Switch Port ID")
expected_rows = [[]]
self.assertEqual(expected_cols, cols)
self.assertEqual(expected_rows, values)
def test_show(self):
self.client.get_data.return_value = self.inspector_db
data = collections.OrderedDict(
[('node_ident', "uuid1"),
('interface', "em1"),
('mac', "00:11:22:33:44:55"),
('switch_chassis_id', "99:aa:bb:cc:dd:ff"),
('switch_port_id', "555"),
('switch_port_mtu', 1514),
('switch_port_vlans',
[{"id": 101, "name": "vlan101"},
{"id": 102, "name": "vlan102"},
{"id": 104, "name": "vlan104"},
{"id": 201, "name": "vlan201"},
{"id": 203, "name": "vlan203"}])]
)
self.client.get_interface_data.return_value = data
arglist = ['uuid1', 'em1']
verifylist = [('node_ident', 'uuid1'), ('interface', 'em1')]
cmd = shell.InterfaceShowCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
cols, values = cmd.take_action(parsed_args)
expected_cols = ("node_ident", "interface", "mac",
"switch_chassis_id", "switch_port_id",
"switch_port_mtu", "switch_port_vlans")
expected_rows = ("uuid1", "em1", "00:11:22:33:44:55",
"99:aa:bb:cc:dd:ff", "555", 1514,
[{"id": 101, "name": "vlan101"},
{"id": 102, "name": "vlan102"},
{"id": 104, "name": "vlan104"},
{"id": 201, "name": "vlan201"},
{"id": 203, "name": "vlan203"}])
self.assertEqual(expected_cols, cols)
self.assertEqual(expected_rows, values)
def test_show_field(self):
self.client.get_data.return_value = self.inspector_db
data = collections.OrderedDict([('node_ident', "uuid1"),
('interface', "em1"),
('switch_port_vlans',
[{"id": 101, "name": "vlan101"},
{"id": 102, "name": "vlan102"},
{"id": 104, "name": "vlan104"},
{"id": 201, "name": "vlan201"},
{"id": 203, "name": "vlan203"}])
])
self.client.get_interface_data.return_value = data
arglist = ['uuid1', 'em1', '--fields', 'node_ident', 'interface',
"switch_port_vlans"]
verifylist = [('node_ident', 'uuid1'), ('interface', 'em1'),
('fields', ["node_ident", "interface",
"switch_port_vlans"])]
cmd = shell.InterfaceShowCommand(self.app, None)
parsed_args = self.check_parser(cmd, arglist, verifylist)
cols, values = cmd.take_action(parsed_args)
expected_cols = ("node_ident", "interface", "switch_port_vlans")
expected_rows = ("uuid1", "em1",
[{"id": 101, "name": "vlan101"},
{"id": 102, "name": "vlan102"},
{"id": 104, "name": "vlan104"},
{"id": 201, "name": "vlan201"},
{"id": 203, "name": "vlan203"}])
self.assertEqual(expected_cols, cols)
self.assertEqual(expected_rows, values)
| [
"[email protected]"
] | |
bc23a716f2508b19b5cce40ae33c07d4540e1ea0 | b76e39e535499704368eddc26237dc0016ef7d06 | /TCSPLC/readgeneral_v2.py | 121d8ae89a266ee83d5309e0e456fb504addffdc | [] | no_license | BUBAIMITRA2018/castersimulation | 0532e53df7d346c2824e577cc91cd0ac2ce4694c | eca5fddff5c0f33f785168f6b1e9f572c1622be0 | refs/heads/master | 2022-12-10T02:45:04.207196 | 2020-09-09T05:35:54 | 2020-09-09T05:35:54 | 260,110,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,549 | py | from snap7.snap7types import areas, S7WLBit, S7WLWord, S7WLReal, S7WLDWord
from clientcomm_v1 import *
# Explicit imports for names the star import above is expected to provide.
import threading
from snap7.util import get_bool, get_int, get_real, get_dword
__all__ = ['ReadGeneral']
class ReadGeneral():
def __init__(self, client):
self.client = client
self.mylock = threading.Lock()
def readsymbolvalue(self, address, datatype, dataclass):
addressconverted = float(address)
self.byte = int(addressconverted)
self.bit = round((addressconverted - self.byte)*10)
self.daat = str(dataclass)
if datatype == 'S7WLBit':
self.result = self.client.read_area(areas[self.daat], 0, self.byte, S7WLBit)
return get_bool(self.result, 0, self.bit)
elif datatype == 'S7WLByte' or datatype == 'S7WLWord':
self.result = self.client.read_area(areas[self.daat], 0, self.byte, S7WLWord)
return get_int(self.result, 0)
        elif datatype == 'S7WLReal':
            self.result = self.client.read_area(areas[self.daat], 0, self.byte, S7WLReal)
            return get_real(self.result, 0)
        elif datatype == 'S7WLDWord':
            self.result = self.client.read_area(areas[self.daat], 0, self.byte, S7WLDWord)
            return get_dword(self.result, 0)
else:
return None
def readDBvalue(self, address, datatype):
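        # Address strings look like "db5.dbd12" or "db5.dbx10.3": the digits
        # after "db" select the data block; the trailing digits give the byte
        # offset and, for bit reads, the bit offset after the dot.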
addressconverted = str(address)
data1 = addressconverted[addressconverted.find("b") + 1:addressconverted.find(".")]
data2 = addressconverted[addressconverted.find("d", 2) + 1:]
data3 = data2[data2.find("b") + 1:]
data3 = float(data3[1:])
self.byte = int(data3)
self.bit = round((data3 - self.byte) * 10)
self.dataarea = int(data1)
if datatype == 'S7WLBit':
self.result = self.client.read_area(areas['DB'], self.dataarea, self.byte, S7WLBit)
return get_bool(self.result, 0, self.bit)
elif datatype == 'S7WLByte' or datatype == 'S7WLWord':
self.result = self.client.read_area(areas['DB'], self.dataarea, self.byte, S7WLWord)
return get_int(self.result, 0)
elif datatype == 'S7WLReal':
self.result = self.client.read_area(areas['DB'], self.dataarea, self.byte, S7WLReal)
return get_real(self.result, 0)
        elif datatype == 'S7WLDWord':
            self.result = self.client.read_area(areas['DB'], self.dataarea, self.byte, S7WLDWord)
            return get_dword(self.result, 0)
else:
return None
def __getstate__(self):
state = self.__dict__.copy()
# Remove the unpicklable entries.
del state['mylock']
return state
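
# Usage sketch (hypothetical addresses; assumes a connected snap7 client
# object from clientcomm_v1):
#   reader = ReadGeneral(client)
#   running = reader.readsymbolvalue('10.3', 'S7WLBit', 'MK')  # merker bit M10.3
#   level = reader.readDBvalue('db5.dbd12', 'S7WLReal')        # DB5.DBD12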
| [
"[email protected]"
] | |
7768a053d82a17eba8b241704beef65be6e66f04 | 3b63434159d3ffe2f8af0586137b155820f6fd0d | /3rdparty/openmm/docs-source/developerguide/conf.py | cc2cd2c3893c8b31bc0e5e30c88a29dfff4e26f5 | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | samuelflores/MMB | 2e50204f5bc5a9bd80e56ec4cf8901e782d2f822 | 96da444290ab1af4385013c566079d381d8051b4 | refs/heads/master | 2023-06-25T23:24:17.861167 | 2023-06-21T09:10:20 | 2023-06-21T09:10:47 | 29,155,968 | 6 | 10 | NOASSERTION | 2022-01-03T10:34:12 | 2015-01-12T20:43:46 | C++ | UTF-8 | Python | false | false | 8,607 | py | # -*- coding: utf-8 -*-
#
# OpenMM Developer Guide documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 7 12:42:06 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('../sphinx'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['autonumber', 'numsec']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenMM Developer Guide'
copyright = u'2011-2017, Stanford University'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = os.getenv('OPENMM_VERSION')
# The full version, including alpha/beta/rc tags.
release = os.getenv('OPENMM_VERSION')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenMMDeveloperGuidedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'letterpaper,openany',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': """
\\usepackage[none]{hyphenat}
\\usepackage{xstring}
\\usepackage{color}
\\usepackage{caption}
\\setcounter{tocdepth}{3}
\\captionsetup[figure]{labelformat=empty}
% Backwards compatibility for sphinx < 1.5
\\let\\DUspan\\null % force DUspan to be defined
\\renewcommand{\DUspan}[2]{%
\\IfEqCase{#1}{%
{code}{\\small{}\\texttt{#2}\\normalsize{}}
}[\\PackageError{DUspan}{Unrecognized option passed to DUspan: #1}{}]%
}%
% Sphinx > 1.5 compatibility (github.com/sphinx-doc/sphinx/issues/2231)
\\newcommand{\\DUrolecode}[1]{%
\\small{}\\texttt{#1}\\normalsize{}%
}%""",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'OpenMMDeveloperGuide.tex', u'OpenMM Developer Guide',
u'Peter Eastman', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'openmmdeveloperguide', u'OpenMM Developer Guide',
[u'Peter Eastman'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'OpenMMDeveloperGuide', u'OpenMM Developer Guide',
u'Peter Eastman', 'OpenMMDeveloperGuide', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| [
"[email protected]"
] | |
216b5bec34fb1562d40bec74a6c32dfa1c426795 | dd31235062b1f66fcb7bf9e6d444242e0cdbc447 | /SVR/hebbe/run_sbatch.py | 16ecc4fbc2672aeb43471ebf9f056f0a37a6447f | [] | no_license | LiYanChalmers/AllstateClaimSeverity | a25b4ebaae767dc674d320b4b5c9de84a41dc539 | 819304e9dd44c1c12e6251ab84facc180fb484a7 | refs/heads/master | 2020-03-21T20:34:25.145931 | 2016-12-12T22:38:24 | 2016-12-12T22:38:24 | 139,015,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 12 03:00:49 2016
@author: ly
"""
from subprocess import call
n_rounds = 400
for i in range(n_rounds):
dst = 'svr'+str(i)+'.sh'
call(['sbatch', dst]) | [
"[email protected]"
] | |
66b7e804a7b61895eeb29a188e27522454be4b49 | f952cc77989cb330c82e792a935101b15a2121aa | /myenv/bin/easy_install-3.7 | d90c17ee6834a3a6ec1667559db67a7d053176a2 | [] | no_license | sanix-sandel/Api-Blog | 6e86061469f9357d43f166b989b91c7194130e4d | f2650da6cc1f2f4cb55b4cbe82bdf7c4d8eacccc | refs/heads/master | 2022-11-13T06:27:03.678517 | 2020-07-02T19:17:31 | 2020-07-02T19:17:31 | 254,863,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | 7 | #!/home/sanix/Documents/Api-Blog/myenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | |
ffe970e02a5d956bd21d1ea08782d7edd77f9c66 | 8e92584fcbc7b5ed393ab778e45ca5aa67c781b8 | /generate_nullarbor_table.py | 6508e1db126553e0a78246f0adfacbff41facf05 | [
"MIT"
] | permissive | cdeitrick/gists | cd4261087deb7eadef19953efaa9be40630ba181 | ef0e1ee2b2b9de448e5aceef083b7027e4444317 | refs/heads/master | 2020-04-01T18:40:29.294645 | 2019-04-15T20:35:21 | 2019-04-15T20:35:21 | 153,507,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | from pathlib import Path
from typing import Tuple
def get_sample_files(folder: Path) -> Tuple[Path, Path]:
    # Locate the paired-end read files: 'R1' marks the forward reads,
    # 'R2' the reverse reads.
    forward = [i for i in folder.iterdir() if 'R1' in i.name][0]
    reverse = [i for i in folder.iterdir() if 'R2' in i.name][0]
    return forward, reverse
if __name__ == "__main__":
folder = Path("/home/cld100/projects/lipuma/samples/")
filename = folder / "lipuma_samples.tsv"
with filename.open('w') as file1:
for sample_folder in folder.iterdir():
if not sample_folder.is_dir(): continue
try:
f, r = get_sample_files(sample_folder)
sample_name = sample_folder.name.split('_')[0]
file1.write(f"{sample_name}\t{f}\t{r}\n")
            except IndexError:
                # Skip sample folders that lack a complete R1/R2 pair.
                continue
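
# Each line of lipuma_samples.tsv is "<sample>\t<R1 path>\t<R2 path>"; e.g.
# (hypothetical): AU1234	.../AU1234_S1_R1.fastq.gz	.../AU1234_S1_R2.fastq.gz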
| [
"[email protected]"
] | |
3dbf7e36dfa28c5e7bf2dd870a8172ad1b4c905e | 711c11d0111a40055ba110e7089a231c2ba42b8e | /toontown/coderedemption/TTCodeRedemptionMgrAI.py | b67fb0f0528c12a748432e77f42d02ffe11814b4 | [
"Apache-2.0"
] | permissive | DeadMemez/ProjectAltis-OldAcornAcres | 03c8dc912ecccae8456d89790f6b332547b75cc3 | e8e0087389933795973e566782affcaec65a2980 | refs/heads/master | 2021-01-19T13:59:07.234192 | 2017-08-20T14:41:45 | 2017-08-20T14:41:45 | 100,869,782 | 0 | 2 | null | 2017-08-20T15:14:35 | 2017-08-20T15:14:35 | null | UTF-8 | Python | false | false | 7,236 | py | '''
Created on Mar 21, 2017
@author: Drew
'''
import time
from datetime import datetime
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from toontown.catalog import CatalogItem
from toontown.catalog.CatalogInvalidItem import CatalogInvalidItem
from toontown.catalog.CatalogClothingItem import CatalogClothingItem
from toontown.catalog.CatalogItemList import CatalogItemList
from toontown.catalog.CatalogPoleItem import CatalogPoleItem
from toontown.catalog.CatalogBeanItem import CatalogBeanItem
from toontown.catalog.CatalogChatItem import CatalogChatItem
from toontown.catalog.CatalogAccessoryItem import CatalogAccessoryItem
from toontown.catalog.CatalogRentalItem import CatalogRentalItem
from toontown.catalog.CatalogGardenItem import CatalogGardenItem
from toontown.catalog.CatalogGardenStarterItem import CatalogGardenStarterItem
from toontown.coderedemption import TTCodeRedemptionConsts, TTCodeRedemptionGlobals
from toontown.toonbase import ToontownGlobals
class TTCodeRedemptionMgrAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory("TTCodeRedemptionMgrAI")
def __init__(self, air):
DistributedObjectAI.__init__(self, air)
self.air = air
def announceGenerate(self):
DistributedObjectAI.announceGenerate(self)
def delete(self):
DistributedObjectAI.delete(self)
def giveAwardToToonResult(self, todo0, todo1):
pass
def redeemCode(self, context, code):
avId = self.air.getAvatarIdFromSender()
if not avId:
self.air.writeServerEvent('suspicious', avId=avId, issue='Tried to redeem a code from an invalid avId')
return
av = self.air.doId2do.get(avId)
if not av:
self.air.writeServerEvent('suspicious', avId=avId, issue='Invalid avatar tried to redeem a code')
return
# Default values. They will get modified if needed
isValid = True
hasExpired = False
isEligible = True
beenDelivered = False
code = str(code.lower().replace(' ', '').replace('-', '').replace('_', '')) # Make every code lower case with no spaces or dashes of any sort
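        # e.g. "Silly-Meter", "SILLY METER" and "silly_meter" all normalize to "sillymeter"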
avCodes = av.getRedeemedCodes()
        self.notify.debug('redeemed codes for %s: %s' % (avId, avCodes))
if not avCodes:
avCodes = [code]
av.setRedeemedCodes(avCodes)
else:
if not code in avCodes:
avCodes.append(code)
av.setRedeemedCodes(avCodes)
isEligible = True
else:
isEligible = False
expirationDate = TTCodeRedemptionGlobals.codeToExpiration.get(code)
if not expirationDate:
hasExpired = False
else:
if datetime.now() > expirationDate:
hasExpired = True
        self.notify.debug('%s entered code %s' % (avId, code))
if not isValid:
self.air.writeServerEvent('code-redeemed', avId = avId, issue = 'Invalid code: %s' % code)
self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, ToontownGlobals.CODE_INVALID, 0])
return
# Make sure its not expired, which it shouldnt be considering there is none that have expirations :thinking:
if hasExpired:
self.air.writeServerEvent('code-redeemed', avId = avId, issue = 'Expired code: %s' % code)
self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, ToontownGlobals.CODE_EXPIRED, 0])
return
# Make sure the toon is allowed to use this code
if not isEligible:
self.air.writeServerEvent('code-redeemed', avId = avId, issue = 'Ineligible for code: %s' % code)
self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, ToontownGlobals.CODE_INELIGIBLE, 0])
return
items = self.getItemsForCode(code)
for item in items:
if isinstance(item, CatalogInvalidItem): # Incase theres an invalid item type
self.air.writeServerEvent('suspicious', avId = avId, issue = 'uh oh! invalid item type for code: %s' % code)
self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, ToontownGlobals.CODE_INVALID, 0])
break
if len(av.mailboxContents) + len(av.onGiftOrder) >= ToontownGlobals.MaxMailboxContents:
# Targets mailbox is full
beenDelivered = False
break
item.deliveryDate = int(time.time() / 60) + 1 # Basically instant delivery
av.onOrder.append(item)
av.b_setDeliverySchedule(av.onOrder)
beenDelivered = True
if not beenDelivered:
# Something went wrong!
self.air.writeServerEvent('code-redeemed', avId = avId, issue = 'Could not deliver items for code: %s' % code)
self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, ToontownGlobals.CODE_INVALID, 0])
return
        # Send the success result
        self.air.writeServerEvent('code-redeemed', avId = avId, issue = 'Successfully redeemed code: %s' % code)
self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, ToontownGlobals.CODE_SUCCESS, 0])
def getItemsForCode(self, code):
avId = self.air.getAvatarIdFromSender()
if not avId:
            self.air.writeServerEvent('suspicious', avId = avId, issue = 'avId is None')
return
av = self.air.doId2do.get(avId)
if not av:
            self.air.writeServerEvent('suspicious', avId = avId, issue = "Avatar doesn't exist")
return
code = str(code.lower().replace(' ', '').replace('-', '').replace('_', '')) # Make every code lower case with no spaces or dashes of any sort
if code == "sillymeter":
shirt = CatalogClothingItem(1753, 0)
return [shirt]
if code == "getconnected":
shirt = CatalogClothingItem(1752, 0)
return [shirt]
if code == "toontastic":
shirt = CatalogClothingItem(1820, 0)
return [shirt]
if code == "gardens":
gardenStarter = CatalogGardenStarterItem()
return [gardenStarter]
if code == "sweet":
beans = CatalogBeanItem(12000, tagCode = 2)
return [beans]
return []
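    # Illustrative call (derived from the normalization and mapping above):
    # getItemsForCode('Get-Connected') lowercases and strips separators to
    # 'getconnected' and returns [CatalogClothingItem(1752, 0)]; an unknown
    # code returns [], so redeemCode ends up reporting CODE_INVALID.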
def redeemCodeAiToUd(self, avId, context, code):
self.sendUpdate('redeemCodeAiToUd', [avId, context, code])
def redeemCodeResultUdToAi(self, avId, context, result, awardMgrResult):
self.d_redeemCodeResult(avId, context, result, awardMgrResult)
def d_redeemCodeResult(self, avId, context, result, awardMgrResult):
self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, result, awardMgrResult])
| [
"[email protected]"
] | |
d2fb3d6b96ef10ef390bf7df4126f75fab56c27e | f4afb7a16696803942a999b0687e08997defb114 | /build/rotors_simulator_demos/catkin_generated/generate_cached_setup.py | 5f65bc27f9863b023889c9944fb330469b9f3650 | [] | no_license | EmanueleAucone/ethz_ws | 8d4760109be3b1882d875aa28f7ecfe793b1b9e6 | 883efd2936e8f67e783790d7ac8f3a40e749d1b9 | refs/heads/main | 2023-01-22T04:03:45.341419 | 2020-11-25T10:30:49 | 2020-11-25T10:30:49 | 308,606,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,358 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/emanuele/ethz_ws/devel;/opt/ros/kinetic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/emanuele/ethz_ws/devel/.private/rotors_simulator_demos/env.sh')
output_filename = '/home/emanuele/ethz_ws/build/rotors_simulator_demos/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
] | |
b09211d8fbcf1bd15aba309e8e9fd4059204322d | 057289814dd3222ba719b76b5ead124a12fa7f34 | /fpga/mqnic/AU200/fpga_10g/tb/fpga_core/test_fpga_core.py | 73e65ecb4c70ad14b0761c915c6612adbf086617 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | JimZGChow/corundum | 61547803eb996d9e9be701454715ab6e038d0420 | e3fb7d19b29df753c054eb56889fa8223f3292fb | refs/heads/master | 2023-08-20T13:27:10.793792 | 2020-12-16T22:58:19 | 2020-12-16T22:58:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,137 | py | """
Copyright 2020, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
"""
import logging
import os
import sys
import scapy.utils
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
import cocotb_test.simulator
import cocotb
from cocotb.log import SimLog
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge, Timer
from cocotbext.pcie.core import RootComplex
from cocotbext.pcie.xilinx.us import UltraScalePlusPcieDevice
from cocotbext.eth import XgmiiSource, XgmiiSink
try:
import mqnic
except ImportError:
# attempt import from current directory
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
try:
import mqnic
finally:
del sys.path[0]
class TB(object):
def __init__(self, dut):
self.dut = dut
self.BAR0_APERTURE = int(os.getenv("PARAM_BAR0_APERTURE"))
self.log = SimLog("cocotb.tb")
self.log.setLevel(logging.DEBUG)
# PCIe
self.rc = RootComplex()
self.rc.max_payload_size = 0x1 # 256 bytes
self.rc.max_read_request_size = 0x2 # 512 bytes
self.dev = UltraScalePlusPcieDevice(
# configuration options
pcie_generation=3,
pcie_link_width=16,
user_clk_frequency=250e6,
alignment="dword",
cq_cc_straddle=False,
rq_rc_straddle=False,
rc_4tlp_straddle=False,
enable_pf1=False,
enable_client_tag=True,
enable_extended_tag=True,
enable_parity=False,
enable_rx_msg_interface=False,
enable_sriov=False,
enable_extended_configuration=False,
enable_pf0_msi=True,
enable_pf1_msi=False,
# signals
# Clock and Reset Interface
user_clk=dut.clk_250mhz,
user_reset=dut.rst_250mhz,
# user_lnk_up
# sys_clk
# sys_clk_gt
# sys_reset
# phy_rdy_out
# Requester reQuest Interface
rq_entity=dut,
rq_name="m_axis_rq",
pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0,
pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0,
pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1,
pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1,
# pcie_rq_tag0
# pcie_rq_tag1
# pcie_rq_tag_av
# pcie_rq_tag_vld0
# pcie_rq_tag_vld1
# Requester Completion Interface
rc_entity=dut,
rc_name="s_axis_rc",
# Completer reQuest Interface
cq_entity=dut,
cq_name="s_axis_cq",
# pcie_cq_np_req
# pcie_cq_np_req_count
# Completer Completion Interface
cc_entity=dut,
cc_name="m_axis_cc",
# Transmit Flow Control Interface
# pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
# pcie_tfc_npd_av=dut.pcie_tfc_npd_av,
# Configuration Management Interface
cfg_mgmt_addr=dut.cfg_mgmt_addr,
cfg_mgmt_function_number=dut.cfg_mgmt_function_number,
cfg_mgmt_write=dut.cfg_mgmt_write,
cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
cfg_mgmt_read=dut.cfg_mgmt_read,
cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
# cfg_mgmt_debug_access
# Configuration Status Interface
# cfg_phy_link_down
# cfg_phy_link_status
# cfg_negotiated_width
# cfg_current_speed
cfg_max_payload=dut.cfg_max_payload,
cfg_max_read_req=dut.cfg_max_read_req,
# cfg_function_status
# cfg_vf_status
# cfg_function_power_state
# cfg_vf_power_state
# cfg_link_power_state
# cfg_err_cor_out
# cfg_err_nonfatal_out
# cfg_err_fatal_out
# cfg_local_error_out
# cfg_local_error_valid
# cfg_rx_pm_state
# cfg_tx_pm_state
# cfg_ltssm_state
# cfg_rcb_status
# cfg_obff_enable
# cfg_pl_status_change
# cfg_tph_requester_enable
# cfg_tph_st_mode
# cfg_vf_tph_requester_enable
# cfg_vf_tph_st_mode
# Configuration Received Message Interface
# cfg_msg_received
# cfg_msg_received_data
# cfg_msg_received_type
# Configuration Transmit Message Interface
# cfg_msg_transmit
# cfg_msg_transmit_type
# cfg_msg_transmit_data
# cfg_msg_transmit_done
# Configuration Flow Control Interface
cfg_fc_ph=dut.cfg_fc_ph,
cfg_fc_pd=dut.cfg_fc_pd,
cfg_fc_nph=dut.cfg_fc_nph,
cfg_fc_npd=dut.cfg_fc_npd,
cfg_fc_cplh=dut.cfg_fc_cplh,
cfg_fc_cpld=dut.cfg_fc_cpld,
cfg_fc_sel=dut.cfg_fc_sel,
# Configuration Control Interface
# cfg_hot_reset_in
# cfg_hot_reset_out
# cfg_config_space_enable
# cfg_dsn
# cfg_bus_number
# cfg_ds_port_number
# cfg_ds_bus_number
# cfg_ds_device_number
# cfg_ds_function_number
# cfg_power_state_change_ack
# cfg_power_state_change_interrupt
cfg_err_cor_in=dut.status_error_cor,
cfg_err_uncor_in=dut.status_error_uncor,
# cfg_flr_in_process
# cfg_flr_done
# cfg_vf_flr_in_process
# cfg_vf_flr_func_num
# cfg_vf_flr_done
# cfg_pm_aspm_l1_entry_reject
# cfg_pm_aspm_tx_l0s_entry_disable
# cfg_req_pm_transition_l23_ready
# cfg_link_training_enable
# Configuration Interrupt Controller Interface
# cfg_interrupt_int
# cfg_interrupt_sent
# cfg_interrupt_pending
cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
# cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
cfg_interrupt_msi_pending_status=dut.cfg_interrupt_msi_pending_status,
cfg_interrupt_msi_pending_status_data_enable=dut.cfg_interrupt_msi_pending_status_data_enable,
# cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num,
cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
# cfg_interrupt_msix_enable
# cfg_interrupt_msix_mask
# cfg_interrupt_msix_vf_enable
# cfg_interrupt_msix_vf_mask
# cfg_interrupt_msix_address
# cfg_interrupt_msix_data
# cfg_interrupt_msix_int
# cfg_interrupt_msix_vec_pending
# cfg_interrupt_msix_vec_pending_status
cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
# cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
# cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,
# Configuration Extend Interface
# cfg_ext_read_received
# cfg_ext_write_received
# cfg_ext_register_number
# cfg_ext_function_number
# cfg_ext_write_data
# cfg_ext_write_byte_enable
# cfg_ext_read_data
# cfg_ext_read_data_valid
)
# self.dev.log.setLevel(logging.DEBUG)
self.rc.make_port().connect(self.dev)
self.driver = mqnic.Driver(self.rc)
self.dev.functions[0].msi_multiple_message_capable = 5
self.dev.functions[0].configure_bar(0, 2**self.BAR0_APERTURE, ext=True, prefetch=True)
# Ethernet
cocotb.fork(Clock(dut.qsfp0_rx_clk_1, 6.4, units="ns").start())
self.qsfp0_1_source = XgmiiSource(dut.qsfp0_rxd_1, dut.qsfp0_rxc_1, dut.qsfp0_rx_clk_1, dut.qsfp0_rx_rst_1)
cocotb.fork(Clock(dut.qsfp0_tx_clk_1, 6.4, units="ns").start())
self.qsfp0_1_sink = XgmiiSink(dut.qsfp0_txd_1, dut.qsfp0_txc_1, dut.qsfp0_tx_clk_1, dut.qsfp0_tx_rst_1)
cocotb.fork(Clock(dut.qsfp0_rx_clk_2, 6.4, units="ns").start())
self.qsfp0_2_source = XgmiiSource(dut.qsfp0_rxd_2, dut.qsfp0_rxc_2, dut.qsfp0_rx_clk_2, dut.qsfp0_rx_rst_2)
cocotb.fork(Clock(dut.qsfp0_tx_clk_2, 6.4, units="ns").start())
self.qsfp0_2_sink = XgmiiSink(dut.qsfp0_txd_2, dut.qsfp0_txc_2, dut.qsfp0_tx_clk_2, dut.qsfp0_tx_rst_2)
cocotb.fork(Clock(dut.qsfp0_rx_clk_3, 6.4, units="ns").start())
self.qsfp0_3_source = XgmiiSource(dut.qsfp0_rxd_3, dut.qsfp0_rxc_3, dut.qsfp0_rx_clk_3, dut.qsfp0_rx_rst_3)
cocotb.fork(Clock(dut.qsfp0_tx_clk_3, 6.4, units="ns").start())
self.qsfp0_3_sink = XgmiiSink(dut.qsfp0_txd_3, dut.qsfp0_txc_3, dut.qsfp0_tx_clk_3, dut.qsfp0_tx_rst_3)
cocotb.fork(Clock(dut.qsfp0_rx_clk_4, 6.4, units="ns").start())
self.qsfp0_4_source = XgmiiSource(dut.qsfp0_rxd_4, dut.qsfp0_rxc_4, dut.qsfp0_rx_clk_4, dut.qsfp0_rx_rst_4)
cocotb.fork(Clock(dut.qsfp0_tx_clk_4, 6.4, units="ns").start())
self.qsfp0_4_sink = XgmiiSink(dut.qsfp0_txd_4, dut.qsfp0_txc_4, dut.qsfp0_tx_clk_4, dut.qsfp0_tx_rst_4)
cocotb.fork(Clock(dut.qsfp1_rx_clk_1, 6.4, units="ns").start())
self.qsfp1_1_source = XgmiiSource(dut.qsfp1_rxd_1, dut.qsfp1_rxc_1, dut.qsfp1_rx_clk_1, dut.qsfp1_rx_rst_1)
cocotb.fork(Clock(dut.qsfp1_tx_clk_1, 6.4, units="ns").start())
self.qsfp1_1_sink = XgmiiSink(dut.qsfp1_txd_1, dut.qsfp1_txc_1, dut.qsfp1_tx_clk_1, dut.qsfp1_tx_rst_1)
cocotb.fork(Clock(dut.qsfp1_rx_clk_2, 6.4, units="ns").start())
self.qsfp1_2_source = XgmiiSource(dut.qsfp1_rxd_2, dut.qsfp1_rxc_2, dut.qsfp1_rx_clk_2, dut.qsfp1_rx_rst_2)
cocotb.fork(Clock(dut.qsfp1_tx_clk_2, 6.4, units="ns").start())
self.qsfp1_2_sink = XgmiiSink(dut.qsfp1_txd_2, dut.qsfp1_txc_2, dut.qsfp1_tx_clk_2, dut.qsfp1_tx_rst_2)
cocotb.fork(Clock(dut.qsfp1_rx_clk_3, 6.4, units="ns").start())
self.qsfp1_3_source = XgmiiSource(dut.qsfp1_rxd_3, dut.qsfp1_rxc_3, dut.qsfp1_rx_clk_3, dut.qsfp1_rx_rst_3)
cocotb.fork(Clock(dut.qsfp1_tx_clk_3, 6.4, units="ns").start())
self.qsfp1_3_sink = XgmiiSink(dut.qsfp1_txd_3, dut.qsfp1_txc_3, dut.qsfp1_tx_clk_3, dut.qsfp1_tx_rst_3)
cocotb.fork(Clock(dut.qsfp1_rx_clk_4, 6.4, units="ns").start())
self.qsfp1_4_source = XgmiiSource(dut.qsfp1_rxd_4, dut.qsfp1_rxc_4, dut.qsfp1_rx_clk_4, dut.qsfp1_rx_rst_4)
cocotb.fork(Clock(dut.qsfp1_tx_clk_4, 6.4, units="ns").start())
self.qsfp1_4_sink = XgmiiSink(dut.qsfp1_txd_4, dut.qsfp1_txc_4, dut.qsfp1_tx_clk_4, dut.qsfp1_tx_rst_4)
dut.sw.setimmediatevalue(0)
dut.i2c_scl_i.setimmediatevalue(1)
dut.i2c_sda_i.setimmediatevalue(1)
dut.qsfp0_rx_error_count_1.setimmediatevalue(0)
dut.qsfp0_rx_error_count_2.setimmediatevalue(0)
dut.qsfp0_rx_error_count_3.setimmediatevalue(0)
dut.qsfp0_rx_error_count_4.setimmediatevalue(0)
dut.qsfp1_rx_error_count_1.setimmediatevalue(0)
dut.qsfp1_rx_error_count_2.setimmediatevalue(0)
dut.qsfp1_rx_error_count_3.setimmediatevalue(0)
dut.qsfp1_rx_error_count_4.setimmediatevalue(0)
dut.qsfp0_modprsl.setimmediatevalue(0)
dut.qsfp0_intl.setimmediatevalue(1)
dut.qsfp1_modprsl.setimmediatevalue(0)
dut.qsfp1_intl.setimmediatevalue(1)
dut.qspi_dq_i.setimmediatevalue(0)
self.loopback_enable = False
cocotb.fork(self._run_loopback())
async def init(self):
self.dut.qsfp0_rx_rst_1.setimmediatevalue(0)
self.dut.qsfp0_tx_rst_1.setimmediatevalue(0)
self.dut.qsfp0_rx_rst_2.setimmediatevalue(0)
self.dut.qsfp0_tx_rst_2.setimmediatevalue(0)
self.dut.qsfp0_rx_rst_3.setimmediatevalue(0)
self.dut.qsfp0_tx_rst_3.setimmediatevalue(0)
self.dut.qsfp0_rx_rst_4.setimmediatevalue(0)
self.dut.qsfp0_tx_rst_4.setimmediatevalue(0)
self.dut.qsfp1_rx_rst_1.setimmediatevalue(0)
self.dut.qsfp1_tx_rst_1.setimmediatevalue(0)
self.dut.qsfp1_rx_rst_2.setimmediatevalue(0)
self.dut.qsfp1_tx_rst_2.setimmediatevalue(0)
self.dut.qsfp1_rx_rst_3.setimmediatevalue(0)
self.dut.qsfp1_tx_rst_3.setimmediatevalue(0)
self.dut.qsfp1_rx_rst_4.setimmediatevalue(0)
self.dut.qsfp1_tx_rst_4.setimmediatevalue(0)
await RisingEdge(self.dut.clk_250mhz)
await RisingEdge(self.dut.clk_250mhz)
self.dut.qsfp0_rx_rst_1.setimmediatevalue(1)
self.dut.qsfp0_tx_rst_1.setimmediatevalue(1)
self.dut.qsfp0_rx_rst_2.setimmediatevalue(1)
self.dut.qsfp0_tx_rst_2.setimmediatevalue(1)
self.dut.qsfp0_rx_rst_3.setimmediatevalue(1)
self.dut.qsfp0_tx_rst_3.setimmediatevalue(1)
self.dut.qsfp0_rx_rst_4.setimmediatevalue(1)
self.dut.qsfp0_tx_rst_4.setimmediatevalue(1)
self.dut.qsfp1_rx_rst_1.setimmediatevalue(1)
self.dut.qsfp1_tx_rst_1.setimmediatevalue(1)
self.dut.qsfp1_rx_rst_2.setimmediatevalue(1)
self.dut.qsfp1_tx_rst_2.setimmediatevalue(1)
self.dut.qsfp1_rx_rst_3.setimmediatevalue(1)
self.dut.qsfp1_tx_rst_3.setimmediatevalue(1)
self.dut.qsfp1_rx_rst_4.setimmediatevalue(1)
self.dut.qsfp1_tx_rst_4.setimmediatevalue(1)
await FallingEdge(self.dut.rst_250mhz)
await Timer(100, 'ns')
await RisingEdge(self.dut.clk_250mhz)
await RisingEdge(self.dut.clk_250mhz)
self.dut.qsfp0_rx_rst_1.setimmediatevalue(0)
self.dut.qsfp0_tx_rst_1.setimmediatevalue(0)
self.dut.qsfp0_rx_rst_2.setimmediatevalue(0)
self.dut.qsfp0_tx_rst_2.setimmediatevalue(0)
self.dut.qsfp0_rx_rst_3.setimmediatevalue(0)
self.dut.qsfp0_tx_rst_3.setimmediatevalue(0)
self.dut.qsfp0_rx_rst_4.setimmediatevalue(0)
self.dut.qsfp0_tx_rst_4.setimmediatevalue(0)
self.dut.qsfp1_rx_rst_1.setimmediatevalue(0)
self.dut.qsfp1_tx_rst_1.setimmediatevalue(0)
self.dut.qsfp1_rx_rst_2.setimmediatevalue(0)
self.dut.qsfp1_tx_rst_2.setimmediatevalue(0)
self.dut.qsfp1_rx_rst_3.setimmediatevalue(0)
self.dut.qsfp1_tx_rst_3.setimmediatevalue(0)
self.dut.qsfp1_rx_rst_4.setimmediatevalue(0)
self.dut.qsfp1_tx_rst_4.setimmediatevalue(0)
await self.rc.enumerate(enable_bus_mastering=True, configure_msi=True)
async def _run_loopback(self):
while True:
await RisingEdge(self.dut.clk_250mhz)
if self.loopback_enable:
if not self.qsfp0_1_sink.empty():
self.qsfp0_1_source.send(self.qsfp0_1_sink.recv())
if not self.qsfp0_2_sink.empty():
self.qsfp0_2_source.send(self.qsfp0_2_sink.recv())
if not self.qsfp0_3_sink.empty():
self.qsfp0_3_source.send(self.qsfp0_3_sink.recv())
if not self.qsfp0_4_sink.empty():
self.qsfp0_4_source.send(self.qsfp0_4_sink.recv())
if not self.qsfp1_1_sink.empty():
self.qsfp1_1_source.send(self.qsfp1_1_sink.recv())
if not self.qsfp1_2_sink.empty():
self.qsfp1_2_source.send(self.qsfp1_2_sink.recv())
if not self.qsfp1_3_sink.empty():
self.qsfp1_3_source.send(self.qsfp1_3_sink.recv())
if not self.qsfp1_4_sink.empty():
self.qsfp1_4_source.send(self.qsfp1_4_sink.recv())
@cocotb.test()
async def run_test_nic(dut):
tb = TB(dut)
await tb.init()
tb.log.info("Init driver")
await tb.driver.init_dev(tb.dev.functions[0].pcie_id)
await tb.driver.interfaces[0].open()
# await driver.interfaces[1].open()
# enable queues
tb.log.info("Enable queues")
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_SCHED_ENABLE, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].schedulers[0].hw_addr+4*k, 0x00000003)
# wait for all writes to complete
await tb.rc.mem_read(tb.driver.hw_addr, 4)
tb.log.info("Init complete")
tb.log.info("Send and receive single packet")
data = bytearray([x % 256 for x in range(1024)])
await tb.driver.interfaces[0].start_xmit(data, 0)
await tb.qsfp0_1_sink.wait()
pkt = tb.qsfp0_1_sink.recv()
tb.log.info("Packet: %s", pkt)
tb.qsfp0_1_source.send(pkt)
await tb.driver.interfaces[0].wait()
pkt = tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
# await tb.driver.interfaces[1].start_xmit(data, 0)
# await tb.qsfp1_1_sink.wait()
# pkt = tb.qsfp1_1_sink.recv()
# tb.log.info("Packet: %s", pkt)
# tb.qsfp1_1_source.send(pkt)
# await tb.driver.interfaces[1].wait()
# pkt = tb.driver.interfaces[1].recv()
# tb.log.info("Packet: %s", pkt)
# assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.log.info("RX and TX checksum tests")
payload = bytes([x % 256 for x in range(256)])
eth = Ether(src='5A:51:52:53:54:55', dst='DA:D1:D2:D3:D4:D5')
ip = IP(src='192.168.1.100', dst='192.168.1.101')
udp = UDP(sport=1, dport=2)
test_pkt = eth / ip / udp / payload
test_pkt2 = test_pkt.copy()
test_pkt2[UDP].chksum = scapy.utils.checksum(bytes(test_pkt2[UDP]))
await tb.driver.interfaces[0].start_xmit(test_pkt2.build(), 0, 34, 6)
await tb.qsfp0_1_sink.wait()
pkt = tb.qsfp0_1_sink.recv()
tb.log.info("Packet: %s", pkt)
tb.qsfp0_1_source.send(pkt)
await tb.driver.interfaces[0].wait()
pkt = tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
assert Ether(pkt.data).build() == test_pkt.build()
tb.log.info("Multiple small packets")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(60)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
await tb.driver.interfaces[0].wait()
pkt = tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
tb.log.info("Multiple large packets")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(1514)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
await tb.driver.interfaces[0].wait()
pkt = tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
await RisingEdge(dut.clk_250mhz)
await RisingEdge(dut.clk_250mhz)
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axi_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axi', 'rtl'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axis', 'rtl'))
eth_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'eth', 'rtl'))
pcie_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'pcie', 'rtl'))
def test_fpga_core(request):
dut = "fpga_core"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
os.path.join(rtl_dir, "common", "mqnic_interface.v"),
os.path.join(rtl_dir, "common", "mqnic_port.v"),
os.path.join(rtl_dir, "common", "cpl_write.v"),
os.path.join(rtl_dir, "common", "cpl_op_mux.v"),
os.path.join(rtl_dir, "common", "desc_fetch.v"),
os.path.join(rtl_dir, "common", "desc_op_mux.v"),
os.path.join(rtl_dir, "common", "queue_manager.v"),
os.path.join(rtl_dir, "common", "cpl_queue_manager.v"),
os.path.join(rtl_dir, "common", "tx_engine.v"),
os.path.join(rtl_dir, "common", "rx_engine.v"),
os.path.join(rtl_dir, "common", "tx_checksum.v"),
os.path.join(rtl_dir, "common", "rx_hash.v"),
os.path.join(rtl_dir, "common", "rx_checksum.v"),
os.path.join(rtl_dir, "common", "tx_scheduler_rr.v"),
os.path.join(rtl_dir, "common", "event_mux.v"),
os.path.join(rtl_dir, "common", "tdma_scheduler.v"),
os.path.join(rtl_dir, "common", "tdma_ber.v"),
os.path.join(rtl_dir, "common", "tdma_ber_ch.v"),
os.path.join(eth_rtl_dir, "eth_mac_10g_fifo.v"),
os.path.join(eth_rtl_dir, "eth_mac_10g.v"),
os.path.join(eth_rtl_dir, "axis_xgmii_rx_64.v"),
os.path.join(eth_rtl_dir, "axis_xgmii_tx_64.v"),
os.path.join(eth_rtl_dir, "lfsr.v"),
os.path.join(eth_rtl_dir, "ptp_clock.v"),
os.path.join(eth_rtl_dir, "ptp_clock_cdc.v"),
os.path.join(eth_rtl_dir, "ptp_perout.v"),
os.path.join(eth_rtl_dir, "ptp_ts_extract.v"),
os.path.join(axi_rtl_dir, "axil_interconnect.v"),
os.path.join(axi_rtl_dir, "arbiter.v"),
os.path.join(axi_rtl_dir, "priority_encoder.v"),
os.path.join(axis_rtl_dir, "axis_adapter.v"),
os.path.join(axis_rtl_dir, "axis_arb_mux.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo_adapter.v"),
os.path.join(axis_rtl_dir, "axis_fifo.v"),
os.path.join(axis_rtl_dir, "axis_register.v"),
os.path.join(pcie_rtl_dir, "pcie_us_axil_master.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us_rd.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us_wr.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux_rd.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux_wr.v"),
os.path.join(pcie_rtl_dir, "dma_psdpram.v"),
os.path.join(pcie_rtl_dir, "dma_client_axis_sink.v"),
os.path.join(pcie_rtl_dir, "dma_client_axis_source.v"),
os.path.join(pcie_rtl_dir, "pcie_us_cfg.v"),
os.path.join(pcie_rtl_dir, "pcie_us_msi.v"),
os.path.join(pcie_rtl_dir, "pcie_tag_manager.v"),
os.path.join(pcie_rtl_dir, "pulse_merge.v"),
]
parameters = {}
parameters['AXIS_PCIE_DATA_WIDTH'] = 512
parameters['AXIS_PCIE_KEEP_WIDTH'] = parameters['AXIS_PCIE_DATA_WIDTH'] // 32
parameters['AXIS_PCIE_RQ_USER_WIDTH'] = 62 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 137
parameters['AXIS_PCIE_RC_USER_WIDTH'] = 75 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 161
parameters['AXIS_PCIE_CQ_USER_WIDTH'] = 88 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 183
parameters['AXIS_PCIE_CC_USER_WIDTH'] = 33 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 81
parameters['RQ_SEQ_NUM_WIDTH'] = 6
parameters['BAR0_APERTURE'] = 24
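    # Worked example (values follow directly from the expressions above): with
    # AXIS_PCIE_DATA_WIDTH = 512, KEEP_WIDTH resolves to 16 and the RQ/RC/CQ/CC
    # user widths to 137/161/183/81, i.e. the straddle-capable 512-bit
    # UltraScale+ integrated block interface.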
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir,
"sim_build_"+request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
| [
"[email protected]"
] | |
9d83a56dc0bb592784356093f88c7ba0707cd132 | 80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019 | /data/CodeChef/NW1.py | 20f6b9480cad21ebfdc60ecc02a793fd70598c8b | [] | no_license | Ritvik19/CodeBook | ef7764d89b790e902ede5802f36d5ca910d8a50e | 2b4ed7938bbf156553d6ba5cba6216449528f0fc | refs/heads/master | 2021-07-04T08:25:52.478719 | 2020-08-08T06:54:14 | 2020-08-08T06:54:14 | 138,744,302 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | days = [ "mon", "tues", "wed", "thurs", "fri", "sat", "sun"]
t = int(input())
for i in range(t):
n, d = input().split()
nw = [4, 4, 4, 4, 4, 4, 4]
for i in range(int(n)-28):
nw[(days.index(d)+i)%7] += 1
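    # Example (hand-checked against the logic above): n=31 starting on "mon"
    # adds three extra days (mon, tues, wed), so the output is 5 5 5 4 4 4 4.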
print(*nw)
| [
"[email protected]"
] | |
9b579abd1e2cbdcc1cbbbab8a3fc0bd6f4128332 | c7979f4f6435fe8d0d07fff7a430da55e3592aed | /ABC037/C.py | 6f2c9cd58755ef8a8c60865d3ae965e11ffd2c30 | [] | no_license | banboooo044/AtCoder | cee87d40bb98abafde19017f4f4e2f984544b9f8 | 7541d521cf0da848ecb5eb10ffea7d75a44cbbb6 | refs/heads/master | 2020-04-14T11:35:24.977457 | 2019-09-17T03:20:27 | 2019-09-17T03:20:27 | 163,818,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | N,K = map(int,input().split(" "))
a = list(map(int,input().split(" ")))
val = sum(a[:K])
sumv = val
for i in range(N-K):
val += (sumv - a[i] + a[K+i])
sumv = (sumv - a[i] + a[K+i])
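# Worked example (computed from the sliding window above): val accumulates the
# sum of every K-length window, e.g. N=5, K=3, a=[1,2,3,4,5] gives 6+9+12 = 27.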
print(val) | [
"[email protected]"
] | |
f8944242142d963cb8c49dc5938d45255961c0bd | 7d6a8a62c117bbf15da9fa4240ce60cd98efb935 | /venv/lib/python3.7/hashlib.py | 82f21adb7d37206e8735570b4cdc701d425120da | [] | no_license | Sem31/creating_API | 040281c14a510072c2194e76864f84afa0a4dfb9 | d468b1d97d0bb272087beea06dc21dda02f7efc1 | refs/heads/master | 2022-10-14T13:33:39.548294 | 2019-09-24T06:20:26 | 2019-09-24T06:20:26 | 210,194,810 | 0 | 0 | null | 2022-09-16T18:10:40 | 2019-09-22T18:27:03 | Python | UTF-8 | Python | false | false | 44 | py | /home/sem/anaconda3/lib/python3.7/hashlib.py | [
"[email protected]"
] | |
660569dd91e5570134ba2800ce12ab9c558fef2d | 1d47d6865522051a248b14211ef630933097480f | /snaked/core/prefs.py | 93f2f5816c9222de820ccd7cd5f50692b706bb88 | [
"MIT"
] | permissive | basaundi/snaked | e85584899ec8c3a488abe152a50e80e6ad156008 | 52a9146eae4a91d3776311d9c632682d55c307b2 | refs/heads/master | 2021-01-18T09:36:38.852448 | 2012-05-23T18:20:07 | 2012-05-23T18:20:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,092 | py | import anydbm, whichdb
import os.path
import json
from itertools import chain
from inspect import cleandoc
import gtk, pango
from uxie.utils import make_missing_dirs, join_to_settings_dir
def init(injector):
injector.bind('window', 'editor-prefs', 'Prefs/_Editor settings#1', show_editor_preferences)
injector.bind('window', 'default-config', 'Prefs/Global/_Config', show_default_config)
injector.bind('window', 'default-contexts', 'Prefs/Global/Conte_xts',
show_contexts_config, 'default')
injector.bind('window', 'session-config', 'Prefs/Session/_Config', show_session_config)
injector.bind('window', 'session-contexts', 'Prefs/Session/Conte_xts',
show_contexts_config, 'session')
injector.bind('window', 'project-contexts', 'Prefs/_Project/Conte_xts',
show_contexts_config, 'project')
injector.bind_menu('Prefs').to('<ctrl>p')
def show_editor_preferences(window):
from snaked.core.gui.editor_prefs import PreferencesDialog
dialog = PreferencesDialog(window.manager.lang_prefs)
dialog.show(window)
def show_default_config(window):
window.manager.default_config.save()
uri = join_to_settings_dir('snaked', 'snaked.conf')
e = window.manager.open(uri, contexts='python')
window.attach_editor(e)
e.connect('file-saved', on_config_saved, window.manager.default_config, uri)
def show_session_config(window):
window.manager.session_config.save()
uri = join_to_settings_dir('snaked', window.manager.session, 'config')
e = window.manager.open(uri, contexts='python')
window.attach_editor(e)
e.connect('file-saved', on_config_saved, window.manager.session_config, uri)
def on_config_saved(editor, config, config_path):
editor.message('Config updated', 'done')
config.load(config_path)
def show_contexts_config(window, config_type):
import shutil
from os.path import join, exists, dirname
from uxie.utils import make_missing_dirs
manager = window.manager
if config_type == 'default':
processor = manager.default_ctx_processor
elif config_type == 'session':
processor = manager.session_ctx_processor
elif config_type == 'project':
editor = window.get_editor_context()
if not editor:
window.message('Hmm. Project?', 'warn')
return
root = editor.project_root
if not root:
editor.message('Current project root is not defined', 'warn')
return
processor = manager.get_context_manager(root).project_processor
else:
raise Exception('Unknown context config type: ' + str(config_type))
uri = processor.filename
if not exists(uri):
make_missing_dirs(uri)
shutil.copy(join(dirname(__file__), 'contexts.template'), uri)
e = window.manager.open(uri)
window.attach_editor(e)
e.connect('file-saved', on_context_saved)
def on_context_saved(editor):
editor.message('Contexts updated', 'done')
for m in editor.window.manager.ctx_managers.values():
m.invalidate()
default_prefs = {
'default': {
'font': 'Monospace 11',
'use-tabs': True,
'tab-width': 4,
'show-right-margin': False,
'right-margin': 100,
'show-line-numbers': True,
'wrap-text': False,
'style': 'classic',
'auto-indent': True,
'indent-on-tab': True,
'smart-home-end': True,
'highlight-current-line': True,
'show-whitespace': False,
'line-spacing': 0,
'remove-trailing-space': False,
},
'python': {
'use-tabs': False,
'show-right-margin': True,
'remove-trailing-space': True,
},
'snippets': {
'use-tabs': True,
'remove-trailing-space': False,
},
'rst': {
'use-tabs': False,
'tab-width': 3,
'remove-trailing-space': False,
'right-margin': 80,
'show-right-margin': True,
}
}
def update_view_preferences(view, buf):
# Try to fix screen flickering
# No hope, should mail bug to upstream
#text_style = style_scheme.get_style('text')
#if text_style and editor.view.window:
# color = editor.view.get_colormap().alloc_color(text_style.props.background)
# editor.view.modify_bg(gtk.STATE_NORMAL, color)
pref = buf.config
font = pango.FontDescription(pref['font'])
view.modify_font(font)
view.set_auto_indent(pref['auto-indent'])
view.set_indent_on_tab(pref['indent-on-tab'])
view.set_insert_spaces_instead_of_tabs(not pref['use-tabs'])
view.set_smart_home_end(pref['smart-home-end'])
view.set_highlight_current_line(pref['highlight-current-line'])
view.set_show_line_numbers(pref['show-line-numbers'])
view.set_tab_width(pref['tab-width'])
view.set_draw_spaces(pref['show-whitespace'])
view.set_right_margin_position(pref['right-margin'])
view.set_show_right_margin(pref['show-right-margin'])
view.set_wrap_mode(gtk.WRAP_WORD if pref['wrap-text'] else gtk.WRAP_NONE)
view.set_pixels_above_lines(pref['line-spacing'])
def load_json_settings(name, default=None):
filename = get_settings_path(name)
try:
with open(filename) as f:
try:
return json.load(f)
except ValueError:
pass
except IOError:
pass
return default
def save_json_settings(name, value):
filename = get_settings_path(name)
with open(filename, 'w') as f:
json.dump(value, f, sort_keys=True, indent=4)
def get_settings_path(*name):
filename = join_to_settings_dir('snaked', *name)
make_missing_dirs(filename)
return filename
options = {}
def add_option(name, default, desc=''):
options[name] = (default, desc)
internal_options = {}
def add_internal_option(name, default, desc=''):
internal_options[name] = (default, desc)
def add_editor_preferences(on_dialog_created, on_pref_refresh, default_values):
import snaked.core.gui.editor_prefs
for k, v in default_values.iteritems():
default_prefs.setdefault(k, {}).update(v)
snaked.core.gui.editor_prefs.on_dialog_created_hooks.append(on_dialog_created)
snaked.core.gui.editor_prefs.on_pref_refresh_hooks.append(on_pref_refresh)
class CompositePreferences(object):
def __init__(self, *prefs):
self.prefs = list(prefs)
def __getitem__(self, key):
for p in self.prefs:
try:
return p[key]
except KeyError:
pass
raise KeyError('There is no %s in preferences' % key)
def __setitem__(self, key, value):
for p in self.prefs:
if key in p:
p[key] = value
return
raise KeyError('There is no %s in preferences' % key)
def __contains__(self, key):
raise NotImplementedError()
class KVSettings(object):
def __init__(self, *name):
filename = get_settings_path(*name)
# Dirty. Try to avoid locking of gdbm files
result = whichdb.whichdb(filename)
if result is None:
result = anydbm._defaultmod.__name__
elif result == "":
raise Exception("db type of %s could not be determined" % filename)
if result == 'gdbm':
flags = 'cu'
else:
flags = 'c'
self.db = anydbm.open(filename, flags)
def get_key(self, key):
if isinstance(key, unicode):
return key.encode('utf-8')
else:
return key
def __getitem__(self, key):
return self.db[self.get_key(key)]
def __contains__(self, key):
return self.db.has_key(self.get_key(key))
def __setitem__(self, key, value):
self.db[self.get_key(key)] = value
def save(self):
self.db.sync()
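# Minimal usage sketch (an assumption, not taken from callers in this module):
# KVSettings wraps an anydbm file holding string keys and values.
#
#   state = KVSettings('snaked', 'state.db')
#   state[u'last-file'] = '/tmp/a.py'   # unicode keys are utf-8 encoded
#   if 'last-file' in state:
#       path = state['last-file']
#   state.save()                        # sync() the underlying db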
class ListSettings(object):
def __init__(self, name):
self.path = get_settings_path(name)
def exists(self):
return os.path.exists(self.path)
def load(self, default):
try:
return [l.strip() for l in open(self.path)]
except IOError:
return default
def store(self, data):
open(self.path, 'w').write('\n'.join(data))
class DefaultValue(object):
def __init__(self, conf, name, additional=None):
self.conf = conf
self.name = name
self.additional = additional
@property
def value(self):
try:
return self._value
except AttributeError:
pass
default_value = self.conf[self.name]
if isinstance(default_value, dict):
value = DefaultDictValue(default_value, self.additional)
elif isinstance(default_value, list):
value = DefaultListValue(default_value, self.additional)
else:
raise Exception('Unsupported default type: ' + str(type(default_value)))
self._value = value
return value
def __iter__(self):
return self.value.__iter__()
def __add__(self, x):
return DefaultValue(self.conf, self.name, x)
def __getitem__(self, name):
return self.value[name]
def __contains__(self, name):
return name in self.value
def __setitem__(self, name, value):
self.value[name] = value
def __repr__(self):
if self.additional is None:
return "default['%s']" % self.name
else:
return "default['%s'] + %s" % (self.name, repr(self.additional))
class DefaultListValue(object):
def __init__(self, default, x):
self.default = default + x
def __iter__(self):
return iter(self.default)
class DefaultDictValue(object):
def __init__(self, default, x):
self.default = default.copy()
self.default.update(x)
self.additional = x
def __getitem__(self, name):
return self.default[name]
def __contains__(self, name):
return name in self.default
def __setitem__(self, name, value):
self.additional[name] = value
self.default[name] = value
def __iter__(self):
return iter(self.default)
class DefaultValuesProvider(object):
def __init__(self, conf):
self.conf = conf
def __getitem__(self, name):
return DefaultValue(self.conf, name)
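# Sketch of what this enables inside a user config file (the option name here
# is hypothetical): when a PySettings object has a parent, load() injects a
# `default` variable, so a session config can extend rather than replace an
# inherited value, e.g.
#
#   some_option = default['some_option'] + {'extra-key': 'extra-value'}
#
# DefaultValue then lazily merges the addition over the parent's dict or list.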
class PySettings(object):
def __init__(self, options=None, parent=None):
assert options or parent
if parent:
self.parent = parent
self.options = parent.options
else:
self.options = options
self.parent = None
self.data = {}
def __getitem__(self, name):
try:
return self.data[name]
except KeyError:
pass
if self.parent:
v = self.parent[name]
if isinstance(v, list):
v = v[:]
elif isinstance(v, dict):
v = v.copy()
else:
v = self.get_default(name)
self.data[name] = v
return v
def __contains__(self, name):
return name in self.options
def get_default(self, name):
value = self.options[name][0]
if callable(value):
value = value()
return value
def __setitem__(self, name, value):
self.data[name] = value
def get_config(self):
result = ''
for name in sorted(set(chain(self.data, self.options))):
doc = cleandoc(self.options.get(name, (0, 'Unknown option'))[1])
if doc:
for l in doc.splitlines():
result += '# ' + l + '\n'
if name not in self.options:
value = self.data[name]
is_default = False
elif name not in self.data:
is_default = True
if self.parent:
value = self.parent[name]
else:
value = self.get_default(name)
else:
value = self.data[name]
if (self.parent and value == self.parent[name]) or (
not self.parent and value == self.get_default(name)):
is_default = True
else:
is_default = False
value = '%s = %s' % (name, repr(value))
if is_default:
value = '# ' + value
result += value + '\n\n'
return result
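    # Example of the emitted text (assuming a single registered option
    # ('font', ('Monospace 11', 'Editor font')) left at its default value):
    #
    #   # Editor font
    #   # font = 'Monospace 11'
    #
    # i.e. values that still equal their default are written out commented.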
def load(self, filename):
self.filename = filename
self.data.clear()
if self.parent:
self.data['default'] = DefaultValuesProvider(self.parent)
try:
execfile(self.filename, self.data)
except IOError:
pass
except SyntaxError, e:
print 'Error on loading config: %s' % self.filename, e
try:
del self.data['__builtins__']
except KeyError:
pass
if self.parent:
del self.data['default']
def save(self):
with open(self.filename, 'w') as f:
f.write(self.get_config())
| [
"[email protected]"
] | |
9b4650366e2834047484b64cd106dbf11d26d0a2 | 4d42b57a4ab24b301c4503002ed1038ec12030ba | /satsearch/main.py | be03dccf9316dce990838bb91aa64ce34c60fa9d | [
"MIT"
] | permissive | cgore/sat-search | 53435155aee3f0f0dc687387aac68c1c01b48432 | 230af9b57ad06c1754de6ce97f6ae6893791d8b7 | refs/heads/master | 2020-03-17T15:38:15.935161 | 2018-03-19T03:24:52 | 2018-03-19T03:24:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,208 | py | import os
import sys
import json
import logging
from .version import __version__
from satsearch import Search, Scenes
from satsearch.parser import SatUtilsParser
def main(review=False, printsearch=False, printmd=None, printcal=False,
load=None, save=None, append=False, download=None, **kwargs):
""" Main function for performing a search """
if load is None:
if printsearch:
txt = 'Search for scenes matching criteria:\n'
for kw in kwargs:
if kw == 'intersects':
geom = json.dumps(json.loads(kwargs[kw])['geometry'])
txt += ('{:>20}: {:<40} ...\n'.format(kw, geom[0:70]))
else:
txt += ('{:>20}: {:<40}\n'.format(kw, kwargs[kw]))
print(txt)
# get scenes from search
search = Search(**kwargs)
scenes = Scenes(search.scenes(), metadata={'search': kwargs})
else:
search = None
scenes = Scenes.load(load)
if review:
if not os.getenv('IMGCAT', None):
raise ValueError('Set IMGCAT envvar to terminal image display program to use review feature')
scenes.review_thumbnails()
# print summary
if printmd is not None:
scenes.print_scenes(printmd)
# print calendar
if printcal:
print(scenes.text_calendar())
# save all metadata in JSON file
if save is not None:
scenes.save(filename=save, append=append)
print('%s scenes found' % len(scenes))
# download files given keys
if download is not None:
for key in download:
scenes.download(key=key)
return scenes
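# Illustrative invocation (the keyword arguments are hypothetical; the real
# set comes from SatUtilsParser and the Search API):
#
#   scenes = main(printsearch=True, save='scenes.json', **search_kwargs)
#
# which runs the search, prints the criteria, and writes all scene metadata
# to a JSON file.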
def cli():
parser = SatUtilsParser(description='sat-search (v%s)' % __version__)
args = parser.parse_args(sys.argv[1:])
# read the GeoJSON file
if 'intersects' in args:
if os.path.exists(args['intersects']):
with open(args['intersects']) as f:
args['intersects'] = json.dumps(json.loads(f.read()))
# enable logging
logging.basicConfig(stream=sys.stdout, level=args.pop('verbosity') * 10)
scenes = main(**args)
return len(scenes)
if __name__ == "__main__":
cli()
| [
"[email protected]"
] | |
263c396672343716ef9ec10e457d8d309663d35f | 20bb1ae805cd796a7c377e55966633441d1d9fd5 | /URI Online Judge/Beginner/1006 Average 2/avg.py | 6de3475fc4d7cf70be9a81814caf67fa331224bd | [] | no_license | nathantheinventor/solved-problems | 1791c9588aefe2ebdc9293eb3d58317346d88e83 | c738e203fa77ae931b0ec613e5a00f9a8f7ff845 | refs/heads/master | 2022-10-27T08:58:23.860159 | 2022-10-13T20:18:43 | 2022-10-13T20:18:43 | 122,110,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | a, b, c = float(input()), float(input()), float(input())
ans = (2 * a + 3 * b + 5 * c) / 10
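# Worked example: inputs 5.0, 6.0 and 7.0 give (10.0 + 18.0 + 35.0) / 10 = 6.3,
# so the program prints "MEDIA = 6.3".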
print("MEDIA = {:.1f}".format(ans)) | [
"[email protected]"
] | |
d29e7eb7a15f9925854b04676ff98319ee3708a9 | 380a47268c5975473a2e7c38c747bc3bdbd981b1 | /benchmark/third_party/transformers/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py | be31af99d6dfd8c2920ee6fcae86f65a6003c213 | [
"Apache-2.0"
] | permissive | FMInference/FlexGen | 07aa9b1918c19b02077e13ad07e76840843810dd | d34f7b4b43ed87a374f394b0535ed685af66197b | refs/heads/main | 2023-07-24T02:29:51.179817 | 2023-07-21T22:38:31 | 2023-07-21T22:38:31 | 602,270,517 | 6,821 | 411 | Apache-2.0 | 2023-07-07T22:59:24 | 2023-02-15T21:18:53 | Python | UTF-8 | Python | false | false | 61,571 | py | # coding=utf-8
# Copyright 2021 Microsoft Research The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch LayoutLMv2 model."""
import math
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, torch_int_div
from ...utils import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_detectron2_available,
logging,
replace_return_docstrings,
requires_backends,
)
from .configuration_layoutlmv2 import LayoutLMv2Config
# soft dependency
if is_detectron2_available():
import detectron2
from detectron2.modeling import META_ARCH_REGISTRY
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "microsoft/layoutlmv2-base-uncased"
_CONFIG_FOR_DOC = "LayoutLMv2Config"
_TOKENIZER_FOR_DOC = "LayoutLMv2Tokenizer"
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/layoutlmv2-base-uncased",
"microsoft/layoutlmv2-large-uncased",
# See all LayoutLMv2 models at https://huggingface.co/models?filter=layoutlmv2
]
class LayoutLMv2Embeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super(LayoutLMv2Embeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def _calc_spatial_position_embeddings(self, bbox):
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
except IndexError as e:
raise IndexError("The `bbox` coordinate values should be within 0-1000 range.") from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
spatial_position_embeddings = torch.cat(
[
left_position_embeddings,
upper_position_embeddings,
right_position_embeddings,
lower_position_embeddings,
h_position_embeddings,
w_position_embeddings,
],
dim=-1,
)
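        # The concatenation above yields vectors of width
        # 4 * coordinate_size + 2 * shape_size, which equals hidden_size in
        # the released configs (e.g. 4 * 128 + 2 * 128 = 768 for the base
        # model).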
return spatial_position_embeddings
class LayoutLMv2SelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.fast_qkv = config.fast_qkv
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.has_relative_attention_bias = config.has_relative_attention_bias
self.has_spatial_attention_bias = config.has_spatial_attention_bias
if config.fast_qkv:
self.qkv_linear = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=False)
self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
else:
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def compute_qkv(self, hidden_states):
if self.fast_qkv:
qkv = self.qkv_linear(hidden_states)
q, k, v = torch.chunk(qkv, 3, dim=-1)
if q.ndimension() == self.q_bias.ndimension():
q = q + self.q_bias
v = v + self.v_bias
else:
_sz = (1,) * (q.ndimension() - 1) + (-1,)
q = q + self.q_bias.view(*_sz)
v = v + self.v_bias.view(*_sz)
else:
q = self.query(hidden_states)
k = self.key(hidden_states)
v = self.value(hidden_states)
return q, k, v
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
q, k, v = self.compute_qkv(hidden_states)
# (B, L, H*D) -> (B, H, L, D)
query_layer = self.transpose_for_scores(q)
key_layer = self.transpose_for_scores(k)
value_layer = self.transpose_for_scores(v)
query_layer = query_layer / math.sqrt(self.attention_head_size)
# [BSZ, NAT, L, L]
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.has_relative_attention_bias:
attention_scores += rel_pos
if self.has_spatial_attention_bias:
attention_scores += rel_2d_pos
attention_scores = attention_scores.float().masked_fill_(
attention_mask.to(torch.bool), torch.finfo(attention_scores.dtype).min
)
attention_probs = nn.functional.softmax(attention_scores, dim=-1, dtype=torch.float32).type_as(value_layer)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class LayoutLMv2Attention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = LayoutLMv2SelfAttention(config)
self.output = LayoutLMv2SelfOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class LayoutLMv2SelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->LayoutLMv2
class LayoutLMv2Intermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->LayoutLM
class LayoutLMv2Output(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LayoutLMv2Layer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = LayoutLMv2Attention(config)
self.intermediate = LayoutLMv2Intermediate(config)
self.output = LayoutLMv2Output(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small
absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions
>=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should
allow for more graceful generalization to longer sequences than the model has been trained on.
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
ret = 0
if bidirectional:
num_buckets //= 2
ret += (relative_position > 0).long() * num_buckets
n = torch.abs(relative_position)
else:
n = torch.max(-relative_position, torch.zeros_like(relative_position))
# now n is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = n < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).to(torch.long)
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
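# Worked example (computed from the function above with its default arguments,
# bidirectional=True, num_buckets=32, max_distance=128): buckets 0-15 hold
# zero/negative offsets and 16-31 positive ones. |offset| < 8 maps exactly
# (offset -3 -> bucket 3, offset +3 -> bucket 19); larger offsets land in
# logarithmic bins (offset -16 -> bucket 10, offset +64 -> bucket 30); and
# offsets at or beyond max_distance saturate (offset +500 -> bucket 31).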
class LayoutLMv2Encoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([LayoutLMv2Layer(config) for _ in range(config.num_hidden_layers)])
self.has_relative_attention_bias = config.has_relative_attention_bias
self.has_spatial_attention_bias = config.has_spatial_attention_bias
if self.has_relative_attention_bias:
self.rel_pos_bins = config.rel_pos_bins
self.max_rel_pos = config.max_rel_pos
self.rel_pos_onehot_size = config.rel_pos_bins
self.rel_pos_bias = nn.Linear(self.rel_pos_onehot_size, config.num_attention_heads, bias=False)
if self.has_spatial_attention_bias:
self.max_rel_2d_pos = config.max_rel_2d_pos
self.rel_2d_pos_bins = config.rel_2d_pos_bins
self.rel_2d_pos_onehot_size = config.rel_2d_pos_bins
self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
self.gradient_checkpointing = False
def _calculate_1d_position_embeddings(self, hidden_states, position_ids):
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = relative_position_bucket(
rel_pos_mat,
num_buckets=self.rel_pos_bins,
max_distance=self.max_rel_pos,
)
rel_pos = nn.functional.one_hot(rel_pos, num_classes=self.rel_pos_onehot_size).type_as(hidden_states)
rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
rel_pos = rel_pos.contiguous()
return rel_pos
def _calculate_2d_position_embeddings(self, hidden_states, bbox):
position_coord_x = bbox[:, :, 0]
position_coord_y = bbox[:, :, 3]
rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
rel_pos_x = relative_position_bucket(
rel_pos_x_2d_mat,
num_buckets=self.rel_2d_pos_bins,
max_distance=self.max_rel_2d_pos,
)
rel_pos_y = relative_position_bucket(
rel_pos_y_2d_mat,
num_buckets=self.rel_2d_pos_bins,
max_distance=self.max_rel_2d_pos,
)
rel_pos_x = nn.functional.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
rel_pos_y = nn.functional.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2)
rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2)
rel_pos_x = rel_pos_x.contiguous()
rel_pos_y = rel_pos_y.contiguous()
rel_2d_pos = rel_pos_x + rel_pos_y
return rel_2d_pos
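    # Shape note (editorial sketch): for batch size B, sequence length L and H
    # attention heads, the offset matrices above are (B, L, L) and the returned
    # `rel_2d_pos` bias is (B, H, L, L) -- one additive attention bias per head,
    # built from the x0 and y1 coordinates of each normalized box.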
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
bbox=None,
position_ids=None,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
rel_pos = (
self._calculate_1d_position_embeddings(hidden_states, position_ids)
if self.has_relative_attention_bias
else None
)
rel_2d_pos = (
self._calculate_2d_position_embeddings(hidden_states, bbox) if self.has_spatial_attention_bias else None
)
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
all_hidden_states,
all_self_attentions,
]
if v is not None
)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
class LayoutLMv2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LayoutLMv2Config
pretrained_model_archive_map = LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST
base_model_prefix = "layoutlmv2"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, LayoutLMv2Encoder):
module.gradient_checkpointing = value
def my_convert_sync_batchnorm(module, process_group=None):
# same as `nn.modules.SyncBatchNorm.convert_sync_batchnorm` but allowing converting from `detectron2.layers.FrozenBatchNorm2d`
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
return nn.modules.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
module_output = module
if isinstance(module, detectron2.layers.FrozenBatchNorm2d):
module_output = torch.nn.SyncBatchNorm(
num_features=module.num_features,
eps=module.eps,
affine=True,
track_running_stats=True,
process_group=process_group,
)
module_output.weight = torch.nn.Parameter(module.weight)
module_output.bias = torch.nn.Parameter(module.bias)
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=module.running_mean.device)
for name, child in module.named_children():
module_output.add_module(name, my_convert_sync_batchnorm(child, process_group))
del module
return module_output
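# Usage sketch (assuming an initialized torch.distributed process group):
#
#   backbone = my_convert_sync_batchnorm(backbone, process_group=group)
#
# Unlike `nn.SyncBatchNorm.convert_sync_batchnorm` alone, this also converts
# detectron2's FrozenBatchNorm2d layers into trainable SyncBatchNorm modules,
# copying their affine parameters and running statistics.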
class LayoutLMv2VisualBackbone(nn.Module):
def __init__(self, config):
super().__init__()
self.cfg = config.get_detectron2_config()
meta_arch = self.cfg.MODEL.META_ARCHITECTURE
model = META_ARCH_REGISTRY.get(meta_arch)(self.cfg)
assert isinstance(model.backbone, detectron2.modeling.backbone.FPN)
self.backbone = model.backbone
assert len(self.cfg.MODEL.PIXEL_MEAN) == len(self.cfg.MODEL.PIXEL_STD)
num_channels = len(self.cfg.MODEL.PIXEL_MEAN)
self.register_buffer(
"pixel_mean",
torch.Tensor(self.cfg.MODEL.PIXEL_MEAN).view(num_channels, 1, 1),
)
self.register_buffer("pixel_std", torch.Tensor(self.cfg.MODEL.PIXEL_STD).view(num_channels, 1, 1))
self.out_feature_key = "p2"
if torch.are_deterministic_algorithms_enabled():
logger.warning("using `AvgPool2d` instead of `AdaptiveAvgPool2d`")
input_shape = (224, 224)
backbone_stride = self.backbone.output_shape()[self.out_feature_key].stride
self.pool = nn.AvgPool2d(
(
math.ceil(math.ceil(input_shape[0] / backbone_stride) / config.image_feature_pool_shape[0]),
math.ceil(math.ceil(input_shape[1] / backbone_stride) / config.image_feature_pool_shape[1]),
)
)
else:
self.pool = nn.AdaptiveAvgPool2d(config.image_feature_pool_shape[:2])
if len(config.image_feature_pool_shape) == 2:
config.image_feature_pool_shape.append(self.backbone.output_shape()[self.out_feature_key].channels)
assert self.backbone.output_shape()[self.out_feature_key].channels == config.image_feature_pool_shape[2]
def forward(self, images):
images_input = ((images if torch.is_tensor(images) else images.tensor) - self.pixel_mean) / self.pixel_std
features = self.backbone(images_input)
features = features[self.out_feature_key]
features = self.pool(features).flatten(start_dim=2).transpose(1, 2).contiguous()
return features
def synchronize_batch_norm(self):
if not (
torch.distributed.is_available()
and torch.distributed.is_initialized()
and torch.distributed.get_rank() > -1
):
raise RuntimeError("Make sure torch.distributed is set up properly.")
self_rank = torch.distributed.get_rank()
node_size = torch.cuda.device_count()
world_size = torch.distributed.get_world_size()
        if not (world_size % node_size == 0):
raise RuntimeError("Make sure the number of processes can be divided by the number of nodes")
node_global_ranks = [list(range(i * node_size, (i + 1) * node_size)) for i in range(world_size // node_size)]
sync_bn_groups = [
torch.distributed.new_group(ranks=node_global_ranks[i]) for i in range(world_size // node_size)
]
node_rank = self_rank // node_size
self.backbone = my_convert_sync_batchnorm(self.backbone, process_group=sync_bn_groups[node_rank])
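# Usage sketch (multi-node DDP, one process per GPU assumed): call
# `model.visual.synchronize_batch_norm()` after `init_process_group` so the
# detectron2 backbone's batch-norm statistics stay in sync within each node.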
LAYOUTLMV2_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`LayoutLMv2Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
LAYOUTLMV2_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `{0}`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`LayoutLMv2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
image (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `detectron.structures.ImageList` whose `tensors` is of shape `(batch_size, num_channels, height, width)`):
Batch of document images.
attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `{0}`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `{0}`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class LayoutLMv2Pooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
@add_start_docstrings(
"The bare LayoutLMv2 Model transformer outputting raw hidden-states without any specific head on top.",
LAYOUTLMV2_START_DOCSTRING,
)
class LayoutLMv2Model(LayoutLMv2PreTrainedModel):
def __init__(self, config):
requires_backends(self, "detectron2")
super().__init__(config)
self.config = config
self.has_visual_segment_embedding = config.has_visual_segment_embedding
self.embeddings = LayoutLMv2Embeddings(config)
self.visual = LayoutLMv2VisualBackbone(config)
self.visual_proj = nn.Linear(config.image_feature_pool_shape[-1], config.hidden_size)
if self.has_visual_segment_embedding:
self.visual_segment_embedding = nn.Parameter(nn.Embedding(1, config.hidden_size).weight[0])
self.visual_LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.visual_dropout = nn.Dropout(config.hidden_dropout_prob)
self.encoder = LayoutLMv2Encoder(config)
self.pooler = LayoutLMv2Pooler(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _calc_text_embeddings(self, input_ids, bbox, position_ids, token_type_ids, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
if inputs_embeds is None:
inputs_embeds = self.embeddings.word_embeddings(input_ids)
position_embeddings = self.embeddings.position_embeddings(position_ids)
spatial_position_embeddings = self.embeddings._calc_spatial_position_embeddings(bbox)
token_type_embeddings = self.embeddings.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + spatial_position_embeddings + token_type_embeddings
embeddings = self.embeddings.LayerNorm(embeddings)
embeddings = self.embeddings.dropout(embeddings)
return embeddings
def _calc_img_embeddings(self, image, bbox, position_ids):
visual_embeddings = self.visual_proj(self.visual(image))
position_embeddings = self.embeddings.position_embeddings(position_ids)
spatial_position_embeddings = self.embeddings._calc_spatial_position_embeddings(bbox)
embeddings = visual_embeddings + position_embeddings + spatial_position_embeddings
if self.has_visual_segment_embedding:
embeddings += self.visual_segment_embedding
embeddings = self.visual_LayerNorm(embeddings)
embeddings = self.visual_dropout(embeddings)
return embeddings
def _calc_visual_bbox(self, image_feature_pool_shape, bbox, device, final_shape):
visual_bbox_x = torch_int_div(
torch.arange(
0,
1000 * (image_feature_pool_shape[1] + 1),
1000,
device=device,
dtype=bbox.dtype,
),
self.config.image_feature_pool_shape[1],
)
visual_bbox_y = torch_int_div(
torch.arange(
0,
1000 * (self.config.image_feature_pool_shape[0] + 1),
1000,
device=device,
dtype=bbox.dtype,
),
self.config.image_feature_pool_shape[0],
)
visual_bbox = torch.stack(
[
visual_bbox_x[:-1].repeat(image_feature_pool_shape[0], 1),
visual_bbox_y[:-1].repeat(image_feature_pool_shape[1], 1).transpose(0, 1),
visual_bbox_x[1:].repeat(image_feature_pool_shape[0], 1),
visual_bbox_y[1:].repeat(image_feature_pool_shape[1], 1).transpose(0, 1),
],
dim=-1,
).view(-1, bbox.size(-1))
visual_bbox = visual_bbox.repeat(final_shape[0], 1, 1)
return visual_bbox
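    # Sketch of the result: with the default 7x7 feature pool this tiles the
    # 0-1000 coordinate space into a 7x7 grid, yielding 49 pseudo boxes (one
    # per visual token), repeated across the batch dimension.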
def _get_input_shape(self, input_ids=None, inputs_embeds=None):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
return input_ids.size()
elif inputs_embeds is not None:
return inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
@add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
bbox: Optional[torch.LongTensor] = None,
image: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPooling]:
r"""
Return:
Examples:
```python
>>> from transformers import LayoutLMv2Processor, LayoutLMv2Model, set_seed
>>> from PIL import Image
>>> import torch
>>> from datasets import load_dataset
>>> set_seed(88)
>>> processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
>>> model = LayoutLMv2Model.from_pretrained("microsoft/layoutlmv2-base-uncased")
>>> dataset = load_dataset("hf-internal-testing/fixtures_docvqa")
>>> image_path = dataset["test"][0]["file"]
>>> image = Image.open(image_path).convert("RGB")
>>> encoding = processor(image, return_tensors="pt")
>>> outputs = model(**encoding)
>>> last_hidden_states = outputs.last_hidden_state
>>> last_hidden_states.shape
torch.Size([1, 342, 768])
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
input_shape = self._get_input_shape(input_ids, inputs_embeds)
device = input_ids.device if input_ids is not None else inputs_embeds.device
visual_shape = list(input_shape)
visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]
visual_shape = torch.Size(visual_shape)
# needs a new copy of input_shape for tracing. Otherwise wrong dimensions will occur
final_shape = list(self._get_input_shape(input_ids, inputs_embeds))
final_shape[1] += visual_shape[1]
final_shape = torch.Size(final_shape)
visual_bbox = self._calc_visual_bbox(self.config.image_feature_pool_shape, bbox, device, final_shape)
final_bbox = torch.cat([bbox, visual_bbox], dim=1)
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
visual_attention_mask = torch.ones(visual_shape, device=device)
final_attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if position_ids is None:
seq_length = input_shape[1]
position_ids = self.embeddings.position_ids[:, :seq_length]
position_ids = position_ids.expand(input_shape)
visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(
input_shape[0], 1
)
final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)
if bbox is None:
bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
text_layout_emb = self._calc_text_embeddings(
input_ids=input_ids,
bbox=bbox,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
)
visual_emb = self._calc_img_embeddings(
image=image,
bbox=visual_bbox,
position_ids=visual_position_ids,
)
final_emb = torch.cat([text_layout_emb, visual_emb], dim=1)
extended_attention_mask = final_attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
else:
head_mask = [None] * self.config.num_hidden_layers
encoder_outputs = self.encoder(
final_emb,
extended_attention_mask,
bbox=final_bbox,
position_ids=final_position_ids,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@add_start_docstrings(
"""
LayoutLMv2 Model with a sequence classification head on top (a linear layer on top of the concatenation of the
final hidden state of the [CLS] token, average-pooled initial visual embeddings and average-pooled final visual
embeddings, e.g. for document image classification tasks such as the
[RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset.
""",
LAYOUTLMV2_START_DOCSTRING,
)
class LayoutLMv2ForSequenceClassification(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.layoutlmv2 = LayoutLMv2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.layoutlmv2.embeddings.word_embeddings
@add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
bbox: Optional[torch.LongTensor] = None,
image: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
Example:
```python
>>> from transformers import LayoutLMv2Processor, LayoutLMv2ForSequenceClassification, set_seed
>>> from PIL import Image
>>> import torch
>>> from datasets import load_dataset
>>> set_seed(88)
>>> dataset = load_dataset("rvl_cdip", split="train", streaming=True)
>>> data = next(iter(dataset))
>>> image = data["image"].convert("RGB")
>>> processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
>>> model = LayoutLMv2ForSequenceClassification.from_pretrained(
... "microsoft/layoutlmv2-base-uncased", num_labels=dataset.info.features["label"].num_classes
... )
>>> encoding = processor(image, return_tensors="pt")
>>> sequence_label = torch.tensor([data["label"]])
>>> outputs = model(**encoding, labels=sequence_label)
>>> loss, logits = outputs.loss, outputs.logits
>>> predicted_idx = logits.argmax(dim=-1).item()
        >>> predicted_answer = dataset.info.features["label"].names[predicted_idx]
>>> predicted_idx, predicted_answer
(4, 'advertisement')
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
visual_shape = list(input_shape)
visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]
visual_shape = torch.Size(visual_shape)
final_shape = list(input_shape)
final_shape[1] += visual_shape[1]
final_shape = torch.Size(final_shape)
visual_bbox = self.layoutlmv2._calc_visual_bbox(
self.config.image_feature_pool_shape, bbox, device, final_shape
)
visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(
input_shape[0], 1
)
initial_image_embeddings = self.layoutlmv2._calc_img_embeddings(
image=image,
bbox=visual_bbox,
position_ids=visual_position_ids,
)
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
sequence_output, final_image_embeddings = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
cls_final_output = sequence_output[:, 0, :]
# average-pool the visual embeddings
pooled_initial_image_embeddings = initial_image_embeddings.mean(dim=1)
pooled_final_image_embeddings = final_image_embeddings.mean(dim=1)
# concatenate with cls_final_output
sequence_output = torch.cat(
[cls_final_output, pooled_initial_image_embeddings, pooled_final_image_embeddings], dim=1
)
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
LayoutLMv2 Model with a token classification head on top (a linear layer on top of the text part of the hidden
states) e.g. for sequence labeling (information extraction) tasks such as
[FUNSD](https://guillaumejaume.github.io/FUNSD/), [SROIE](https://rrc.cvc.uab.es/?ch=13),
[CORD](https://github.com/clovaai/cord) and [Kleister-NDA](https://github.com/applicaai/kleister-nda).
""",
LAYOUTLMV2_START_DOCSTRING,
)
class LayoutLMv2ForTokenClassification(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.layoutlmv2 = LayoutLMv2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.layoutlmv2.embeddings.word_embeddings
@add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
bbox: Optional[torch.LongTensor] = None,
image: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, TokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
Returns:
Example:
```python
>>> from transformers import LayoutLMv2Processor, LayoutLMv2ForTokenClassification, set_seed
>>> from PIL import Image
>>> from datasets import load_dataset
>>> set_seed(88)
>>> datasets = load_dataset("nielsr/funsd", split="test")
>>> labels = datasets.features["ner_tags"].feature.names
        >>> id2label = {idx: label for idx, label in enumerate(labels)}
>>> processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr")
>>> model = LayoutLMv2ForTokenClassification.from_pretrained(
... "microsoft/layoutlmv2-base-uncased", num_labels=len(labels)
... )
>>> data = datasets[0]
>>> image = Image.open(data["image_path"]).convert("RGB")
>>> words = data["words"]
>>> boxes = data["bboxes"] # make sure to normalize your bounding boxes
>>> word_labels = data["ner_tags"]
>>> encoding = processor(
... image,
... words,
... boxes=boxes,
... word_labels=word_labels,
... padding="max_length",
... truncation=True,
... return_tensors="pt",
... )
>>> outputs = model(**encoding)
>>> logits, loss = outputs.logits, outputs.loss
>>> predicted_token_class_ids = logits.argmax(-1)
>>> predicted_tokens_classes = [id2label[t.item()] for t in predicted_token_class_ids[0]]
>>> predicted_tokens_classes[:5]
['B-ANSWER', 'B-HEADER', 'B-HEADER', 'B-HEADER', 'B-HEADER']
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
# only take the text part of the output representations
sequence_output = outputs[0][:, :seq_length]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
LayoutLMv2 Model with a span classification head on top for extractive question-answering tasks such as
[DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the text part of the hidden-states output to
compute `span start logits` and `span end logits`).
""",
LAYOUTLMV2_START_DOCSTRING,
)
class LayoutLMv2ForQuestionAnswering(LayoutLMv2PreTrainedModel):
def __init__(self, config, has_visual_segment_embedding=True):
super().__init__(config)
self.num_labels = config.num_labels
config.has_visual_segment_embedding = has_visual_segment_embedding
self.layoutlmv2 = LayoutLMv2Model(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.layoutlmv2.embeddings.word_embeddings
@add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
bbox: Optional[torch.LongTensor] = None,
image: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, QuestionAnsweringModelOutput]:
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
Returns:
Example:
In this example below, we give the LayoutLMv2 model an image (of texts) and ask it a question. It will give us
a prediction of what it thinks the answer is (the span of the answer within the texts parsed from the image).
```python
>>> from transformers import LayoutLMv2Processor, LayoutLMv2ForQuestionAnswering, set_seed
>>> import torch
>>> from PIL import Image
>>> from datasets import load_dataset
>>> set_seed(88)
>>> processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
>>> model = LayoutLMv2ForQuestionAnswering.from_pretrained("microsoft/layoutlmv2-base-uncased")
>>> dataset = load_dataset("hf-internal-testing/fixtures_docvqa")
>>> image_path = dataset["test"][0]["file"]
>>> image = Image.open(image_path).convert("RGB")
>>> question = "When is coffee break?"
>>> encoding = processor(image, question, return_tensors="pt")
>>> outputs = model(**encoding)
>>> predicted_start_idx = outputs.start_logits.argmax(-1).item()
>>> predicted_end_idx = outputs.end_logits.argmax(-1).item()
>>> predicted_start_idx, predicted_end_idx
(154, 287)
>>> predicted_answer_tokens = encoding.input_ids.squeeze()[predicted_start_idx : predicted_end_idx + 1]
>>> predicted_answer = processor.tokenizer.decode(predicted_answer_tokens)
>>> predicted_answer # results are not very good without further fine-tuning
'council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public ...
```
```python
>>> target_start_index = torch.tensor([7])
>>> target_end_index = torch.tensor([14])
>>> outputs = model(**encoding, start_positions=target_start_index, end_positions=target_end_index)
>>> predicted_answer_span_start = outputs.start_logits.argmax(-1).item()
>>> predicted_answer_span_end = outputs.end_logits.argmax(-1).item()
>>> predicted_answer_span_start, predicted_answer_span_end
(154, 287)
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
# only take the text part of the output representations
sequence_output = outputs[0][:, :seq_length]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| [
"[email protected]"
] | |
e84bacade8b32fd4e66d9b8f06b6668ab4d79cb4 | 13e91d812e7e0133f45273945ccca5523b1eefe5 | /task 3/spacex/migrations/0001_initial.py | fb8a9ca3ac5611143e838d992ff22a71d3619a63 | [] | no_license | Harshvartak/Unicode | 30d7298253f1feba4c47b89bdb8403e88b1707a1 | 2903d445fa5435b835f1543b8a67fb417749e1c3 | refs/heads/master | 2020-07-10T15:29:48.115326 | 2020-01-20T18:34:42 | 2020-01-20T18:34:42 | 204,299,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | # Generated by Django 2.2.3 on 2019-08-22 11:45
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='spacex',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('flight_number', models.IntegerField(verbose_name='flight_number')),
('launch_date', models.DateTimeField(verbose_name='launch_date')),
('rocket_name', models.CharField(max_length=150)),
('mission_patch', models.URLField()),
],
),
]
| [
"[email protected]"
] | |
d5c03328da088452bf7533b3175f69967f01d4a5 | 73297a722d6bfc1d495786f28e8e5f1593c3f68b | /shiyanlou_cs596-1805f3c438/mymodule/__init__.py | c04005530ec0789e04917f78b757e5df1619d95b | [
"Apache-2.0"
] | permissive | tongxindao/shiyanlou | 669d2d5641ccbf871d9190ff45b096d3cfd2990c | 1d002ea342deb69066c287db9935f77f49f0a09e | refs/heads/master | 2022-12-13T02:15:27.997568 | 2018-11-06T17:05:08 | 2018-11-06T17:05:08 | 96,234,067 | 0 | 0 | Apache-2.0 | 2022-12-08T00:46:05 | 2017-07-04T15:54:47 | HTML | UTF-8 | Python | false | false | 60 | py | from mymodule.bars import simplebar
__all__ = ['simplebar']
| [
"[email protected]"
] | |
939cb1a21804f194fc568a0cb554bb20519a3adf | 61e98b0302a43ab685be4c255b4ecf2979db55b6 | /sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/test/extensions/test_check_mccabe.py | 9d995891fd7893e6a664334889981216ab537d0c | [
"BSD-3-Clause",
"EPL-2.0",
"CDDL-1.0",
"Apache-2.0",
"WTFPL",
"GPL-2.0-only",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CDDL-1.1",
"Classpath-exception-2.0"
] | permissive | dzenyu/kafka | 5631c05a6de6e288baeb8955bdddf2ff60ec2a0e | d69a24bce8d108f43376271f89ecc3b81c7b6622 | refs/heads/master | 2021-07-16T12:31:09.623509 | 2021-06-28T18:22:16 | 2021-06-28T18:22:16 | 198,724,535 | 0 | 0 | Apache-2.0 | 2019-07-24T23:51:47 | 2019-07-24T23:51:46 | null | UTF-8 | Python | false | false | 2,593 | py | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Tests for the pylint checker in :mod:`pylint.extensions.check_mccabe
"""
import os.path as osp
import unittest
from pylint import checkers
from pylint.extensions.mccabe import register
from pylint.lint import PyLinter
from pylint.reporters import BaseReporter
class TestReporter(BaseReporter):
def handle_message(self, msg):
self.messages.append(msg)
def on_set_current_module(self, module, filepath):
self.messages = []
class TestMcCabeMethodChecker(unittest.TestCase):
"""Test McCabe Method Checker"""
expected_msgs = [
"'f1' is too complex. The McCabe rating is 1",
"'f2' is too complex. The McCabe rating is 1",
"'f3' is too complex. The McCabe rating is 3",
"'f4' is too complex. The McCabe rating is 2",
"'f5' is too complex. The McCabe rating is 2",
"'f6' is too complex. The McCabe rating is 2",
"'f7' is too complex. The McCabe rating is 3",
"'f8' is too complex. The McCabe rating is 4",
"'f9' is too complex. The McCabe rating is 9",
"'method1' is too complex. The McCabe rating is 1",
"This 'for' is too complex. The McCabe rating is 4",
"'method3' is too complex. The McCabe rating is 2",
"'f10' is too complex. The McCabe rating is 11",
"'method2' is too complex. The McCabe rating is 18",
]
@classmethod
def setUpClass(cls):
cls._linter = PyLinter()
cls._linter.set_reporter(TestReporter())
checkers.initialize(cls._linter)
register(cls._linter)
cls._linter.disable('all')
cls._linter.enable('too-complex')
def setUp(self):
self.fname_mccabe_example = osp.join(
osp.dirname(osp.abspath(__file__)), 'data', 'mccabe.py')
def test_too_complex_message(self):
self._linter.global_set_option('max-complexity', 0)
self._linter.check([self.fname_mccabe_example])
real_msgs = [message.msg for message in self._linter.reporter.messages]
self.assertEqual(sorted(self.expected_msgs), sorted(real_msgs))
def test_max_mccabe_rate(self):
self._linter.global_set_option('max-complexity', 9)
self._linter.check([self.fname_mccabe_example])
real_msgs = [message.msg for message in self._linter.reporter.messages]
self.assertEqual(sorted(self.expected_msgs[-2:]), sorted(real_msgs))
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
fc0535302e88c9a984876df5e89680510528d42a | 99799383b4e618061fe9261aa70cfe420e02a5aa | /gift/migrations/0001_initial.py | 90133582de0ce017c9343a8fc104570f32f46924 | [
"MIT"
] | permissive | openkamer/openkamer | f311a97d5c9e182eabd6602f42475e8e049912b0 | bb99963c00ad90299deccd44d977c27aee7eb16c | refs/heads/master | 2023-07-20T10:45:11.402427 | 2023-07-18T17:41:56 | 2023-07-18T17:41:56 | 57,322,204 | 62 | 7 | MIT | 2023-07-17T18:15:43 | 2016-04-28T17:43:23 | Python | UTF-8 | Python | false | false | 920 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-12-08 11:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('person', '0005_person_twitter_username'),
]
operations = [
migrations.CreateModel(
name='Gift',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(blank=True, default='', max_length=1000)),
('date', models.DateField(blank=True, null=True)),
('value_euro', models.FloatField(blank=True, null=True)),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='person.Person')),
],
),
]
| [
"[email protected]"
] | |
0ebde3b4ef5910eecb4d8d73ce4bd1ddff79edcc | 1d3ccfb4330475f12ecd1e6f1396bfa064a7019c | /output/drivers/pygame_emulator_factory.py | 93df4c306e8fbe432c0b52c18f57d48589f2a049 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | ZeroPhone/ZPUI | c4efaa730315c8c220e7cc76ed8ab9ee7251020c | 430a4b6e1e869cbd68fd89bbf97261710fd7db6b | refs/heads/master | 2021-06-06T05:07:29.859464 | 2018-12-23T08:18:39 | 2018-12-23T08:24:43 | 81,014,670 | 56 | 27 | NOASSERTION | 2019-03-22T19:05:00 | 2017-02-05T19:01:36 | Python | UTF-8 | Python | false | false | 852 | py | """
factory for pygame emulator device
sets minimum attributes,
creates device
returns it to caller
"""
import logging
import luma.emulator.device
# ignore PIL debug messages
logging.getLogger("PIL").setLevel(logging.ERROR)
def get_pygame_emulator_device(width=128, height=64):
"""
Creates and returns pygame emulator device.
Width and height must match the size of the splash screen
    or an exception will be thrown during initialization.
"""
#these are the bare minimum attributes needed to construct the emulator
emulator_attributes = {}
emulator_attributes['display'] = 'pygame'
#width and height are in pixels
emulator_attributes['width'] = width
emulator_attributes['height'] = height
Device = getattr(luma.emulator.device, 'pygame')
device = Device(**emulator_attributes)
return device
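# Usage sketch (assuming luma.core is installed alongside luma.emulator):
#
#   from luma.core.render import canvas
#
#   device = get_pygame_emulator_device()
#   with canvas(device) as draw:
#       draw.text((0, 0), "hello", fill="white")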
| [
"[email protected]"
] | |
28e291884d43f0687260c85caa18a685f60752fc | 6426682dd4b4ee2a84b5bb6160ccdbd37016a0a9 | /setup.py | 0e00b2fc75b700a2fa3ec2fbb85829c3c9977d9c | [] | no_license | jidar/mush | e9645a830f31729ebaf0dbeede98cfa91dacc788 | 921e6094108a857683c65a86eb5557126dce90a8 | refs/heads/master | 2021-01-22T07:10:41.142290 | 2017-02-09T23:30:46 | 2017-02-09T23:30:46 | 37,157,916 | 1 | 1 | null | 2016-05-22T01:10:01 | 2015-06-09T20:55:50 | Python | UTF-8 | Python | false | false | 374 | py | from setuptools import setup, find_packages
# Normal setup stuff
setup(
name='mushtool',
description="multi-use-shell-helper...tool...ok, it's a backronymn :)",
version='1.0.0',
install_requires=['prettytable'],
packages=find_packages(),
zip_safe=False,
entry_points={
'console_scripts':
['mush = mush.cli:entry_point']},
)
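# Usage sketch: after `pip install .`, the console script above puts a `mush`
# command on PATH that dispatches to mush.cli:entry_point.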
| [
"[email protected]"
] | |
fa812feda839644d1aebcc30b544855cf7ade4d3 | 51885da54b320351bfea42c7dd629f41985454cd | /arc105/d.py | 8ceb3f17e61bbd4f457a29ee80337759fac4f0a7 | [] | no_license | mskt4440/AtCoder | dd266247205faeda468f911bff279a792eef5113 | f22702e3932e129a13f0683e91e5cc1a0a99c8d5 | refs/heads/master | 2021-12-15T10:21:31.036601 | 2021-12-14T08:19:11 | 2021-12-14T08:19:11 | 185,161,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py | #
# arc105 d
#
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = """3
1
10
2
1 2
21
476523737 103976339 266993 706803678 802362985 892644371 953855359 196462821 817301757 409460796 773943961 488763959 405483423 616934516 710762957 239829390 55474813 818352359 312280585 185800870 255245162"""
output = """Second
First
Second"""
self.assertIO(input, output)
def resolve():
TN = int(input())
TC = []
for i in range(TN):
N = int(input())
A = [N]
A += list(map(int, input().split()))
TC.append(A)
    for tc in TC:
        n, *T = tc
        # Pairing argument (editorial sketch of the known solution): with an
        # odd number of piles the second player can always restore symmetry,
        # so Second wins; with an even number, Second wins iff the piles can
        # be split into pairs of equal sizes.
        if n % 2 == 1:
            print("Second")
        else:
            T.sort()
            paired = all(T[i] == T[i + 1] for i in range(0, n, 2))
            print("Second" if paired else "First")
if __name__ == "__main__":
unittest.main()
# resolve()
| [
"[email protected]"
] | |
1fd3a4b362882d05a0c246a971eb7f028d9b02a1 | a4287764ce486a037df9acda33be98adf1df9d7e | /configs/centernet/centernet512_dla34.py | 5b45838471009de1d6bb28afcf3603253ca2117e | [] | no_license | YLyeliang/Autodetection | fab984c8b2425756c55e05c343335d4abe78e984 | b1d8e42adbed65ff73943b1bec41c4b67056bf61 | refs/heads/master | 2023-04-30T05:48:03.821507 | 2021-05-25T03:02:33 | 2021-05-25T03:02:33 | 284,577,018 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,290 | py | _base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CornerHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=1,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.25,
push_weight=0.25),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)))
# data settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
**img_norm_cfg),
dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=True,
transforms=[
dict(type='Resize'),
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
**img_norm_cfg),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg', 'border')),
])
]
data = dict(
samples_per_gpu=6,
workers_per_gpu=3,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# training and testing settings
train_cfg = None
test_cfg = dict(
center_topk=100,
local_maximum_kernel=3,
max_per_img=100,
nms_cfg=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[180])
total_epochs = 210
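# Usage sketch (assuming the repo keeps mmdetection-style entry points):
#   python tools/train.py configs/centernet/centernet512_dla34.py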
| [
"[email protected]"
] | |
c99c7f1d22a1d7921303978cedf80e6513a7ba7a | 0bab87d3d3bc6f790f6d924330acf7ae1c6ebc30 | /kunyi/data_structure/hash_table/find-pivot-index.py | 8c646b01e3e5e3daa6254f67d970019683e23a41 | [] | no_license | KunyiLiu/algorithm_problems | 2032b9488cd2f20b23b47c456107475f609b178f | b27a1d4d65429101ef027f5e1e91ba2afd13bd32 | refs/heads/master | 2020-04-26T15:29:43.875656 | 2019-10-21T19:09:01 | 2019-10-21T19:09:01 | 173,648,702 | 0 | 0 | null | 2019-07-21T19:15:23 | 2019-03-04T00:48:02 | Python | UTF-8 | Python | false | false | 1,093 | py | ##### subarray ######
class Solution:
"""
@param nums: an array
@return: the "pivot" index of this array
"""
def pivotIndex(self, nums):
# get the whole sum, hash table sub_sum
# for loop: sum - sub_sum[3] = 11 == sub_sum[3-1]
# O(n)
sub_sum = {}
whole_sum = sum(nums)
for i in range(len(nums)):
if i == 0:
sub_sum[i] = nums[i]
if whole_sum - sub_sum[i] == 0:
return i
else:
sub_sum[i] = sub_sum[i-1] + nums[i]
if whole_sum - sub_sum[i] == sub_sum[i-1]:
return i
return -1
###### partition to left and right ####
# Enumerate the pivot index from left to right
class Solution(object):
def pivotIndex(self, nums):
# Time: O(n)
# Space: O(1)
left, right = 0, sum(nums)
for index, num in enumerate(nums):
right -= num
if left == right:
return index
left += num
return -1
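# Quick check (sketch): Solution().pivotIndex([1, 7, 3, 6, 5, 6]) returns 3,
# since 1 + 7 + 3 == 5 + 6 == 11 on either side of index 3.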
| [
"[email protected]"
] | |
c621b5137655d9d046d914dd195cfad427a2230e | 7dc495401ea92c4007e5ee6e19d05a0d2b75afab | /fae2/accounts/admin.py | b552485525776beed6112437ff6c7e24381601b9 | [
"Apache-2.0"
] | permissive | scasagrande/fae2 | ed3ff3bdf9b533cd23891fd78beed7f8ac8b3de1 | 78e2f883e39014c2addef28871cf9b53ad74f585 | refs/heads/master | 2021-01-14T10:16:44.603672 | 2016-03-12T00:57:09 | 2016-03-12T00:57:09 | 53,964,802 | 0 | 0 | null | 2016-03-15T17:29:17 | 2016-03-15T17:29:17 | null | UTF-8 | Python | false | false | 640 | py | """
Copyright 2014-2016 University of Illinois
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.contrib import admin
# Register your models here.
| [
"[email protected]"
] | |
eddec9a08cbe885274908af925283f8669a3b71b | 5789f30bc942dde4235668c56408575b0bd25599 | /scripts/Temoins_ABL1_check.py | eed407f82246d4ec357a4edbf830df9a8549da04 | [] | no_license | bioinfo-chu-bdx/ngs-somatic | bc9dfa60872a644f18650593d144726d0ab22767 | 8cc6411e16784f2891b92241a97c71788408ffb5 | refs/heads/master | 2023-04-25T19:48:52.073672 | 2021-03-19T14:21:49 | 2021-03-19T14:21:49 | 374,675,975 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,928 | py | #!/usr/bin/env python
import sys
import os
import openpyxl
import subprocess
def representsInt(s): # avoids Excel's "number stored as text" warning
try:
s = int(s)
return s
except ValueError:
return s
def cell_format(cell, font=None, alignment=None, color=None, format=None, border=None,exterior_border=None):
if font == 'bold':
cell.font = openpyxl.styles.Font(name='Calibri', size=11, bold=True)
else:
cell.font = openpyxl.styles.Font(name='Calibri', size=11)
if alignment == 'center':
cell.alignment = openpyxl.styles.Alignment(horizontal='center',vertical='center',wrap_text=True)
elif alignment == 'left':
cell.alignment = openpyxl.styles.Alignment(horizontal='left',wrap_text=True)
if color == 'LightGreen':
cell.fill = openpyxl.styles.PatternFill(fill_type='solid',start_color='D8E4BC') # EBF1DE
elif color == 'LightRed':
cell.fill = openpyxl.styles.PatternFill(fill_type='solid',start_color='d28e8e') #F2DCDB
elif color == 'LightBlue':
cell.fill = openpyxl.styles.PatternFill(fill_type='solid',start_color='add8e6')
elif color == 'Yellow':
cell.fill = openpyxl.styles.PatternFill(fill_type='solid',start_color='feffa3')
elif color == 'Blue':
cell.font = openpyxl.styles.Font(name='Calibri', size=11, color='004c99')
elif color == 'DarkGrey':
cell.fill = openpyxl.styles.PatternFill(fill_type='solid',start_color='4d4f4e')
else:
cell.fill = openpyxl.styles.PatternFill(fill_type=None,start_color='FFFFFF')
if border:
cell.border = openpyxl.styles.Border(left=openpyxl.styles.Side(style='thin'),right=openpyxl.styles.Side(style='thin'), top=openpyxl.styles.Side(style='thin'),bottom=openpyxl.styles.Side(style='thin'))
if exterior_border:
cell.border = openpyxl.styles.Border(top=openpyxl.styles.Side(style='thin'),bottom=openpyxl.styles.Side(style='thin'))
if format == 'Percent':
cell.number_format = '0.0%'
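# Usage sketch: flag a header cell in the tracking sheet, e.g.
#   cell_format(suivi_sheet.cell(row=6, column=2), font='bold',
#               alignment='center', border=True)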
###############################################################################
pipeline_folder = os.environ['NGS_PIPELINE_BX_DIR']
suivi_abl1_path = "/media/n06lbth/sauvegardes_pgm/LAM/EN_LAB_19_2333_Suivi_temoins_ABL1.xlsx"
temoin_abl1_finalreport_path = sys.argv[1]
sample = sys.argv[2]
run_name = sys.argv[3]
run_name = run_name.replace('Auto_user_S5-0198','S5')
###############################################################################
# i/o
fp = openpyxl.load_workbook(temoin_abl1_finalreport_path)
annotation_sheet = fp.get_sheet_by_name('Annotation')
annotation_rows = tuple(annotation_sheet.rows)
suivi_abl1 = openpyxl.load_workbook(suivi_abl1_path)
suivi_sheet = suivi_abl1.get_sheet_by_name('Temoins ABL1')
suivi_rows = tuple(suivi_sheet.rows)
img = openpyxl.drawing.image.Image('%s/scripts/ChuBordeaux_small.png' % pipeline_folder)
suivi_sheet.add_image(img,'A1')
column2write = len(suivi_rows[0])+1
# header 1
#suivi_sheet.cell(row=6,column=column2write).value = sample+'_'+run_name
suivi_sheet.cell(row=6,column=column2write).value = '%s\n\n%s' % (run_name,sample)
cell_format(suivi_sheet.cell(row=6,column=column2write),font='bold',alignment='center',border=True)
# header 2
suivi_sheet.cell(row=7,column=column2write).value = 'Var.freq'
cell_format(suivi_sheet.cell(row=7,column=column2write),border=True)
# variants lines
for i in range(len(annotation_rows[0])):
if annotation_rows[0][i].value == 'Transcript':
nm_index = i
if annotation_rows[0][i].value == 'c.':
c_index = i
if annotation_rows[0][i].value == 'c.(annovar)':
annovar_index = i
if annotation_rows[0][i].value == 'Var.Freq.' or annotation_rows[0][i].value == 'Freq':
freq_index = i
if annotation_rows[0][i].value == 'Var.Cov.':
var_cov_index = i
if annotation_rows[0][i].value == 'Pos.Cov.' or annotation_rows[0][i].value == 'Depth':
pos_cov_index = i
list_not_found = []
for i in range(7,len(suivi_rows)):
variant2check = (suivi_rows[i][1].value.split('.')[0],suivi_rows[i][5].value) # NM, c.
control2check = suivi_rows[i][7].value.replace(' ','')
if not control2check in sample.upper():
cell_format(suivi_sheet.cell(row=i+1,column=column2write),color='DarkGrey',border=True)
continue
for j in range(1,len(annotation_rows)):
if annotation_rows[j][nm_index].value:
variant = (annotation_rows[j][nm_index].value.split('.')[0],annotation_rows[j][c_index].value)
variant_annovar = (annotation_rows[j][nm_index].value.split('.')[0],annotation_rows[j][annovar_index].value)
variant_freq = '?'
if (variant2check == variant) or (variant2check == variant_annovar):
variant_freq = annotation_rows[j][freq_index].value
break
if variant_freq == '?': # not found!
cell_format(suivi_sheet.cell(row=i+1,column=column2write),font='bold',color='LightRed',border=True)
list_not_found.append(variant2check)
else:
suivi_sheet.cell(row=i+1,column=column2write).value = representsInt(variant_freq)
cell_format(suivi_sheet.cell(row=i+1,column=column2write),border=True)
suivi_abl1.save(suivi_abl1_path)
| [
"[email protected]"
] | |
686a8654b6bb525a32033706372872996ad70bdd | 71b2131be682e9184e68b5f42cdf2b15ef851c1f | /Python/htmlparse1.py | 62a4ae7d0f20d07e1072fbb943a4b9f343b360a0 | [
"MIT"
] | permissive | sockduct/Hackerrank | 23430fb5e7068e1689c502b2e803c7f630c17696 | 2a42f575d1f17181e6a9fa21bc4ca6aed187bd84 | refs/heads/master | 2023-08-08T22:33:58.202194 | 2023-07-28T00:48:43 | 2023-07-28T00:48:43 | 170,932,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,351 | py | from html.parser import HTMLParser
example_html = '''
<html>
<head>
<title>HTML Parser - I</title>
</head>
<body data-modal-target class='1'>
<h1 class="header">HackerRank</h1>
<br id="main"/>
</body>
</html>
'''
class MyHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
'''
print(f' Found start tag: {tag}')
if attrs:
print(f' Found attributes: {attrs}')
'''
print(f'Start : {tag}')
for k, v in attrs:
print(f'-> {k} > {v}')
def handle_endtag(self, tag):
# print(f' Found end tag: {tag}')
print(f'End : {tag}')
# Empty tags:
def handle_startendtag(self, tag, attrs):
'''
print(f' Found an empty tag: {tag}')
if attrs:
print(f' Found attributes: {attrs}')
'''
print(f'Empty : {tag}')
for k, v in attrs:
print(f'-> {k} > {v}')
def main():
parser = MyHTMLParser()
lines = int(input())
for _ in range(lines):
parser.feed(input())
# Alternatively, collect all input and then parse:
# html += input()
# parser.feed(html)
#
# Need to explicitly close?
# parser.close()
# Example:
# parser.feed(example_html)
if __name__ == '__main__':
main()
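# Hedged illustration: feeding example_html above through the parser would
# print lines such as
#   Start : html
#   Start : head
#   Start : title
#   End : title
#   ...
#   Empty : br
#   -> id > main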
| [
"[email protected]"
] | |
98b4d08c20d9ad3f7e190f774a239d9751f01d68 d554b1aa8b70fddf81da8988b4aaa43788fede88 /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4187/codes/1595_1446.py c56a025fa554401d7e7a6116d57edf197d81cbcb | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | # Test your code in small steps.
# Don't test everything only at the end, since that makes errors harder to pin down.
# Don't be intimidated by error messages. They help you fix your code.
x = float(input("amount in liters"))
c = x*(1/3)
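# Worked example: an input of 6.0 liters gives 6.0 * (1/3) = 2.0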
print(round(c, 3)) | [
"[email protected]"
] | |
a64c16d3ce4d5bb65727d8a6af67024e410df108 | ecaba173879f92f24e3c951866fda23c0a4fc426 | /perfkitbenchmarker/linux_packages/specjbb.py | b5a05208db413a147880facc09f87c8ae403818b | [
"Classpath-exception-2.0",
"BSD-3-Clause",
"AGPL-3.0-only",
"MIT",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | GoogleCloudPlatform/PerfKitBenchmarker | 2f4917fd796db4eb90822c557d8fa08a497fbd48 | d0699f32998898757b036704fba39e5471641f01 | refs/heads/master | 2023-09-02T08:14:54.110308 | 2023-09-01T20:28:01 | 2023-09-01T20:28:38 | 21,950,910 | 1,923 | 567 | Apache-2.0 | 2023-09-13T22:37:42 | 2014-07-17T17:23:26 | Python | UTF-8 | Python | false | false | 2,074 | py | # Copyright 2022 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing installation functions for SPEC JBB 2015."""
from absl import flags
FLAGS = flags.FLAGS
_BENCHMARK_NAME = 'specjbb2015'
SPEC_JBB_2015_ISO = 'SPECjbb2015-1_03.iso'
SPEC_DIR = 'spec'
_MOUNT_SPECJBB_ISO = flags.DEFINE_bool(
'mount_specjbb_iso', True, 'Whether specjbb mounts iso or not')
def Install(vm) -> None:
"""Prepares a SPEC client by copying SPEC to the VM."""
mount_dir = 'spec_mnt'
vm.RemoteCommand(f'mkdir -p {mount_dir} {SPEC_DIR}')
vm.InstallPreprovisionedBenchmarkData(_BENCHMARK_NAME, [SPEC_JBB_2015_ISO],
'~/')
if _MOUNT_SPECJBB_ISO.value:
vm.RemoteCommand(
f'sudo mount -t iso9660 -o loop {SPEC_JBB_2015_ISO} {mount_dir}')
vm.RemoteCommand(f'cp -r {mount_dir}/* {SPEC_DIR}')
vm.RemoteCommand(f'sudo umount {mount_dir} && sudo rm -rf {mount_dir}')
else:
vm.InstallPackages('p7zip-full')
vm.InstallPackages('p7zip-rar')
vm.RemoteCommand(
f'7z x -o{mount_dir} {SPEC_JBB_2015_ISO}')
vm.RemoteCommand(f'cp -r {mount_dir}/* {SPEC_DIR}')
vm.RemoteCommand(f'rm -rf {mount_dir}')
def Uninstall(vm) -> None:
"""Cleanup Specjbb on the target vm."""
if _MOUNT_SPECJBB_ISO.value:
vm.RemoteCommand(f'sudo umount {SPEC_DIR}', ignore_failure=True)
vm.RemoteCommand(
f'rm -rf {SPEC_DIR} {SPEC_JBB_2015_ISO}', ignore_failure=True)
def AptInstall(vm) -> None:
Install(vm)
def YumInstall(vm) -> None:
Install(vm)
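# Hedged usage sketch (not part of PerfKitBenchmarker): a hypothetical stand-in
# VM that just logs the remote commands Install() would run; real callers pass a
# provisioned VM exposing the same methods. Note that Install() also reads the
# --mount_specjbb_iso flag, so flags must be parsed before calling it.
class _FakeVm:

  def RemoteCommand(self, cmd, ignore_failure=False):
    print('RemoteCommand: %s' % cmd)

  def InstallPreprovisionedBenchmarkData(self, benchmark, files, dest):
    print('Fetch %s for %s into %s' % (files, benchmark, dest))

  def InstallPackages(self, packages):
    print('InstallPackages: %s' % packages)

# Install(_FakeVm())  # would mkdir, mount the ISO and copy it into SPEC_DIR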
| [
"[email protected]"
] | |
00a6aff14bc99fe9474e83e5666735233ae29dfb | bd08d0532f20b7285b437c9bf620de1bbcd5b9ea | /aalh_iit_buildings_03/cleanup-originaldate-column.py | a748a4d463968eae9fc2d7c2fbbd1be361d099a4 | [
"Unlicense"
] | permissive | johndewees/iitmigration | a9e8a31ba6ceb541ce12c22fd612596cc243dbca | 4dadfbecda719d6e7d60af076a231aedec3c862f | refs/heads/main | 2023-03-14T17:06:58.777683 | 2021-03-27T20:44:58 | 2021-03-27T20:44:58 | 320,086,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,288 | py | from openpyxl import load_workbook
import re
filename = 'aalh_iit_buildings_03.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 15
maximumcol = 15
minimumrow = 7
maximumrow = 503
iterationrow = 7
targetcol = 15
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
for cell in row:
print(iterationrow)
testvar = ws.cell(row=iterationrow, column=targetcol).value
print(testvar)
cleandate = None
approx = 'approximately '
try:
if testvar == None:
ws.cell(row=iterationrow, column=targetcol).value = ''
elif testvar.startswith('c'):
                cleandate = re.findall(r'\d\d\d\d', testvar)
ws.cell(row=iterationrow, column=targetcol).value = approx + cleandate[0]
elif testvar.startswith('C'):
                cleandate = re.findall(r'\d\d\d\d', testvar)
ws.cell(row=iterationrow, column=targetcol).value = approx + cleandate[0]
elif testvar.startswith('a'):
                cleandate = re.findall(r'\d\d\d\d', testvar)
ws.cell(row=iterationrow, column=targetcol).value = approx + cleandate[0]
elif testvar.endswith('?'):
cleandate = testvar[:-1]
ws.cell(row=iterationrow, column=targetcol).value = approx + cleandate
elif testvar.find('-') != -1:
cleandate = testvar
ws.cell(row=iterationrow, column=targetcol).value = cleandate
elif testvar.find(',') != -1:
cleandate = testvar
ws.cell(row=iterationrow, column=targetcol).value = cleandate
elif testvar.find('/') != -1:
cleandate = testvar
ws.cell(row=iterationrow, column=targetcol).value = cleandate
            else:
                cleandate = re.findall(r'\d\d\d\d', testvar)
ws.cell(row=iterationrow, column=targetcol).value = cleandate[0]
print(ws.cell(row=iterationrow, column=targetcol).value)
        except Exception: # keep processing the remaining cells even if one value is malformed
            print('STATUS = PROBLEM')
iterationrow = iterationrow + 1
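# Examples of the normalisation above (one per branch):
#   'c1925' / 'C1925' / 'about 1925' -> 'approximately 1925'
#   '1930?' -> 'approximately 1930'
#   '1942-1945', '1942, 1943' and '03/1950' are kept verbatim
#   'built 1907' -> '1907' (first four-digit year found)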
wb.save('aalh_iit_buildings_03.xlsx') | [
"[email protected]"
] | |
ad856f2f30281ddb79e3bf362020b12dc87c6356 | 4331b28f22a2efb12d462ae2a8270a9f666b0df1 | /.history/dvdstore/webapp/urls_20190914174430.py | 36de7db40b005ead81a37d045a4710614dacf611 | [] | no_license | ZiyaadLakay/csc312.group.project | ba772a905e0841b17478eae7e14e43d8b078a95d | 9cdd9068b5e24980c59a53595a5d513c2e738a5e | refs/heads/master | 2020-07-26T23:30:22.542450 | 2019-09-16T11:46:41 | 2019-09-16T11:46:41 | 200,703,160 | 0 | 0 | null | 2019-08-05T17:52:37 | 2019-08-05T17:52:37 | null | UTF-8 | Python | false | false | 1,307 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('clerk/', views.clerk, name='clerk'),
path('clerk/register2',views.register2, name='register2'),
path('clerk/register3',views.register3, name='register3'),
path('transactions/register2',views.register2, name='register2'),
path('transactions/register3',views.register3, name='register3'),
path('booking',views.booking, name='booking'),
path('clerk/checkout',views.checkout, name='checkout'),
path('clerk/checkin',views.checkin, name='checkin'),
path('transactions/', views.transactions, name='transactions'),
path('userstbl/', views.userstbl, name='userstbl'),
path('clerk/deleteMovie',views.deleteMovie, name='deleteMovie'),
path('transactions/deleteTransaction',views.deleteTransaction, name='deleteTransaction'),
path('userstbl/deleteUser',views.deleteUser, name='deleteUser'),
path('user_detail/', views.user_detail, name='user_detail'),
path('accounts/registerCustomer',views.registerCustomer, name='registerCustomer'),
path('user_detail/updateCustomer',views.updateCustomer, name='updateCustomer'),
path('user_detail/updateUser',views.updateUser, name='updateUser'),
]
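# Example resolutions (hypothetical requests against this URLconf):
#   GET /clerk/checkout         -> views.checkout   (name='checkout')
#   GET /user_detail/updateUser -> views.updateUser (name='updateUser')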
| [
"[email protected]"
] | |
27cee738666ad4dcd220cece1afed26b7244f2e2 | 09cc8367edb92c2f02a0cc1c95a8290ff0f52646 | /ipypublish/latex/ipypublish/contents_output.py | b77efa54be79edce10f9c64aabd7bb180ec95a7f | [
"BSD-3-Clause"
] | permissive | annefou/ipypublish | 7e80153316ab572a348afe26d309c2a9ee0fb52b | 917c7f2e84be006605de1cf8851ec13d1a163b24 | refs/heads/master | 2020-04-13T16:08:59.845707 | 2018-07-30T18:26:12 | 2018-07-30T18:26:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,808 | py | tplx_dict = {
'meta_docstring': 'with the main ipypublish content',
'document_packages': r"""
((*- if nb.metadata.ipub: -*))
((*- if nb.metadata.ipub.enable_breqn: -*))
\usepackage{breqn}
((*- endif *))
((*- endif *))
""",
'notebook_input': r"""
((*- if cell.metadata.ipub: -*))
((*- if cell.metadata.ipub.ignore: -*))
((*- elif cell.metadata.ipub.slideonly: -*))
((*- else -*))
((( super() )))
((*- endif *))
((*- else -*))
((( super() )))
((*- endif *))
""",
'notebook_input_markdown': r"""
((( cell.source | citation2latex | strip_files_prefix | convert_pandoc('markdown', 'json',extra_args=[]) | resolve_references | convert_pandoc('json','latex'))))
""",
'notebook_input_raw': r"""
((*- if cell.metadata.raw_mimetype: -*))
((*- if cell.metadata.raw_mimetype == "text/latex" -*))
((( super() )))
((*- endif *))
((*- endif *))
""",
'notebook_output': r"""
((*- if cell.metadata.ipub: -*))
((*- if cell.metadata.ipub.ignore: -*))
((*- elif cell.metadata.ipub.slideonly: -*))
((*- else -*))
((( super() )))
((*- endif *))
((*- else -*))
((( super() )))
((*- endif *))
""",
'notebook_output_markdown': """
((*- if cell.metadata.ipub: -*))
((*- if cell.metadata.ipub.mkdown: -*))
((( output.data['text/markdown'] | citation2latex | strip_files_prefix | convert_pandoc('markdown', 'json',extra_args=[]) | resolve_references | convert_pandoc('json','latex'))))
((*- endif *))
((*- endif *))
""",
'notebook_output_stream': r"""
((*- if cell.metadata.ipub: -*))
((*- if cell.metadata.ipub.ignore: -*))
((*- else -*))
((( super() )))
((*- endif *))
((*- else -*))
((( super() )))
((*- endif *))
""",
'notebook_output_latex': r"""
((*- if cell.metadata.ipub: -*))
((*- if cell.metadata.ipub.table and cell.metadata.ipub.equation -*))
((*- if output.data['text/latex'] | is_equation -*))
((( draw_equation(cell.metadata, output.data['text/latex']) )))
((*- else -*))
((( draw_table(cell, resources, output.data['text/latex']) )))
((*- endif *))
((*- else -*))
((*- if cell.metadata.ipub.table: -*))
((( draw_table(cell, resources, output.data['text/latex']) )))
((*- elif cell.metadata.ipub.equation: -*))
((( draw_equation(cell.metadata, output.data['text/latex']) )))
((*- endif *))
((*- endif *))
((*- endif *))
""",
# 'notebook_output_markdown':'',
'notebook_output_png': r"""
((( draw_figure(output.metadata.filenames['image/png'],
cell.metadata) )))
""",
'notebook_output_jpg': r"""
((( draw_figure(output.metadata.filenames['image/jpeg'],
cell.metadata) )))
""",
'notebook_output_svg': r"""
((( draw_figure(output.metadata.filenames['image/svg+xml'],
cell.metadata) )))
""",
'notebook_output_pdf': r"""
((( draw_figure(output.metadata.filenames['application/pdf'],
cell.metadata) )))
""",
'jinja_macros': r"""
((* macro draw_figure(filename, meta) -*))
((*- if meta.ipub: -*))
((*- if meta.ipub.figure: -*))
((* set filename = filename | posix_path *))
((*- block figure scoped -*))
((*- if meta.ipub.figure.placement: -*))
((*- if meta.ipub.figure.widefigure: -*))
\begin{figure*}[(((meta.ipub.figure.placement)))]
((*- else -*))
\begin{figure}[(((meta.ipub.figure.placement)))]
((*- endif *))
((*- else -*))
((*- if meta.ipub.figure.widefigure: -*))
\begin{figure*}
((*- else -*))
\begin{figure}
((*- endif *))
((*- endif *))
((*- if meta.ipub.figure.width: -*))
\begin{center}\adjustimage{max size={0.9\linewidth}{0.9\paperheight},width=(((meta.ipub.figure.width)))\linewidth}{((( filename )))}\end{center}
((*- elif meta.ipub.figure.height: -*))
\begin{center}\adjustimage{max size={0.9\linewidth}{0.9\paperheight},height=(((meta.ipub.figure.height)))\paperheight}{((( filename )))}\end{center}
((*- else -*))
\begin{center}\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{((( filename )))}\end{center}
((*- endif *))
((*- if resources.captions: -*))
((*- if resources.captions[meta.ipub.figure.label]: -*))
\caption{((( resources.captions[meta.ipub.figure.label] )))}
((*- else -*))
\caption{((( meta.ipub.figure.caption )))}
((*- endif *))
((*- elif meta.ipub.figure.caption: -*))
\caption{((( meta.ipub.figure.caption )))}
((*- endif *))
((*- if meta.ipub.figure.label: -*))
\label{((( meta.ipub.figure.label )))}
((*- endif *))
\end{figure}
((*- endblock figure -*))
((*- endif *))
((*- endif *))
((*- endmacro *))
((* macro draw_table(cell, resources, text) -*))
((*- block table scoped -*))
((*- if cell.metadata.ipub.table.placement: -*))
\begin{table}[(((cell.metadata.ipub.table.placement)))]
((*- else -*))
\begin{table}
((*- endif *))
((*- if resources.captions and cell.metadata.ipub.table.label -*))
((*- if resources.captions[cell.metadata.ipub.table.label]: -*))
\caption{((( resources.captions[cell.metadata.ipub.table.label] )))}
((*- elif cell.metadata.ipub.table.caption -*))
\caption{((( cell.metadata.ipub.table.caption )))}
((*- endif *))
((*- elif cell.metadata.ipub.table.caption -*))
\caption{((( cell.metadata.ipub.table.caption )))}
((*- endif *))
((*- if cell.metadata.ipub.table.label -*))
\label{((( cell.metadata.ipub.table.label )))}
((*- endif *))
\centering
\begin{adjustbox}{max width=\textwidth}
((*- if cell.metadata.ipub.table.alternate: -*))
\rowcolors{2}{(((cell.metadata.ipub.table.alternate)))}{white}
((*- endif *))
((( text )))
\end{adjustbox}
\end{table}
((*- endblock table -*))
((*- endmacro *))
((* macro draw_equation(meta, text) -*))
((*- block equation scoped -*))
((* set environment = "none" *))
((*- if meta.ipub.equation.environment: -*))
((*- if meta.ipub.equation.environment == "none" -*))
((* set environment = "none" *))
((*- elif meta.ipub.equation.environment == "equation" -*))
((* set environment = "equation" *))
((*- elif meta.ipub.equation.environment == "equation*" -*))
((* set environment = "equation*" *))
((*- elif meta.ipub.equation.environment == "align" -*))
((* set environment = "align" *))
((*- elif meta.ipub.equation.environment == "align*" -*))
((* set environment = "align*" *))
((*- elif meta.ipub.equation.environment == "multline" -*))
((* set environment = "multline" *))
((*- elif meta.ipub.equation.environment == "multline*" -*))
((* set environment = "multline*" *))
((*- elif meta.ipub.equation.environment == "breqn" -*))
((*- if nb.metadata.ipub: -*))
((*- if nb.metadata.ipub.enable_breqn: -*))
((* set environment = "dmath" *))
((*- endif *))
((*- endif *))
((*- elif meta.ipub.equation.environment == "breqn*" -*))
((*- if nb.metadata.ipub: -*))
((*- if nb.metadata.ipub.enable_breqn: -*))
((* set environment = "dmath*" *))
((*- endif *))
((*- endif *))
((*- elif meta.ipub.equation.environment == "gather" -*))
((* set environment = "gather" *))
((*- elif meta.ipub.equation.environment == "gather*" -*))
((* set environment = "gather*" *))
((*- endif *))
((*- endif *))
((* if environment == "none" *))
((( text )))
((*- else -*))
((*- if meta.ipub.equation.label and not "*" in environment -*))
\begin{(((environment)))}\label{((( meta.ipub.equation.label )))}
((*- else -*))
\begin{(((environment)))}
((*- endif *))
((( text | remove_dollars )))
\end{(((environment)))}
((*- endif *))
((*- endblock equation -*))
((*- endmacro *))
"""
}
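# Hedged example of the notebook cell metadata these templates react to, e.g.
# to route a PNG output through the draw_figure macro above:
#   cell.metadata.ipub = {"figure": {"caption": "A result", "label": "fig:res", "width": 0.6}}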
| [
"[email protected]"
] | |
593a5277c49cb351e6a1a0693bfb2ffa039b7d97 | f063232b59eb7535e4212ec2b6b477c472fdb56e | /palindrome-partition.py | ebe2bd618596f01e488555e1e5c598ce1eba0483 | [] | no_license | xzjh/OJ_LeetCode | a01d43f6925bb8888bb79ca8a03a75dd8a6eac07 | fa2cfe2ec7774ab4a356520668d5dbee9d63077c | refs/heads/master | 2021-01-20T11:13:36.291125 | 2015-10-01T09:04:47 | 2015-10-01T09:04:47 | 25,239,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | class Solution:
# @param s, a string
# @return a list of lists of string
def partition(self, s):
def is_palindrome(s):
if len(s) < 2:
return True
l = 0
r = len(s) - 1
while r > l:
if s[r] != s[l]:
return False
r -= 1
l += 1
return True
def dfs(s, output, result):
if len(s) == 0:
result.append(output)
return
for i in range(len(s)):
if is_palindrome(s[:i + 1]):
new_output = list(output)
new_output.append(s[:i + 1])
dfs(s[i + 1:], new_output, result)
result = []
dfs(s, [], result)
return result
s = Solution()
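# Expected output of the next line: [['a', 'a', 'b'], ['aa', 'b']]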
print s.partition('aab') | [
"[email protected]"
] | |
87b5f7cbad951f7e894f38e1220685c8c084589d | cca3f6a0accb18760bb134558fea98bb87a74806 | /aising2020/C/main.py | ee81245bfc173a7e47f1ff52ccae2ee72c34ddb4 | [] | no_license | Hashizu/atcoder_work | 5ec48cc1147535f8b9d0f0455fd110536d9f27ea | cda1d9ac0fcd56697ee5db93d26602dd8ccee9df | refs/heads/master | 2023-07-15T02:22:31.995451 | 2021-09-03T12:10:57 | 2021-09-03T12:10:57 | 382,987,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | #!/usr/bin/env python3
import sys
MAX = 10**2
def solve(N: int):
ans = [0]*N
for xi in range(1, MAX):
x2 = xi**2
for yi in range(1, MAX):
y2 = yi**2
for zi in range(1, MAX):
k = x2 + y2 + zi**2 + xi*yi + xi * zi + yi*zi
if k > N: break
                else: ans[k-1] += 1
for ai in ans:
print(ai)
return
# Generated by 1.1.7.1 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
solve(N)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
30b5b0bebe33ffc13ec4780f11739d0ea5554e96 | a3c86385115ea1831974624ac0d293f97ea40e48 | /129/sum_path.py | dd7c64306a1809d57371d983221340595a15ddbc | [] | no_license | yaolizheng/leetcode | 7adba764d2d913eb7b07bdb62e74460dea755e66 | bb2a657fa4e2894b3dcb1e3cc57a17b53787d0f6 | refs/heads/master | 2021-07-08T22:21:31.991385 | 2019-01-25T18:52:59 | 2019-01-25T18:52:59 | 128,838,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | from tree import TreeNode
def helper(root, val):
if not root:
return 0
val = val * 10 + root.value
if root.left is None and root.right is None:
return val
return helper(root.left, val) + helper(root.right, val)
def sum_path(root):
return helper(root, 0)
if __name__ == '__main__':
root = TreeNode(4)
root.left = TreeNode(9)
root.right = TreeNode(0)
root.left.left = TreeNode(5)
root.left.right = TreeNode(1)
print sum_path(root)
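    # Expected output for the tree above: 495 + 491 + 40 = 1026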
| [
"[email protected]"
] | |
8e05700d0271f8372d294336dbb969c824e222aa | 488e0934b8cd97e202ae05368c855a57b299bfd1 | /Python/oop_extra_prac.py | 550260dc1f74eeee4619947ee51884506e4c159f | [] | no_license | didemertens/udemy_webdev | 4d96a5e7abeec1848ecedb97f0c440cd50eb27ac | 306215571be8e4dcb939e79b18ff6b302b75c952 | refs/heads/master | 2020-04-25T00:24:45.654136 | 2019-04-13T16:00:47 | 2019-04-13T16:00:47 | 172,377,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,897 | py | class Dog:
# Class attribute
species = 'mammal'
# Initializer / Instance attributes
def __init__(self, name, age):
self.name = name
self.age = age
self.is_hungry = True
# Instance method
def description(self):
return self.name, self.age
# Instance method
def speak(self, sound):
return "%s says %s" % (self.name, sound)
# Instance method
def eat(self):
self.is_hungry = False
def walk(self):
return f"{self.name} is walking!"
# Child class (inherits from Dog class)
class RussellTerrier(Dog):
def run(self, speed):
return "%s runs %s" % (self.name, speed)
# Child class (inherits from Dog class)
class Bulldog(Dog):
def run(self, speed):
return "%s runs %s" % (self.name, speed)
# sam = Dog("Sam",9)
# bobby = Dog("Bobby",2)
nora = Dog("Nora",4)
# def get_biggest_number(*args):
# return max(args)
# oldest = get_biggest_number(sam.age,bobby.age,nora.age)
# print(f"The oldest dog is {oldest} years old.")
class Pets(Dog):
animals = []
def __init__(self, animals):
self.animals = animals
def amount_pets(self):
return f"I have {len(self.animals)} pets."
def walkie(self):
for dog in self.animals:
print(dog.walk())
def list_animals(self):
return self.animals
my_dogs = [
Bulldog("Tom", 6),
RussellTerrier("Fletcher", 7),
Dog("Larry", 9)
]
my_pets = Pets(my_dogs)
# print(my_pets.amount_pets())
# for dog in my_dogs:
# print(f"{dog.name} is {dog.age} years old.")
# print(f"And they are all {dog.species}s of course.")
# for dog in my_dogs:
# dog.eat()
# dogs_are_hungry = False
# for dog in my_dogs:
# if dog.is_hungry:
# dogs_are_hungry = True
# if dogs_are_hungry == True:
# print("My dogs are hungry")
# else:
# print("My dogs are not hungry.")
my_pets.walkie()
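# Expected output of walkie():
#   Tom is walking!
#   Fletcher is walking!
#   Larry is walking!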
| [
"[email protected]"
] | |
0880858575b45283344ed989aec4b157ea264f64 | 9a8fe99c7316dfce343be81d2c3c1a6c4f22572c | /set98.py | d9b34c9ec1fe53465331624d5c3eba8db807b6bf | [] | no_license | Srija-U/codekatabeginner | 5e4d540484529dbafada04d3eac96eab7f98a693 | 8d088e04de1d48d9befb975697e9121f06bb164a | refs/heads/master | 2020-04-30T00:58:51.445394 | 2019-07-01T15:43:05 | 2019-07-01T15:43:05 | 176,516,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | import math
l=[int(i) for i in input().split()]
print(int(((l[0]*l[1])/(math.gcd(l[0],l[1])))))
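# Uses the identity lcm(a, b) = (a * b) / gcd(a, b),
# e.g. input "4 6" -> (4 * 6) / gcd(4, 6) = 24 / 2 = 12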
| [
"[email protected]"
] | |
32a7c8b65a4dc828d2d1f6a85ccb90bfb8478f72 | f6b1db8c0503a292f6a1da31800269e0bb5f39bd | /web_flask/3-python_route.py | 91e5ddd0433581ce73379129958444807f16a642 | [] | no_license | arleybri18/AirBnB_clone_v2 | 142883fde2629c7eb75dddc8e4375a9ca1714555 | 111cabf15cadba09f018b2fe359eec68495035dc | refs/heads/master | 2020-07-07T03:44:31.456739 | 2019-09-09T15:16:55 | 2019-09-09T15:16:55 | 203,235,771 | 0 | 0 | null | 2019-08-19T19:21:54 | 2019-08-19T19:21:54 | null | UTF-8 | Python | false | false | 711 | py | #!/usr/bin/python3
""" Import flask class """
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello():
""" Function to handle request """
return 'Hello HBNB!'
@app.route('/hbnb')
def hello_hbnb():
""" Function to handle request to path /hbnb """
return 'HBNB'
@app.route('/c/<text>')
def c_route(text):
""" Function to handle request with a variable """
return 'C %s' % text.replace('_', ' ')
@app.route('/python/')
@app.route('/python/<text>')
def python(text='is cool'):
""" Function to handle request with a variable and data default """
return 'Python %s' % text.replace('_', ' ')
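# Example responses (against the dev server started below):
#   GET /python/       -> "Python is cool"
#   GET /python/is_fun -> "Python is fun"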
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
| [
"[email protected]"
] | |
7b45092077b7942834601a747d56cdfaf4309e09 | ee79e734486c0ca550bb8238ef54c78c7727384a | /Classifier/methClassifier.py | 400e6e2a59a3f992417353d314c2404717242201 | [] | no_license | neilrobertson/BICRCode | 212636e5395f0c0e4dfb3ac3c133f01eb07273ee | 7b3f4da9cdefd7680f07b707339aee59faece1d2 | refs/heads/master | 2020-03-30T16:07:22.637571 | 2018-10-03T10:12:57 | 2018-10-03T10:12:57 | 151,394,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,632 | py | #!/usr/bin/env python
import os
import sys
import csv
import getopt
import math
from genemapping import Ensembl
from bed.treatment import Bed, ExtendedBed
from csvfile.indexedcsv import IndexedCSV
from affy.NetAffxAnnotation import NetAffxAnnotation
try:
opts, args = getopt.getopt(sys.argv[1:], "i:o:a:ms", [])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
# takes in a csv file of (point) coordinates and annotates each one with gene, TSS/TTS, exon/intron and CpG-island context
affyComparisonFile = None
onlyMethProbesWithAffyProbes = False
allRows = True
for o, a in opts:
if o=="-i":
infile = a
elif o=="-o":
outfile = a
elif o=="-a":
affyComparisonFile = a
elif o=="-m": # matching probes
onlyMethProbesWithAffyProbes = True
elif o=="-s": # significant probes
allRows = False
reader = csv.reader(open(infile), delimiter="\t")
writer = csv.writer(open(outfile, "w"), delimiter="\t")
###
TSS_TTS_Distance = 5000
TTS_TTS_Distance_Human = str(TSS_TTS_Distance / 1000) + "kb"
Small_TSS_TTS_Distance = 1000
Small_TTS_TTS_Distance_Human = str(Small_TSS_TTS_Distance / 1000) + "kb"
# load data
genedata = Ensembl.EnsemblGenes(assembly="hg18", annotation="ncbi36.1")
genes = Ensembl.ReverseGeneMapping(genedata)
exons = Ensembl.ReverseExonMapping(genedata)
transcriptionSites = Ensembl.TranscriptionSites(genedata)
cpgIslands = ExtendedBed(os.path.expanduser("~/mount/publicdata/hg18/cpgislands/cpgislands-0-index.bed"))
affyannotation = NetAffxAnnotation()
paddedGenes = Ensembl.ReverseGeneMapping(genedata, tssPadding = TSS_TTS_Distance)
def isUpstream(distance, strand):
if strand == "+":
return 'Y' if distance >= 0 else 'N'
elif strand == "-":
        return 'Y' if distance <= 0 else 'N'
    else:
        # unexpected strand value; abort rather than misclassify the probe
        exit(-1)
def isDownstream(distance, strand):
if strand == "+":
return 'Y' if distance <= 0 else 'N'
elif strand == "-":
        return 'Y' if distance >= 0 else 'N'
    else:
        # unexpected strand value; abort rather than misclassify the probe
        exit(-1)
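# Worked example: a '+' strand gene with its TSS at 1000 and a probe at coord 800
# gives distance = 1000 - 800 = 200, so isUpstream(200, '+') == 'Y' and
# isDownstream(200, '+') == 'N'; on the '-' strand the signs flip.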
if not affyComparisonFile == None:
#affyMapping = ExtendedBed(os.path.expanduser("~/mount/publicdata/positions2affy/HG-U133Plus2.csv"), chrPos=0, startPos = 2, stopPos=3, defaultkeys=["chr", "strand", "start", "stop", "affy"])
#print affyMapping.getValuesOfOverlappingIntervals("chr16", 72982016, 72983513)
affyComparison = IndexedCSV(affyComparisonFile)
headerRow = ['Index', 'ColumnID', 'Symbol', 'Chr', 'Mapinfo','Coord']
headerRow.extend(['PD30.Avg', 'PD56.Avg', 'Fold change', 'Log2MethFC', 'Bonferroni(p-value (PD56 vs. PD30))', 'Meth'])
headerRow.extend(['In Gene', 'Genes', 'Names', 'Gene Bounds'])
headerRow.extend([TTS_TTS_Distance_Human+' up or Gene Body', TTS_TTS_Distance_Human+' up or Gene Body Genes', TTS_TTS_Distance_Human+' up or Gene Body Names', TTS_TTS_Distance_Human+' up or Gene Body Gene Bounds'])
headerRow.extend(['Gene TSS Distance',
#'Gene TTS Distance',
'Nearest TSS',
'Nearest TSS Strand',
#'Nearest TTS',
#'Nearest TTS Strand'
]) #'Nearest TSS Distance', 'Nearest TTS Distance'])
headerRow.extend([TTS_TTS_Distance_Human+" TSS",
Small_TTS_TTS_Distance_Human+" TSS",
#TTS_TTS_Distance_Human+" TTS",
#TTS_TTS_Distance_Human+" up chr of nearest TSS",
#TTS_TTS_Distance_Human+" down chr of nearest TSS",
#TTS_TTS_Distance_Human+" up chr of nearest TTS",
#TTS_TTS_Distance_Human+" down chr of nearest TTS"
])
headerRow.extend([TTS_TTS_Distance_Human+" upstream of nearest TSS",
TTS_TTS_Distance_Human+" downstream of nearest TSS",
Small_TTS_TTS_Distance_Human+" upstream of nearest TSS",
Small_TTS_TTS_Distance_Human+" downstream of nearest TSS",
#TTS_TTS_Distance_Human+" upstream of nearest TTS",
#TTS_TTS_Distance_Human+" downstream of nearest TTS"
])
#headerRow.extend([TTS_TTS_Distance_Human+" of TSS of Gene it's on",
#TTS_TTS_Distance_Human+" of TTS of Gene it's on"
# ])
headerRow.extend([ 'In Exon', 'Exons', 'In Intron', 'Intergenic'])
headerRow.extend(['In CPG Island','cpg.start', 'cpg.end', 'cpg.name', 'cpg.length', 'cpg.cpgNum', 'cpg.gcNum', 'cpg.perCpg', 'cpg.perGc', 'pgp.obsExp'])
if not affyComparisonFile == None:
headerRow.append("Meth Probe Location on Gene with Affy probe")
headerRow.append("Ensid")
for key in affyComparison.keys:
headerRow.append(key)
writer.writerow(headerRow)
for row in reader:
try:
index = int(row[0])
columnid = row[1]
symbol = row[2]
chr = row[3]
mapinfo = int(row[4]) # 1 indexed
coord = mapinfo-1 # 1 indexed convert to 0 indexed
pd30 = float(row[5])
pd56 = float(row[6])
fold = float(row[7])
pvalue = float(row[8])
methProbeSignificant = True if abs(fold) >= 1.1 and pvalue <= 0.05 else False
if not chr.startswith("chr"):
chr = "chr" + chr
except ValueError:
continue # this will be the header
# starting information
outputRow = [str(index), columnid, symbol, chr, str(mapinfo),str(coord)]
outputRow.extend([pd30, pd56, fold, math.log(abs(fold), 2) * (1 if fold > 0 else -1), pvalue])
if pvalue>0.05 or abs(fold)<1.1:
outputRow.append("NoChange")
else:
outputRow.append("Hypo" if fold <0.0 else "Hyper")
# genes
ingenes = genes.getValuesOfOverlappingIntervals(chr, coord, coord)
if len(ingenes)==0:
outputRow.append("N")
else:
outputRow.append('Y')
outputRow.append(", ".join(ingenes))
# gene names
geneNames = []
for gene in ingenes:
if genedata[gene].name != "--":
geneNames.append(genedata[gene].name)
outputRow.append(", ".join(geneNames))
# gene bounds
genebounds = []
for geneid in ingenes:
genebounds.append(str(genedata[geneid].start)+"-"+str(genedata[geneid].end))
outputRow.append(", ".join(genebounds))
# near genes
inPaddedGenes = paddedGenes.getValuesOfOverlappingIntervals(chr, coord, coord)
if len(inPaddedGenes)==0:
outputRow.append("N")
else:
outputRow.append('Y')
outputRow.append(", ".join(inPaddedGenes))
# gene names
geneNames = []
for gene in inPaddedGenes:
if genedata[gene].name != "--":
geneNames.append(genedata[gene].name)
outputRow.append(", ".join(geneNames))
# gene bounds
genebounds = []
for geneid in inPaddedGenes:
genebounds.append(str(genedata[geneid].start)+"-"+str(genedata[geneid].end))
outputRow.append(", ".join(genebounds))
# TSS distances
tsses = []
for gene in ingenes:
tsses.append(str(genedata[gene].tss()-coord))
outputRow.append(", ".join(tsses))
# TTS distances
ttses = []
for gene in ingenes:
ttses.append(str(genedata[gene].tts()-coord))
#
# outputRow.append(", ".join(ttses))
# nearest TSS
nearestTSS = transcriptionSites.getNearestStartSite(chr, coord)
nearestTSSAbs = abs(nearestTSS[0].key - coord)
tssValues = []
tssStrand = []
for tss in nearestTSS:
tssValues.append(str(tss.key-coord))
tssStrand.append(genedata[tss.data].strand)
outputRow.append(", ".join(tssValues))
outputRow.append(", ".join(tssStrand))
# nearest TTS
nearestTTS = transcriptionSites.getNearestTerminationSite(chr, coord)
nearestTTSAbs = abs(nearestTTS[0].key - coord)
ttsValues = []
ttsStrand = []
    for tts in nearestTTS:
        ttsValues.append(str(tts.key-coord))
        ttsStrand.append(genedata[tts.data].strand) # fixed: was genedata[tss.data], reusing the variable left over from the TSS loop
# outputRow.append(", ".join(ttsValues))
# outputRow.append(", ".join(ttsStrand))
# nearest TSS & TTS distance
#outputRow.append(abs(nearestTSS))
#outputRow.append(abs(nearestTTS))
# TSS
if nearestTSSAbs<=TSS_TTS_Distance:
withindistofTSS = "Y"
        withindistUpChrmofTSS = "Y" if min(int(v) for v in tssValues)<=0 else "N" # tssValues holds strings, so compare numerically
        withindistDownChrmofTSS = "Y" if max(int(v) for v in tssValues)>=0 else "N"
else:
withindistofTSS = "N"
withindistUpChrmofTSS = ""
withindistDownChrmofTSS = ""
# small TSS
if nearestTSSAbs<=Small_TSS_TTS_Distance:
smallWithindistofTSS = "Y"
        smallWithindistUpChrmofTSS = "Y" if min(int(v) for v in tssValues)<=0 else "N" # numeric comparison, as above
        smallWithindistDownChrmofTSS = "Y" if max(int(v) for v in tssValues)>=0 else "N"
else:
smallWithindistofTSS = "N"
smallWithindistUpChrmofTSS = ""
smallWithindistDownChrmofTSS = ""
#TTS
if nearestTTSAbs<=TSS_TTS_Distance:
withindistofTTS = "Y"
        withindistUpChrmofTTS = "Y" if min(int(v) for v in ttsValues)<=0 else "N" # numeric comparison, as above
        withindistDownChrmofTTS = "Y" if max(int(v) for v in ttsValues)>=0 else "N"
else:
withindistofTTS = "N"
withindistUpChrmofTTS = ""
withindistDownChrmofTTS = ""
outputRow.extend([withindistofTSS, smallWithindistofTSS,
#withindistofTTS,
#withindistUpChrmofTSS,withindistDownChrmofTSS,
#withindistUpChrmofTTS, withindistDownChrmofTTS
])
# up / down stream of nearest TSS
tssUpstream = []
tssDownstream = []
for i in range(len(tssValues)):
distance = int(tssValues[i])
strand = tssStrand[i]
if abs(distance) <= TSS_TTS_Distance:
tssUpstream.append(isUpstream(distance, strand))
tssDownstream.append(isDownstream(distance, strand))
else:
tssUpstream.append('')
tssDownstream.append('')
outputRow.extend([", ".join(tssUpstream), ", ".join(tssDownstream) ])
# small up / down stream of nearest TSS
smallTssUpstream = []
smallTssDownstream = []
for i in range(len(tssValues)):
distance = int(tssValues[i])
strand = tssStrand[i]
if abs(distance) <= Small_TSS_TTS_Distance:
smallTssUpstream.append(isUpstream(distance, strand))
smallTssDownstream.append(isDownstream(distance, strand))
else:
smallTssUpstream.append('')
smallTssDownstream.append('')
outputRow.extend([", ".join(smallTssUpstream), ", ".join(smallTssDownstream) ])
# # up / down stream of nearest TTS
# ttsUpstream = []
# ttsDownstream = []
# for i in range(len(ttsValues)):
# distance = int(ttsValues[i])
# strand = ttsStrand[i]
# if abs(distance) <= TSS_TTS_Distance:
# ttsUpstream.append(isUpstream(distance, strand))
# ttsDownstream.append(isDownstream(distance, strand))
# else:
# ttsUpstream.append('')
# ttsDownstream.append('')
#
# outputRow.extend([", ".join(ttsUpstream), ", ".join(ttsDownstream) ])
# TTS / TTS of gene it's on
# nearTSSofGene = False
# for tss in tsses:
# if abs(int(tss))<TSS_TTS_Distance:
# nearTSSofGene = True
#
# nearTTSofGene = False
# for tts in ttses:
# if abs(int(tts))<TSS_TTS_Distance:
# nearTTSofGene = True
#
# if len(ingenes)==0:
# # not on a gene
# outputRow.extend(["",
# #""
# ])
# else:
# outputRow.extend(["Y" if nearTSSofGene else "N",
# #"Y" if nearTTSofGene else "N"
# ])
# exons
inexons = exons.getValuesOfOverlappingIntervals(chr, coord, coord)
if len(inexons)==0:
outputRow.append("N")
else:
outputRow.append('Y')
outputRow.append(", ".join(inexons))
# introns
outputRow.append("Y" if (len(ingenes)>0 and len(inexons)==0) else "N")
# intergenic
outputRow.append("Y" if (len(ingenes)==0 and nearestTSSAbs>TSS_TTS_Distance and nearestTTSAbs>TSS_TTS_Distance) else "N")
# cpg islands
incpg = cpgIslands.getValuesOfOverlappingIntervals(chr, coord, coord)
if len(incpg)==0:
outputRow.append("N")
else:
outputRow.append('Y')
    # allow for multiple overlapping CpG islands (this shouldn't ever happen, but the BED file contents aren't guaranteed)
cpg_Starts = []
cpg_Ends = []
cpg_Names = []
cpg_Lengths = []
cpg_cpgNum = []
cpg_gcNum = []
cpg_perCpg = []
cpg_perGc = []
cpg_obsExp = []
for cpg in incpg:
cpg_Starts.append(cpg['chromStart'])
cpg_Ends.append(cpg['chromEnd'])
cpg_Names.append(cpg['name'])
cpg_Lengths.append(cpg['length'])
cpg_cpgNum.append(cpg['cpgNum'])
cpg_gcNum.append(cpg['gcNum'])
cpg_perCpg.append(cpg['perCpg'])
cpg_perGc.append(cpg['perGc'])
cpg_obsExp.append(cpg['obsExp'])
outputRow.append(", ".join(cpg_Starts))
outputRow.append(", ".join(cpg_Ends))
outputRow.append(", ".join(cpg_Names))
outputRow.append(", ".join(cpg_Lengths))
outputRow.append(", ".join(cpg_cpgNum))
outputRow.append(", ".join(cpg_gcNum))
outputRow.append(", ".join(cpg_perCpg))
outputRow.append(", ".join(cpg_perGc))
outputRow.append(", ".join(cpg_obsExp))
if affyComparisonFile == None:
writer.writerow(outputRow)
else:
inaffys = {}
for inPaddedGene in inPaddedGenes:
for affyprobe in affyannotation.getAffysForEnsembl(inPaddedGene):
inaffys[affyprobe] = inPaddedGene
if len(inaffys) == 0:
            # no matching Affy probes; still output the line unless we are only interested in meth probes with Affy matches
if onlyMethProbesWithAffyProbes==False:
writer.writerow(outputRow)
else:
# print out every affy
for inaffy in inaffys: # one row per affy
affyrow = outputRow[:] # clone row
# where is the meth probe in relation to the gene that this affy probe measures
genechr = genedata[inaffys[inaffy]].chr
genestrand = genedata[inaffys[inaffy]].strand
genestart = genedata[inaffys[inaffy]].start
geneend = genedata[inaffys[inaffy]].end
assert genestrand == "+" or genestrand == "-"
tss = genestart if genestrand == "+" else geneend
if abs(tss - coord) < TSS_TTS_Distance:
if abs(tss-coord) < Small_TSS_TTS_Distance: # within 1kb of the TSS in either direction
category = "Promoter"
                    elif isUpstream(tss-coord, genestrand) == 'Y': # is upstream (this catches all of the 5kb upstream); isUpstream returns 'Y'/'N' strings and both are truthy, so compare explicitly
category = "Promoter"
else:
category = "GeneBody"
else:
category = "GeneBody"
if len(incpg)>0:
category = category + "-cpgIsland"
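                # e.g. a '+' strand gene with TSS 1000: coord 600 (400 bases
                # upstream) -> "Promoter"; coord 3000 (inside the body) -> "GeneBody".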
affyrow.append(category)
affyrow.append(inaffys[inaffy])
affycomparisonrow = affyComparison[inaffy]
for key in affyComparison.keys:
affyrow.append(affycomparisonrow[key])
affyProbeSignificant = True if float(affycomparisonrow['BY-fdr'])<=0.05 and abs(float(affycomparisonrow['fc']))>=1.5 else False
# only significant probes when allRows == False
if (methProbeSignificant == True and affyProbeSignificant == True) or allRows == True:
writer.writerow(affyrow)
| [
"[email protected]"
] | |
f0a6d0beca860b552d6f96fa60b61179b6c93ab1 | 16487965d6cce8f4034beca9b7021b1e8881a346 | /media/My_program/Engineering_Calculator/main.py | ea259d0bcc70256970705e2917d6e957d70706d7 | [] | no_license | bataysyk/site_resume | f20de5b00d8e37e7a5495da28c00fce976a07d42 | 2d00dce0a4618c1b36e99f8587f6d88eec0f5e45 | refs/heads/master | 2023-01-19T20:19:27.138973 | 2020-11-12T16:07:50 | 2020-11-12T16:07:50 | 285,685,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | from gui import *
if __name__ == "__main__":
root = Tk()
root["bg"] = "#000"
root.geometry("480x550+100+100")
root.title("Engineering Calculator.")
root.resizable(False, False)
app = Main(root)
app.pack()
root.mainloop()
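    # Note: "480x550+100+100" above is Tk geometry syntax: a 480x550 window
    # placed 100 px from the screen's left and top edges.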
| [
"[email protected]"
] | |
9ec080b479bbd6e52bf35e76780b6142c1d7497b | c63bf01b632c52dcfb19e78b47c36fb5efcab507 | /src/components/special_effects.py | 2abb26addac47772cecfe64447d520fb9b599d11 | [] | no_license | Grimmys/BubbleTanks2 | 3292173eb6abd66d40aa5306e65af381a47867bd | a015ece36b4bea80b92656ffc37e947b0919a536 | refs/heads/main | 2023-06-26T12:27:15.150425 | 2021-07-29T19:47:51 | 2021-07-29T19:47:51 | 400,833,006 | 1 | 0 | null | 2021-08-28T15:58:14 | 2021-08-28T15:58:13 | null | UTF-8 | Python | false | false | 19,550 | py | import pygame as pg
from random import uniform
from math import pi, sin, cos
from components.circle import make_circle
from components.utils import *
from data.constants import *
from data.bullets import BULLETS
from assets.paths import *
# load all images only one time, to increase game performance
images = {
"conversion": pg.image.load(DRONE_CONVERSION).convert_alpha(),
"teleport": pg.image.load(TELEPORTATION).convert_alpha(),
"damage_burst": pg.image.load(DAMAGE_BURST_IMAGE).convert_alpha(),
"damage_burst_bg": pg.image.load(DAMAGE_BURST_BG_IMAGE).convert_alpha(),
"stun_burst": pg.image.load(STUN_BURST_IMAGE).convert_alpha()
}
class Line:
def __init__(self, x, y, size, alpha, duration):
if size == 'SmallHitLines':
self.widths = [H(3), H(5), H(6)]
length = uniform(HF(59), HF(251))
else:
self.widths = [H(8), H(11), H(14)]
length = uniform(HF(216), HF(616))
radius = HF(32)
cosa, sina = cos(alpha), sin(alpha)
self.X0 = x + radius * cosa
self.Y0 = y - radius * sina
self.X1, self.Y1 = self.X0, self.Y0
self.vel_x = length * cosa / duration
self.vel_y = -length * sina / duration
def update(self, dt):
self.X1 += self.vel_x * dt
self.Y1 += self.vel_y * dt
def draw(self, surface, dx, dy):
pg.draw.line(surface, HIT_COLOR,
(self.X0 - dx, self.Y0 - dy),
(self.X1 - dx, self.Y1 - dy), self.widths[0])
pg.draw.line(surface, HIT_COLOR,
(self.X0 + (self.X1 - self.X0)*0.125 - dx,
self.Y0 + (self.Y1 - self.Y0)*0.125 - dy),
(self.X1 - (self.X1 - self.X0)*0.125 - dx,
self.Y1 - (self.Y1 - self.Y0)*0.125 - dy), self.widths[1])
pg.draw.line(surface, HIT_COLOR,
(self.X0 + (self.X1 - self.X0)*0.25 - dx,
self.Y0 + (self.Y1 - self.Y0)*0.25 - dy),
(self.X1 - (self.X1 - self.X0)*0.25 - dx,
self.Y1 - (self.Y1 - self.Y0)*0.25 - dy), self.widths[2])
class SpecialEffect:
def __init__(self, x, y, duration):
self.x = x
self.y = y
self.t = 0
self.duration = duration
self.running = True
@staticmethod
def set_image(name, size):
return pg.transform.scale(images[name], (size, size))
def update(self, dt):
self.t = min(self.t + dt, self.duration)
if self.t == self.duration:
self.running = False
def draw(self, screen, dx, dy):
pass
class BulletHitLines(SpecialEffect):
def __init__(self, x, y, size: str):
super().__init__(x, y, duration=90)
self.lines = self.create_lines(size)
def create_lines(self, size):
lines = []
beta = 0
for i in range(4):
angle = uniform(pi/16, 7*pi/16) + beta
beta += pi/2
lines.append(Line(self.x, self.y, size, angle, self.duration))
return lines
def update(self, dt):
super().update(dt)
for line in self.lines:
line.update(dt)
def draw(self, surface, dx, dy):
for line in self.lines:
line.draw(surface, dx, dy)
class LeechEffect(SpecialEffect):
circles_data = [
# radius | width
(H(4.224), H(1)),
(H(13.704), H(1)),
(H(23.232), H(1)),
(H(32.664), H(1)),
(H(42.144), H(1.224)),
(H(51.48), H(1.512)),
(H(60.936), H(1.776)),
(H(70.488), H(2.04)),
(H(79.944), H(2.28)),
(H(89.424), H(2.544))
]
frames = {
0: [0],
1: [1],
2: [2, 0],
3: [3, 1],
4: [4, 2, 0],
5: [5, 3, 1],
6: [6, 4, 2],
7: [7, 5, 3],
8: [8, 6, 4],
9: [9, 7, 5],
10: [9, 8, 6],
11: [9, 7],
12: [9, 8],
13: [9],
}
def __init__(self, x, y):
super().__init__(x, y, duration=249)
def draw(self, screen, dx, dy):
frame = min(13, int(14 * self.t / self.duration))
for index in self.frames[frame]:
r, w = self.circles_data[index]
pg.draw.circle(screen, LEECH_EFFECT_COLOR, (self.x-dx, self.y-dy), r, w)
class StarsAroundMob(SpecialEffect):
def __init__(self, mob_x, mob_y, mob_radius):
super().__init__(mob_x, mob_y, duration=2000)
self.angle = uniform(0, 2*pi)
self.timer = 0
self.radius = mob_radius + HF(60)
self.big_stars_marker = True
def get_stars_coords(self, dx, dy):
pos_1 = (
round(self.x + self.radius * cos(self.angle) - dx),
round(self.y - self.radius * sin(self.angle) - dy)
)
pos_2 = (
round(self.x + self.radius * cos(self.angle+2/3*pi) - dx),
round(self.y - self.radius * sin(self.angle+2/3*pi) - dy)
)
pos_3 = (
round(self.x + self.radius * cos(self.angle+4/3*pi) - dx),
round(self.y - self.radius * sin(self.angle+4/3*pi) - dy)
)
return pos_1, pos_2, pos_3
def update_stars_marker(self, dt):
self.timer += dt
if self.timer >= 80:
self.timer -= 80
self.angle += 0.4 * pi
self.big_stars_marker = not self.big_stars_marker
def update(self, dt):
super().update(dt)
self.update_stars_marker(dt)
@staticmethod
def draw_big_star(screen, x, y):
pg.draw.circle(screen, WHITE, (x, y), H(8), H(3))
pg.draw.line(screen, WHITE, (x, y - H(27)), (x, y + H(11)), H(3))
pg.draw.line(screen, WHITE, (x - H(10), y), (x + H(13), y), H(3))
@staticmethod
def draw_small_star(screen, x, y):
pg.draw.circle(screen, WHITE, (x, y), H(5))
def draw(self, screen, dx, dy):
if self.big_stars_marker:
for pos in self.get_stars_coords(dx, dy):
self.draw_big_star(screen, *pos)
else:
for pos in self.get_stars_coords(dx, dy):
self.draw_small_star(screen, *pos)
class SpriteEffect(SpecialEffect):
def __init__(self, x, y, surfaces, duration, fixed=False):
super().__init__(x, y, duration)
self.surfaces = surfaces
self.index = 0
self.fixed = fixed
def update(self, dt):
super().update(dt)
self.index = min(len(self.surfaces) - 1, int(self.t/self.duration * len(self.surfaces)))
def draw(self, screen, dx, dy):
surface = self.surfaces[self.index]
if self.fixed:
dx = dy = 0
screen.blit(surface, (self.x - surface.get_width()/2 - dx,
self.y - surface.get_height()/2 - dy))
def _init_conversion_surfaces() -> list:
surfaces = []
start_diam = HF(75.84)
delta_diam = HF(97.4)
for i in range(19):
diam = round(start_diam + i * delta_diam)
image = pg.transform.scale(images["conversion"], (diam, diam))
if i >= 15:
alpha = round((19 - i)/5 * 255)
image.set_alpha(alpha)
surface = pg.Surface(image.get_size(), pg.SRCALPHA)
surface.blit(image, (0, 0))
surfaces.append(surface)
return surfaces
def _init_flash_surfaces() -> list:
surfaces = []
n = 4
for i in range(n):
alpha = round(255 * (n - i) / n)
surface = pg.Surface(SCR_SIZE, pg.SRCALPHA)
surface.fill((255, 255, 255, alpha))
surfaces.append(surface)
return surfaces
def _init_teleport_surfaces() -> list:
surfaces = []
alphas = [255, 254, 247, 235, 218, 197, 171, 140, 104, 64]
diameters = [HF(264.24), HF(261.84), HF(254.64), HF(242.88), HF(226.32),
HF(204.96), HF(178.8), HF(148.08), HF(112.32), HF(72.0)]
for alpha, diam in zip(alphas, diameters):
size = (round(diam), round(diam))
image = pg.transform.scale(images["teleport"], size)
image.set_alpha(alpha)
surface = pg.Surface(image.get_size(), pg.SRCALPHA)
surface.blit(image, (0, 0))
surfaces.append(surface)
return surfaces
def _init_stun_burst_surfaces(size) -> list:
scale = size / 600
surfaces = []
alphas = [207, 164, 125, 92, 64, 41, 23]
diameters = [HF(57.6), HF(132.24), HF(201.6), HF(265.2), HF(323.28),
HF(375.84), HF(422.88), HF(464.4), HF(500.4), HF(530.88),
HF(555.84), HF(575.04), HF(588.96), HF(597.36), HF(600)]
for diam in diameters:
diam *= scale
size = (round(diam), round(diam))
surface = pg.transform.scale(images["stun_burst"], size)
surfaces.append(surface)
size = surfaces[-1].get_size()
base_surface = pg.transform.scale(images["stun_burst"], size)
for alpha in alphas:
base_surface.set_alpha(alpha)
surface = pg.Surface(size, pg.SRCALPHA)
surface.blit(base_surface, (0, 0))
surfaces.append(surface)
return surfaces
def _init_damage_burst_surfaces(size) -> list:
scale = size / 720
surfaces = []
bg_alphas = [255, 236, 217, 197, 177, 158, 138, 118, 98, 79, 59, 39, 20, 0]
alphas = [255, 255, 243, 230, 217, 204, 191, 178, 165, 152, 139, 126, 113, 100]
diameters = [HF(0), HF(72), HF(126), HF(180), HF(234), HF(288), HF(342),
HF(396), HF(450), HF(504), HF(558), HF(612), HF(666), HF(720)]
max_diam = round(diameters[-1] * scale)
max_size = (max_diam, max_diam)
bg_image = pg.transform.scale(images["damage_burst_bg"], max_size)
for diam, alpha, bg_alpha in zip(diameters, alphas, bg_alphas):
diam = round(diam * scale)
size = (diam, diam)
image = pg.transform.scale(images["damage_burst"], size)
image.set_alpha(alpha)
image_pos = round((max_diam - diam) / 2), round((max_diam - diam) / 2)
bg_image.set_alpha(bg_alpha)
surface = pg.Surface(max_size, pg.SRCALPHA)
surface.blit(image, image_pos)
surface.blit(bg_image, (0, 0))
surfaces.append(surface)
return surfaces
def _init_sticky_circle_surfaces() -> list:
surfaces = []
circle = make_circle(BULLETS["sticky"]["circles"][0], 20)
circle.update_pos(circle.radius, circle.radius, 0, 0)
circle.update_glares(0)
max_diam = round(circle.max_radius * 2)
base_surface = pg.Surface((max_diam, max_diam), pg.SRCALPHA)
circle.draw(base_surface)
diameters = [H(52.8), H(76.8), H(100.32), H(123.84), H(147.36)]
alphas = [255, 205, 154, 102, 51]
for diam, alpha in zip(diameters, alphas):
image = pg.transform.smoothscale(base_surface, (diam, diam))
image.set_alpha(alpha)
surface = pg.Surface(image.get_size(), pg.SRCALPHA)
surface.blit(image, (0, 0))
surfaces.append(surface)
return surfaces
def _init_light_red_circle_surfaces():
surfaces = []
circle_data = {
"type": "fixed",
"color": "light red",
"radius": 150,
"edge factor": 0.087,
"distance": 0,
"angle": 0
}
circle = make_circle(circle_data)
circle.update_pos(circle.max_radius, circle.max_radius, 0, 0)
circle.update_glares(0)
max_diam = round(circle.max_radius * 2)
base_surface = pg.Surface((max_diam, max_diam), pg.SRCALPHA)
circle.draw(base_surface)
diameters = [H(20.64), H(64.32), H(103.2), H(135.36), H(161.76), H(182.4), H(196.32)]
alphas = [255, 196, 144, 100, 64, 36, 16]
for diam, alpha in zip(diameters, alphas):
image = pg.transform.smoothscale(base_surface, (diam, diam))
image.set_alpha(alpha)
surface = pg.Surface(image.get_size(), pg.SRCALPHA)
surface.blit(image, (0, 0))
surfaces.append(surface)
return surfaces
def _init_red_circle_surfaces():
surfaces = []
circle_data = {
"type": "fixed",
"color": "red",
"radius": 150,
"edge factor": 0.086,
"distance": 0,
"angle": 0
}
circle = make_circle(circle_data)
circle.update_pos(circle.max_radius, circle.max_radius, 0, 0)
circle.update_glares(0)
max_diam = round(circle.max_radius * 2)
base_surface = pg.Surface((max_diam, max_diam), pg.SRCALPHA)
circle.draw(base_surface)
diameters = [H(20.64), H(64.32), H(103.2), H(135.36), H(161.76), H(182.4), H(196.32)]
alphas = [255, 196, 144, 100, 64, 36, 16]
for diam, alpha in zip(diameters, alphas):
image = pg.transform.smoothscale(base_surface, (diam, diam))
image.set_alpha(alpha)
surface = pg.Surface(image.get_size(), pg.SRCALPHA)
surface.blit(image, (0, 0))
surfaces.append(surface)
return surfaces
def _init_spawner_burst_surfaces():
surfaces = []
circle_data = {
"type": "fixed",
"color": "orange",
"radius": 98.16,
"edge factor": 0.04,
"distance": 0,
"angle": 0
}
circle = make_circle(circle_data)
circle.update_pos(circle.max_radius, circle.max_radius, 0, 0)
circle.update_glares(0)
max_diam = round(circle.max_radius * 2)
base_surface = pg.Surface((max_diam, max_diam), pg.SRCALPHA)
circle.draw(base_surface)
diameters = [H(239.2), H(280.32), H(322.56), H(182.52)]
alphas = [205, 154, 102, 51]
for diam, alpha in zip(diameters, alphas):
image = pg.transform.smoothscale(base_surface, (diam, diam))
image.set_alpha(alpha)
surface = pg.Surface(image.get_size(), pg.SRCALPHA)
surface.blit(image, (0, 0))
surfaces.append(surface)
return surfaces
def _init_shield_surfaces() -> list:
surfaces = []
radius = H(160)
surf_size = (2*radius, 2*radius)
alphas = [254, 177, 162, 146, 131, 115, 100, 85, 69, 54, 38, 23, 8]
for alpha in alphas:
surface = pg.Surface(surf_size, pg.SRCALPHA)
pg.draw.circle(surface, (255, 255, 255, alpha), (radius, radius), radius)
surfaces.append(surface)
return surfaces
def _init_sapper_attack_surfaces() -> list:
size = (H(166), H(166))
surfaces = [
pg.transform.scale(pg.image.load(SAPPER_IMG_1).convert_alpha(), size),
pg.transform.scale(pg.image.load(SAPPER_IMG_2).convert_alpha(), size),
pg.transform.scale(pg.image.load(SAPPER_IMG_3).convert_alpha(), size),
pg.transform.scale(pg.image.load(SAPPER_IMG_4).convert_alpha(), size),
pg.transform.scale(pg.image.load(SAPPER_IMG_5).convert_alpha(), size),
pg.transform.scale(pg.image.load(SAPPER_IMG_6).convert_alpha(), size),
pg.transform.scale(pg.image.load(SAPPER_IMG_7).convert_alpha(), size),
pg.transform.scale(pg.image.load(SAPPER_IMG_8).convert_alpha(), size),
]
return surfaces
def _init_sapper_surfaces() -> list:
surfaces = []
diam = H(55)
radius = H(27.5)
surf_size = (diam, diam)
circle_data = {
"type": "fixed",
"color": "red",
"radius": 98.16,
"edge factor": 0.122,
"distance": 0,
"angle": 0
}
circle = make_circle(circle_data)
circle.update_pos(circle.max_radius, circle.max_radius, 0, 0)
circle.update_glares(0)
circle_diam = round(circle.max_radius * 2)
surface_1 = pg.Surface((circle_diam, circle_diam), pg.SRCALPHA)
circle.draw(surface_1)
surface_2 = pg.Surface(surf_size, pg.SRCALPHA)
pg.draw.circle(surface_2, WHITE, (radius, radius), radius)
for i in range(10):
alpha = round(51 + 128 * i/9)
d = round((0.653 + 0.347 * i/9) * HF(55))
scaled_surface = pg.transform.smoothscale(surface_1, (d, d))
surface_2.set_alpha(alpha)
surface = pg.Surface(surf_size, pg.SRCALPHA)
surface.blit(scaled_surface, (round(diam - d)/2, round(diam - d)/2))
surface.blit(surface_2, (0, 0))
surfaces.append(surface)
for i in range(8, -1, -1):
surfaces.append(surfaces[i])
return surfaces
def _init_infection_surfaces() -> list:
surfaces = []
w, h = HF(120.286), HF(114.887)
circle_surfaces = []
k = 0.181
for surface in red_circle_surfaces:
diam = round(k * surface.get_width())
circle_surfaces.append(pg.transform.smoothscale(surface, (diam, diam)))
positions = [
(HF(44.284), 0.508 * pi),
(HF(39.183), 0.267 * pi),
(HF(40.364), 0.844 * pi),
(HF(4.759), 0.41 * pi),
(HF(12.402), -0.9 * pi),
(HF(42.413), 0.871 * pi),
(HF(42.86), -0.549 * pi),
(HF(35.49), -0.24 * pi),
(HF(48.775), 0.015 * pi)
]
for circle_surf in circle_surfaces:
        surface = pg.Surface((round(w), round(h)), pg.SRCALPHA) # w and h are floats from HF(); round to integer pixel sizes, as elsewhere in this module
for distance, angle in positions:
x = round(w/2 + distance * cos(angle) - circle_surf.get_width()/2)
y = round(h/2 - distance * sin(angle) - circle_surf.get_height()/2)
surface.blit(circle_surf, (x, y))
surfaces.append(surface)
return surfaces
conversion_surfaces = _init_conversion_surfaces()
flash_surfaces = _init_flash_surfaces()
teleport_surfaces = _init_teleport_surfaces()
stun_burst_surfaces = _init_stun_burst_surfaces(800)
stun_burst_large_surfaces = _init_stun_burst_surfaces(1100)
damage_burst_surfaces = _init_damage_burst_surfaces(360)
damage_burst_large_surfaces = _init_damage_burst_surfaces(720)
sticky_circle_surfaces = _init_sticky_circle_surfaces()
light_red_circle_surfaces = _init_light_red_circle_surfaces()
red_circle_surfaces = _init_red_circle_surfaces()
shield_surfaces = _init_shield_surfaces()
spawner_burst_surfaces = _init_spawner_burst_surfaces()
sapper_attack_surfaces = _init_sapper_attack_surfaces()
sapper_surfaces = _init_sapper_surfaces()
infection_surfaces = _init_infection_surfaces()
def add_effect(name, effects, x=0, y=0, radius=0):
if name in ('SmallHitLines', 'BigHitLines'):
effects.append(BulletHitLines(x, y, name))
elif name == 'LightRedCircle':
effects.append(SpriteEffect(x, y, light_red_circle_surfaces, 126))
elif name == 'RedCircle':
effects.append(SpriteEffect(x, y, red_circle_surfaces, 126))
elif name == 'StickyCircle':
effects.append(SpriteEffect(x, y, sticky_circle_surfaces, 108))
elif name == 'Shield':
effects.append(SpriteEffect(x, y, shield_surfaces, 452, fixed=True))
elif name == "StunBurst":
effects.append(SpriteEffect(x, y, stun_burst_surfaces, 397))
elif name == 'StunBurstLarge':
effects.append(SpriteEffect(x, y, stun_burst_large_surfaces, 397))
elif name == 'DamageBurst':
effects.append(SpriteEffect(x, y, damage_burst_surfaces, 253))
elif name == 'DamageBurstLarge':
effects.append(SpriteEffect(x, y, damage_burst_large_surfaces, 253))
elif name == "Conversion":
effects.append(SpriteEffect(x, y, conversion_surfaces, 344))
elif name == "Flash":
effects.append(SpriteEffect(SCR_W2, SCR_H2, flash_surfaces, 83, fixed=True))
elif name == 'StarsAroundMob':
effects.append(StarsAroundMob(x, y, radius))
elif name == "Teleport":
effects.append(SpriteEffect(x, y, teleport_surfaces, 193))
elif name == "SpawnerBurst":
effects.append(SpriteEffect(x, y, spawner_burst_surfaces, 108))
elif name == "SapperAttack":
effects.append(SpriteEffect(SCR_W2, SCR_H2, sapper_attack_surfaces, 144, fixed=True))
elif name == "LeechEffect":
effects.append(LeechEffect(x, y))
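# Hedged usage sketch (caller-side; names like hit_x are hypothetical):
#   effects = []
#   add_effect('SmallHitLines', effects, x=hit_x, y=hit_y)
#   for effect in effects: effect.update(dt)   # then effect.draw(screen, dx, dy)
#   effects[:] = [e for e in effects if e.running]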
__all__ = ["add_effect", "sapper_surfaces", "infection_surfaces"]
| [
"[email protected]"
] | |
205efccfb0036487f9f1a754630e7d68c6c5890a | 692b907d07eee8ce3ee32a1fda74b6d92fd6c548 | /tests/mock/server/v1_2_10.py | 324de6099e4309d6ca515aaeb22bc61c98bf0785 | [
"MIT"
] | permissive | AltusConsulting/dnacentercli | 04c9c7d00b25753a26c643994388dd4e23bf4c54 | 26ea46fdbd40fc30649ea1d8803158655aa545aa | refs/heads/master | 2022-12-16T04:50:30.076420 | 2020-07-17T22:12:39 | 2020-07-17T22:12:39 | 212,206,213 | 0 | 0 | MIT | 2022-12-08T06:39:49 | 2019-10-01T21:50:42 | Python | UTF-8 | Python | false | false | 237,343 | py | from http.server import BaseHTTPRequestHandler
import re
import json
import requests
class MockServerRequestHandler_v1_2_10(BaseHTTPRequestHandler):
AUTHENTICATION_ac8ae94c4e69a09d_PATTERN = re.compile(r"/dna/system/api/v1/auth/token")
TEMPLATE_PROGRAMMER_00aec9b1422ab27e_PATTERN = re.compile(r"/dna/intent/api/v1/template-programmer/project")
TEMPLATE_PROGRAMMER_109d1b4f4289aecd_PATTERN = re.compile(r"/dna/intent/api/v1/template-programmer/project")
TEMPLATE_PROGRAMMER_9480fa1f47ca9254_PATTERN = re.compile(r"/dna/intent/api/v1/template-programmer/project")
TEMPLATE_PROGRAMMER_d0a1abfa435b841d_PATTERN = re.compile(r"/dna/intent/api/v1/template-programmer/project/string")
TEMPLATE_PROGRAMMER_f6b119ad4d4aaf16_PATTERN = re.compile(r"/dna/intent/api/v1/template-programmer/project/string/template")
TEMPLATE_PROGRAMMER_01b09a254b9ab259_PATTERN = re.compile(r"/dna/intent/api/v1/template-programmer/template")
TEMPLATE_PROGRAMMER_7781fa0548a98342_PATTERN = re.compile(r"/dna/intent/api/v1/template-programmer/template")
TEMPLATE_PROGRAMMER_83a3b9404cb88787_PATTERN = re.compile(r"/dna/intent/api/v1/template-programmer/template/string")
TEMPLATE_PROGRAMMER_a7b42836408a8e74_PATTERN = re.compile(r"/dna/intent/api/v1/template-programmer/template/string")
TEMPLATE_PROGRAMMER_6099da82477b858a_PATTERN = re.compile(r"/dna/intent/api/v1/template-programmer/template/deploy")
TEMPLATE_PROGRAMMER_9c9a785741cbb41f_PATTERN = re.compile(r"/dna/intent/api/v1/template-programmer/template/deploy/status/string")
TEMPLATE_PROGRAMMER_f393abe84989bb48_PATTERN = re.compile(r"/dna/intent/api/v1/template-programmer/template/preview")
TEMPLATE_PROGRAMMER_62b05b2c40a9b216_PATTERN = re.compile(r"/dna/intent/api/v1/template-programmer/template/version")
TEMPLATE_PROGRAMMER_c8bf6b65414a9bc7_PATTERN = re.compile(r"/dna/intent/api/v1/template-programmer/template/version/string")
TAG_1399891c42a8be64_PATTERN = re.compile(r"/dna/intent/api/v1/tag")
TAG_4d86a993469a9da9_PATTERN = re.compile(r"/dna/intent/api/v1/tag")
TAG_ee9aab01487a8896_PATTERN = re.compile(r"/dna/intent/api/v1/tag")
TAG_429c28154bdaa13d_PATTERN = re.compile(r"/dna/intent/api/v1/tag/string")
TAG_c1a359b14c89b573_PATTERN = re.compile(r"/dna/intent/api/v1/tag/string")
TAG_00a2fa6146089317_PATTERN = re.compile(r"/dna/intent/api/v1/tag/string/member")
TAG_eab7abe048fb99ad_PATTERN = re.compile(r"/dna/intent/api/v1/tag/string/member")
TAG_caa3ea704d78b37e_PATTERN = re.compile(r"/dna/intent/api/v1/tag/string/member/string")
TAG_2e9db85840fbb1cf_PATTERN = re.compile(r"/dna/intent/api/v1/tag/string/member/count")
TAG_8091a9b84bfba53b_PATTERN = re.compile(r"/dna/intent/api/v1/tag/count")
TAG_45bc7a8344a8bc1e_PATTERN = re.compile(r"/dna/intent/api/v1/tag/member")
TAG_4695090d403b8eaa_PATTERN = re.compile(r"/dna/intent/api/v1/tag/member/type")
NETWORK_DISCOVERY_55b439dc4239b140_PATTERN = re.compile(r"/dna/intent/api/v1/discovery")
NETWORK_DISCOVERY_9788b8fc4418831d_PATTERN = re.compile(r"/dna/intent/api/v1/discovery")
NETWORK_DISCOVERY_db8e09234a988bab_PATTERN = re.compile(r"/dna/intent/api/v1/discovery")
NETWORK_DISCOVERY_4c8cab5f435a80f4_PATTERN = re.compile(r"/dna/intent/api/v1/discovery/string")
NETWORK_DISCOVERY_63bb88b74f59aa17_PATTERN = re.compile(r"/dna/intent/api/v1/discovery/string")
NETWORK_DISCOVERY_99872a134d0a9fb4_PATTERN = re.compile(r"/dna/intent/api/v1/discovery/string/job")
NETWORK_DISCOVERY_f6ac994f451ba011_PATTERN = re.compile(r"/dna/intent/api/v1/discovery/string/network-device")
NETWORK_DISCOVERY_a6b798ab4acaa34e_PATTERN = re.compile(r"/dna/intent/api/v1/discovery/string/network-device/0/0")
NETWORK_DISCOVERY_a6965b454c9a8663_PATTERN = re.compile(r"/dna/intent/api/v1/discovery/string/network-device/count")
NETWORK_DISCOVERY_3d9b99c343398a27_PATTERN = re.compile(r"/dna/intent/api/v1/discovery/string/summary")
NETWORK_DISCOVERY_c1ba9a424c08a01b_PATTERN = re.compile(r"/dna/intent/api/v1/discovery/0/0")
NETWORK_DISCOVERY_33b799d04d0a8907_PATTERN = re.compile(r"/dna/intent/api/v1/discovery/0/0")
NETWORK_DISCOVERY_069d9823451b892d_PATTERN = re.compile(r"/dna/intent/api/v1/discovery/count")
NETWORK_DISCOVERY_a4967be64dfaaa1a_PATTERN = re.compile(r"/dna/intent/api/v1/discovery/job")
NETWORK_DISCOVERY_ff816b8e435897eb_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential")
NETWORK_DISCOVERY_709fda3c42b8877a_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/string")
NETWORK_DISCOVERY_f5ac590c4ca9975a_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/string")
NETWORK_DISCOVERY_58a3699e489b9529_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/string")
NETWORK_DISCOVERY_948ea8194348bc0b_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/cli")
NETWORK_DISCOVERY_fba0d80747eb82e8_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/cli")
NETWORK_DISCOVERY_bf859ac64a0ba19c_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/http-read")
NETWORK_DISCOVERY_89b36b4649999d81_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/http-read")
NETWORK_DISCOVERY_4d9ca8e2431a8a24_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/http-write")
NETWORK_DISCOVERY_b68a6bd8473a9a25_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/http-write")
NETWORK_DISCOVERY_17929bc7465bb564_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/netconf")
NETWORK_DISCOVERY_c5acd9fa4c1a8abc_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/netconf")
NETWORK_DISCOVERY_7aa3da9d4e098ef2_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/snmpv2-read-community")
NETWORK_DISCOVERY_47a1b84b4e1b8044_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/snmpv2-read-community")
NETWORK_DISCOVERY_10b06a6a4f7bb3cb_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/snmpv2-write-community")
NETWORK_DISCOVERY_6bacb8d14639bdc7_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/snmpv2-write-community")
NETWORK_DISCOVERY_1da5ebdd434aacfe_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/snmpv3")
NETWORK_DISCOVERY_979688084b7ba60d_PATTERN = re.compile(r"/dna/intent/api/v1/global-credential/snmpv3")
NETWORK_DISCOVERY_44974ba5435a801d_PATTERN = re.compile(r"/dna/intent/api/v1/snmp-property")
NETWORK_DISCOVERY_a5ac99774c6bb541_PATTERN = re.compile(r"/dna/intent/api/v1/snmp-property")
TASK_e78bb8a2449b9eed_PATTERN = re.compile(r"/dna/intent/api/v1/task")
TASK_a1a9387346ba92b1_PATTERN = re.compile(r"/dna/intent/api/v1/task/string")
TASK_f5a269c44f2a95fa_PATTERN = re.compile(r"/dna/intent/api/v1/task/string/tree")
TASK_26b44ab04649a183_PATTERN = re.compile(r"/dna/intent/api/v1/task/count")
TASK_e487f8d3481b94f2_PATTERN = re.compile(r"/dna/intent/api/v1/task/operation/string/0/0")
COMMAND_RUNNER_33bb2b9d40199e14_PATTERN = re.compile(r"/dna/intent/api/v1/network-device-poller/cli/legit-reads")
COMMAND_RUNNER_d6b8ca774739adf4_PATTERN = re.compile(r"/dna/intent/api/v1/network-device-poller/cli/read-request")
FILE_9698c8ec4a0b8c1a_PATTERN = re.compile(r"/dna/intent/api/v1/file/string")
FILE_3f89bbfc4f6b8b50_PATTERN = re.compile(r"/dna/intent/api/v1/file/namespace")
FILE_42b6a86e44b8bdfc_PATTERN = re.compile(r"/dna/intent/api/v1/file/namespace/string")
PATH_TRACE_55bc3bf94e38b6ff_PATTERN = re.compile(r"/dna/intent/api/v1/flow-analysis")
PATH_TRACE_a395fae644ca899c_PATTERN = re.compile(r"/dna/intent/api/v1/flow-analysis")
PATH_TRACE_8a9d2b76443b914e_PATTERN = re.compile(r"/dna/intent/api/v1/flow-analysis/string")
PATH_TRACE_7ab9a8bd4f3b86a4_PATTERN = re.compile(r"/dna/intent/api/v1/flow-analysis/string")
SWIM_fb9beb664f2aba4c_PATTERN = re.compile(r"/dna/intent/api/v1/image/activation/device")
SWIM_8cb6783b4faba1f4_PATTERN = re.compile(r"/dna/intent/api/v1/image/distribution")
SWIM_0c8f7a0b49b9aedd_PATTERN = re.compile(r"/dna/intent/api/v1/image/importation")
SWIM_4dbe3bc743a891bc_PATTERN = re.compile(r"/dna/intent/api/v1/image/importation/source/file")
SWIM_bc8aab4746ca883d_PATTERN = re.compile(r"/dna/intent/api/v1/image/importation/source/url")
PNP_e6b3db8046c99654_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-device")
PNP_f3b26b5544cabab9_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-device")
PNP_09b0f9ce4239ae10_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-device/string")
PNP_bab6c9e5440885cc_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-device/string")
PNP_cdab9b474899ae06_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-device/string")
PNP_d8a619974a8a8c48_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-device/claim")
PNP_d9a1fa9c4068b23c_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-device/count")
PNP_f09319674049a7d4_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-device/history")
PNP_21a6db2540298f55_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-device/import")
PNP_9e857b5a4a0bbcdb_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-device/reset")
PNP_0a9c988445cb91c8_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-device/sacct/string/vacct/string/sync-result")
PNP_5889fb844939a13b_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-device/site-claim")
PNP_cf9418234d9ab37e_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-device/site-config-preview")
PNP_0b836b7b4b6a9fd5_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-device/unclaim")
PNP_a4b6c87a4ffb9efa_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-device/vacct-sync")
PNP_8da0391947088a5a_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-settings")
PNP_7e92f9eb46db8320_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-settings")
PNP_3cb24acb486b89d2_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-settings/sacct")
PNP_70a479a6462a9496_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-settings/sacct/string/vacct")
PNP_1e962af345b8b59f_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-settings/savacct")
PNP_6f9819e84178870c_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-settings/savacct")
PNP_2499e9ad42e8ae5b_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-settings/vacct")
PNP_aeb4dad04a99bbe3_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-workflow")
PNP_848b5a7b4f9b8c12_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-workflow")
PNP_3086c9624f498b85_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-workflow/string")
PNP_80acb88e4ac9ac6d_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-workflow/string")
PNP_af8d7b0e470b8ae2_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-workflow/string")
PNP_7989f86846faaf99_PATTERN = re.compile(r"/dna/intent/api/v1/onboarding/pnp-workflow/count")
SITE_PROFILE_828828f44f28bd0d_PATTERN = re.compile(r"/dna/intent/api/v1/business/nfv")
SITE_PROFILE_7fbe4b804879baa4_PATTERN = re.compile(r"/dna/intent/api/v1/business/nfv/provisioningDetail")
DEVICES_89b2fb144f5bb09b_PATTERN = re.compile(r"/dna/intent/api/v1/device-detail")
DEVICES_f5947a4c439a8bf0_PATTERN = re.compile(r"/dna/intent/api/v1/interface")
DEVICES_b888792d43baba46_PATTERN = re.compile(r"/dna/intent/api/v1/interface/string")
DEVICES_3d923b184dc9a4ca_PATTERN = re.compile(r"/dna/intent/api/v1/interface/count")
DEVICES_cd8469e647caab0e_PATTERN = re.compile(r"/dna/intent/api/v1/interface/ip-address/string")
DEVICES_84ad8b0e42cab48a_PATTERN = re.compile(r"/dna/intent/api/v1/interface/isis")
DEVICES_ba9dc85b4b8a9a17_PATTERN = re.compile(r"/dna/intent/api/v1/interface/network-device/string")
DEVICES_349c888443b89a58_PATTERN = re.compile(r"/dna/intent/api/v1/interface/network-device/string/0/0")
DEVICES_5b8639224cd88ea7_PATTERN = re.compile(r"/dna/intent/api/v1/interface/network-device/string/count")
DEVICES_4eb56a614cc9a2d2_PATTERN = re.compile(r"/dna/intent/api/v1/interface/network-device/string/interface-name")
DEVICES_70ad397649e9b4d3_PATTERN = re.compile(r"/dna/intent/api/v1/interface/ospf")
DEVICES_20b19b52464b8972_PATTERN = re.compile(r"/dna/intent/api/v1/network-device")
DEVICES_4bb22af046fa8f08_PATTERN = re.compile(r"/dna/intent/api/v1/network-device")
DEVICES_aeb9eb67460b92df_PATTERN = re.compile(r"/dna/intent/api/v1/network-device")
DEVICES_1c894b5848eab214_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/string")
DEVICES_8fa8eb404a4a8d96_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/string")
DEVICES_819f9aa54feab7bf_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/string/brief")
DEVICES_82918a1b4d289c5c_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/string/collection-schedule")
DEVICES_84b37ae54c59ab28_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/string/meraki-organization")
DEVICES_288df9494f2a9746_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/string/vlan")
DEVICES_f6826a8e41bba242_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/string/wireless-info")
DEVICES_84b33a9e480abcaf_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/string/config")
DEVICES_f49548c54be8a3e2_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/0/0")
DEVICES_ffa748cc44e9a437_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/autocomplete")
DEVICES_b9855ad54ae98156_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/brief")
DEVICES_38bd0b884b89a785_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/collection-schedule/global")
DEVICES_b7bcaa084e2b90d0_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/config")
DEVICES_888f585c49b88441_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/config/count")
DEVICES_5db21b8e43fab7d8_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/count")
DEVICES_cd98780f4888a66d_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/file")
DEVICES_c3b3c9ef4e6b8a09_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/functional-capability")
DEVICES_81bb4804405a8d2f_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/functional-capability/string")
DEVICES_d0a4b88145aabb51_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/ip-address/string")
DEVICES_eb8249e34f69b0f1_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/module")
DEVICES_0db7da744c0b83d8_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/module/string")
DEVICES_8db939744649a782_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/module/count")
DEVICES_d888ab6d4d59a8c1_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/serial-number/string")
DEVICES_3b9ef9674429be4c_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/sync")
DEVICES_c9809b6744f8a502_PATTERN = re.compile(r"/dna/intent/api/v1/network-device/tenantinfo/macaddress")
SITES_17a82ac94cf99ab0_PATTERN = re.compile(r"/dna/intent/api/v1/site-health")
SITES_eeb168eb41988e07_PATTERN = re.compile(r"/dna/intent/api/v1/site/string/device")
SITES_50b589fd4c7a930a_PATTERN = re.compile(r"/dna/system/api/v1/site")
NETWORKS_ca91da84401abba1_PATTERN = re.compile(r"/dna/intent/api/v1/network-health")
NETWORKS_b9b48ac8463a8aba_PATTERN = re.compile(r"/dna/intent/api/v1/topology/l2/string")
NETWORKS_c2b5fb764d888375_PATTERN = re.compile(r"/dna/intent/api/v1/topology/l3/string")
NETWORKS_b2b8cb91459aa58f_PATTERN = re.compile(r"/dna/intent/api/v1/topology/physical-topology")
NETWORKS_9ba14a9e441b8a60_PATTERN = re.compile(r"/dna/intent/api/v1/topology/site-topology")
NETWORKS_6284db4649aa8d31_PATTERN = re.compile(r"/dna/intent/api/v1/topology/vlan/vlan-names")
CLIENTS_e2adba7943bab3e9_PATTERN = re.compile(r"/dna/intent/api/v1/client-detail")
CLIENTS_149aa93b4ddb80dd_PATTERN = re.compile(r"/dna/intent/api/v1/client-health")
NON_FABRIC_WIRELESS_db9f997f4e59aec1_PATTERN = re.compile(r"/dna/intent/api/v1/business/ssid")
NON_FABRIC_WIRELESS_cca098344a489dfa_PATTERN = re.compile(r"/dna/intent/api/v1/business/ssid/string/string")
NON_FABRIC_WIRELESS_8a96fb954d09a349_PATTERN = re.compile(r"/dna/intent/api/v1/enterprise-ssid")
NON_FABRIC_WIRELESS_cca519ba45ebb423_PATTERN = re.compile(r"/dna/intent/api/v1/enterprise-ssid")
NON_FABRIC_WIRELESS_c7a6592b4b98a369_PATTERN = re.compile(r"/dna/intent/api/v1/enterprise-ssid/string")
FABRIC_WIRED_bead7b3443b996a7_PATTERN = re.compile(r"/dna/intent/api/v1/business/border-device")
FABRIC_WIRED_98a39bf4485a9871_PATTERN = re.compile(r"/dna/intent/api/v1/business/border-device/string")
FABRIC_WIRED_cb81b93540baaab0_PATTERN = re.compile(r"/dna/intent/api/v1/business/border-device/string")
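    # The hexadecimal suffixes in the attribute names above appear to be the
    # DNA Center operation IDs for each endpoint; every pattern is matched
    # against the raw request path by the corresponding matches_* predicate
    # defined below.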
def matches_AUTHENTICATION_ac8ae94c4e69a09d(self):
return re.search(
self.AUTHENTICATION_ac8ae94c4e69a09d_PATTERN,
self.path
)
def authentication_authentication_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({"Token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJzdWIiOiI1ZWNmZDViMjc1MTYxMjAwY2M1NzI3ZGEiLCJhdXRoU291cmNlIjoiaW50ZXJuYWwiLCJ0ZW5hbnROYW1lIjoiVE5UMCIsInJvbGVzIjpbIjVlNWE0MzI2NzUxNjEyMDBjYzRhYzk2MyJdLCJ0ZW5hbnRJZCI6IjVlNWE0MzI1NzUxNjEyMDBjYzRhYzk1YyIsImV4cCI6MTU5NDM1NTA1NCwiaWF0IjoxNTk0MzUxNDU0LCJqdGkiOiJkYjdhODcyZC1mNzI3LTRhODUtOWU1NC00YzM4NzM0YmFjMDkiLCJ1c2VybmFtZSI6ImRldm5ldHVzZXIifQ.WuKZUPJZgqZeKCG9UZ_C22Up1Yp7CKbImjmc9Is0xEuiy2TsB07Jl7Ov__oabNhuM2KjQyrj7k62zaopg7GyC3JGkpU7-vhYdy2c1aIBLoeeEYKOJocEE-ImUeVtFqo3md3lzMVn9hdfwQkyIuU_GwXHrDrxXY9umHKiWm9aGuP1VgRpqJKxTTsHF2iLQjmgVNHon4qqBv3McjlDNZ5nBVUzvO143xQ0ztHjebFrGGBogCt4hTVbqTdaFLowW6ovdA2qt6gktjr709gkZUkxLfa5Ntbt7DjQ-HmSTZmZHIItf2RVx9P3ENvr9RQFAQ5nWCr-rMeXceyWKr9uj75Oeg"})
self.wfile.write(response_content.encode('utf-8'))
return
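    # Every endpoint below repeats the same two-method pairing as the
    # authentication handler above: a matches_* predicate that tests
    # self.path against the compiled pattern, and a *_response method that
    # writes the canned JSON payload. Presumably the do_GET/do_POST/do_PUT/
    # do_DELETE overrides (later in the full module) route requests to these
    # pairs; that dispatch code falls outside this excerpt.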
def matches_TEMPLATE_PROGRAMMER_00aec9b1422ab27e(self):
return re.search(
self.TEMPLATE_PROGRAMMER_00aec9b1422ab27e_PATTERN,
self.path
)
def template_programmer_create_project_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TEMPLATE_PROGRAMMER_109d1b4f4289aecd(self):
return re.search(
self.TEMPLATE_PROGRAMMER_109d1b4f4289aecd_PATTERN,
self.path
)
def template_programmer_get_projects_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps([{'name': 'string', 'id': 'string', 'templates': [{'name': 'string', 'composite': True, 'id': 'string'}]}])
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TEMPLATE_PROGRAMMER_9480fa1f47ca9254(self):
return re.search(
self.TEMPLATE_PROGRAMMER_9480fa1f47ca9254_PATTERN,
self.path
)
def template_programmer_update_project_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TEMPLATE_PROGRAMMER_d0a1abfa435b841d(self):
return re.search(
self.TEMPLATE_PROGRAMMER_d0a1abfa435b841d_PATTERN,
self.path
)
def template_programmer_delete_project_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TEMPLATE_PROGRAMMER_f6b119ad4d4aaf16(self):
return re.search(
self.TEMPLATE_PROGRAMMER_f6b119ad4d4aaf16_PATTERN,
self.path
)
def template_programmer_create_template_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TEMPLATE_PROGRAMMER_01b09a254b9ab259(self):
return re.search(
self.TEMPLATE_PROGRAMMER_01b09a254b9ab259_PATTERN,
self.path
)
def template_programmer_gets_the_templates_available_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TEMPLATE_PROGRAMMER_7781fa0548a98342(self):
return re.search(
self.TEMPLATE_PROGRAMMER_7781fa0548a98342_PATTERN,
self.path
)
def template_programmer_update_template_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TEMPLATE_PROGRAMMER_83a3b9404cb88787(self):
return re.search(
self.TEMPLATE_PROGRAMMER_83a3b9404cb88787_PATTERN,
self.path
)
def template_programmer_get_template_details_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'author': 'string', 'composite': True, 'containingTemplates': [{'composite': True, 'id': 'string', 'name': 'string', 'version': 'string'}], 'createTime': 0, 'description': 'string', 'deviceTypes': [{'productFamily': 'string', 'productSeries': 'string', 'productType': 'string'}], 'failurePolicy': 'ABORT_ON_ERROR', 'id': 'string', 'lastUpdateTime': 0, 'name': 'string', 'parentTemplateId': 'string', 'projectId': 'string', 'projectName': 'string', 'rollbackTemplateContent': 'string', 'rollbackTemplateParams': [{'binding': 'string', 'dataType': 'STRING', 'defaultValue': 'string', 'description': 'string', 'displayName': 'string', 'group': 'string', 'id': 'string', 'instructionText': 'string', 'key': 'string', 'notParam': True, 'order': 0, 'paramArray': True, 'parameterName': 'string', 'provider': 'string', 'range': [{'id': 'string', 'maxValue': 0, 'minValue': 0}], 'required': True, 'selection': {'id': 'string', 'selectionType': 'SINGLE_SELECT', 'selectionValues': {}}}], 'softwareType': 'string', 'softwareVariant': 'string', 'softwareVersion': 'string', 'tags': ['string'], 'templateContent': 'string', 'templateParams': [{'binding': 'string', 'dataType': 'STRING', 'defaultValue': 'string', 'description': 'string', 'displayName': 'string', 'group': 'string', 'id': 'string', 'instructionText': 'string', 'key': 'string', 'notParam': True, 'order': 0, 'paramArray': True, 'parameterName': 'string', 'provider': 'string', 'range': [{'id': 'string', 'maxValue': 0, 'minValue': 0}], 'required': True, 'selection': {'id': 'string', 'selectionType': 'SINGLE_SELECT', 'selectionValues': {}}}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TEMPLATE_PROGRAMMER_a7b42836408a8e74(self):
return re.search(
self.TEMPLATE_PROGRAMMER_a7b42836408a8e74_PATTERN,
self.path
)
def template_programmer_delete_template_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TEMPLATE_PROGRAMMER_6099da82477b858a(self):
return re.search(
self.TEMPLATE_PROGRAMMER_6099da82477b858a_PATTERN,
self.path
)
def template_programmer_deploy_template_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'deploymentId': 'string', 'deploymentName': 'string', 'devices': [{'deviceId': 'string', 'duration': 'string', 'endTime': 'string', 'ipAddress': 'string', 'name': 'string', 'startTime': 'string', 'status': 'string'}], 'duration': 'string', 'endTime': 'string', 'projectName': 'string', 'startTime': 'string', 'status': 'string', 'templateName': 'string', 'templateVersion': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TEMPLATE_PROGRAMMER_9c9a785741cbb41f(self):
return re.search(
self.TEMPLATE_PROGRAMMER_9c9a785741cbb41f_PATTERN,
self.path
)
def template_programmer_get_template_deployment_status_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'deploymentId': 'string', 'deploymentName': 'string', 'devices': [{'deviceId': 'string', 'duration': 'string', 'endTime': 'string', 'ipAddress': 'string', 'name': 'string', 'startTime': 'string', 'status': 'string'}], 'duration': 'string', 'endTime': 'string', 'projectName': 'string', 'startTime': 'string', 'status': 'string', 'templateName': 'string', 'templateVersion': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TEMPLATE_PROGRAMMER_f393abe84989bb48(self):
return re.search(
self.TEMPLATE_PROGRAMMER_f393abe84989bb48_PATTERN,
self.path
)
def template_programmer_preview_template_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'cliPreview': 'string', 'templateId': 'string', 'validationErrors': {}})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TEMPLATE_PROGRAMMER_62b05b2c40a9b216(self):
return re.search(
self.TEMPLATE_PROGRAMMER_62b05b2c40a9b216_PATTERN,
self.path
)
def template_programmer_version_template_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TEMPLATE_PROGRAMMER_c8bf6b65414a9bc7(self):
return re.search(
self.TEMPLATE_PROGRAMMER_c8bf6b65414a9bc7_PATTERN,
self.path
)
def template_programmer_get_template_versions_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps([{'name': 'string', 'projectName': 'string', 'projectId': 'string', 'templateId': 'string', 'versionsInfo': [{'id': 'string', 'description': 'string', 'versionTime': 0}], 'composite': True}])
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TAG_1399891c42a8be64(self):
return re.search(
self.TAG_1399891c42a8be64_PATTERN,
self.path
)
def tag_create_tag_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'version': 'string', 'response': {'taskId': {}, 'url': 'string'}})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TAG_4d86a993469a9da9(self):
return re.search(
self.TAG_4d86a993469a9da9_PATTERN,
self.path
)
def tag_update_tag_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'version': 'string', 'response': {'taskId': {}, 'url': 'string'}})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TAG_ee9aab01487a8896(self):
return re.search(
self.TAG_ee9aab01487a8896_PATTERN,
self.path
)
def tag_get_tag_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'version': 'string', 'response': [{'systemTag': True, 'description': 'string', 'dynamicRules': [{'memberType': 'string', 'rules': {'values': ['string'], 'items': ['string'], 'operation': 'string', 'name': 'string', 'value': 'string'}}], 'name': 'string', 'id': 'string', 'instanceTenantId': 'string'}]})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TAG_429c28154bdaa13d(self):
return re.search(
self.TAG_429c28154bdaa13d_PATTERN,
self.path
)
def tag_delete_tag_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'version': 'string', 'response': {'taskId': {}, 'url': 'string'}})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TAG_c1a359b14c89b573(self):
return re.search(
self.TAG_c1a359b14c89b573_PATTERN,
self.path
)
def tag_get_tag_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'version': 'string', 'response': {'systemTag': True, 'description': 'string', 'dynamicRules': [{'memberType': 'string', 'rules': {'values': ['string'], 'items': ['string'], 'operation': 'string', 'name': 'string', 'value': 'string'}}], 'name': 'string', 'id': 'string', 'instanceTenantId': 'string'}})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TAG_00a2fa6146089317(self):
return re.search(
self.TAG_00a2fa6146089317_PATTERN,
self.path
)
def tag_add_members_to_the_tag_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'version': 'string', 'response': {'taskId': {}, 'url': 'string'}})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TAG_eab7abe048fb99ad(self):
return re.search(
self.TAG_eab7abe048fb99ad_PATTERN,
self.path
)
def tag_get_tag_members_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'version': 'string', 'response': [{'instanceUuid': 'string'}]})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TAG_caa3ea704d78b37e(self):
return re.search(
self.TAG_caa3ea704d78b37e_PATTERN,
self.path
)
def tag_remove_tag_member_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'version': 'string', 'response': {'taskId': {}, 'url': 'string'}})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TAG_2e9db85840fbb1cf(self):
return re.search(
self.TAG_2e9db85840fbb1cf_PATTERN,
self.path
)
def tag_get_tag_member_count_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'version': 'string', 'response': 0})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TAG_8091a9b84bfba53b(self):
return re.search(
self.TAG_8091a9b84bfba53b_PATTERN,
self.path
)
def tag_get_tag_count_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'version': 'string', 'response': 0})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TAG_45bc7a8344a8bc1e(self):
return re.search(
self.TAG_45bc7a8344a8bc1e_PATTERN,
self.path
)
def tag_updates_tag_membership_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'version': 'string', 'response': {'taskId': {}, 'url': 'string'}})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TAG_4695090d403b8eaa(self):
return re.search(
self.TAG_4695090d403b8eaa_PATTERN,
self.path
)
def tag_get_tag_resource_types_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'version': 'string', 'response': ['string']})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_55b439dc4239b140(self):
return re.search(
self.NETWORK_DISCOVERY_55b439dc4239b140_PATTERN,
self.path
)
def network_discovery_start_discovery_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_9788b8fc4418831d(self):
return re.search(
self.NETWORK_DISCOVERY_9788b8fc4418831d_PATTERN,
self.path
)
def network_discovery_updates_discovery_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_db8e09234a988bab(self):
return re.search(
self.NETWORK_DISCOVERY_db8e09234a988bab_PATTERN,
self.path
)
def network_discovery_delete_all_discovery_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_4c8cab5f435a80f4(self):
return re.search(
self.NETWORK_DISCOVERY_4c8cab5f435a80f4_PATTERN,
self.path
)
def network_discovery_delete_discovery_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_63bb88b74f59aa17(self):
return re.search(
self.NETWORK_DISCOVERY_63bb88b74f59aa17_PATTERN,
self.path
)
def network_discovery_get_discovery_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'attributeInfo': {}, 'cdpLevel': 0, 'deviceIds': 'string', 'discoveryCondition': 'string', 'discoveryStatus': 'string', 'discoveryType': 'string', 'enablePasswordList': 'string', 'globalCredentialIdList': ['string'], 'httpReadCredential': {'comments': 'string', 'credentialType': 'GLOBAL', 'description': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'password': 'string', 'port': 0, 'secure': True, 'username': 'string'}, 'httpWriteCredential': {'comments': 'string', 'credentialType': 'GLOBAL', 'description': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'password': 'string', 'port': 0, 'secure': True, 'username': 'string'}, 'id': 'string', 'ipAddressList': 'string', 'ipFilterList': 'string', 'isAutoCdp': True, 'lldpLevel': 0, 'name': 'string', 'netconfPort': 'string', 'numDevices': 0, 'parentDiscoveryId': 'string', 'passwordList': 'string', 'preferredMgmtIPMethod': 'string', 'protocolOrder': 'string', 'retryCount': 0, 'snmpAuthPassphrase': 'string', 'snmpAuthProtocol': 'string', 'snmpMode': 'string', 'snmpPrivPassphrase': 'string', 'snmpPrivProtocol': 'string', 'snmpRoCommunity': 'string', 'snmpRoCommunityDesc': 'string', 'snmpRwCommunity': 'string', 'snmpRwCommunityDesc': 'string', 'snmpUserName': 'string', 'timeOut': 0, 'updateMgmtIp': True, 'userNameList': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_99872a134d0a9fb4(self):
return re.search(
self.NETWORK_DISCOVERY_99872a134d0a9fb4_PATTERN,
self.path
)
def network_discovery_get_list_of_discoveries_by_discovery_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'attributeInfo': {}, 'cliStatus': 'string', 'discoveryStatus': 'string', 'endTime': 'string', 'httpStatus': 'string', 'id': 'string', 'inventoryCollectionStatus': 'string', 'inventoryReachabilityStatus': 'string', 'ipAddress': 'string', 'jobStatus': 'string', 'name': 'string', 'netconfStatus': 'string', 'pingStatus': 'string', 'snmpStatus': 'string', 'startTime': 'string', 'taskId': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_f6ac994f451ba011(self):
return re.search(
self.NETWORK_DISCOVERY_f6ac994f451ba011_PATTERN,
self.path
)
def network_discovery_get_discovered_network_devices_by_discovery_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'anchorWlcForAp': 'string', 'authModelId': 'string', 'avgUpdateFrequency': 0, 'bootDateTime': 'string', 'cliStatus': 'string', 'duplicateDeviceId': 'string', 'errorCode': 'string', 'errorDescription': 'string', 'family': 'string', 'hostname': 'string', 'httpStatus': 'string', 'id': 'string', 'imageName': 'string', 'ingressQueueConfig': 'string', 'interfaceCount': 'string', 'inventoryCollectionStatus': 'string', 'inventoryReachabilityStatus': 'string', 'lastUpdated': 'string', 'lineCardCount': 'string', 'lineCardId': 'string', 'location': 'string', 'locationName': 'string', 'macAddress': 'string', 'managementIpAddress': 'string', 'memorySize': 'string', 'netconfStatus': 'string', 'numUpdates': 0, 'pingStatus': 'string', 'platformId': 'string', 'portRange': 'string', 'qosStatus': 'string', 'reachabilityFailureReason': 'string', 'reachabilityStatus': 'string', 'role': 'string', 'roleSource': 'string', 'serialNumber': 'string', 'snmpContact': 'string', 'snmpLocation': 'string', 'snmpStatus': 'string', 'softwareVersion': 'string', 'tag': 'string', 'tagCount': 0, 'type': 'string', 'upTime': 'string', 'vendor': 'string', 'wlcApDeviceStatus': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_a6b798ab4acaa34e(self):
return re.search(
self.NETWORK_DISCOVERY_a6b798ab4acaa34e_PATTERN,
self.path
)
def network_discovery_get_discovered_devices_by_range_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'anchorWlcForAp': 'string', 'authModelId': 'string', 'avgUpdateFrequency': 0, 'bootDateTime': 'string', 'cliStatus': 'string', 'duplicateDeviceId': 'string', 'errorCode': 'string', 'errorDescription': 'string', 'family': 'string', 'hostname': 'string', 'httpStatus': 'string', 'id': 'string', 'imageName': 'string', 'ingressQueueConfig': 'string', 'interfaceCount': 'string', 'inventoryCollectionStatus': 'string', 'inventoryReachabilityStatus': 'string', 'lastUpdated': 'string', 'lineCardCount': 'string', 'lineCardId': 'string', 'location': 'string', 'locationName': 'string', 'macAddress': 'string', 'managementIpAddress': 'string', 'memorySize': 'string', 'netconfStatus': 'string', 'numUpdates': 0, 'pingStatus': 'string', 'platformId': 'string', 'portRange': 'string', 'qosStatus': 'string', 'reachabilityFailureReason': 'string', 'reachabilityStatus': 'string', 'role': 'string', 'roleSource': 'string', 'serialNumber': 'string', 'snmpContact': 'string', 'snmpLocation': 'string', 'snmpStatus': 'string', 'softwareVersion': 'string', 'tag': 'string', 'tagCount': 0, 'type': 'string', 'upTime': 'string', 'vendor': 'string', 'wlcApDeviceStatus': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_a6965b454c9a8663(self):
return re.search(
self.NETWORK_DISCOVERY_a6965b454c9a8663_PATTERN,
self.path
)
def network_discovery_get_devices_discovered_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': 0, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_3d9b99c343398a27(self):
return re.search(
self.NETWORK_DISCOVERY_3d9b99c343398a27_PATTERN,
self.path
)
def network_discovery_get_network_devices_from_discovery_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': 0, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_c1ba9a424c08a01b(self):
return re.search(
self.NETWORK_DISCOVERY_c1ba9a424c08a01b_PATTERN,
self.path
)
def network_discovery_delete_discovery_by_specified_range_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_33b799d04d0a8907(self):
return re.search(
self.NETWORK_DISCOVERY_33b799d04d0a8907_PATTERN,
self.path
)
def network_discovery_get_discoveries_by_range_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'attributeInfo': {}, 'cdpLevel': 0, 'deviceIds': 'string', 'discoveryCondition': 'string', 'discoveryStatus': 'string', 'discoveryType': 'string', 'enablePasswordList': 'string', 'globalCredentialIdList': ['string'], 'httpReadCredential': {'comments': 'string', 'credentialType': 'GLOBAL', 'description': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'password': 'string', 'port': 0, 'secure': True, 'username': 'string'}, 'httpWriteCredential': {'comments': 'string', 'credentialType': 'GLOBAL', 'description': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'password': 'string', 'port': 0, 'secure': True, 'username': 'string'}, 'id': 'string', 'ipAddressList': 'string', 'ipFilterList': 'string', 'isAutoCdp': True, 'lldpLevel': 0, 'name': 'string', 'netconfPort': 'string', 'numDevices': 0, 'parentDiscoveryId': 'string', 'passwordList': 'string', 'preferredMgmtIPMethod': 'string', 'protocolOrder': 'string', 'retryCount': 0, 'snmpAuthPassphrase': 'string', 'snmpAuthProtocol': 'string', 'snmpMode': 'string', 'snmpPrivPassphrase': 'string', 'snmpPrivProtocol': 'string', 'snmpRoCommunity': 'string', 'snmpRoCommunityDesc': 'string', 'snmpRwCommunity': 'string', 'snmpRwCommunityDesc': 'string', 'snmpUserName': 'string', 'timeOut': 0, 'updateMgmtIp': True, 'userNameList': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_069d9823451b892d(self):
return re.search(
self.NETWORK_DISCOVERY_069d9823451b892d_PATTERN,
self.path
)
def network_discovery_get_count_of_all_discovery_jobs_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': 0, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_a4967be64dfaaa1a(self):
return re.search(
self.NETWORK_DISCOVERY_a4967be64dfaaa1a_PATTERN,
self.path
)
def network_discovery_get_discovery_jobs_by_ip_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'attributeInfo': {}, 'cliStatus': 'string', 'discoveryStatus': 'string', 'endTime': 'string', 'httpStatus': 'string', 'id': 'string', 'inventoryCollectionStatus': 'string', 'inventoryReachabilityStatus': 'string', 'ipAddress': 'string', 'jobStatus': 'string', 'name': 'string', 'netconfStatus': 'string', 'pingStatus': 'string', 'snmpStatus': 'string', 'startTime': 'string', 'taskId': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_ff816b8e435897eb(self):
return re.search(
self.NETWORK_DISCOVERY_ff816b8e435897eb_PATTERN,
self.path
)
def network_discovery_get_global_credentials_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'comments': 'string', 'credentialType': 'GLOBAL', 'description': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_709fda3c42b8877a(self):
return re.search(
self.NETWORK_DISCOVERY_709fda3c42b8877a_PATTERN,
self.path
)
def network_discovery_update_global_credentials_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_f5ac590c4ca9975a(self):
return re.search(
self.NETWORK_DISCOVERY_f5ac590c4ca9975a_PATTERN,
self.path
)
def network_discovery_delete_global_credentials_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_58a3699e489b9529(self):
return re.search(
self.NETWORK_DISCOVERY_58a3699e489b9529_PATTERN,
self.path
)
def network_discovery_get_credential_sub_type_by_credential_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': 'string', 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_948ea8194348bc0b(self):
return re.search(
self.NETWORK_DISCOVERY_948ea8194348bc0b_PATTERN,
self.path
)
def network_discovery_create_cli_credentials_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_fba0d80747eb82e8(self):
return re.search(
self.NETWORK_DISCOVERY_fba0d80747eb82e8_PATTERN,
self.path
)
def network_discovery_update_cli_credentials_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_bf859ac64a0ba19c(self):
return re.search(
self.NETWORK_DISCOVERY_bf859ac64a0ba19c_PATTERN,
self.path
)
def network_discovery_create_http_read_credentials_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_89b36b4649999d81(self):
return re.search(
self.NETWORK_DISCOVERY_89b36b4649999d81_PATTERN,
self.path
)
def network_discovery_update_http_read_credential_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_4d9ca8e2431a8a24(self):
return re.search(
self.NETWORK_DISCOVERY_4d9ca8e2431a8a24_PATTERN,
self.path
)
def network_discovery_create_http_write_credentials_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_b68a6bd8473a9a25(self):
return re.search(
self.NETWORK_DISCOVERY_b68a6bd8473a9a25_PATTERN,
self.path
)
def network_discovery_update_http_write_credentials_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_17929bc7465bb564(self):
return re.search(
self.NETWORK_DISCOVERY_17929bc7465bb564_PATTERN,
self.path
)
def network_discovery_create_netconf_credentials_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_c5acd9fa4c1a8abc(self):
return re.search(
self.NETWORK_DISCOVERY_c5acd9fa4c1a8abc_PATTERN,
self.path
)
def network_discovery_update_netconf_credentials_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_7aa3da9d4e098ef2(self):
return re.search(
self.NETWORK_DISCOVERY_7aa3da9d4e098ef2_PATTERN,
self.path
)
def network_discovery_create_snmp_read_community_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_47a1b84b4e1b8044(self):
return re.search(
self.NETWORK_DISCOVERY_47a1b84b4e1b8044_PATTERN,
self.path
)
def network_discovery_update_snmp_read_community_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_10b06a6a4f7bb3cb(self):
return re.search(
self.NETWORK_DISCOVERY_10b06a6a4f7bb3cb_PATTERN,
self.path
)
def network_discovery_update_snmp_write_community_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_6bacb8d14639bdc7(self):
return re.search(
self.NETWORK_DISCOVERY_6bacb8d14639bdc7_PATTERN,
self.path
)
def network_discovery_create_snmp_write_community_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_1da5ebdd434aacfe(self):
return re.search(
self.NETWORK_DISCOVERY_1da5ebdd434aacfe_PATTERN,
self.path
)
def network_discovery_update_snmpv3_credentials_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_979688084b7ba60d(self):
return re.search(
self.NETWORK_DISCOVERY_979688084b7ba60d_PATTERN,
self.path
)
def network_discovery_create_snmpv3_credentials_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_44974ba5435a801d(self):
return re.search(
self.NETWORK_DISCOVERY_44974ba5435a801d_PATTERN,
self.path
)
def network_discovery_get_snmp_properties_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'intValue': 0, 'systemPropertyName': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORK_DISCOVERY_a5ac99774c6bb541(self):
return re.search(
self.NETWORK_DISCOVERY_a5ac99774c6bb541_PATTERN,
self.path
)
def network_discovery_create_update_snmp_properties_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
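    # --- Task API handlers ---
    # Every mocked endpoint in this class follows the same two-method
    # pattern: a matches_<API>_<id> predicate runs re.search with the
    # corresponding *_PATTERN attribute against the request path, and the
    # paired *_response method writes a 200 OK with a canned JSON body whose
    # placeholder values ('string', 0, True, {}) mirror the shape of the
    # expected response schema.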
def matches_TASK_e78bb8a2449b9eed(self):
return re.search(
self.TASK_e78bb8a2449b9eed_PATTERN,
self.path
)
def task_get_tasks_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'additionalStatusURL': 'string', 'data': 'string', 'endTime': 'string', 'errorCode': 'string', 'errorKey': 'string', 'failureReason': 'string', 'id': 'string', 'instanceTenantId': 'string', 'isError': True, 'lastUpdate': 'string', 'operationIdList': {}, 'parentId': 'string', 'progress': 'string', 'rootId': 'string', 'serviceType': 'string', 'startTime': 'string', 'username': 'string', 'version': 0}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TASK_a1a9387346ba92b1(self):
return re.search(
self.TASK_a1a9387346ba92b1_PATTERN,
self.path
)
def task_get_task_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'additionalStatusURL': 'string', 'data': 'string', 'endTime': 'string', 'errorCode': 'string', 'errorKey': 'string', 'failureReason': 'string', 'id': 'string', 'instanceTenantId': 'string', 'isError': True, 'lastUpdate': 'string', 'operationIdList': {}, 'parentId': 'string', 'progress': 'string', 'rootId': 'string', 'serviceType': 'string', 'startTime': 'string', 'username': 'string', 'version': 0}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TASK_f5a269c44f2a95fa(self):
return re.search(
self.TASK_f5a269c44f2a95fa_PATTERN,
self.path
)
def task_get_task_tree_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'additionalStatusURL': 'string', 'data': 'string', 'endTime': 'string', 'errorCode': 'string', 'errorKey': 'string', 'failureReason': 'string', 'id': 'string', 'instanceTenantId': 'string', 'isError': True, 'lastUpdate': 'string', 'operationIdList': {}, 'parentId': 'string', 'progress': 'string', 'rootId': 'string', 'serviceType': 'string', 'startTime': 'string', 'username': 'string', 'version': 0}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_TASK_26b44ab04649a183(self):
return re.search(
self.TASK_26b44ab04649a183_PATTERN,
self.path
)
def task_get_task_count_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': 0, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
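    # A hypothetical client-side exchange against this mock (host, port and
    # exact URL are placeholders, not defined in this file):
    #   resp = requests.get('http://localhost:<port>/api/v1/task/count')
    #   resp.json()  # -> {'response': 0, 'version': 'string'}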
def matches_TASK_e487f8d3481b94f2(self):
return re.search(
self.TASK_e487f8d3481b94f2_PATTERN,
self.path
)
def task_get_task_by_operationid_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'additionalStatusURL': 'string', 'data': 'string', 'endTime': 'string', 'errorCode': 'string', 'errorKey': 'string', 'failureReason': 'string', 'id': 'string', 'instanceTenantId': 'string', 'isError': True, 'lastUpdate': 'string', 'operationIdList': {}, 'parentId': 'string', 'progress': 'string', 'rootId': 'string', 'serviceType': 'string', 'startTime': 'string', 'username': 'string', 'version': 0}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
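    # --- Command Runner handlers (CLI keyword listing and read-only
    # command execution) ---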
def matches_COMMAND_RUNNER_33bb2b9d40199e14(self):
return re.search(
self.COMMAND_RUNNER_33bb2b9d40199e14_PATTERN,
self.path
)
def command_runner_get_all_keywords_of_clis_accepted_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': ['string'], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_COMMAND_RUNNER_d6b8ca774739adf4(self):
return re.search(
self.COMMAND_RUNNER_d6b8ca774739adf4_PATTERN,
self.path
)
def command_runner_run_read_only_commands_on_devices_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
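    # --- File service handlers ---
    # Note: the file-download mock replies with an empty JSON object rather
    # than actual file content.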
def matches_FILE_9698c8ec4a0b8c1a(self):
return re.search(
self.FILE_9698c8ec4a0b8c1a_PATTERN,
self.path
)
def file_download_a_file_by_fileid_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_FILE_3f89bbfc4f6b8b50(self):
return re.search(
self.FILE_3f89bbfc4f6b8b50_PATTERN,
self.path
)
def file_get_list_of_available_namespaces_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': ['string'], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_FILE_42b6a86e44b8bdfc(self):
return re.search(
self.FILE_42b6a86e44b8bdfc_PATTERN,
self.path
)
def file_get_list_of_files_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'attributeInfo': {}, 'downloadPath': 'string', 'encrypted': True, 'fileFormat': 'string', 'fileSize': 'string', 'id': 'string', 'md5Checksum': 'string', 'name': 'string', 'nameSpace': 'string', 'sftpServerList': [{}], 'sha1Checksum': 'string', 'taskId': {}}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
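    # --- Path Trace handlers ---
    # The 'retrives' spelling below is kept as-is so the method name stays
    # in sync with the endpoint identifier it mirrors; correcting the typo
    # locally would likely break that pairing.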
def matches_PATH_TRACE_55bc3bf94e38b6ff(self):
return re.search(
self.PATH_TRACE_55bc3bf94e38b6ff_PATTERN,
self.path
)
def path_trace_retrives_all_previous_pathtraces_summary_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'controlPath': True, 'createTime': 0, 'destIP': 'string', 'destPort': 'string', 'failureReason': 'string', 'id': 'string', 'inclusions': ['string'], 'lastUpdateTime': 0, 'periodicRefresh': True, 'protocol': 'string', 'sourceIP': 'string', 'sourcePort': 'string', 'status': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PATH_TRACE_a395fae644ca899c(self):
return re.search(
self.PATH_TRACE_a395fae644ca899c_PATTERN,
self.path
)
def path_trace_initiate_a_new_pathtrace_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'flowAnalysisId': 'string', 'taskId': 'string', 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PATH_TRACE_8a9d2b76443b914e(self):
return re.search(
self.PATH_TRACE_8a9d2b76443b914e_PATTERN,
self.path
)
def path_trace_deletes_pathtrace_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PATH_TRACE_7ab9a8bd4f3b86a4(self):
return re.search(
self.PATH_TRACE_7ab9a8bd4f3b86a4_PATTERN,
self.path
)
def path_trace_retrieves_previous_pathtrace_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
        response_content = json.dumps({'response': {'detailedStatus': {'aclTraceCalculation': 'string', 'aclTraceCalculationFailureReason': 'string'}, 'lastUpdate': 'string', 'networkElements': [{'accuracyList': [{'percent': 0, 'reason': 'string'}], 'detailedStatus': {'aclTraceCalculation': 'string', 'aclTraceCalculationFailureReason': 'string'}, 'deviceStatistics': {'cpuStatistics': {'fiveMinUsageInPercentage': 0, 'fiveSecsUsageInPercentage': 0, 'oneMinUsageInPercentage': 0, 'refreshedAt': 0}, 'memoryStatistics': {'memoryUsage': 0, 'refreshedAt': 0, 'totalMemory': 0}}, 'deviceStatsCollection': 'string', 'deviceStatsCollectionFailureReason': 'string', 'egressPhysicalInterface': {'aclAnalysis': {'aclName': 'string', 'matchingAces': [{'ace': 'string', 'matchingPorts': [{'ports': [{'destPorts': ['string'], 'sourcePorts': ['string']}], 'protocol': 'string'}], 'result': 'string'}], 'result': 'string'}, 'id': 'string', 'interfaceStatistics': {'adminStatus': 'string', 'inputPackets': 0, 'inputQueueCount': 0, 'inputQueueDrops': 0, 'inputQueueFlushes': 0, 'inputQueueMaxDepth': 0, 'inputRatebps': 0, 'operationalStatus': 'string', 'outputDrop': 0, 'outputPackets': 0, 'outputQueueCount': 0, 'outputQueueDepth': 0, 'outputRatebps': 0, 'refreshedAt': 0}, 'interfaceStatsCollection': 'string', 'interfaceStatsCollectionFailureReason': 'string', 'name': 'string', 'pathOverlayInfo': [{'controlPlane': 'string', 'dataPacketEncapsulation': 'string', 'destIp': 'string', 'destPort': 'string', 'protocol': 'string', 'sourceIp': 'string', 'sourcePort': 'string', 'vxlanInfo': {'dscp': 'string', 'vnid': 'string'}}], 'qosStatistics': [{'classMapName': 'string', 'dropRate': 0, 'numBytes': 0, 'numPackets': 0, 'offeredRate': 0, 'queueBandwidthbps': 'string', 'queueDepth': 0, 'queueNoBufferDrops': 0, 'queueTotalDrops': 0, 'refreshedAt': 0}], 'qosStatsCollection': 'string', 'qosStatsCollectionFailureReason': 'string', 'usedVlan': 'string', 'vrfName': 'string'}, 'egressVirtualInterface': {'aclAnalysis': {'aclName': 'string', 'matchingAces': [{'ace': 'string', 'matchingPorts': [{'ports': [{'destPorts': ['string'], 'sourcePorts': ['string']}], 'protocol': 'string'}], 'result': 'string'}], 'result': 'string'}, 'id': 'string', 'interfaceStatistics': {'adminStatus': 'string', 'inputPackets': 0, 'inputQueueCount': 0, 'inputQueueDrops': 0, 'inputQueueFlushes': 0, 'inputQueueMaxDepth': 0, 'inputRatebps': 0, 'operationalStatus': 'string', 'outputDrop': 0, 'outputPackets': 0, 'outputQueueCount': 0, 'outputQueueDepth': 0, 'outputRatebps': 0, 'refreshedAt': 0}, 'interfaceStatsCollection': 'string', 'interfaceStatsCollectionFailureReason': 'string', 'name': 'string', 'pathOverlayInfo': [{'controlPlane': 'string', 'dataPacketEncapsulation': 'string', 'destIp': 'string', 'destPort': 'string', 'protocol': 'string', 'sourceIp': 'string', 'sourcePort': 'string', 'vxlanInfo': {'dscp': 'string', 'vnid': 'string'}}], 'qosStatistics': [{'classMapName': 'string', 'dropRate': 0, 'numBytes': 0, 'numPackets': 0, 'offeredRate': 0, 'queueBandwidthbps': 'string', 'queueDepth': 0, 'queueNoBufferDrops': 0, 'queueTotalDrops': 0, 'refreshedAt': 0}], 'qosStatsCollection': 'string', 'qosStatsCollectionFailureReason': 'string', 'usedVlan': 'string', 'vrfName': 'string'}, 'flexConnect': {'authentication': 'LOCAL', 'dataSwitching': 'LOCAL', 'egressAclAnalysis': {'aclName': 'string', 'matchingAces': [{'ace': 'string', 'matchingPorts': [{'ports': [{'destPorts': ['string'], 'sourcePorts': ['string']}], 'protocol': 'string'}], 'result': 'string'}], 'result': 'string'}, 'ingressAclAnalysis': {'aclName': 'string', 'matchingAces': [{'ace': 'string', 'matchingPorts': [{'ports': [{'destPorts': ['string'], 'sourcePorts': ['string']}], 'protocol': 'string'}], 'result': 'string'}], 'result': 'string'}, 'wirelessLanControllerId': 'string', 'wirelessLanControllerName': 'string'}, 'id': 'string', 'ingressPhysicalInterface': {'aclAnalysis': {'aclName': 'string', 'matchingAces': [{'ace': 'string', 'matchingPorts': [{'ports': [{'destPorts': ['string'], 'sourcePorts': ['string']}], 'protocol': 'string'}], 'result': 'string'}], 'result': 'string'}, 'id': 'string', 'interfaceStatistics': {'adminStatus': 'string', 'inputPackets': 0, 'inputQueueCount': 0, 'inputQueueDrops': 0, 'inputQueueFlushes': 0, 'inputQueueMaxDepth': 0, 'inputRatebps': 0, 'operationalStatus': 'string', 'outputDrop': 0, 'outputPackets': 0, 'outputQueueCount': 0, 'outputQueueDepth': 0, 'outputRatebps': 0, 'refreshedAt': 0}, 'interfaceStatsCollection': 'string', 'interfaceStatsCollectionFailureReason': 'string', 'name': 'string', 'pathOverlayInfo': [{'controlPlane': 'string', 'dataPacketEncapsulation': 'string', 'destIp': 'string', 'destPort': 'string', 'protocol': 'string', 'sourceIp': 'string', 'sourcePort': 'string', 'vxlanInfo': {'dscp': 'string', 'vnid': 'string'}}], 'qosStatistics': [{'classMapName': 'string', 'dropRate': 0, 'numBytes': 0, 'numPackets': 0, 'offeredRate': 0, 'queueBandwidthbps': 'string', 'queueDepth': 0, 'queueNoBufferDrops': 0, 'queueTotalDrops': 0, 'refreshedAt': 0}], 'qosStatsCollection': 'string', 'qosStatsCollectionFailureReason': 'string', 'usedVlan': 'string', 'vrfName': 'string'}, 'ingressVirtualInterface': {'aclAnalysis': {'aclName': 'string', 'matchingAces': [{'ace': 'string', 'matchingPorts': [{'ports': [{'destPorts': ['string'], 'sourcePorts': ['string']}], 'protocol': 'string'}], 'result': 'string'}], 'result': 'string'}, 'id': 'string', 'interfaceStatistics': {'adminStatus': 'string', 'inputPackets': 0, 'inputQueueCount': 0, 'inputQueueDrops': 0, 'inputQueueFlushes': 0, 'inputQueueMaxDepth': 0, 'inputRatebps': 0, 'operationalStatus': 'string', 'outputDrop': 0, 'outputPackets': 0, 'outputQueueCount': 0, 'outputQueueDepth': 0, 'outputRatebps': 0, 'refreshedAt': 0}, 'interfaceStatsCollection': 'string', 'interfaceStatsCollectionFailureReason': 'string', 'name': 'string', 'pathOverlayInfo': [{'controlPlane': 'string', 'dataPacketEncapsulation': 'string', 'destIp': 'string', 'destPort': 'string', 'protocol': 'string', 'sourceIp': 'string', 'sourcePort': 'string', 'vxlanInfo': {'dscp': 'string', 'vnid': 'string'}}], 'qosStatistics': [{'classMapName': 'string', 'dropRate': 0, 'numBytes': 0, 'numPackets': 0, 'offeredRate': 0, 'queueBandwidthbps': 'string', 'queueDepth': 0, 'queueNoBufferDrops': 0, 'queueTotalDrops': 0, 'refreshedAt': 0}], 'qosStatsCollection': 'string', 'qosStatsCollectionFailureReason': 'string', 'usedVlan': 'string', 'vrfName': 'string'}, 'ip': 'string', 'linkInformationSource': 'string', 'name': 'string', 'perfMonCollection': 'string', 'perfMonCollectionFailureReason': 'string', 'perfMonStatistics': [{'byteRate': 0, 'destIpAddress': 'string', 'destPort': 'string', 'inputInterface': 'string', 'ipv4DSCP': 'string', 'ipv4TTL': 0, 'outputInterface': 'string', 'packetBytes': 0, 'packetCount': 0, 'packetLoss': 0, 'packetLossPercentage': 0, 'protocol': 'string', 'refreshedAt': 0, 'rtpJitterMax': 0, 'rtpJitterMean': 0, 'rtpJitterMin': 0, 'sourceIpAddress': 'string', 'sourcePort': 'string'}], 'role': 'string', 'ssid': 'string', 'tunnels': ['string'], 'type': 'string', 'wlanId': 'string'}], 'networkElementsInfo': [{'accuracyList': [{'percent': 0, 'reason': 'string'}], 'detailedStatus': {'aclTraceCalculation': 'string', 'aclTraceCalculationFailureReason': 'string'}, 'deviceStatistics': {'cpuStatistics': {'fiveMinUsageInPercentage': 0, 'fiveSecsUsageInPercentage': 0, 'oneMinUsageInPercentage': 0, 'refreshedAt': 0}, 'memoryStatistics': {'memoryUsage': 0, 'refreshedAt': 0, 'totalMemory': 0}}, 'deviceStatsCollection': 'string', 'deviceStatsCollectionFailureReason': 'string', 'egressInterface': {'physicalInterface': {'aclAnalysis': {'aclName': 'string', 'matchingAces': [{'ace': 'string', 'matchingPorts': [{'ports': [{'destPorts': ['string'], 'sourcePorts': ['string']}], 'protocol': 'string'}], 'result': 'string'}], 'result': 'string'}, 'id': 'string', 'interfaceStatistics': {'adminStatus': 'string', 'inputPackets': 0, 'inputQueueCount': 0, 'inputQueueDrops': 0, 'inputQueueFlushes': 0, 'inputQueueMaxDepth': 0, 'inputRatebps': 0, 'operationalStatus': 'string', 'outputDrop': 0, 'outputPackets': 0, 'outputQueueCount': 0, 'outputQueueDepth': 0, 'outputRatebps': 0, 'refreshedAt': 0}, 'interfaceStatsCollection': 'string', 'interfaceStatsCollectionFailureReason': 'string', 'name': 'string', 'pathOverlayInfo': [{'controlPlane': 'string', 'dataPacketEncapsulation': 'string', 'destIp': 'string', 'destPort': 'string', 'protocol': 'string', 'sourceIp': 'string', 'sourcePort': 'string', 'vxlanInfo': {'dscp': 'string', 'vnid': 'string'}}], 'qosStatistics': [{'classMapName': 'string', 'dropRate': 0, 'numBytes': 0, 'numPackets': 0, 'offeredRate': 0, 'queueBandwidthbps': 'string', 'queueDepth': 0, 'queueNoBufferDrops': 0, 'queueTotalDrops': 0, 'refreshedAt': 0}], 'qosStatsCollection': 'string', 'qosStatsCollectionFailureReason': 'string', 'usedVlan': 'string', 'vrfName': 'string'}, 'virtualInterface': [{'aclAnalysis': {'aclName': 'string', 'matchingAces': [{'ace': 'string', 'matchingPorts': [{'ports': [{'destPorts': ['string'], 'sourcePorts': ['string']}], 'protocol': 'string'}], 'result': 'string'}], 'result': 'string'}, 'id': 'string', 'interfaceStatistics': {'adminStatus': 'string', 'inputPackets': 0, 'inputQueueCount': 0, 'inputQueueDrops': 0, 'inputQueueFlushes': 0, 'inputQueueMaxDepth': 0, 'inputRatebps': 0, 'operationalStatus': 'string', 'outputDrop': 0, 'outputPackets': 0, 'outputQueueCount': 0, 'outputQueueDepth': 0, 'outputRatebps': 0, 'refreshedAt': 0}, 'interfaceStatsCollection': 'string', 'interfaceStatsCollectionFailureReason': 'string', 'name': 'string', 'pathOverlayInfo': [{'controlPlane': 'string', 'dataPacketEncapsulation': 'string', 'destIp': 'string', 'destPort': 'string', 'protocol': 'string', 'sourceIp': 'string', 'sourcePort': 'string', 'vxlanInfo': {'dscp': 'string', 'vnid': 'string'}}], 'qosStatistics': [{'classMapName': 'string', 'dropRate': 0, 'numBytes': 0, 'numPackets': 0, 'offeredRate': 0, 'queueBandwidthbps': 'string', 'queueDepth': 0, 'queueNoBufferDrops': 0, 'queueTotalDrops': 0, 'refreshedAt': 0}], 'qosStatsCollection': 'string', 'qosStatsCollectionFailureReason': 'string', 'usedVlan': 'string', 'vrfName': 'string'}]}, 'flexConnect': {'authentication': 'LOCAL', 'dataSwitching': 'LOCAL', 'egressAclAnalysis': {'aclName': 'string', 'matchingAces': [{'ace': 'string', 'matchingPorts': [{'ports': [{'destPorts': ['string'], 'sourcePorts': ['string']}], 'protocol': 'string'}], 'result': 'string'}], 'result': 'string'}, 'ingressAclAnalysis': {'aclName': 'string', 'matchingAces': [{'ace': 'string', 'matchingPorts': [{'ports': [{'destPorts': ['string'], 'sourcePorts': ['string']}], 'protocol': 'string'}], 'result': 'string'}], 'result': 'string'}, 'wirelessLanControllerId': 'string', 'wirelessLanControllerName': 'string'}, 'id': 'string', 'ingressInterface': {'physicalInterface': {'aclAnalysis': {'aclName': 'string', 'matchingAces': [{'ace': 'string', 'matchingPorts': [{'ports': [{'destPorts': ['string'], 'sourcePorts': ['string']}], 'protocol': 'string'}], 'result': 'string'}], 'result': 'string'}, 'id': 'string', 'interfaceStatistics': {'adminStatus': 'string', 'inputPackets': 0, 'inputQueueCount': 0, 'inputQueueDrops': 0, 'inputQueueFlushes': 0, 'inputQueueMaxDepth': 0, 'inputRatebps': 0, 'operationalStatus': 'string', 'outputDrop': 0, 'outputPackets': 0, 'outputQueueCount': 0, 'outputQueueDepth': 0, 'outputRatebps': 0, 'refreshedAt': 0}, 'interfaceStatsCollection': 'string', 'interfaceStatsCollectionFailureReason': 'string', 'name': 'string', 'pathOverlayInfo': [{'controlPlane': 'string', 'dataPacketEncapsulation': 'string', 'destIp': 'string', 'destPort': 'string', 'protocol': 'string', 'sourceIp': 'string', 'sourcePort': 'string', 'vxlanInfo': {'dscp': 'string', 'vnid': 'string'}}], 'qosStatistics': [{'classMapName': 'string', 'dropRate': 0, 'numBytes': 0, 'numPackets': 0, 'offeredRate': 0, 'queueBandwidthbps': 'string', 'queueDepth': 0, 'queueNoBufferDrops': 0, 'queueTotalDrops': 0, 'refreshedAt': 0}], 'qosStatsCollection': 'string', 'qosStatsCollectionFailureReason': 'string', 'usedVlan': 'string', 'vrfName': 'string'}, 'virtualInterface': [{'aclAnalysis': {'aclName': 'string', 'matchingAces': [{'ace': 'string', 'matchingPorts': [{'ports': [{'destPorts': ['string'], 'sourcePorts': ['string']}], 'protocol': 'string'}], 'result': 'string'}], 'result': 'string'}, 'id': 'string', 'interfaceStatistics': {'adminStatus': 'string', 'inputPackets': 0, 'inputQueueCount': 0, 'inputQueueDrops': 0, 'inputQueueFlushes': 0, 'inputQueueMaxDepth': 0, 'inputRatebps': 0, 'operationalStatus': 'string', 'outputDrop': 0, 'outputPackets': 0, 'outputQueueCount': 0, 'outputQueueDepth': 0, 'outputRatebps': 0, 'refreshedAt': 0}, 'interfaceStatsCollection': 'string', 'interfaceStatsCollectionFailureReason': 'string', 'name': 'string', 'pathOverlayInfo': [{'controlPlane': 'string', 'dataPacketEncapsulation': 'string', 'destIp': 'string', 'destPort': 'string', 'protocol': 'string', 'sourceIp': 'string', 'sourcePort': 'string', 'vxlanInfo': {'dscp': 'string', 'vnid': 'string'}}], 'qosStatistics': [{'classMapName': 'string', 'dropRate': 0, 'numBytes': 0, 'numPackets': 0, 'offeredRate': 0, 'queueBandwidthbps': 'string', 'queueDepth': 0, 'queueNoBufferDrops': 0, 'queueTotalDrops': 0, 'refreshedAt': 0}], 'qosStatsCollection': 'string', 'qosStatsCollectionFailureReason': 'string', 'usedVlan': 'string', 'vrfName': 'string'}]}, 'ip': 'string', 'linkInformationSource': 'string', 'name': 'string', 'perfMonCollection': 'string', 'perfMonCollectionFailureReason': 'string', 'perfMonitorStatistics': [{'byteRate': 0, 'destIpAddress': 'string', 'destPort': 'string', 'inputInterface': 'string', 'ipv4DSCP': 'string', 'ipv4TTL': 0, 'outputInterface': 'string', 'packetBytes': 0, 'packetCount': 0, 'packetLoss': 0, 'packetLossPercentage': 0, 'protocol': 'string', 'refreshedAt': 0, 'rtpJitterMax': 0, 'rtpJitterMean': 0, 'rtpJitterMin': 0, 'sourceIpAddress': 'string', 'sourcePort': 'string'}], 'role': 'string', 'ssid': 'string', 'tunnels': ['string'], 'type': 'string', 'wlanId': 'string'}], 'properties': ['string'], 'request': {'controlPath': True, 'createTime': 0, 'destIP': 'string', 'destPort': 'string', 'failureReason': 'string', 'id': 'string', 'inclusions': ['string'], 'lastUpdateTime': 0, 'periodicRefresh': True, 'protocol': 'string', 'sourceIP': 'string', 'sourcePort': 'string', 'status': 'string'}}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
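    # --- SWIM (software image management) handlers ---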
def matches_SWIM_fb9beb664f2aba4c(self):
return re.search(
self.SWIM_fb9beb664f2aba4c_PATTERN,
self.path
)
def swim_trigger_software_image_activation_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_SWIM_8cb6783b4faba1f4(self):
return re.search(
self.SWIM_8cb6783b4faba1f4_PATTERN,
self.path
)
def swim_trigger_software_image_distribution_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_SWIM_0c8f7a0b49b9aedd(self):
return re.search(
self.SWIM_0c8f7a0b49b9aedd_PATTERN,
self.path
)
def swim_get_software_image_details_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'applicableDevicesForImage': [{'mdfId': 'string', 'productId': ['string'], 'productName': 'string'}], 'applicationType': 'string', 'createdTime': 'string', 'extendedAttributes': {}, 'family': 'string', 'feature': 'string', 'fileServiceId': 'string', 'fileSize': 'string', 'imageIntegrityStatus': 'string', 'imageName': 'string', 'imageSeries': ['string'], 'imageSource': 'string', 'imageType': 'string', 'imageUuid': 'string', 'importSourceType': 'DEVICE', 'isTaggedGolden': True, 'md5Checksum': 'string', 'name': 'string', 'profileInfo': [{'description': 'string', 'extendedAttributes': {}, 'memory': 0, 'productType': 'string', 'profileName': 'string', 'shares': 0, 'vCpu': 0}], 'shaCheckSum': 'string', 'vendor': 'string', 'version': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_SWIM_4dbe3bc743a891bc(self):
return re.search(
self.SWIM_4dbe3bc743a891bc_PATTERN,
self.path
)
def swim_import_local_software_image_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_SWIM_bc8aab4746ca883d(self):
return re.search(
self.SWIM_bc8aab4746ca883d_PATTERN,
self.path
)
def swim_import_software_image_via_url_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
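    # --- PnP (Plug and Play) handlers ---
    # Unlike the {'response': ..., 'version': ...} envelopes above, several
    # PnP endpoints reply with a bare JSON object or a bare JSON array.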
def matches_PNP_e6b3db8046c99654(self):
return re.search(
self.PNP_e6b3db8046c99654_PATTERN,
self.path
)
def pnp_get_device_list_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
        response_content = json.dumps([{'deviceInfo': {'source': 'string', 'serialNumber': 'string', 'stack': True, 'mode': 'string', 'state': 'string', 'location': {'siteId': 'string', 'address': 'string', 'latitude': 'string', 'longitude': 'string', 'altitude': 'string'}, 'description': 'string', 'onbState': 'string', 'authenticatedMicNumber': 'string', 'authenticatedSudiSerialNo': 'string', 'capabilitiesSupported': ['string'], 'featuresSupported': ['string'], 'cmState': 'string', 'firstContact': 0, 'lastContact': 0, 'macAddress': 'string', 'pid': 'string', 'deviceSudiSerialNos': ['string'], 'lastUpdateOn': 0, 'workflowId': 'string', 'workflowName': 'string', 'projectId': 'string', 'projectName': 'string', 'deviceType': 'string', 'agentType': 'string', 'imageVersion': 'string', 'fileSystemList': [{'type': 'string', 'writeable': True, 'freespace': 0, 'name': 'string', 'readable': True, 'size': 0}], 'pnpProfileList': [{'profileName': 'string', 'discoveryCreated': True, 'createdBy': 'string', 'primaryEndpoint': {'port': 0, 'protocol': 'string', 'ipv4Address': {}, 'ipv6Address': {}, 'fqdn': 'string', 'certificate': 'string'}, 'secondaryEndpoint': {'port': 0, 'protocol': 'string', 'ipv4Address': {}, 'ipv6Address': {}, 'fqdn': 'string', 'certificate': 'string'}}], 'imageFile': 'string', 'httpHeaders': [{'key': 'string', 'value': 'string'}], 'neighborLinks': [{'localInterfaceName': 'string', 'localShortInterfaceName': 'string', 'localMacAddress': 'string', 'remoteInterfaceName': 'string', 'remoteShortInterfaceName': 'string', 'remoteMacAddress': 'string', 'remoteDeviceName': 'string', 'remotePlatform': 'string', 'remoteVersion': 'string'}], 'lastSyncTime': 0, 'ipInterfaces': [{'status': 'string', 'macAddress': 'string', 'ipv4Address': {}, 'ipv6AddressList': [{}], 'name': 'string'}], 'hostname': 'string', 'authStatus': 'string', 'stackInfo': {'supportsStackWorkflows': True, 'isFullRing': True, 'stackMemberList': [{'serialNumber': 'string', 'state': 'string', 'role': 'string', 'macAddress': 'string', 'pid': 'string', 'licenseLevel': 'string', 'licenseType': 'string', 'sudiSerialNumber': 'string', 'hardwareVersion': 'string', 'stackNumber': 0, 'softwareVersion': 'string', 'priority': 0}], 'stackRingProtocol': 'string', 'validLicenseLevels': ['string'], 'totalMemberCount': 0}, 'reloadRequested': True, 'addedOn': 0, 'siteId': 'string', 'aaaCredentials': {'password': 'string', 'username': 'string'}, 'userMicNumbers': ['string'], 'userSudiSerialNos': ['string'], 'addnMacAddrs': ['string'], 'preWorkflowCliOuputs': [{'cli': 'string', 'cliOutput': 'string'}], 'tags': {}, 'sudiRequired': True, 'smartAccountId': 'string', 'virtualAccountId': 'string', 'populateInventory': True, 'siteName': 'string', 'name': 'string'}, 'systemResetWorkflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'systemWorkflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'workflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'runSummaryList': [{'details': 'string', 'historyTaskInfo': {'type': 'string', 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'addnDetails': [{'key': 'string', 'value': 'string'}], 'name': 'string'}, 'errorFlag': True, 'timestamp': 0}], 'workflowParameters': {'topOfStackSerialNumber': 'string', 'licenseLevel': 'string', 'licenseType': 'string', 'configList': [{'configParameters': [{'key': 'string', 'value': 'string'}], 'configId': 'string'}]}, 'dayZeroConfig': {'config': 'string'}, 'dayZeroConfigPreview': {}, 'version': 0, 'tenantId': 'string'}])
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_f3b26b5544cabab9(self):
return re.search(
self.PNP_f3b26b5544cabab9_PATTERN,
self.path
)
def pnp_add_device_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
        response_content = json.dumps({'_id': 'string', 'deviceInfo': {'source': 'string', 'serialNumber': 'string', 'stack': True, 'mode': 'string', 'state': 'string', 'location': {'siteId': 'string', 'address': 'string', 'latitude': 'string', 'longitude': 'string', 'altitude': 'string'}, 'description': 'string', 'onbState': 'string', 'authenticatedMicNumber': 'string', 'authenticatedSudiSerialNo': 'string', 'capabilitiesSupported': ['string'], 'featuresSupported': ['string'], 'cmState': 'string', 'firstContact': 0, 'lastContact': 0, 'macAddress': 'string', 'pid': 'string', 'deviceSudiSerialNos': ['string'], 'lastUpdateOn': 0, 'workflowId': 'string', 'workflowName': 'string', 'projectId': 'string', 'projectName': 'string', 'deviceType': 'string', 'agentType': 'string', 'imageVersion': 'string', 'fileSystemList': [{'type': 'string', 'writeable': True, 'freespace': 0, 'name': 'string', 'readable': True, 'size': 0}], 'pnpProfileList': [{'profileName': 'string', 'discoveryCreated': True, 'createdBy': 'string', 'primaryEndpoint': {'port': 0, 'protocol': 'string', 'ipv4Address': {}, 'ipv6Address': {}, 'fqdn': 'string', 'certificate': 'string'}, 'secondaryEndpoint': {'port': 0, 'protocol': 'string', 'ipv4Address': {}, 'ipv6Address': {}, 'fqdn': 'string', 'certificate': 'string'}}], 'imageFile': 'string', 'httpHeaders': [{'key': 'string', 'value': 'string'}], 'neighborLinks': [{'localInterfaceName': 'string', 'localShortInterfaceName': 'string', 'localMacAddress': 'string', 'remoteInterfaceName': 'string', 'remoteShortInterfaceName': 'string', 'remoteMacAddress': 'string', 'remoteDeviceName': 'string', 'remotePlatform': 'string', 'remoteVersion': 'string'}], 'lastSyncTime': 0, 'ipInterfaces': [{'status': 'string', 'macAddress': 'string', 'ipv4Address': {}, 'ipv6AddressList': [{}], 'name': 'string'}], 'hostname': 'string', 'authStatus': 'string', 'stackInfo': {'supportsStackWorkflows': True, 'isFullRing': True, 'stackMemberList': [{'serialNumber': 'string', 'state': 'string', 'role': 'string', 'macAddress': 'string', 'pid': 'string', 'licenseLevel': 'string', 'licenseType': 'string', 'sudiSerialNumber': 'string', 'hardwareVersion': 'string', 'stackNumber': 0, 'softwareVersion': 'string', 'priority': 0}], 'stackRingProtocol': 'string', 'validLicenseLevels': ['string'], 'totalMemberCount': 0}, 'reloadRequested': True, 'addedOn': 0, 'siteId': 'string', 'aaaCredentials': {'password': 'string', 'username': 'string'}, 'userMicNumbers': ['string'], 'userSudiSerialNos': ['string'], 'addnMacAddrs': ['string'], 'preWorkflowCliOuputs': [{'cli': 'string', 'cliOutput': 'string'}], 'tags': {}, 'sudiRequired': True, 'smartAccountId': 'string', 'virtualAccountId': 'string', 'populateInventory': True, 'siteName': 'string', 'name': 'string'}, 'systemResetWorkflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'systemWorkflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'workflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'runSummaryList': [{'details': 'string', 'historyTaskInfo': {'type': 'string', 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'addnDetails': [{'key': 'string', 'value': 'string'}], 'name': 'string'}, 'errorFlag': True, 'timestamp': 0}], 'workflowParameters': {'topOfStackSerialNumber': 'string', 'licenseLevel': 'string', 'licenseType': 'string', 'configList': [{'configParameters': [{'key': 'string', 'value': 'string'}], 'configId': 'string'}]}, 'dayZeroConfig': {'config': 'string'}, 'dayZeroConfigPreview': {}, 'version': 0, 'tenantId': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_09b0f9ce4239ae10(self):
return re.search(
self.PNP_09b0f9ce4239ae10_PATTERN,
self.path
)
def pnp_update_device_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
        response_content = json.dumps({'_id': 'string', 'deviceInfo': {'source': 'string', 'serialNumber': 'string', 'stack': True, 'mode': 'string', 'state': 'string', 'location': {'siteId': 'string', 'address': 'string', 'latitude': 'string', 'longitude': 'string', 'altitude': 'string'}, 'description': 'string', 'onbState': 'string', 'authenticatedMicNumber': 'string', 'authenticatedSudiSerialNo': 'string', 'capabilitiesSupported': ['string'], 'featuresSupported': ['string'], 'cmState': 'string', 'firstContact': 0, 'lastContact': 0, 'macAddress': 'string', 'pid': 'string', 'deviceSudiSerialNos': ['string'], 'lastUpdateOn': 0, 'workflowId': 'string', 'workflowName': 'string', 'projectId': 'string', 'projectName': 'string', 'deviceType': 'string', 'agentType': 'string', 'imageVersion': 'string', 'fileSystemList': [{'type': 'string', 'writeable': True, 'freespace': 0, 'name': 'string', 'readable': True, 'size': 0}], 'pnpProfileList': [{'profileName': 'string', 'discoveryCreated': True, 'createdBy': 'string', 'primaryEndpoint': {'port': 0, 'protocol': 'string', 'ipv4Address': {}, 'ipv6Address': {}, 'fqdn': 'string', 'certificate': 'string'}, 'secondaryEndpoint': {'port': 0, 'protocol': 'string', 'ipv4Address': {}, 'ipv6Address': {}, 'fqdn': 'string', 'certificate': 'string'}}], 'imageFile': 'string', 'httpHeaders': [{'key': 'string', 'value': 'string'}], 'neighborLinks': [{'localInterfaceName': 'string', 'localShortInterfaceName': 'string', 'localMacAddress': 'string', 'remoteInterfaceName': 'string', 'remoteShortInterfaceName': 'string', 'remoteMacAddress': 'string', 'remoteDeviceName': 'string', 'remotePlatform': 'string', 'remoteVersion': 'string'}], 'lastSyncTime': 0, 'ipInterfaces': [{'status': 'string', 'macAddress': 'string', 'ipv4Address': {}, 'ipv6AddressList': [{}], 'name': 'string'}], 'hostname': 'string', 'authStatus': 'string', 'stackInfo': {'supportsStackWorkflows': True, 'isFullRing': True, 'stackMemberList': [{'serialNumber': 'string', 'state': 'string', 'role': 'string', 'macAddress': 'string', 'pid': 'string', 'licenseLevel': 'string', 'licenseType': 'string', 'sudiSerialNumber': 'string', 'hardwareVersion': 'string', 'stackNumber': 0, 'softwareVersion': 'string', 'priority': 0}], 'stackRingProtocol': 'string', 'validLicenseLevels': ['string'], 'totalMemberCount': 0}, 'reloadRequested': True, 'addedOn': 0, 'siteId': 'string', 'aaaCredentials': {'password': 'string', 'username': 'string'}, 'userMicNumbers': ['string'], 'userSudiSerialNos': ['string'], 'addnMacAddrs': ['string'], 'preWorkflowCliOuputs': [{'cli': 'string', 'cliOutput': 'string'}], 'tags': {}, 'sudiRequired': True, 'smartAccountId': 'string', 'virtualAccountId': 'string', 'populateInventory': True, 'siteName': 'string', 'name': 'string'}, 'systemResetWorkflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'systemWorkflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'workflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'runSummaryList': [{'details': 'string', 'historyTaskInfo': {'type': 'string', 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'addnDetails': [{'key': 'string', 'value': 'string'}], 'name': 'string'}, 'errorFlag': True, 'timestamp': 0}], 'workflowParameters': {'topOfStackSerialNumber': 'string', 'licenseLevel': 'string', 'licenseType': 'string', 'configList': [{'configParameters': [{'key': 'string', 'value': 'string'}], 'configId': 'string'}]}, 'dayZeroConfig': {'config': 'string'}, 'dayZeroConfigPreview': {}, 'version': 0, 'tenantId': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_bab6c9e5440885cc(self):
return re.search(
self.PNP_bab6c9e5440885cc_PATTERN,
self.path
)
def pnp_get_device_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
        response_content = json.dumps({'_id': 'string', 'deviceInfo': {'source': 'string', 'serialNumber': 'string', 'stack': True, 'mode': 'string', 'state': 'string', 'location': {'siteId': 'string', 'address': 'string', 'latitude': 'string', 'longitude': 'string', 'altitude': 'string'}, 'description': 'string', 'onbState': 'string', 'authenticatedMicNumber': 'string', 'authenticatedSudiSerialNo': 'string', 'capabilitiesSupported': ['string'], 'featuresSupported': ['string'], 'cmState': 'string', 'firstContact': 0, 'lastContact': 0, 'macAddress': 'string', 'pid': 'string', 'deviceSudiSerialNos': ['string'], 'lastUpdateOn': 0, 'workflowId': 'string', 'workflowName': 'string', 'projectId': 'string', 'projectName': 'string', 'deviceType': 'string', 'agentType': 'string', 'imageVersion': 'string', 'fileSystemList': [{'type': 'string', 'writeable': True, 'freespace': 0, 'name': 'string', 'readable': True, 'size': 0}], 'pnpProfileList': [{'profileName': 'string', 'discoveryCreated': True, 'createdBy': 'string', 'primaryEndpoint': {'port': 0, 'protocol': 'string', 'ipv4Address': {}, 'ipv6Address': {}, 'fqdn': 'string', 'certificate': 'string'}, 'secondaryEndpoint': {'port': 0, 'protocol': 'string', 'ipv4Address': {}, 'ipv6Address': {}, 'fqdn': 'string', 'certificate': 'string'}}], 'imageFile': 'string', 'httpHeaders': [{'key': 'string', 'value': 'string'}], 'neighborLinks': [{'localInterfaceName': 'string', 'localShortInterfaceName': 'string', 'localMacAddress': 'string', 'remoteInterfaceName': 'string', 'remoteShortInterfaceName': 'string', 'remoteMacAddress': 'string', 'remoteDeviceName': 'string', 'remotePlatform': 'string', 'remoteVersion': 'string'}], 'lastSyncTime': 0, 'ipInterfaces': [{'status': 'string', 'macAddress': 'string', 'ipv4Address': {}, 'ipv6AddressList': [{}], 'name': 'string'}], 'hostname': 'string', 'authStatus': 'string', 'stackInfo': {'supportsStackWorkflows': True, 'isFullRing': True, 'stackMemberList': [{'serialNumber': 'string', 'state': 'string', 'role': 'string', 'macAddress': 'string', 'pid': 'string', 'licenseLevel': 'string', 'licenseType': 'string', 'sudiSerialNumber': 'string', 'hardwareVersion': 'string', 'stackNumber': 0, 'softwareVersion': 'string', 'priority': 0}], 'stackRingProtocol': 'string', 'validLicenseLevels': ['string'], 'totalMemberCount': 0}, 'reloadRequested': True, 'addedOn': 0, 'siteId': 'string', 'aaaCredentials': {'password': 'string', 'username': 'string'}, 'userMicNumbers': ['string'], 'userSudiSerialNos': ['string'], 'addnMacAddrs': ['string'], 'preWorkflowCliOuputs': [{'cli': 'string', 'cliOutput': 'string'}], 'tags': {}, 'sudiRequired': True, 'smartAccountId': 'string', 'virtualAccountId': 'string', 'populateInventory': True, 'siteName': 'string', 'name': 'string'}, 'systemResetWorkflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'systemWorkflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'workflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'runSummaryList': [{'details': 'string', 'historyTaskInfo': {'type': 'string', 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'addnDetails': [{'key': 'string', 'value': 'string'}], 'name': 'string'}, 'errorFlag': True, 'timestamp': 0}], 'workflowParameters': {'topOfStackSerialNumber': 'string', 'licenseLevel': 'string', 'licenseType': 'string', 'configList': [{'configParameters': [{'key': 'string', 'value': 'string'}], 'configId': 'string'}]}, 'dayZeroConfig': {'config': 'string'}, 'dayZeroConfigPreview': {}, 'version': 0, 'tenantId': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_cdab9b474899ae06(self):
return re.search(
self.PNP_cdab9b474899ae06_PATTERN,
self.path
)
def pnp_delete_device_by_id_from_pnp_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
        response_content = json.dumps({'_id': 'string', 'deviceInfo': {'source': 'string', 'serialNumber': 'string', 'stack': True, 'mode': 'string', 'state': 'string', 'location': {'siteId': 'string', 'address': 'string', 'latitude': 'string', 'longitude': 'string', 'altitude': 'string'}, 'description': 'string', 'onbState': 'string', 'authenticatedMicNumber': 'string', 'authenticatedSudiSerialNo': 'string', 'capabilitiesSupported': ['string'], 'featuresSupported': ['string'], 'cmState': 'string', 'firstContact': 0, 'lastContact': 0, 'macAddress': 'string', 'pid': 'string', 'deviceSudiSerialNos': ['string'], 'lastUpdateOn': 0, 'workflowId': 'string', 'workflowName': 'string', 'projectId': 'string', 'projectName': 'string', 'deviceType': 'string', 'agentType': 'string', 'imageVersion': 'string', 'fileSystemList': [{'type': 'string', 'writeable': True, 'freespace': 0, 'name': 'string', 'readable': True, 'size': 0}], 'pnpProfileList': [{'profileName': 'string', 'discoveryCreated': True, 'createdBy': 'string', 'primaryEndpoint': {'port': 0, 'protocol': 'string', 'ipv4Address': {}, 'ipv6Address': {}, 'fqdn': 'string', 'certificate': 'string'}, 'secondaryEndpoint': {'port': 0, 'protocol': 'string', 'ipv4Address': {}, 'ipv6Address': {}, 'fqdn': 'string', 'certificate': 'string'}}], 'imageFile': 'string', 'httpHeaders': [{'key': 'string', 'value': 'string'}], 'neighborLinks': [{'localInterfaceName': 'string', 'localShortInterfaceName': 'string', 'localMacAddress': 'string', 'remoteInterfaceName': 'string', 'remoteShortInterfaceName': 'string', 'remoteMacAddress': 'string', 'remoteDeviceName': 'string', 'remotePlatform': 'string', 'remoteVersion': 'string'}], 'lastSyncTime': 0, 'ipInterfaces': [{'status': 'string', 'macAddress': 'string', 'ipv4Address': {}, 'ipv6AddressList': [{}], 'name': 'string'}], 'hostname': 'string', 'authStatus': 'string', 'stackInfo': {'supportsStackWorkflows': True, 'isFullRing': True, 'stackMemberList': [{'serialNumber': 'string', 'state': 'string', 'role': 'string', 'macAddress': 'string', 'pid': 'string', 'licenseLevel': 'string', 'licenseType': 'string', 'sudiSerialNumber': 'string', 'hardwareVersion': 'string', 'stackNumber': 0, 'softwareVersion': 'string', 'priority': 0}], 'stackRingProtocol': 'string', 'validLicenseLevels': ['string'], 'totalMemberCount': 0}, 'reloadRequested': True, 'addedOn': 0, 'siteId': 'string', 'aaaCredentials': {'password': 'string', 'username': 'string'}, 'userMicNumbers': ['string'], 'userSudiSerialNos': ['string'], 'addnMacAddrs': ['string'], 'preWorkflowCliOuputs': [{'cli': 'string', 'cliOutput': 'string'}], 'tags': {}, 'sudiRequired': True, 'smartAccountId': 'string', 'virtualAccountId': 'string', 'populateInventory': True, 'siteName': 'string', 'name': 'string'}, 'systemResetWorkflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'systemWorkflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'workflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'runSummaryList': [{'details': 'string', 'historyTaskInfo': {'type': 'string', 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'addnDetails': [{'key': 'string', 'value': 'string'}], 'name': 'string'}, 'errorFlag': True, 'timestamp': 0}], 'workflowParameters': {'topOfStackSerialNumber': 'string', 'licenseLevel': 'string', 'licenseType': 'string', 'configList': [{'configParameters': [{'key': 'string', 'value': 'string'}], 'configId': 'string'}]}, 'dayZeroConfig': {'config': 'string'}, 'dayZeroConfigPreview': {}, 'version': 0, 'tenantId': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_d8a619974a8a8c48(self):
return re.search(
self.PNP_d8a619974a8a8c48_PATTERN,
self.path
)
def pnp_claim_device_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'jsonArrayResponse': [{}], 'jsonResponse': {}, 'message': 'string', 'statusCode': 0})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_d9a1fa9c4068b23c(self):
return re.search(
self.PNP_d9a1fa9c4068b23c_PATTERN,
self.path
)
def pnp_get_device_count_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': 0})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_f09319674049a7d4(self):
return re.search(
self.PNP_f09319674049a7d4_PATTERN,
self.path
)
def pnp_get_device_history_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'timestamp': 0, 'details': 'string', 'historyTaskInfo': {'name': 'string', 'type': 'string', 'timeTaken': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'startTime': 0, 'endTime': 0, 'timeTaken': 0, 'outputStr': 'string'}], 'addnDetails': [{'key': 'string', 'value': 'string'}]}, 'errorFlag': True}], 'statusCode': 0})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_21a6db2540298f55(self):
return re.search(
self.PNP_21a6db2540298f55_PATTERN,
self.path
)
def pnp_import_devices_in_bulk_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'successList': [{'_id': 'string', 'deviceInfo': {'source': 'string', 'serialNumber': 'string', 'stack': True, 'mode': 'string', 'state': 'string', 'location': {'siteId': 'string', 'address': 'string', 'latitude': 'string', 'longitude': 'string', 'altitude': 'string'}, 'description': 'string', 'onbState': 'string', 'authenticatedMicNumber': 'string', 'authenticatedSudiSerialNo': 'string', 'capabilitiesSupported': ['string'], 'featuresSupported': ['string'], 'cmState': 'string', 'firstContact': 0, 'lastContact': 0, 'macAddress': 'string', 'pid': 'string', 'deviceSudiSerialNos': ['string'], 'lastUpdateOn': 0, 'workflowId': 'string', 'workflowName': 'string', 'projectId': 'string', 'projectName': 'string', 'deviceType': 'string', 'agentType': 'string', 'imageVersion': 'string', 'fileSystemList': [{'type': 'string', 'writeable': True, 'freespace': 0, 'name': 'string', 'readable': True, 'size': 0}], 'pnpProfileList': [{'profileName': 'string', 'discoveryCreated': True, 'createdBy': 'string', 'primaryEndpoint': {'port': 0, 'protocol': 'string', 'ipv4Address': {}, 'ipv6Address': {}, 'fqdn': 'string', 'certificate': 'string'}, 'secondaryEndpoint': {'port': 0, 'protocol': 'string', 'ipv4Address': {}, 'ipv6Address': {}, 'fqdn': 'string', 'certificate': 'string'}}], 'imageFile': 'string', 'httpHeaders': [{'key': 'string', 'value': 'string'}], 'neighborLinks': [{'localInterfaceName': 'string', 'localShortInterfaceName': 'string', 'localMacAddress': 'string', 'remoteInterfaceName': 'string', 'remoteShortInterfaceName': 'string', 'remoteMacAddress': 'string', 'remoteDeviceName': 'string', 'remotePlatform': 'string', 'remoteVersion': 'string'}], 'lastSyncTime': 0, 'ipInterfaces': [{'status': 'string', 'macAddress': 'string', 'ipv4Address': {}, 'ipv6AddressList': [{}], 'name': 'string'}], 'hostname': 'string', 'authStatus': 'string', 'stackInfo': {'supportsStackWorkflows': True, 'isFullRing': True, 'stackMemberList': [{'serialNumber': 'string', 'state': 'string', 'role': 'string', 'macAddress': 'string', 'pid': 'string', 'licenseLevel': 'string', 'licenseType': 'string', 'sudiSerialNumber': 'string', 'hardwareVersion': 'string', 'stackNumber': 0, 'softwareVersion': 'string', 'priority': 0}], 'stackRingProtocol': 'string', 'validLicenseLevels': ['string'], 'totalMemberCount': 0}, 'reloadRequested': True, 'addedOn': 0, 'siteId': 'string', 'aaaCredentials': {'password': 'string', 'username': 'string'}, 'userMicNumbers': ['string'], 'userSudiSerialNos': ['string'], 'addnMacAddrs': ['string'], 'preWorkflowCliOuputs': [{'cli': 'string', 'cliOutput': 'string'}], 'tags': {}, 'sudiRequired': True, 'smartAccountId': 'string', 'virtualAccountId': 'string', 'populateInventory': True, 'siteName': 'string', 'name': 'string'}, 'systemResetWorkflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'systemWorkflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'workflow': {'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}, 'runSummaryList': [{'details': 'string', 'historyTaskInfo': {'type': 'string', 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'addnDetails': [{'key': 'string', 'value': 'string'}], 'name': 'string'}, 'errorFlag': True, 'timestamp': 0}], 'workflowParameters': {'topOfStackSerialNumber': 'string', 'licenseLevel': 'string', 'licenseType': 'string', 'configList': [{'configParameters': [{'key': 'string', 'value': 'string'}], 'configId': 'string'}]}, 'dayZeroConfig': {'config': 'string'}, 'dayZeroConfigPreview': {}, 'version': 0, 'tenantId': 'string'}], 'failureList': [{'index': 0, 'serialNum': 'string', 'id': 'string', 'msg': 'string'}]})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_9e857b5a4a0bbcdb(self):
return re.search(
self.PNP_9e857b5a4a0bbcdb_PATTERN,
self.path
)
def pnp_reset_device_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'jsonArrayResponse': [{}], 'jsonResponse': {}, 'message': 'string', 'statusCode': 0})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_0a9c988445cb91c8(self):
return re.search(
self.PNP_0a9c988445cb91c8_PATTERN,
self.path
)
def pnp_get_sync_result_for_virtual_account_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'virtualAccountId': 'string', 'autoSyncPeriod': 0, 'syncResultStr': 'string', 'profile': {'proxy': True, 'makeDefault': True, 'port': 0, 'profileId': 'string', 'name': 'string', 'addressIpV4': 'string', 'cert': 'string', 'addressFqdn': 'string'}, 'ccoUser': 'string', 'syncResult': {'syncList': [{'syncType': 'string', 'deviceSnList': ['string']}], 'syncMsg': 'string'}, 'token': 'string', 'syncStartTime': 0, 'lastSync': 0, 'tenantId': 'string', 'smartAccountId': 'string', 'expiry': 0, 'syncStatus': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_5889fb844939a13b(self):
return re.search(
self.PNP_5889fb844939a13b_PATTERN,
self.path
)
def pnp_claim_a_device_to_a_site_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': 'string', 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_cf9418234d9ab37e(self):
return re.search(
self.PNP_cf9418234d9ab37e_PATTERN,
self.path
)
def pnp_preview_config_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'complete': True, 'config': 'string', 'error': True, 'errorMessage': 'string', 'expiredTime': 0, 'rfProfile': 'string', 'sensorProfile': 'string', 'siteId': 'string', 'startTime': 0, 'taskId': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_0b836b7b4b6a9fd5(self):
return re.search(
self.PNP_0b836b7b4b6a9fd5_PATTERN,
self.path
)
def pnp_un_claim_device_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'jsonArrayResponse': [{}], 'jsonResponse': {}, 'message': 'string', 'statusCode': 0})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_a4b6c87a4ffb9efa(self):
return re.search(
self.PNP_a4b6c87a4ffb9efa_PATTERN,
self.path
)
def pnp_sync_virtual_account_devices_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'virtualAccountId': 'string', 'autoSyncPeriod': 0, 'syncResultStr': 'string', 'profile': {'proxy': True, 'makeDefault': True, 'port': 0, 'profileId': 'string', 'name': 'string', 'addressIpV4': 'string', 'cert': 'string', 'addressFqdn': 'string'}, 'ccoUser': 'string', 'syncResult': {'syncList': [{'syncType': 'string', 'deviceSnList': ['string']}], 'syncMsg': 'string'}, 'token': 'string', 'syncStartTime': 0, 'lastSync': 0, 'tenantId': 'string', 'smartAccountId': 'string', 'expiry': 0, 'syncStatus': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_8da0391947088a5a(self):
return re.search(
self.PNP_8da0391947088a5a_PATTERN,
self.path
)
def pnp_update_pnp_global_settings_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'savaMappingList': [{'syncStatus': 'string', 'syncStartTime': 0, 'syncResult': {'syncList': [{'syncType': 'string', 'deviceSnList': ['string']}], 'syncMsg': 'string'}, 'lastSync': 0, 'tenantId': 'string', 'profile': {'port': 0, 'addressIpV4': 'string', 'addressFqdn': 'string', 'profileId': 'string', 'proxy': True, 'makeDefault': True, 'cert': 'string', 'name': 'string'}, 'token': 'string', 'expiry': 0, 'ccoUser': 'string', 'smartAccountId': 'string', 'virtualAccountId': 'string', 'autoSyncPeriod': 0, 'syncResultStr': 'string'}], 'taskTimeOuts': {'imageDownloadTimeOut': 0, 'configTimeOut': 0, 'generalTimeOut': 0}, 'tenantId': 'string', 'aaaCredentials': {'password': 'string', 'username': 'string'}, 'defaultProfile': {'fqdnAddresses': ['string'], 'proxy': True, 'cert': 'string', 'ipAddresses': ['string'], 'port': 0}, 'acceptEula': True, 'id': 'string', '_id': 'string', 'version': 0})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_7e92f9eb46db8320(self):
return re.search(
self.PNP_7e92f9eb46db8320_PATTERN,
self.path
)
def pnp_get_pnp_global_settings_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'savaMappingList': [{'syncStatus': 'string', 'syncStartTime': 0, 'syncResult': {'syncList': [{'syncType': 'string', 'deviceSnList': ['string']}], 'syncMsg': 'string'}, 'lastSync': 0, 'tenantId': 'string', 'profile': {'port': 0, 'addressIpV4': 'string', 'addressFqdn': 'string', 'profileId': 'string', 'proxy': True, 'makeDefault': True, 'cert': 'string', 'name': 'string'}, 'token': 'string', 'expiry': 0, 'ccoUser': 'string', 'smartAccountId': 'string', 'virtualAccountId': 'string', 'autoSyncPeriod': 0, 'syncResultStr': 'string'}], 'taskTimeOuts': {'imageDownloadTimeOut': 0, 'configTimeOut': 0, 'generalTimeOut': 0}, 'tenantId': 'string', 'aaaCredentials': {'password': 'string', 'username': 'string'}, 'defaultProfile': {'fqdnAddresses': ['string'], 'proxy': True, 'cert': 'string', 'ipAddresses': ['string'], 'port': 0}, 'acceptEula': True, 'id': 'string', '_id': 'string', 'version': 0})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_3cb24acb486b89d2(self):
return re.search(
self.PNP_3cb24acb486b89d2_PATTERN,
self.path
)
def pnp_get_smart_account_list_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps(['string'])
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_70a479a6462a9496(self):
return re.search(
self.PNP_70a479a6462a9496_PATTERN,
self.path
)
def pnp_get_virtual_account_list_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps(['string'])
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_1e962af345b8b59f(self):
return re.search(
self.PNP_1e962af345b8b59f_PATTERN,
self.path
)
def pnp_add_virtual_account_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'virtualAccountId': 'string', 'autoSyncPeriod': 0, 'syncResultStr': 'string', 'profile': {'proxy': True, 'makeDefault': True, 'port': 0, 'profileId': 'string', 'name': 'string', 'addressIpV4': 'string', 'cert': 'string', 'addressFqdn': 'string'}, 'ccoUser': 'string', 'syncResult': {'syncList': [{'syncType': 'string', 'deviceSnList': ['string']}], 'syncMsg': 'string'}, 'token': 'string', 'syncStartTime': 0, 'lastSync': 0, 'tenantId': 'string', 'smartAccountId': 'string', 'expiry': 0, 'syncStatus': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_6f9819e84178870c(self):
return re.search(
self.PNP_6f9819e84178870c_PATTERN,
self.path
)
def pnp_update_pnp_server_profile_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'virtualAccountId': 'string', 'autoSyncPeriod': 0, 'syncResultStr': 'string', 'profile': {'proxy': True, 'makeDefault': True, 'port': 0, 'profileId': 'string', 'name': 'string', 'addressIpV4': 'string', 'cert': 'string', 'addressFqdn': 'string'}, 'ccoUser': 'string', 'syncResult': {'syncList': [{'syncType': 'string', 'deviceSnList': ['string']}], 'syncMsg': 'string'}, 'token': 'string', 'syncStartTime': 0, 'lastSync': 0, 'tenantId': 'string', 'smartAccountId': 'string', 'expiry': 0, 'syncStatus': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_2499e9ad42e8ae5b(self):
return re.search(
self.PNP_2499e9ad42e8ae5b_PATTERN,
self.path
)
def pnp_deregister_virtual_account_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'virtualAccountId': 'string', 'autoSyncPeriod': 0, 'syncResultStr': 'string', 'profile': {'proxy': True, 'makeDefault': True, 'port': 0, 'profileId': 'string', 'name': 'string', 'addressIpV4': 'string', 'cert': 'string', 'addressFqdn': 'string'}, 'ccoUser': 'string', 'syncResult': {'syncList': [{'syncType': 'string', 'deviceSnList': ['string']}], 'syncMsg': 'string'}, 'token': 'string', 'syncStartTime': 0, 'lastSync': 0, 'tenantId': 'string', 'smartAccountId': 'string', 'expiry': 0, 'syncStatus': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_aeb4dad04a99bbe3(self):
return re.search(
self.PNP_aeb4dad04a99bbe3_PATTERN,
self.path
)
def pnp_get_workflows_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps([{'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'}])
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_848b5a7b4f9b8c12(self):
return re.search(
self.PNP_848b5a7b4f9b8c12_PATTERN,
self.path
)
def pnp_add_a_workflow_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_3086c9624f498b85(self):
return re.search(
self.PNP_3086c9624f498b85_PATTERN,
self.path
)
def pnp_update_workflow_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_80acb88e4ac9ac6d(self):
return re.search(
self.PNP_80acb88e4ac9ac6d_PATTERN,
self.path
)
def pnp_get_workflow_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_af8d7b0e470b8ae2(self):
return re.search(
self.PNP_af8d7b0e470b8ae2_PATTERN,
self.path
)
def pnp_delete_workflow_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'_id': 'string', 'state': 'string', 'type': 'string', 'description': 'string', 'lastupdateOn': 0, 'imageId': 'string', 'currTaskIdx': 0, 'addedOn': 0, 'tasks': [{'state': 'string', 'type': 'string', 'currWorkItemIdx': 0, 'taskSeqNo': 0, 'endTime': 0, 'startTime': 0, 'workItemList': [{'state': 'string', 'command': 'string', 'outputStr': 'string', 'endTime': 0, 'startTime': 0, 'timeTaken': 0}], 'timeTaken': 0, 'name': 'string'}], 'addToInventory': True, 'instanceType': 'string', 'endTime': 0, 'execTime': 0, 'startTime': 0, 'useState': 'string', 'configId': 'string', 'name': 'string', 'version': 0, 'tenantId': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_PNP_7989f86846faaf99(self):
return re.search(
self.PNP_7989f86846faaf99_PATTERN,
self.path
)
def pnp_get_workflow_count_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': 0})
self.wfile.write(response_content.encode('utf-8'))
return
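# Mock handlers for the SITE_PROFILE endpoints follow.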
def matches_SITE_PROFILE_828828f44f28bd0d(self):
return re.search(
self.SITE_PROFILE_828828f44f28bd0d_PATTERN,
self.path
)
def site_profile_provision_nfv_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'executionId': 'string', 'executionStatusUrl': 'string', 'message': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_SITE_PROFILE_7fbe4b804879baa4(self):
return re.search(
self.SITE_PROFILE_7fbe4b804879baa4_PATTERN,
self.path
)
def site_profile_get_device_details_by_ip_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'provisionDetails': {'startTime': 'string', 'endTime': 'string', 'duration': 'string', 'statusMessage': 'string', 'status': 'string', 'taskNodes': [{'startTime': 'string', 'endTime': 'string', 'duration': 'string', 'status': 'string', 'nextTask': 'string', 'name': 'string', 'target': 'string', 'statusMessage': 'string', 'payload': 'string', 'provisionedNames': {}, 'errorPayload': {}, 'parentTask': {}, 'cliTemplateUserMessageDTO': {}, 'stepRan': 'string'}], 'topology': 'string', 'beginStep': 'string'}})
self.wfile.write(response_content.encode('utf-8'))
return
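# Mock handlers for the DEVICES (network device inventory) endpoints follow.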
def matches_DEVICES_89b2fb144f5bb09b(self):
return re.search(
self.DEVICES_89b2fb144f5bb09b_PATTERN,
self.path
)
def devices_get_device_detail_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'HALastResetReason': 'string', 'managementIpAddr': 'string', 'HAPrimaryPowerStatus': 'string', 'redundancyMode': 'string', 'communicationState': 'string', 'nwDeviceName': 'string', 'redundancyUnit': 'string', 'platformId': 'string', 'redundancyPeerState': 'string', 'nwDeviceId': 'string', 'redundancyState': 'string', 'nwDeviceRole': 'string', 'nwDeviceFamily': 'string', 'macAddress': 'string', 'collectionStatus': 'string', 'deviceSeries': 'string', 'osType': 'string', 'clientCount': 'string', 'HASecondaryPowerStatus': 'string', 'softwareVersion': 'string', 'nwDeviceType': 'string', 'overallHealth': 0, 'memoryScore': 0, 'cpuScore': 0, 'noiseScore': 0, 'utilizationScore': 0, 'airQualityScore': 0, 'interferenceScore': 0, 'wqeScore': 0, 'freeMbufScore': 0, 'packetPoolScore': 0, 'freeTimerScore': 0, 'memory': 'string', 'cpu': 'string', 'noise': 'string', 'utilization': 'string', 'airQuality': 'string', 'interference': 'string', 'wqe': 'string', 'freeMbuf': 'string', 'packetPool': 'string', 'freeTimer': 'string', 'location': 'string', 'timestamp': 'string'}})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_f5947a4c439a8bf0(self):
return re.search(
self.DEVICES_f5947a4c439a8bf0_PATTERN,
self.path
)
def devices_get_all_interfaces_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'adminStatus': 'string', 'className': 'string', 'description': 'string', 'deviceId': 'string', 'duplex': 'string', 'id': 'string', 'ifIndex': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'interfaceType': 'string', 'ipv4Address': 'string', 'ipv4Mask': 'string', 'isisSupport': 'string', 'lastUpdated': 'string', 'macAddress': 'string', 'mappedPhysicalInterfaceId': 'string', 'mappedPhysicalInterfaceName': 'string', 'mediaType': 'string', 'nativeVlanId': 'string', 'ospfSupport': 'string', 'pid': 'string', 'portMode': 'string', 'portName': 'string', 'portType': 'string', 'serialNo': 'string', 'series': 'string', 'speed': 'string', 'status': 'string', 'vlanId': 'string', 'voiceVlan': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_b888792d43baba46(self):
return re.search(
self.DEVICES_b888792d43baba46_PATTERN,
self.path
)
def devices_get_interface_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'adminStatus': 'string', 'className': 'string', 'description': 'string', 'deviceId': 'string', 'duplex': 'string', 'id': 'string', 'ifIndex': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'interfaceType': 'string', 'ipv4Address': 'string', 'ipv4Mask': 'string', 'isisSupport': 'string', 'lastUpdated': 'string', 'macAddress': 'string', 'mappedPhysicalInterfaceId': 'string', 'mappedPhysicalInterfaceName': 'string', 'mediaType': 'string', 'nativeVlanId': 'string', 'ospfSupport': 'string', 'pid': 'string', 'portMode': 'string', 'portName': 'string', 'portType': 'string', 'serialNo': 'string', 'series': 'string', 'speed': 'string', 'status': 'string', 'vlanId': 'string', 'voiceVlan': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_3d923b184dc9a4ca(self):
return re.search(
self.DEVICES_3d923b184dc9a4ca_PATTERN,
self.path
)
def devices_get_device_interface_count_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': 0, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_cd8469e647caab0e(self):
return re.search(
self.DEVICES_cd8469e647caab0e_PATTERN,
self.path
)
def devices_get_interface_by_ip_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'adminStatus': 'string', 'className': 'string', 'description': 'string', 'deviceId': 'string', 'duplex': 'string', 'id': 'string', 'ifIndex': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'interfaceType': 'string', 'ipv4Address': 'string', 'ipv4Mask': 'string', 'isisSupport': 'string', 'lastUpdated': 'string', 'macAddress': 'string', 'mappedPhysicalInterfaceId': 'string', 'mappedPhysicalInterfaceName': 'string', 'mediaType': 'string', 'nativeVlanId': 'string', 'ospfSupport': 'string', 'pid': 'string', 'portMode': 'string', 'portName': 'string', 'portType': 'string', 'serialNo': 'string', 'series': 'string', 'speed': 'string', 'status': 'string', 'vlanId': 'string', 'voiceVlan': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_84ad8b0e42cab48a(self):
return re.search(
self.DEVICES_84ad8b0e42cab48a_PATTERN,
self.path
)
def devices_get_isis_interfaces_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'adminStatus': 'string', 'className': 'string', 'description': 'string', 'deviceId': 'string', 'duplex': 'string', 'id': 'string', 'ifIndex': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'interfaceType': 'string', 'ipv4Address': 'string', 'ipv4Mask': 'string', 'isisSupport': 'string', 'lastUpdated': 'string', 'macAddress': 'string', 'mappedPhysicalInterfaceId': 'string', 'mappedPhysicalInterfaceName': 'string', 'mediaType': 'string', 'nativeVlanId': 'string', 'ospfSupport': 'string', 'pid': 'string', 'portMode': 'string', 'portName': 'string', 'portType': 'string', 'serialNo': 'string', 'series': 'string', 'speed': 'string', 'status': 'string', 'vlanId': 'string', 'voiceVlan': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_ba9dc85b4b8a9a17(self):
return re.search(
self.DEVICES_ba9dc85b4b8a9a17_PATTERN,
self.path
)
def devices_get_interface_info_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'adminStatus': 'string', 'className': 'string', 'description': 'string', 'deviceId': 'string', 'duplex': 'string', 'id': 'string', 'ifIndex': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'interfaceType': 'string', 'ipv4Address': 'string', 'ipv4Mask': 'string', 'isisSupport': 'string', 'lastUpdated': 'string', 'macAddress': 'string', 'mappedPhysicalInterfaceId': 'string', 'mappedPhysicalInterfaceName': 'string', 'mediaType': 'string', 'nativeVlanId': 'string', 'ospfSupport': 'string', 'pid': 'string', 'portMode': 'string', 'portName': 'string', 'portType': 'string', 'serialNo': 'string', 'series': 'string', 'speed': 'string', 'status': 'string', 'vlanId': 'string', 'voiceVlan': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_349c888443b89a58(self):
return re.search(
self.DEVICES_349c888443b89a58_PATTERN,
self.path
)
def devices_get_device_interfaces_by_specified_range_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'adminStatus': 'string', 'className': 'string', 'description': 'string', 'deviceId': 'string', 'duplex': 'string', 'id': 'string', 'ifIndex': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'interfaceType': 'string', 'ipv4Address': 'string', 'ipv4Mask': 'string', 'isisSupport': 'string', 'lastUpdated': 'string', 'macAddress': 'string', 'mappedPhysicalInterfaceId': 'string', 'mappedPhysicalInterfaceName': 'string', 'mediaType': 'string', 'nativeVlanId': 'string', 'ospfSupport': 'string', 'pid': 'string', 'portMode': 'string', 'portName': 'string', 'portType': 'string', 'serialNo': 'string', 'series': 'string', 'speed': 'string', 'status': 'string', 'vlanId': 'string', 'voiceVlan': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_5b8639224cd88ea7(self):
return re.search(
self.DEVICES_5b8639224cd88ea7_PATTERN,
self.path
)
def devices_get_device_interface_count_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': 0, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_4eb56a614cc9a2d2(self):
return re.search(
self.DEVICES_4eb56a614cc9a2d2_PATTERN,
self.path
)
def devices_get_interface_details_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'adminStatus': 'string', 'className': 'string', 'description': 'string', 'deviceId': 'string', 'duplex': 'string', 'id': 'string', 'ifIndex': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'interfaceType': 'string', 'ipv4Address': 'string', 'ipv4Mask': 'string', 'isisSupport': 'string', 'lastUpdated': 'string', 'macAddress': 'string', 'mappedPhysicalInterfaceId': 'string', 'mappedPhysicalInterfaceName': 'string', 'mediaType': 'string', 'nativeVlanId': 'string', 'ospfSupport': 'string', 'pid': 'string', 'portMode': 'string', 'portName': 'string', 'portType': 'string', 'serialNo': 'string', 'series': 'string', 'speed': 'string', 'status': 'string', 'vlanId': 'string', 'voiceVlan': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_70ad397649e9b4d3(self):
return re.search(
self.DEVICES_70ad397649e9b4d3_PATTERN,
self.path
)
def devices_get_ospf_interfaces_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'adminStatus': 'string', 'className': 'string', 'description': 'string', 'deviceId': 'string', 'duplex': 'string', 'id': 'string', 'ifIndex': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'interfaceType': 'string', 'ipv4Address': 'string', 'ipv4Mask': 'string', 'isisSupport': 'string', 'lastUpdated': 'string', 'macAddress': 'string', 'mappedPhysicalInterfaceId': 'string', 'mappedPhysicalInterfaceName': 'string', 'mediaType': 'string', 'nativeVlanId': 'string', 'ospfSupport': 'string', 'pid': 'string', 'portMode': 'string', 'portName': 'string', 'portType': 'string', 'serialNo': 'string', 'series': 'string', 'speed': 'string', 'status': 'string', 'vlanId': 'string', 'voiceVlan': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_20b19b52464b8972(self):
return re.search(
self.DEVICES_20b19b52464b8972_PATTERN,
self.path
)
def devices_get_device_list_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'apManagerInterfaceIp': 'string', 'associatedWlcIp': 'string', 'bootDateTime': 'string', 'collectionInterval': 'string', 'collectionStatus': 'string', 'errorCode': 'string', 'errorDescription': 'string', 'family': 'string', 'hostname': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'interfaceCount': 'string', 'inventoryStatusDetail': 'string', 'lastUpdateTime': 'string', 'lastUpdated': 'string', 'lineCardCount': 'string', 'lineCardId': 'string', 'location': 'string', 'locationName': 'string', 'macAddress': 'string', 'managementIpAddress': 'string', 'memorySize': 'string', 'platformId': 'string', 'reachabilityFailureReason': 'string', 'reachabilityStatus': 'string', 'role': 'string', 'roleSource': 'string', 'serialNumber': 'string', 'series': 'string', 'snmpContact': 'string', 'snmpLocation': 'string', 'softwareType': 'string', 'softwareVersion': 'string', 'tagCount': 'string', 'tunnelUdpPort': 'string', 'type': 'string', 'upTime': 'string', 'waasDeviceMode': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
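# Usage sketch (assumption: elsewhere in this module the handler class is wired
# into an http.server.HTTPServer whose do_GET/do_POST dispatch on the matches_*
# helpers): a test request whose path matches DEVICES_20b19b52464b8972_PATTERN
# would be answered by devices_get_device_list_response above with the canned
# device-list payload.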
def matches_DEVICES_4bb22af046fa8f08(self):
return re.search(
self.DEVICES_4bb22af046fa8f08_PATTERN,
self.path
)
def devices_add_device_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_aeb9eb67460b92df(self):
return re.search(
self.DEVICES_aeb9eb67460b92df_PATTERN,
self.path
)
def devices_sync_devices_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_1c894b5848eab214(self):
return re.search(
self.DEVICES_1c894b5848eab214_PATTERN,
self.path
)
def devices_delete_device_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_8fa8eb404a4a8d96(self):
return re.search(
self.DEVICES_8fa8eb404a4a8d96_PATTERN,
self.path
)
def devices_get_device_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'apManagerInterfaceIp': 'string', 'associatedWlcIp': 'string', 'bootDateTime': 'string', 'collectionInterval': 'string', 'collectionStatus': 'string', 'errorCode': 'string', 'errorDescription': 'string', 'family': 'string', 'hostname': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'interfaceCount': 'string', 'inventoryStatusDetail': 'string', 'lastUpdateTime': 'string', 'lastUpdated': 'string', 'lineCardCount': 'string', 'lineCardId': 'string', 'location': 'string', 'locationName': 'string', 'macAddress': 'string', 'managementIpAddress': 'string', 'memorySize': 'string', 'platformId': 'string', 'reachabilityFailureReason': 'string', 'reachabilityStatus': 'string', 'role': 'string', 'roleSource': 'string', 'serialNumber': 'string', 'series': 'string', 'snmpContact': 'string', 'snmpLocation': 'string', 'softwareType': 'string', 'softwareVersion': 'string', 'tagCount': 'string', 'tunnelUdpPort': 'string', 'type': 'string', 'upTime': 'string', 'waasDeviceMode': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_819f9aa54feab7bf(self):
return re.search(
self.DEVICES_819f9aa54feab7bf_PATTERN,
self.path
)
def devices_get_device_summary_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'id': 'string', 'role': 'string', 'roleSource': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_82918a1b4d289c5c(self):
return re.search(
self.DEVICES_82918a1b4d289c5c_PATTERN,
self.path
)
def devices_get_polling_interval_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': 0, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_84b37ae54c59ab28(self):
return re.search(
self.DEVICES_84b37ae54c59ab28_PATTERN,
self.path
)
def devices_get_organization_list_for_meraki_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': ['string'], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_288df9494f2a9746(self):
return re.search(
self.DEVICES_288df9494f2a9746_PATTERN,
self.path
)
def devices_get_device_interface_vlans_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'interfaceName': 'string', 'ipAddress': 'string', 'mask': 0, 'networkAddress': 'string', 'numberOfIPs': 0, 'prefix': 'string', 'vlanNumber': 0, 'vlanType': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_f6826a8e41bba242(self):
return re.search(
self.DEVICES_f6826a8e41bba242_PATTERN,
self.path
)
def devices_get_wireless_lan_controller_details_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'adminEnabledPorts': [0], 'apGroupName': 'string', 'deviceId': 'string', 'ethMacAddress': 'string', 'flexGroupName': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'lagModeEnabled': True, 'netconfEnabled': True, 'wirelessLicenseInfo': 'ADVANTAGE', 'wirelessPackageInstalled': True}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_84b33a9e480abcaf(self):
return re.search(
self.DEVICES_84b33a9e480abcaf_PATTERN,
self.path
)
def devices_get_device_config_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': 'string', 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_f49548c54be8a3e2(self):
return re.search(
self.DEVICES_f49548c54be8a3e2_PATTERN,
self.path
)
def devices_get_network_device_by_pagination_range_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'apManagerInterfaceIp': 'string', 'associatedWlcIp': 'string', 'bootDateTime': 'string', 'collectionInterval': 'string', 'collectionStatus': 'string', 'errorCode': 'string', 'errorDescription': 'string', 'family': 'string', 'hostname': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'interfaceCount': 'string', 'inventoryStatusDetail': 'string', 'lastUpdateTime': 'string', 'lastUpdated': 'string', 'lineCardCount': 'string', 'lineCardId': 'string', 'location': 'string', 'locationName': 'string', 'macAddress': 'string', 'managementIpAddress': 'string', 'memorySize': 'string', 'platformId': 'string', 'reachabilityFailureReason': 'string', 'reachabilityStatus': 'string', 'role': 'string', 'roleSource': 'string', 'serialNumber': 'string', 'series': 'string', 'snmpContact': 'string', 'snmpLocation': 'string', 'softwareType': 'string', 'softwareVersion': 'string', 'tagCount': 'string', 'tunnelUdpPort': 'string', 'type': 'string', 'upTime': 'string', 'waasDeviceMode': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_ffa748cc44e9a437(self):
return re.search(
self.DEVICES_ffa748cc44e9a437_PATTERN,
self.path
)
def devices_retrieves_all_network_devices_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_b9855ad54ae98156(self):
return re.search(
self.DEVICES_b9855ad54ae98156_PATTERN,
self.path
)
def devices_update_device_role_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_38bd0b884b89a785(self):
return re.search(
self.DEVICES_38bd0b884b89a785_PATTERN,
self.path
)
def devices_get_polling_interval_for_all_devices_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': 0, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_b7bcaa084e2b90d0(self):
return re.search(
self.DEVICES_b7bcaa084e2b90d0_PATTERN,
self.path
)
def devices_get_device_config_for_all_devices_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'attributeInfo': {}, 'cdpNeighbors': 'string', 'healthMonitor': 'string', 'id': 'string', 'intfDescription': 'string', 'inventory': 'string', 'ipIntfBrief': 'string', 'macAddressTable': 'string', 'runningConfig': 'string', 'snmp': 'string', 'version': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_888f585c49b88441(self):
return re.search(
self.DEVICES_888f585c49b88441_PATTERN,
self.path
)
def devices_get_device_config_count_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': 0, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_5db21b8e43fab7d8(self):
return re.search(
self.DEVICES_5db21b8e43fab7d8_PATTERN,
self.path
)
def devices_get_device_count_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': 0, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_cd98780f4888a66d(self):
return re.search(
self.DEVICES_cd98780f4888a66d_PATTERN,
self.path
)
def devices_export_device_list_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_c3b3c9ef4e6b8a09(self):
return re.search(
self.DEVICES_c3b3c9ef4e6b8a09_PATTERN,
self.path
)
def devices_get_functional_capability_for_devices_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'attributeInfo': {}, 'deviceId': 'string', 'functionalCapability': [{'attributeInfo': {}, 'functionDetails': [{'attributeInfo': {}, 'id': 'string', 'propertyName': 'string', 'stringValue': 'string'}], 'functionName': 'string', 'functionOpState': 'UNKNOWN', 'id': 'string'}], 'id': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_81bb4804405a8d2f(self):
return re.search(
self.DEVICES_81bb4804405a8d2f_PATTERN,
self.path
)
def devices_get_functional_capability_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'attributeInfo': {}, 'functionDetails': [{'attributeInfo': {}, 'id': 'string', 'propertyName': 'string', 'stringValue': 'string'}], 'functionName': 'string', 'functionOpState': 'UNKNOWN', 'id': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_d0a4b88145aabb51(self):
return re.search(
self.DEVICES_d0a4b88145aabb51_PATTERN,
self.path
)
def devices_get_network_device_by_ip_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'apManagerInterfaceIp': 'string', 'associatedWlcIp': 'string', 'bootDateTime': 'string', 'collectionInterval': 'string', 'collectionStatus': 'string', 'errorCode': 'string', 'errorDescription': 'string', 'family': 'string', 'hostname': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'interfaceCount': 'string', 'inventoryStatusDetail': 'string', 'lastUpdateTime': 'string', 'lastUpdated': 'string', 'lineCardCount': 'string', 'lineCardId': 'string', 'location': 'string', 'locationName': 'string', 'macAddress': 'string', 'managementIpAddress': 'string', 'memorySize': 'string', 'platformId': 'string', 'reachabilityFailureReason': 'string', 'reachabilityStatus': 'string', 'role': 'string', 'roleSource': 'string', 'serialNumber': 'string', 'series': 'string', 'snmpContact': 'string', 'snmpLocation': 'string', 'softwareType': 'string', 'softwareVersion': 'string', 'tagCount': 'string', 'tunnelUdpPort': 'string', 'type': 'string', 'upTime': 'string', 'waasDeviceMode': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_eb8249e34f69b0f1(self):
return re.search(
self.DEVICES_eb8249e34f69b0f1_PATTERN,
self.path
)
def devices_get_modules_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'assemblyNumber': 'string', 'assemblyRevision': 'string', 'attributeInfo': {}, 'containmentEntity': 'string', 'description': 'string', 'entityPhysicalIndex': 'string', 'id': 'string', 'isFieldReplaceable': 'UNKNOWN', 'isReportingAlarmsAllowed': 'UNKNOWN', 'manufacturer': 'string', 'moduleIndex': 0, 'name': 'string', 'operationalStateCode': 'string', 'partNumber': 'string', 'serialNumber': 'string', 'vendorEquipmentType': 'string'}], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_0db7da744c0b83d8(self):
return re.search(
self.DEVICES_0db7da744c0b83d8_PATTERN,
self.path
)
def devices_get_module_info_by_id_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'assemblyNumber': 'string', 'assemblyRevision': 'string', 'attributeInfo': {}, 'containmentEntity': 'string', 'description': 'string', 'entityPhysicalIndex': 'string', 'id': 'string', 'isFieldReplaceable': 'UNKNOWN', 'isReportingAlarmsAllowed': 'UNKNOWN', 'manufacturer': 'string', 'moduleIndex': 0, 'name': 'string', 'operationalStateCode': 'string', 'partNumber': 'string', 'serialNumber': 'string', 'vendorEquipmentType': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_8db939744649a782(self):
return re.search(
self.DEVICES_8db939744649a782_PATTERN,
self.path
)
def devices_get_module_count_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': 0, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_d888ab6d4d59a8c1(self):
return re.search(
self.DEVICES_d888ab6d4d59a8c1_PATTERN,
self.path
)
def devices_get_device_by_serial_number_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'apManagerInterfaceIp': 'string', 'associatedWlcIp': 'string', 'bootDateTime': 'string', 'collectionInterval': 'string', 'collectionStatus': 'string', 'errorCode': 'string', 'errorDescription': 'string', 'family': 'string', 'hostname': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'interfaceCount': 'string', 'inventoryStatusDetail': 'string', 'lastUpdateTime': 'string', 'lastUpdated': 'string', 'lineCardCount': 'string', 'lineCardId': 'string', 'location': 'string', 'locationName': 'string', 'macAddress': 'string', 'managementIpAddress': 'string', 'memorySize': 'string', 'platformId': 'string', 'reachabilityFailureReason': 'string', 'reachabilityStatus': 'string', 'role': 'string', 'roleSource': 'string', 'serialNumber': 'string', 'series': 'string', 'snmpContact': 'string', 'snmpLocation': 'string', 'softwareType': 'string', 'softwareVersion': 'string', 'tagCount': 'string', 'tunnelUdpPort': 'string', 'type': 'string', 'upTime': 'string', 'waasDeviceMode': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_3b9ef9674429be4c(self):
return re.search(
self.DEVICES_3b9ef9674429be4c_PATTERN,
self.path
)
def devices_sync_devices_using_forcesync_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'taskId': {}, 'url': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_DEVICES_c9809b6744f8a502(self):
return re.search(
self.DEVICES_c9809b6744f8a502_PATTERN,
self.path
)
def devices_register_device_for_wsa_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'macAddress': 'string', 'modelNumber': 'string', 'name': 'string', 'serialNumber': 'string', 'tenantId': 'string'}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_SITES_17a82ac94cf99ab0(self):
return re.search(
self.SITES_17a82ac94cf99ab0_PATTERN,
self.path
)
def sites_get_site_health_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'siteName': 'string', 'siteId': 'string', 'parentSiteId': 'string', 'parentSiteName': 'string', 'siteType': 'string', 'latitude': {}, 'longitude': {}, 'healthyNetworkDevicePercentage': 'string', 'healthyClientsPercentage': 'string', 'clientHealthWired': 'string', 'clientHealthWireless': {}, 'numberOfClients': 'string', 'clientNumberOfIssues': {}, 'networkNumberOfIssues': {}, 'numberOfNetworkDevice': 'string', 'networkHealthAverage': {}, 'networkHealthAccess': 'string', 'networkHealthCore': 'string', 'networkHealthDistribution': 'string', 'networkHealthRouter': 'string', 'networkHealthWireless': {}, 'networkHealthOthers': {}, 'numberOfWiredClients': 'string', 'numberOfWirelessClients': {}, 'wiredGoodClients': 'string', 'wirelessGoodClients': {}, 'clientIssueCount': {}, 'overallGoodDevices': 'string', 'accessGoodCount': 'string', 'accessTotalCount': 'string', 'coreGoodCount': 'string', 'coreTotalCount': 'string', 'distributionGoodCount': 'string', 'distributionTotalCount': 'string', 'routerGoodCount': 'string', 'routerTotalCount': 'string', 'wirelessDeviceGoodCount': 'string', 'wirelessDeviceTotalCount': 'string', 'applicationHealth': {}, 'applicationGoodCount': {}, 'applicationTotalCount': {}, 'applicationBytesTotalCount': {}}]})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_SITES_eeb168eb41988e07(self):
return re.search(
self.SITES_eeb168eb41988e07_PATTERN,
self.path
)
def sites_assign_device_to_site_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'executionId': 'string', 'executionStatusUrl': 'string', 'message': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_SITES_50b589fd4c7a930a(self):
return re.search(
self.SITES_50b589fd4c7a930a_PATTERN,
self.path
)
def sites_create_site_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'executionId': 'string', 'executionStatusUrl': 'string', 'message': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORKS_ca91da84401abba1(self):
return re.search(
self.NETWORKS_ca91da84401abba1_PATTERN,
self.path
)
def networks_get_overall_network_health_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'version': 'string', 'response': [{'time': 'string', 'healthScore': 0, 'totalCount': 0, 'goodCount': 0, 'unmonCount': 0, 'fairCount': 0, 'badCount': 0, 'entity': {}, 'timeinMillis': 0}], 'measuredBy': 'string', 'latestMeasuredByEntity': {}, 'latestHealthScore': 0, 'monitoredDevices': 0, 'monitoredHealthyDevices': 0, 'monitoredUnHealthyDevices': 0, 'unMonitoredDevices': 0, 'healthDistirubution': [{'category': 'string', 'totalCount': 0, 'healthScore': 0, 'goodPercentage': 0, 'badPercentage': 0, 'fairPercentage': 0, 'unmonPercentage': 0, 'goodCount': 0, 'badCount': 0, 'fairCount': 0, 'unmonCount': 0, 'kpiMetrics': [{}]}]})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORKS_b9b48ac8463a8aba(self):
return re.search(
self.NETWORKS_b9b48ac8463a8aba_PATTERN,
self.path
)
def networks_get_topology_details_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'id': 'string', 'links': [{'additionalInfo': {}, 'endPortID': 'string', 'endPortIpv4Address': 'string', 'endPortIpv4Mask': 'string', 'endPortName': 'string', 'endPortSpeed': 'string', 'greyOut': True, 'id': 'string', 'linkStatus': 'string', 'source': 'string', 'startPortID': 'string', 'startPortIpv4Address': 'string', 'startPortIpv4Mask': 'string', 'startPortName': 'string', 'startPortSpeed': 'string', 'tag': 'string', 'target': 'string'}], 'nodes': [{'aclApplied': True, 'additionalInfo': {}, 'customParam': {'id': 'string', 'label': 'string', 'parentNodeId': 'string', 'x': 0, 'y': 0}, 'dataPathId': 'string', 'deviceType': 'string', 'family': 'string', 'fixed': True, 'greyOut': True, 'id': 'string', 'ip': 'string', 'label': 'string', 'networkType': 'string', 'nodeType': 'string', 'order': 0, 'osType': 'string', 'platformId': 'string', 'role': 'string', 'roleSource': 'string', 'softwareVersion': 'string', 'tags': ['string'], 'upperNode': 'string', 'userId': 'string', 'vlanId': 'string', 'x': 0, 'y': 0}]}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORKS_c2b5fb764d888375(self):
return re.search(
self.NETWORKS_c2b5fb764d888375_PATTERN,
self.path
)
def networks_get_l3_topology_details_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'id': 'string', 'links': [{'additionalInfo': {}, 'endPortID': 'string', 'endPortIpv4Address': 'string', 'endPortIpv4Mask': 'string', 'endPortName': 'string', 'endPortSpeed': 'string', 'greyOut': True, 'id': 'string', 'linkStatus': 'string', 'source': 'string', 'startPortID': 'string', 'startPortIpv4Address': 'string', 'startPortIpv4Mask': 'string', 'startPortName': 'string', 'startPortSpeed': 'string', 'tag': 'string', 'target': 'string'}], 'nodes': [{'aclApplied': True, 'additionalInfo': {}, 'customParam': {'id': 'string', 'label': 'string', 'parentNodeId': 'string', 'x': 0, 'y': 0}, 'dataPathId': 'string', 'deviceType': 'string', 'family': 'string', 'fixed': True, 'greyOut': True, 'id': 'string', 'ip': 'string', 'label': 'string', 'networkType': 'string', 'nodeType': 'string', 'order': 0, 'osType': 'string', 'platformId': 'string', 'role': 'string', 'roleSource': 'string', 'softwareVersion': 'string', 'tags': ['string'], 'upperNode': 'string', 'userId': 'string', 'vlanId': 'string', 'x': 0, 'y': 0}]}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORKS_b2b8cb91459aa58f(self):
return re.search(
self.NETWORKS_b2b8cb91459aa58f_PATTERN,
self.path
)
def networks_get_physical_topology_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'id': 'string', 'links': [{'additionalInfo': {}, 'endPortID': 'string', 'endPortIpv4Address': 'string', 'endPortIpv4Mask': 'string', 'endPortName': 'string', 'endPortSpeed': 'string', 'greyOut': True, 'id': 'string', 'linkStatus': 'string', 'source': 'string', 'startPortID': 'string', 'startPortIpv4Address': 'string', 'startPortIpv4Mask': 'string', 'startPortName': 'string', 'startPortSpeed': 'string', 'tag': 'string', 'target': 'string'}], 'nodes': [{'aclApplied': True, 'additionalInfo': {}, 'customParam': {'id': 'string', 'label': 'string', 'parentNodeId': 'string', 'x': 0, 'y': 0}, 'dataPathId': 'string', 'deviceType': 'string', 'family': 'string', 'fixed': True, 'greyOut': True, 'id': 'string', 'ip': 'string', 'label': 'string', 'networkType': 'string', 'nodeType': 'string', 'order': 0, 'osType': 'string', 'platformId': 'string', 'role': 'string', 'roleSource': 'string', 'softwareVersion': 'string', 'tags': ['string'], 'upperNode': 'string', 'userId': 'string', 'vlanId': 'string', 'x': 0, 'y': 0}]}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORKS_9ba14a9e441b8a60(self):
return re.search(
self.NETWORKS_9ba14a9e441b8a60_PATTERN,
self.path
)
def networks_get_site_topology_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': {'sites': [{'displayName': 'string', 'groupNameHierarchy': 'string', 'id': 'string', 'latitude': 'string', 'locationAddress': 'string', 'locationCountry': 'string', 'locationType': 'string', 'longitude': 'string', 'name': 'string', 'parentId': 'string'}]}, 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NETWORKS_6284db4649aa8d31(self):
return re.search(
self.NETWORKS_6284db4649aa8d31_PATTERN,
self.path
)
def networks_get_vlan_details_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': ['string'], 'version': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_CLIENTS_e2adba7943bab3e9(self):
return re.search(
self.CLIENTS_e2adba7943bab3e9_PATTERN,
self.path
)
def clients_get_client_detail_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'detail': {'id': 'string', 'connectionStatus': 'string', 'hostType': 'string', 'userId': {}, 'hostName': 'string', 'hostOs': {}, 'hostVersion': {}, 'subType': 'string', 'lastUpdated': 0, 'healthScore': [{'healthType': 'string', 'reason': 'string', 'score': 0}], 'hostMac': 'string', 'hostIpV4': 'string', 'hostIpV6': ['string'], 'authType': 'string', 'vlanId': 'string', 'vnid': 'string', 'ssid': 'string', 'frequency': 'string', 'channel': 'string', 'apGroup': {}, 'location': {}, 'clientConnection': 'string', 'connectedDevice': [{}], 'issueCount': 0, 'rssi': 'string', 'avgRssi': {}, 'snr': 'string', 'avgSnr': {}, 'dataRate': 'string', 'txBytes': 'string', 'rxBytes': 'string', 'dnsSuccess': {}, 'dnsFailure': {}, 'onboarding': {'averageRunDuration': {}, 'maxRunDuration': {}, 'averageAssocDuration': {}, 'maxAssocDuration': {}, 'averageAuthDuration': {}, 'maxAuthDuration': {}, 'averageDhcpDuration': {}, 'maxDhcpDuration': {}, 'aaaServerIp': 'string', 'dhcpServerIp': {}, 'authDoneTime': {}, 'assocDoneTime': {}, 'dhcpDoneTime': {}, 'assocRootcauseList': [{}], 'aaaRootcauseList': [{}], 'dhcpRootcauseList': [{}], 'otherRootcauseList': [{}]}, 'clientType': 'string', 'onboardingTime': {}, 'port': {}, 'iosCapable': True}, 'connectionInfo': {'hostType': 'string', 'nwDeviceName': 'string', 'nwDeviceMac': 'string', 'protocol': 'string', 'band': 'string', 'spatialStream': 'string', 'channel': 'string', 'channelWidth': 'string', 'wmm': 'string', 'uapsd': 'string', 'timestamp': 0}, 'topology': {'nodes': [{'role': 'string', 'name': 'string', 'id': 'string', 'description': 'string', 'deviceType': 'string', 'platformId': {}, 'family': {}, 'ip': 'string', 'softwareVersion': {}, 'userId': {}, 'nodeType': 'string', 'radioFrequency': {}, 'clients': {}, 'count': {}, 'healthScore': 0, 'level': 0, 'fabricGroup': {}, 'connectedDevice': {}}], 'links': [{'source': 'string', 'linkStatus': 'string', 'label': ['string'], 'target': 'string', 'id': {}, 'portUtilization': {}}]}})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_CLIENTS_149aa93b4ddb80dd(self):
return re.search(
self.CLIENTS_149aa93b4ddb80dd_PATTERN,
self.path
)
def clients_get_overall_client_health_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'response': [{'siteId': 'string', 'scoreDetail': [{'scoreCategory': {'scoreCategory': 'string', 'value': 'string'}, 'scoreValue': 'string', 'clientCount': 'string', 'clientUniqueCount': 'string', 'starttime': 'string', 'endtime': 'string', 'scoreList': ['string']}]}]})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NON_FABRIC_WIRELESS_db9f997f4e59aec1(self):
return re.search(
self.NON_FABRIC_WIRELESS_db9f997f4e59aec1_PATTERN,
self.path
)
def non_fabric_wireless_create_and_provision_ssid_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'executionId': 'string', 'executionStatusUrl': 'string', 'message': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NON_FABRIC_WIRELESS_cca098344a489dfa(self):
return re.search(
self.NON_FABRIC_WIRELESS_cca098344a489dfa_PATTERN,
self.path
)
def non_fabric_wireless_delete_and_provision_ssid_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'executionId': 'string', 'executionStatusUrl': 'string', 'message': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NON_FABRIC_WIRELESS_8a96fb954d09a349(self):
return re.search(
self.NON_FABRIC_WIRELESS_8a96fb954d09a349_PATTERN,
self.path
)
def non_fabric_wireless_create_enterprise_ssid_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'executionId': 'string', 'executionStatusUrl': 'string', 'message': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NON_FABRIC_WIRELESS_cca519ba45ebb423(self):
return re.search(
self.NON_FABRIC_WIRELESS_cca519ba45ebb423_PATTERN,
self.path
)
def non_fabric_wireless_get_enterprise_ssid_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps([{'instanceUuid': 'string', 'version': 0, 'ssidDetails': [{'name': 'string', 'wlanType': 'string', 'enableFastLane': True, 'securityLevel': 'string', 'authServer': 'string', 'passphrase': 'string', 'trafficType': 'string', 'enableMACFiltering': True, 'isEnabled': True, 'isFabric': True, 'fastTransition': 'string', 'radioPolicy': 'string', 'enableBroadcastSSID': True}], 'groupUuid': 'string', 'inheritedGroupUuid': 'string', 'inheritedGroupName': 'string'}])
self.wfile.write(response_content.encode('utf-8'))
return
def matches_NON_FABRIC_WIRELESS_c7a6592b4b98a369(self):
return re.search(
self.NON_FABRIC_WIRELESS_c7a6592b4b98a369_PATTERN,
self.path
)
def non_fabric_wireless_delete_enterprise_ssid_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'executionId': 'string', 'executionStatusUrl': 'string', 'message': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_FABRIC_WIRED_bead7b3443b996a7(self):
return re.search(
self.FABRIC_WIRED_bead7b3443b996a7_PATTERN,
self.path
)
def fabric_wired_adds_border_device_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'status': 'string', 'description': 'string', 'executionStatusUrl': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_FABRIC_WIRED_98a39bf4485a9871(self):
return re.search(
self.FABRIC_WIRED_98a39bf4485a9871_PATTERN,
self.path
)
def fabric_wired_gets_border_device_detail_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'status': 'string', 'description': 'string', 'payload': {'id': 'string', 'instanceId': 0, 'authEntityId': 0, 'displayName': 'string', 'authEntityClass': 0, 'instanceTenantId': 'string', 'deployPending': 'string', 'instanceVersion': 0, 'createTime': 0, 'deployed': True, 'isSeeded': True, 'isStale': True, 'lastUpdateTime': 0, 'name': 'string', 'namespace': 'string', 'provisioningState': 'string', 'resourceVersion': 0, 'targetIdList': [{}], 'type': 'string', 'cfsChangeInfo': [{}], 'customProvisions': [{}], 'configs': [{}], 'managedSites': [{}], 'networkDeviceId': 'string', 'roles': ['string'], 'saveWanConnectivityDetailsOnly': True, 'siteId': 'string', 'akcSettingsCfs': [{}], 'deviceInterfaceInfo': [{}], 'deviceSettings': {'id': 'string', 'instanceId': 0, 'displayName': 'string', 'instanceTenantId': 'string', 'deployPending': 'string', 'instanceVersion': 0, 'connectedTo': [{}], 'cpu': 0, 'dhcpEnabled': True, 'externalConnectivityIpPool': 'string', 'externalDomainRoutingProtocol': 'string', 'internalDomainProtocolNumber': 'string', 'memory': 0, 'nodeType': ['string'], 'storage': 0, 'extConnectivitySettings': [{'id': 'string', 'instanceId': 0, 'displayName': 'string', 'instanceTenantId': 'string', 'deployPending': 'string', 'instanceVersion': 0, 'externalDomainProtocolNumber': 'string', 'interfaceUuid': 'string', 'policyPropagationEnabled': True, 'policySgtTag': 0, 'l2Handoff': [{}], 'l3Handoff': [{'id': 'string', 'instanceId': 0, 'displayName': 'string', 'instanceTenantId': 'string', 'deployPending': 'string', 'instanceVersion': 0, 'localIpAddress': 'string', 'remoteIpAddress': 'string', 'vlanId': 0, 'virtualNetwork': {'idRef': 'string'}}]}]}, 'networkWideSettings': {'id': 'string', 'instanceId': 0, 'displayName': 'string', 'instanceTenantId': 'string', 'deployPending': 'string', 'instanceVersion': 0, 'aaa': [{}], 'cmx': [{}], 'dhcp': [{'id': 'string', 'ipAddress': {'id': 'string', 'paddedAddress': 'string', 'addressType': 'string', 'address': 'string'}}], 'dns': [{'id': 'string', 'domainName': 'string', 'ip': {'id': 'string', 'paddedAddress': 'string', 'addressType': 'string', 'address': 'string'}}], 'ldap': [{}], 'nativeVlan': [{}], 'netflow': [{}], 'ntp': [{}], 'snmp': [{}], 'syslogs': [{}]}, 'otherDevice': [{}], 'transitNetworks': [{'idRef': 'string'}], 'virtualNetwork': [{}], 'wlan': [{}]}})
self.wfile.write(response_content.encode('utf-8'))
return
def matches_FABRIC_WIRED_cb81b93540baaab0(self):
return re.search(
self.FABRIC_WIRED_cb81b93540baaab0_PATTERN,
self.path
)
def fabric_wired_deletes_border_device_response(self):
# Add response status code.
self.send_response(requests.codes.ok)
# Add response headers.
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# Add response content.
response_content = json.dumps({'status': 'string', 'description': 'string', 'executionStatusUrl': 'string'})
self.wfile.write(response_content.encode('utf-8'))
return
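    # The do_* handlers below route each request to the first mock
    # endpoint whose URL pattern matches self.path; unmatched requests
    # fall through and return without sending a response.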
def do_GET(self):
if self.matches_TEMPLATE_PROGRAMMER_109d1b4f4289aecd():
self.template_programmer_get_projects_response()
return
if self.matches_TEMPLATE_PROGRAMMER_01b09a254b9ab259():
self.template_programmer_gets_the_templates_available_response()
return
if self.matches_TEMPLATE_PROGRAMMER_83a3b9404cb88787():
self.template_programmer_get_template_details_response()
return
if self.matches_TEMPLATE_PROGRAMMER_9c9a785741cbb41f():
self.template_programmer_get_template_deployment_status_response()
return
if self.matches_TEMPLATE_PROGRAMMER_c8bf6b65414a9bc7():
self.template_programmer_get_template_versions_response()
return
if self.matches_TAG_ee9aab01487a8896():
self.tag_get_tag_response()
return
if self.matches_TAG_c1a359b14c89b573():
self.tag_get_tag_by_id_response()
return
if self.matches_TAG_eab7abe048fb99ad():
self.tag_get_tag_members_by_id_response()
return
if self.matches_TAG_2e9db85840fbb1cf():
self.tag_get_tag_member_count_response()
return
if self.matches_TAG_8091a9b84bfba53b():
self.tag_get_tag_count_response()
return
if self.matches_TAG_4695090d403b8eaa():
self.tag_get_tag_resource_types_response()
return
if self.matches_NETWORK_DISCOVERY_63bb88b74f59aa17():
self.network_discovery_get_discovery_by_id_response()
return
if self.matches_NETWORK_DISCOVERY_99872a134d0a9fb4():
self.network_discovery_get_list_of_discoveries_by_discovery_id_response()
return
if self.matches_NETWORK_DISCOVERY_f6ac994f451ba011():
self.network_discovery_get_discovered_network_devices_by_discovery_id_response()
return
if self.matches_NETWORK_DISCOVERY_a6b798ab4acaa34e():
self.network_discovery_get_discovered_devices_by_range_response()
return
if self.matches_NETWORK_DISCOVERY_a6965b454c9a8663():
self.network_discovery_get_devices_discovered_by_id_response()
return
if self.matches_NETWORK_DISCOVERY_3d9b99c343398a27():
self.network_discovery_get_network_devices_from_discovery_response()
return
if self.matches_NETWORK_DISCOVERY_33b799d04d0a8907():
self.network_discovery_get_discoveries_by_range_response()
return
if self.matches_NETWORK_DISCOVERY_069d9823451b892d():
self.network_discovery_get_count_of_all_discovery_jobs_response()
return
if self.matches_NETWORK_DISCOVERY_a4967be64dfaaa1a():
self.network_discovery_get_discovery_jobs_by_ip_response()
return
if self.matches_NETWORK_DISCOVERY_ff816b8e435897eb():
self.network_discovery_get_global_credentials_response()
return
if self.matches_NETWORK_DISCOVERY_58a3699e489b9529():
self.network_discovery_get_credential_sub_type_by_credential_id_response()
return
if self.matches_NETWORK_DISCOVERY_44974ba5435a801d():
self.network_discovery_get_snmp_properties_response()
return
if self.matches_TASK_e78bb8a2449b9eed():
self.task_get_tasks_response()
return
if self.matches_TASK_a1a9387346ba92b1():
self.task_get_task_by_id_response()
return
if self.matches_TASK_f5a269c44f2a95fa():
self.task_get_task_tree_response()
return
if self.matches_TASK_26b44ab04649a183():
self.task_get_task_count_response()
return
if self.matches_TASK_e487f8d3481b94f2():
self.task_get_task_by_operationid_response()
return
if self.matches_COMMAND_RUNNER_33bb2b9d40199e14():
self.command_runner_get_all_keywords_of_clis_accepted_response()
return
if self.matches_FILE_9698c8ec4a0b8c1a():
self.file_download_a_file_by_fileid_response()
return
if self.matches_FILE_3f89bbfc4f6b8b50():
self.file_get_list_of_available_namespaces_response()
return
if self.matches_FILE_42b6a86e44b8bdfc():
self.file_get_list_of_files_response()
return
if self.matches_PATH_TRACE_55bc3bf94e38b6ff():
self.path_trace_retrives_all_previous_pathtraces_summary_response()
return
if self.matches_PATH_TRACE_7ab9a8bd4f3b86a4():
self.path_trace_retrieves_previous_pathtrace_response()
return
if self.matches_SWIM_0c8f7a0b49b9aedd():
self.swim_get_software_image_details_response()
return
if self.matches_PNP_e6b3db8046c99654():
self.pnp_get_device_list_response()
return
if self.matches_PNP_bab6c9e5440885cc():
self.pnp_get_device_by_id_response()
return
if self.matches_PNP_d9a1fa9c4068b23c():
self.pnp_get_device_count_response()
return
if self.matches_PNP_f09319674049a7d4():
self.pnp_get_device_history_response()
return
if self.matches_PNP_0a9c988445cb91c8():
self.pnp_get_sync_result_for_virtual_account_response()
return
if self.matches_PNP_7e92f9eb46db8320():
self.pnp_get_pnp_global_settings_response()
return
if self.matches_PNP_3cb24acb486b89d2():
self.pnp_get_smart_account_list_response()
return
if self.matches_PNP_70a479a6462a9496():
self.pnp_get_virtual_account_list_response()
return
if self.matches_PNP_aeb4dad04a99bbe3():
self.pnp_get_workflows_response()
return
if self.matches_PNP_80acb88e4ac9ac6d():
self.pnp_get_workflow_by_id_response()
return
if self.matches_PNP_7989f86846faaf99():
self.pnp_get_workflow_count_response()
return
if self.matches_SITE_PROFILE_7fbe4b804879baa4():
self.site_profile_get_device_details_by_ip_response()
return
if self.matches_DEVICES_89b2fb144f5bb09b():
self.devices_get_device_detail_response()
return
if self.matches_DEVICES_f5947a4c439a8bf0():
self.devices_get_all_interfaces_response()
return
if self.matches_DEVICES_b888792d43baba46():
self.devices_get_interface_by_id_response()
return
if self.matches_DEVICES_3d923b184dc9a4ca():
self.devices_get_device_interface_count_response()
return
if self.matches_DEVICES_cd8469e647caab0e():
self.devices_get_interface_by_ip_response()
return
if self.matches_DEVICES_84ad8b0e42cab48a():
self.devices_get_isis_interfaces_response()
return
if self.matches_DEVICES_ba9dc85b4b8a9a17():
self.devices_get_interface_info_by_id_response()
return
if self.matches_DEVICES_349c888443b89a58():
self.devices_get_device_interfaces_by_specified_range_response()
return
if self.matches_DEVICES_5b8639224cd88ea7():
self.devices_get_device_interface_count_by_id_response()
return
if self.matches_DEVICES_4eb56a614cc9a2d2():
self.devices_get_interface_details_response()
return
if self.matches_DEVICES_70ad397649e9b4d3():
self.devices_get_ospf_interfaces_response()
return
if self.matches_DEVICES_20b19b52464b8972():
self.devices_get_device_list_response()
return
if self.matches_DEVICES_8fa8eb404a4a8d96():
self.devices_get_device_by_id_response()
return
if self.matches_DEVICES_819f9aa54feab7bf():
self.devices_get_device_summary_response()
return
if self.matches_DEVICES_82918a1b4d289c5c():
self.devices_get_polling_interval_by_id_response()
return
if self.matches_DEVICES_84b37ae54c59ab28():
self.devices_get_organization_list_for_meraki_response()
return
if self.matches_DEVICES_288df9494f2a9746():
self.devices_get_device_interface_vlans_response()
return
if self.matches_DEVICES_f6826a8e41bba242():
self.devices_get_wireless_lan_controller_details_by_id_response()
return
if self.matches_DEVICES_84b33a9e480abcaf():
self.devices_get_device_config_by_id_response()
return
if self.matches_DEVICES_f49548c54be8a3e2():
self.devices_get_network_device_by_pagination_range_response()
return
if self.matches_DEVICES_ffa748cc44e9a437():
self.devices_retrieves_all_network_devices_response()
return
if self.matches_DEVICES_38bd0b884b89a785():
self.devices_get_polling_interval_for_all_devices_response()
return
if self.matches_DEVICES_b7bcaa084e2b90d0():
self.devices_get_device_config_for_all_devices_response()
return
if self.matches_DEVICES_888f585c49b88441():
self.devices_get_device_config_count_response()
return
if self.matches_DEVICES_5db21b8e43fab7d8():
self.devices_get_device_count_response()
return
if self.matches_DEVICES_c3b3c9ef4e6b8a09():
self.devices_get_functional_capability_for_devices_response()
return
if self.matches_DEVICES_81bb4804405a8d2f():
self.devices_get_functional_capability_by_id_response()
return
if self.matches_DEVICES_d0a4b88145aabb51():
self.devices_get_network_device_by_ip_response()
return
if self.matches_DEVICES_eb8249e34f69b0f1():
self.devices_get_modules_response()
return
if self.matches_DEVICES_0db7da744c0b83d8():
self.devices_get_module_info_by_id_response()
return
if self.matches_DEVICES_8db939744649a782():
self.devices_get_module_count_response()
return
if self.matches_DEVICES_d888ab6d4d59a8c1():
self.devices_get_device_by_serial_number_response()
return
if self.matches_DEVICES_c9809b6744f8a502():
self.devices_register_device_for_wsa_response()
return
if self.matches_SITES_17a82ac94cf99ab0():
self.sites_get_site_health_response()
return
if self.matches_NETWORKS_ca91da84401abba1():
self.networks_get_overall_network_health_response()
return
if self.matches_NETWORKS_b9b48ac8463a8aba():
self.networks_get_topology_details_response()
return
if self.matches_NETWORKS_c2b5fb764d888375():
self.networks_get_l3_topology_details_response()
return
if self.matches_NETWORKS_b2b8cb91459aa58f():
self.networks_get_physical_topology_response()
return
if self.matches_NETWORKS_9ba14a9e441b8a60():
self.networks_get_site_topology_response()
return
if self.matches_NETWORKS_6284db4649aa8d31():
self.networks_get_vlan_details_response()
return
if self.matches_CLIENTS_e2adba7943bab3e9():
self.clients_get_client_detail_response()
return
if self.matches_CLIENTS_149aa93b4ddb80dd():
self.clients_get_overall_client_health_response()
return
if self.matches_NON_FABRIC_WIRELESS_cca519ba45ebb423():
self.non_fabric_wireless_get_enterprise_ssid_response()
return
if self.matches_FABRIC_WIRED_98a39bf4485a9871():
self.fabric_wired_gets_border_device_detail_response()
return
def do_POST(self):
if self.matches_AUTHENTICATION_ac8ae94c4e69a09d():
self.authentication_authentication_response()
return
if self.matches_TEMPLATE_PROGRAMMER_00aec9b1422ab27e():
self.template_programmer_create_project_response()
return
if self.matches_TEMPLATE_PROGRAMMER_f6b119ad4d4aaf16():
self.template_programmer_create_template_response()
return
if self.matches_TEMPLATE_PROGRAMMER_6099da82477b858a():
self.template_programmer_deploy_template_response()
return
if self.matches_TEMPLATE_PROGRAMMER_62b05b2c40a9b216():
self.template_programmer_version_template_response()
return
if self.matches_TAG_1399891c42a8be64():
self.tag_create_tag_response()
return
if self.matches_TAG_00a2fa6146089317():
self.tag_add_members_to_the_tag_response()
return
if self.matches_NETWORK_DISCOVERY_55b439dc4239b140():
self.network_discovery_start_discovery_response()
return
if self.matches_NETWORK_DISCOVERY_948ea8194348bc0b():
self.network_discovery_create_cli_credentials_response()
return
if self.matches_NETWORK_DISCOVERY_bf859ac64a0ba19c():
self.network_discovery_create_http_read_credentials_response()
return
if self.matches_NETWORK_DISCOVERY_4d9ca8e2431a8a24():
self.network_discovery_create_http_write_credentials_response()
return
if self.matches_NETWORK_DISCOVERY_17929bc7465bb564():
self.network_discovery_create_netconf_credentials_response()
return
if self.matches_NETWORK_DISCOVERY_7aa3da9d4e098ef2():
self.network_discovery_create_snmp_read_community_response()
return
if self.matches_NETWORK_DISCOVERY_6bacb8d14639bdc7():
self.network_discovery_create_snmp_write_community_response()
return
if self.matches_NETWORK_DISCOVERY_979688084b7ba60d():
self.network_discovery_create_snmpv3_credentials_response()
return
if self.matches_NETWORK_DISCOVERY_a5ac99774c6bb541():
self.network_discovery_create_update_snmp_properties_response()
return
if self.matches_COMMAND_RUNNER_d6b8ca774739adf4():
self.command_runner_run_read_only_commands_on_devices_response()
return
if self.matches_PATH_TRACE_a395fae644ca899c():
self.path_trace_initiate_a_new_pathtrace_response()
return
if self.matches_SWIM_fb9beb664f2aba4c():
self.swim_trigger_software_image_activation_response()
return
if self.matches_SWIM_8cb6783b4faba1f4():
self.swim_trigger_software_image_distribution_response()
return
if self.matches_SWIM_4dbe3bc743a891bc():
self.swim_import_local_software_image_response()
return
if self.matches_SWIM_bc8aab4746ca883d():
self.swim_import_software_image_via_url_response()
return
if self.matches_PNP_f3b26b5544cabab9():
self.pnp_add_device_response()
return
if self.matches_PNP_d8a619974a8a8c48():
self.pnp_claim_device_response()
return
if self.matches_PNP_21a6db2540298f55():
self.pnp_import_devices_in_bulk_response()
return
if self.matches_PNP_9e857b5a4a0bbcdb():
self.pnp_reset_device_response()
return
if self.matches_PNP_5889fb844939a13b():
self.pnp_claim_a_device_to_a_site_response()
return
if self.matches_PNP_cf9418234d9ab37e():
self.pnp_preview_config_response()
return
if self.matches_PNP_0b836b7b4b6a9fd5():
self.pnp_un_claim_device_response()
return
if self.matches_PNP_a4b6c87a4ffb9efa():
self.pnp_sync_virtual_account_devices_response()
return
if self.matches_PNP_1e962af345b8b59f():
self.pnp_add_virtual_account_response()
return
if self.matches_PNP_848b5a7b4f9b8c12():
self.pnp_add_a_workflow_response()
return
if self.matches_SITE_PROFILE_828828f44f28bd0d():
self.site_profile_provision_nfv_response()
return
if self.matches_DEVICES_4bb22af046fa8f08():
self.devices_add_device_response()
return
if self.matches_DEVICES_cd98780f4888a66d():
self.devices_export_device_list_response()
return
if self.matches_SITES_eeb168eb41988e07():
self.sites_assign_device_to_site_response()
return
if self.matches_SITES_50b589fd4c7a930a():
self.sites_create_site_response()
return
if self.matches_NON_FABRIC_WIRELESS_db9f997f4e59aec1():
self.non_fabric_wireless_create_and_provision_ssid_response()
return
if self.matches_NON_FABRIC_WIRELESS_8a96fb954d09a349():
self.non_fabric_wireless_create_enterprise_ssid_response()
return
if self.matches_FABRIC_WIRED_bead7b3443b996a7():
self.fabric_wired_adds_border_device_response()
return
def do_PUT(self):
if self.matches_TEMPLATE_PROGRAMMER_9480fa1f47ca9254():
self.template_programmer_update_project_response()
return
if self.matches_TEMPLATE_PROGRAMMER_7781fa0548a98342():
self.template_programmer_update_template_response()
return
if self.matches_TEMPLATE_PROGRAMMER_f393abe84989bb48():
self.template_programmer_preview_template_response()
return
if self.matches_TAG_4d86a993469a9da9():
self.tag_update_tag_response()
return
if self.matches_TAG_45bc7a8344a8bc1e():
self.tag_updates_tag_membership_response()
return
if self.matches_NETWORK_DISCOVERY_9788b8fc4418831d():
self.network_discovery_updates_discovery_by_id_response()
return
if self.matches_NETWORK_DISCOVERY_709fda3c42b8877a():
self.network_discovery_update_global_credentials_response()
return
if self.matches_NETWORK_DISCOVERY_fba0d80747eb82e8():
self.network_discovery_update_cli_credentials_response()
return
if self.matches_NETWORK_DISCOVERY_89b36b4649999d81():
self.network_discovery_update_http_read_credential_response()
return
if self.matches_NETWORK_DISCOVERY_b68a6bd8473a9a25():
self.network_discovery_update_http_write_credentials_response()
return
if self.matches_NETWORK_DISCOVERY_c5acd9fa4c1a8abc():
self.network_discovery_update_netconf_credentials_response()
return
if self.matches_NETWORK_DISCOVERY_47a1b84b4e1b8044():
self.network_discovery_update_snmp_read_community_response()
return
if self.matches_NETWORK_DISCOVERY_10b06a6a4f7bb3cb():
self.network_discovery_update_snmp_write_community_response()
return
if self.matches_NETWORK_DISCOVERY_1da5ebdd434aacfe():
self.network_discovery_update_snmpv3_credentials_response()
return
if self.matches_PNP_09b0f9ce4239ae10():
self.pnp_update_device_response()
return
if self.matches_PNP_8da0391947088a5a():
self.pnp_update_pnp_global_settings_response()
return
if self.matches_PNP_6f9819e84178870c():
self.pnp_update_pnp_server_profile_response()
return
if self.matches_PNP_3086c9624f498b85():
self.pnp_update_workflow_response()
return
if self.matches_DEVICES_aeb9eb67460b92df():
self.devices_sync_devices_response()
return
if self.matches_DEVICES_b9855ad54ae98156():
self.devices_update_device_role_response()
return
if self.matches_DEVICES_3b9ef9674429be4c():
self.devices_sync_devices_using_forcesync_response()
return
def do_DELETE(self):
if self.matches_TEMPLATE_PROGRAMMER_d0a1abfa435b841d():
self.template_programmer_delete_project_response()
return
if self.matches_TEMPLATE_PROGRAMMER_a7b42836408a8e74():
self.template_programmer_delete_template_response()
return
if self.matches_TAG_429c28154bdaa13d():
self.tag_delete_tag_response()
return
if self.matches_TAG_caa3ea704d78b37e():
self.tag_remove_tag_member_response()
return
if self.matches_NETWORK_DISCOVERY_db8e09234a988bab():
self.network_discovery_delete_all_discovery_response()
return
if self.matches_NETWORK_DISCOVERY_4c8cab5f435a80f4():
self.network_discovery_delete_discovery_by_id_response()
return
if self.matches_NETWORK_DISCOVERY_c1ba9a424c08a01b():
self.network_discovery_delete_discovery_by_specified_range_response()
return
if self.matches_NETWORK_DISCOVERY_f5ac590c4ca9975a():
self.network_discovery_delete_global_credentials_by_id_response()
return
if self.matches_PATH_TRACE_8a9d2b76443b914e():
self.path_trace_deletes_pathtrace_by_id_response()
return
if self.matches_PNP_cdab9b474899ae06():
self.pnp_delete_device_by_id_from_pnp_response()
return
if self.matches_PNP_2499e9ad42e8ae5b():
self.pnp_deregister_virtual_account_response()
return
if self.matches_PNP_af8d7b0e470b8ae2():
self.pnp_delete_workflow_by_id_response()
return
if self.matches_DEVICES_1c894b5848eab214():
self.devices_delete_device_by_id_response()
return
if self.matches_NON_FABRIC_WIRELESS_cca098344a489dfa():
self.non_fabric_wireless_delete_and_provision_ssid_response()
return
if self.matches_NON_FABRIC_WIRELESS_c7a6592b4b98a369():
self.non_fabric_wireless_delete_enterprise_ssid_response()
return
if self.matches_FABRIC_WIRED_cb81b93540baaab0():
self.fabric_wired_deletes_border_device_response()
return
| [
"[email protected]"
] | |
971ab6c7828b1588b8bd01af5b3d5d1997e2cb0c | d6c84b8591c27195e9e7916d23b2d501c403b6ab | /scripts/plotMauveBetter.py | 62c8bc851a0582edd6d7adb34182a5689b6bc403 | [
"MIT"
] | permissive | marencc/riboSeed | fb850cc956cb7675fa9fd8934dfaecd19bd675ad | 8619f81978a4ff672c52491fccc19d1ef7d7eaaf | refs/heads/master | 2021-01-15T22:51:34.436704 | 2017-07-21T16:20:47 | 2017-07-21T16:20:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,386 | py | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.patches import FancyBboxPatch
from Bio import SeqIO
from Bio.SeqFeature import SeqFeature, FeatureLocation
import os
import sys
import argparse
import glob
import subprocess
mycolors = {
"pinkish": mpl.colors.ColorConverter().to_rgba(
"#ff4c05", alpha=1),
"redish": mpl.colors.ColorConverter().to_rgba(
"#ff4c05", alpha=1),
"yellish": mpl.colors.ColorConverter().to_rgba(
"#FFFB07", alpha=1),
"greenish": mpl.colors.ColorConverter().to_rgba(
"#04FF08", alpha=1),
"bluish": mpl.colors.ColorConverter().to_rgba(
"#06B9FF", alpha=1),
"greyish": mpl.colors.ColorConverter().to_rgba(
"#7E7F97", alpha=1),
"clear": mpl.colors.ColorConverter().to_rgba(
"#FF012F", alpha=0),
}
bgcols = {
"purle": mpl.colors.ColorConverter().to_rgba(
"#EB87A3", alpha=0.5),
"green": mpl.colors.ColorConverter().to_rgba(
"#5EA662", alpha=0.5),
"yellow": mpl.colors.ColorConverter().to_rgba(
"#EBE418", alpha=0.5),
"red": mpl.colors.ColorConverter().to_rgba(
"#EB7D7D", alpha=0.5),
"blue": mpl.colors.ColorConverter().to_rgba(
"#6795A6", alpha=0.5),
}
def get_args(): # pragma: no cover
"""get the arguments as a main parser with subparsers
for named required arguments and optional arguments
"""
parser = argparse.ArgumentParser(
description="Pretty up the plots generated by mauve contig mover",
add_help=False)
parser.add_argument("files",
help="list of files for comparison, starting with " +
"a genbank file and having at least one fasta file " +
"with the contigs afterward", nargs="+")
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument("-o", "--outdir",
help="output directory; default: %(default)s",
default=os.getcwd(),
type=str, dest="outdir")
optional = parser.add_argument_group('optional arguments')
optional.add_argument("-n", "--names",
help="name the resulting plot and output " +
"dirs; comma-separate",
default=None, dest="names",
action="store", type=str)
optional.add_argument("-r", "--replot",
help="replot, using a previous run of analyses",
default=False, dest="replot",
action="store_true")
optional.add_argument("--mauve_exe", dest="mauve_exe",
action="store", default="~/mauve_snapshot_2015-02-13/Mauve.jar",
help="path to Mauve.jar; " +
"default: %(default)s")
# had to make this explicitly to call it a faux optional arg
optional.add_argument("-h", "--help",
action="help", default=argparse.SUPPRESS,
help="Displays this help message")
args = parser.parse_args()
return args
def makeContigMovercmds(ref, files, outdir, mauve_exe):
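    """Build one Mauve Contig Mover (ContigOrderer) command per draft
    assembly, ordering its contigs against the reference; returns the
    command strings and their matching output directories.
    """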
cmds = []
results = []
for f in files:
thisdir = os.path.join(outdir, "ref_vs_" + os.path.splitext(os.path.basename(f))[0])
cmd = "java -Xmx500m -cp {0} org.gel.mauve.contigs.ContigOrderer -output {1} -ref {2} -draft {3}".format(
mauve_exe,
thisdir,
ref,
f)
cmds.append(cmd)
results.append(thisdir)
return(cmds, results)
def findBestAlignments(outdir):
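    """ContigOrderer writes one alignment<N> directory per iteration;
    return the path to the final (highest-numbered) one.
    """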
dirs = os.listdir(outdir)
print(dirs)
maxiter = max([int(x.split("alignment")[1]) for x in dirs])
print(maxiter)
maxiterdir = [x for x in dirs if int(x.split("alignment")[1]) == maxiter]
print(maxiterdir)
return(os.path.join(outdir, maxiterdir[0], ""))
def parseBackbones(filelist):
""" Given a list of .backbones files, write out as nested list
"""
comps_list = []
for i, f in enumerate(filelist):
with open(f, "r") as infile:
temp = [x.strip().split("\t") for x in infile.readlines()]
temp2 = []
for sublist in temp[1:len(temp)]:
temp2.append([int(x) for x in sublist])
# temp = [int(x) for x in [y for y in temp[1:len(temp)]]]
comps_list.append(temp2) # get rid of header
return (comps_list)
def plot_mauve_compare(refgb,
assembly_list,
backbones_list,
bufferlen=10000,
breakwidth=40,
aspect=.6,
names=["Position", "Entropy"],
title="Shannon Entropy by Position",
output_prefix="entropy_plot.png"):
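    """Draw the reference genome (top panel, rRNA features highlighted)
    above each reordered assembly, shading each Mauve backbone block as
    a polygon joining its reference span to its assembly span.
    """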
assert len(assembly_list) == len(backbones_list), \
"must have same amount of assemblies as backbones"
with open(refgb, "r") as rg:
ref_recs = list(SeqIO.parse(rg, "genbank"))
assembly_lens = [[sum([len(x) for x in ref_recs])]]
for seq in assembly_list:
with open(seq, "r") as inseq:
assembly_lens.append([len(x) for x in list(SeqIO.parse(inseq, "fasta"))])
backbones = parseBackbones(backbones_list)
npanels = len(assembly_list) + 1
max_combined_len = max([sum(x) for x in assembly_lens]) + bufferlen
print(max_combined_len)
fig, ax = plt.subplots(1, 1)
ax.set_title(title, y=1.08)
relheight = max_combined_len * aspect
coding_height = .05 * relheight
# set the centers as starting relative to relheight - (2* codingdepth)
relinner = relheight - (coding_height * 3)
centers = []
for i in range(npanels):
if i == 0:
centers.append(relheight - (coding_height * 1.5))
elif i == npanels - 1:
centers.append(0 + (coding_height * 1.5))
else:
centers.append(relheight - ((coding_height * 1.5) +
(relinner / float(npanels - 1)) * i))
xmin, xmax = 0, max_combined_len
ymin, ymax = 0, relheight
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymin, ymax])
# plot the color shadings
unused_cols = ["red", "green", "yellow", "purple", "red", "blue"]
nudge = coding_height / 2
patch_list = []
for i, bblist in enumerate(backbones):
for As, Ae, Bs, Be in bblist:
if (Bs == 0 and Be == 0) or \
(As == 0 and Ae == 0):
continue
verts = [
(Bs, centers[i + 1] + nudge), # left, bottom
(As, centers[0] - nudge), # left, top
(Ae, centers[0] - nudge), # right, top
(Be, centers[i + 1] + nudge), # right, bottom
(Bs, centers[i + 1] + nudge), # ignored
]
codes = [mpl.path.Path.MOVETO,
mpl.path.Path.LINETO,
mpl.path.Path.LINETO,
mpl.path.Path.LINETO,
mpl.path.Path.CLOSEPOLY]
path = mpl.path.Path(verts, codes)
patch = patches.PathPatch(path,
facecolor=bgcols.get(unused_cols[0]),
edgecolor=mycolors.get("clear"),
lw=2)
patch_list.append(patch)
unused_cols.pop(0)
# we want the first annotation on top
[ax.add_patch(p) for p in list(reversed(patch_list))]
# add annotations
last_chrom_end = 0
for record in ref_recs:
# coding sequence
print(centers[0] * .005)
coding_box = FancyBboxPatch(
(last_chrom_end, centers[0] - coding_height / 2),
len(record), coding_height,
boxstyle="round,pad=0,rounding_size=" + str(centers[0] / 50),
mutation_aspect=.5,
# mutation_scale=.5,
fc=mycolors['greyish'],
ec=mycolors['clear']
)
# buffer_box = FancyBboxPatch(
# (last_chrom_end + len(record), centers[0] - coding_height / 2),
# last_chrom_end + len(record) + bufferlen, coding_height,
# boxstyle="round,pad=0,rounding_size=0",
# mutation_aspect=.5,
# # mutation_scale=.5,
# fc=mycolors['clear'],
# ec=mycolors['clear']
# )
last_chrom_end = last_chrom_end + len(record)
ax.add_patch(coding_box)
# ax.add_patch(buffer_box)
for i, feature in enumerate(record.features):
            # skip the first feature (usually the whole-record "source"
            # feature in a GenBank file) unless it is itself an rRNA
            if feature.type != "rRNA" and i == 0:
                continue
feat_len = \
feature.location.end.position - feature.location.start.position
anno_box = FancyBboxPatch(
(feature.location.start.position,
centers[0] - coding_height),
feat_len, coding_height * 2,
boxstyle="round,pad=0,rounding_size=" + str(feat_len / 2),
mutation_aspect=.5,
# mutation_scale=.5,
fc=mycolors['redish'],
ec=mycolors['redish']
)
ax.add_patch(anno_box)
for i in range(npanels):
# for each assembly
if i == 0:
continue
with open(assembly_list[i - 1], "r") as infile:
contigs = list(SeqIO.parse(infile, "fasta"))
last_contig_end = 0
for record in contigs:
coding_box = FancyBboxPatch(
(last_contig_end, centers[i] - coding_height / 2),
len(record), coding_height,
boxstyle="round,pad=0,rounding_size=" + str(centers[i] / 50),
mutation_aspect=.5,
# mutation_scale=.5,
fc=mycolors['greyish'],
ec=mycolors['clear']
)
buffer_box = FancyBboxPatch(
(last_contig_end + len(record) - breakwidth, centers[i] - coding_height),
breakwidth, coding_height * 2,
boxstyle="round,pad=0,rounding_size=0",
mutation_aspect=.5,
# mutation_scale=.5,
fc="black",
ec=mycolors['clear']
)
last_contig_end = last_contig_end + len(record)
ax.add_patch(coding_box)
ax.add_patch(buffer_box)
ax.set_yticks(np.array(centers))
ax.set_yticklabels(names)
ax.get_yaxis().set_label_coords(-.05, .1)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('top')
# ax.tick_params(axis='y', colors='dimgrey')
ax.tick_params(axis='x', colors='dimgrey')
ax.yaxis.label.set_color('black')
ax.xaxis.label.set_color('black')
ax.spines['top'].set_visible(True)
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
plt.tight_layout()
fig.subplots_adjust(hspace=0)
fig.set_size_inches(12, 12 * aspect)
fig.savefig(str(output_prefix + '.png'), dpi=(200))
fig.savefig(str(output_prefix + '.pdf'), dpi=(200))
return 0
if __name__ == "__main__":
args = get_args()
try:
os.makedirs(args.outdir)
os.makedirs(os.path.join(args.outdir, "reordering"))
    except OSError:  # the output directories already exist
if args.replot:
print("using existing output dir and alignment results")
else:
sys.stderr.write("Output Directory already exists!\n")
sys.exit(1)
cmds, result_paths = makeContigMovercmds(
ref=args.files[0], files=args.files[1:],
outdir=os.path.join(args.outdir, "reordering"),
mauve_exe=args.mauve_exe)
if not args.replot:
for i in cmds:
try:
print(i)
subprocess.run([i],
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
except Exception as e:
print(e)
sys.exit(1)
# get the path to the dir for the last iteration of the reorderer
best_aln_dirs = [findBestAlignments(i) for i in result_paths]
assembly_list = []
backbone_list = []
for d in best_aln_dirs:
assembly_list.append(glob.glob(d + "*.fasta")[0])
backbone_list.append(glob.glob(d + "*.backbone")[0])
# refgbpath = os.path.expanduser("~/GitHub/riboSeed/manuscript_results/simulated_genome/mauve/reference.gb")
# deferepath = os.path.expanduser("~/GitHub/riboSeed/manuscript_results/simulated_genome/de_fere/alignment2/coli_de_fere_novo.fa.fas")
# df_bb = os.path.expanduser("~/GitHub/riboSeed/manuscript_results/simulated_genome/de_fere/alignment2/alignment2.backbone")
# denovopath = os.path.expanduser("~/GitHub/riboSeed/manuscript_results/simulated_genome/de_novo/alignment2/coli_de_novo.fa.fas")
# dn_bb = os.path.expanduser("~/GitHub/riboSeed/manuscript_results/simulated_genome/de_novo/alignment2/alignment2.backbone")
# deklebpath = os.path.expanduser("~/GitHub/riboSeed/manuscript_results/simulated_genome/de_fere_kleb/alignment2/kleb_de_fere_novo.fa.fas")
# dk_bb = os.path.expanduser("~/GitHub/riboSeed/manuscript_results/simulated_genome/de_fere_kleb/alignment2/alignment2.backbone")
plot_mauve_compare(refgb=args.files[0],
# refgb=refgbpath,
# assembly_list=[deferepath, denovopath, deklebpath],
# backbones_list=[df_bb, dn_bb, dk_bb],
assembly_list=assembly_list,
backbones_list=backbone_list,
# names=["reference", "de_novo", "de_fere", "de_kleb"],
                       # fall back to the input file names if --names is omitted
                       names=(args.names.split(",") if args.names else
                              [os.path.basename(f) for f in args.files]),
bufferlen=1000,
breakwidth=100,
title="",
aspect=.4,
output_prefix=os.path.join(args.outdir,
"PrettyMauve"))
| [
"[email protected]"
] | |
905f02f515e5d02e19c57b45372edb6089c87b26 | e34cbf5fce48f661d08221c095750240dbd88caf | /python/day10/flask.py | 8558ed1fb5d09f9ecf932a6a6acab0c8bcb6d972 | [] | no_license | willianflasky/growup | 2f994b815b636e2582594375e90dbcb2aa37288e | 1db031a901e25bbe13f2d0db767cd28c76ac47f5 | refs/heads/master | 2023-01-04T13:13:14.191504 | 2020-01-12T08:11:41 | 2020-01-12T08:11:41 | 48,899,304 | 2 | 0 | null | 2022-12-26T19:46:22 | 2016-01-02T05:04:39 | C | UTF-8 | Python | false | false | 2,323 | py | #!/usr/bin/env python
# -*-coding:utf8-*-
# __author__ = "willian"
import select
import socket
class Flask(object):
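    """Toy Flask-like HTTP server: one select() loop accepts
    non-blocking sockets and dispatches each request URL to the
    matching (url, handler) pair from ``routers``.
    """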
def __init__(self, routers):
self.routers = routers
def process_data(self, client):
data = bytes()
        while True:  # loop to receive the request data
try:
                trunk = client.recv(1024)  # raises when no data is ready; also raises if the client disconnects.
            except BlockingIOError:
                trunk = b""  # nothing more to read for now
if not trunk:
break
data += trunk
data_str = str(data, encoding='utf8')
header, body = data_str.split('\r\n\r\n', 1)
header_list = header.split('\r\n')
header_dict = {}
for line in header_list:
value = line.split(":", 1)
if len(value) == 2:
k, v = value
header_dict[k] = v
else:
                header_dict['method'], header_dict['url'], header_dict['protocol'] = line.split(' ')
return header_dict, body
def run(self, host='127.0.0.1', port=8888):
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(False)
sock.bind((host, port))
sock.listen(5)
inputs = [sock, ]
while True:
rList, wList, eList = select.select(inputs, [], [], 0.5)
for client in rList:
                # a new connection is being established
if client == sock:
conn, addr = client.accept()
conn.setblocking(False)
inputs.append(conn)
                else:  # an existing client has sent data
header_dict, body = self.process_data(client)
request_url = header_dict['url']
func_name = None
for item in self.routers:
if item[0] == request_url:
func_name = item[1]
break
if not func_name:
client.sendall(b"404")
else:
result = func_name(header_dict, body)
client.sendall(result.encode('utf8'))
inputs.remove(client)
client.close()
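# Minimal usage sketch (handler name and route are illustrative):
#   def index(headers, body):
#       return "hello"
#   Flask([("/index", index)]).run()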
| [
"[email protected]"
] | |
71a2388dcad6ad8e70d8fc2e86e246444b5ced55 | 8941c8ca788b1a45bfad23ca26ebfa357c13f09b | /Lyceum/Mars_Sql_Alchemy/zapros8.py | 85a4ed28493a5c99de6c54d2326d35b671007644 | [] | no_license | MysteriousSonOfGod/Python-2 | d1dfdf094f4a763758bfc7e1777c2cd6efbd0809 | 0d488906e4b5e3897da6b7cb077815740e82fd84 | refs/heads/master | 2023-02-05T13:38:25.673248 | 2020-12-22T13:54:02 | 2020-12-22T13:54:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | from data.db_session import global_init, create_session
from data.users import User
from data.jobs import Jobs
from data.departments import Department
from sqlalchemy import func
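# Print the members of department 1 whose summed job work_size exceeds 25.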
db = input()
global_init(db)
session = create_session()
d = session.query(Department).filter(Department.id == 1).first()
members = list(map(int, d.members.split(",")))
workers = []
for m in members:
j = session.query(func.sum(Jobs.work_size)).filter(Jobs.collaborators.like(f'%{str(m)}%')).scalar()
# print(j)
    if j is not None and j > 25:  # scalar() returns None when the member has no jobs
workers.append(m)
# print(workers)
users = session.query(User).filter(User.id.in_(workers))
for user in users:
print(user.surname, user.name)
# db/mars_explorer.db
| [
"[email protected]"
] | |
faf11b5f5dbf57001f44e9ad498633a1097fffc0 | eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | /data/input/Azure/azure-sdk-for-python/azure-mgmt-web/azure/mgmt/web/models/ip_security_restriction.py | 4749ab6f9dc423ffde15078edcc4ca12dc54be31 | [
"Apache-2.0"
] | permissive | bopopescu/pythonanalyzer | db839453bde13bf9157b76e54735f11c2262593a | 8390a0139137574ab237b3ff5fe8ea61e8a0b76b | refs/heads/master | 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null | UTF-8 | Python | false | false | 1,587 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IpSecurityRestriction(Model):
"""
Represents an ip security restriction on a web app.
:param ip_address: IP address the security restriction is valid for
:type ip_address: str
:param subnet_mask: Subnet mask for the range of IP addresses the
restriction is valid for
:type subnet_mask: str
"""
_attribute_map = {
'ip_address': {'key': 'ipAddress', 'type': 'str'},
'subnet_mask': {'key': 'subnetMask', 'type': 'str'},
}
def __init__(self, ip_address=None, subnet_mask=None):
self.ip_address = ip_address
self.subnet_mask = subnet_mask
| [
"[email protected]"
] | |
a03d7ae8801659c506b674965a5faaa056589de2 | 1a94622c336c127a7d0657c010d5edf359d869ad | /src/python/counts.py | b4f1d1ba1607e80459e9d1d9e2170f1e8ab3cdf7 | [
"MIT"
] | permissive | dankolbman/BCIM | 088eab0aa1b2cf656be3f877020ae1cc97f85eee | e3108828ebdadd14968ad8ec093ab5fa6f8612d1 | refs/heads/master | 2021-01-22T06:48:41.995215 | 2015-05-28T23:06:42 | 2015-05-28T23:06:42 | 20,842,183 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | import numpy as np
from .DataIO import read_parts
def counts(filen, params):
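  """Tally how many particles of each of two species appear at each
  timestep. Assumes each non-comment line is a particle record that
  begins ``time species ...`` (species 1-indexed); returns (t, counts).
  """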
t = []
counts = [ [0], [0] ]
with open(filen, 'r') as f:
ctime = ''
    for line in f:
      if line[0] == '#':
        continue
      l = line.split()
      if l[0] != ctime:
        # new timestep: record it and open fresh counters
        ctime = l[0]
        t.append( float(l[0]) )
        counts[0].append(0)
        counts[1].append(0)
      # tally every record, including the first one of each timestep
      sp = int( l[1] ) - 1
      counts[sp][ -1 ] += 1
counts[0] = counts[0][1:]
counts[1] = counts[1][1:]
return t, counts
| [
"[email protected]"
] | |
a084bed1223eae867997fc027ac2332fc44f1eda | 9829fef375374a3887326fa3ac814914c2db63a5 | /models/networks.py | 1844580c86b10e708e53f71e5af84e82e952af1d | [] | no_license | fuchami/scene_detection_pytorch | bf78a19011176112e6a0dd4bc9462c9302d20008 | 2befe163a7d78674ebdb4ec7c22e6d50c609214f | refs/heads/master | 2022-04-07T02:06:16.345739 | 2020-02-20T06:54:43 | 2020-02-20T06:54:43 | 216,630,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | # coding:utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from models.embedding import EmbeddingNet
# from torchsummary import summary
class SiameseNet(nn.Module):
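    """Applies one shared EmbeddingNet to each input of a pair."""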
def __init__(self, image=False, audio=False, text=False, time=False, merge='concat', outdim=128):
super(SiameseNet, self).__init__()
self.embedding_net = EmbeddingNet(image,audio,text,time,merge,outdim)
def forward(self, x1, x2):
output1 = self.embedding_net(x1)
output2 = self.embedding_net(x2)
return output1, output2
def get_embedding(self, x):
return self.embedding_net(x)
class TripletNet(nn.Module):
def __init__(self, image=False, audio=False, text=False, time=False, merge='concat', outdim=128):
super(TripletNet, self).__init__()
self.embedding_net = EmbeddingNet(image,audio,text,time,merge,outdim)
def forward(self, x1, x2, x3):
output1 = self.embedding_net(x1)
output2 = self.embedding_net(x2)
output3 = self.embedding_net(x3)
return output1, output2, output3
def get_embedding(self, x):
return self.embedding_net(x)
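
# Usage sketch: how the three embeddings returned by TripletNet are typically
# fed to a triplet margin loss. The random 128-d tensors below are stand-ins
# (an assumption) for real outputs of forward()/get_embedding().
if __name__ == '__main__':
    criterion = nn.TripletMarginLoss(margin=1.0)
    anchor, positive, negative = (torch.randn(4, 128) for _ in range(3))
    print('triplet loss on random embeddings:',
          criterion(anchor, positive, negative).item())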
| [
"[email protected]"
] | |
3ed77387ea1326b471aae183e9e5b935e36511e3 | a84e1ed67ef2592cf22f7d19cdddaf16700d6a8e | /graveyard/web/VNET/branches/vnf/vnf/inventory/__init__.py | 69262bdf621adb11656adb43e9aada29cd9cb8a9 | [] | no_license | danse-inelastic/inelastic-svn | dda998d7b9f1249149821d1bd3c23c71859971cc | 807f16aa9510d45a45360d8f59f34f75bb74414f | refs/heads/master | 2016-08-11T13:40:16.607694 | 2016-02-25T17:58:35 | 2016-02-25T17:58:35 | 52,544,337 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | # -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
def dataobject( *args, **kwds ):
from DataObject import DataObject
return DataObject( *args, **kwds )
def form( *args, **kwds ):
from Form import Form
return Form( *args, **kwds )
def geometer( *args, **kwds ):
from Geometer import Geometer
return Geometer( *args, **kwds )
# version
__id__ = "$Id$"
# End of file
| [
"[email protected]"
] | |
2826700b29c69e5fbd17b9b903aede212d353033 | eba7c4b31b4c2bdebc972ea99ffb72fcddc7661c | /tests/unit/v1/test_async_client.py | 393bef51420d71b14b89dec8fb87a4bd4737eb44 | [
"Apache-2.0"
] | permissive | googleapis/python-firestore | eb6ab81daa6567c1416ac4bd389573b23ed29b4e | ccadec5eba81c20618a94c0e4a23f07dfb7c1ea7 | refs/heads/main | 2023-09-03T13:18:15.845520 | 2023-08-07T10:56:08 | 2023-08-07T10:56:08 | 226,992,533 | 203 | 72 | Apache-2.0 | 2023-09-14T19:33:50 | 2019-12-10T00:09:33 | Python | UTF-8 | Python | false | false | 18,500 | py | # Copyright 2020 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import types
import mock
import pytest
from tests.unit.v1.test__helpers import AsyncIter
from tests.unit.v1.test__helpers import AsyncMock
PROJECT = "my-prahjekt"
def _make_async_client(*args, **kwargs):
from google.cloud.firestore_v1.async_client import AsyncClient
return AsyncClient(*args, **kwargs)
def _make_default_async_client():
credentials = _make_credentials()
return _make_async_client(project=PROJECT, credentials=credentials)
def test_asyncclient_constructor():
from google.cloud.firestore_v1.async_client import _CLIENT_INFO
from google.cloud.firestore_v1.base_client import DEFAULT_DATABASE
credentials = _make_credentials()
client = _make_async_client(project=PROJECT, credentials=credentials)
assert client.project == PROJECT
assert client._credentials == credentials
assert client._database == DEFAULT_DATABASE
assert client._client_info is _CLIENT_INFO
def test_asyncclient_constructor_explicit():
from google.api_core.client_options import ClientOptions
credentials = _make_credentials()
database = "now-db"
client_info = mock.Mock()
client_options = ClientOptions("endpoint")
client = _make_async_client(
project=PROJECT,
credentials=credentials,
database=database,
client_info=client_info,
client_options=client_options,
)
assert client.project == PROJECT
assert client._credentials == credentials
assert client._database == database
assert client._client_info is client_info
assert client._client_options is client_options
def test_asyncclient_constructor_w_client_options():
credentials = _make_credentials()
client = _make_async_client(
project=PROJECT,
credentials=credentials,
client_options={"api_endpoint": "foo-firestore.googleapis.com"},
)
assert client._target == "foo-firestore.googleapis.com"
def test_asyncclient_collection_factory():
from google.cloud.firestore_v1.async_collection import AsyncCollectionReference
collection_id = "users"
client = _make_default_async_client()
collection = client.collection(collection_id)
assert collection._path == (collection_id,)
assert collection._client is client
assert isinstance(collection, AsyncCollectionReference)
def test_asyncclient_collection_factory_nested():
from google.cloud.firestore_v1.async_collection import AsyncCollectionReference
client = _make_default_async_client()
parts = ("users", "alovelace", "beep")
collection_path = "/".join(parts)
collection1 = client.collection(collection_path)
assert collection1._path == parts
assert collection1._client is client
assert isinstance(collection1, AsyncCollectionReference)
# Make sure using segments gives the same result.
collection2 = client.collection(*parts)
assert collection2._path == parts
assert collection2._client is client
assert isinstance(collection2, AsyncCollectionReference)
def test_asyncclient__get_collection_reference():
from google.cloud.firestore_v1.async_collection import AsyncCollectionReference
client = _make_default_async_client()
collection = client._get_collection_reference("collectionId")
assert collection._client is client
assert isinstance(collection, AsyncCollectionReference)
def test_asyncclient_collection_group():
client = _make_default_async_client()
query = client.collection_group("collectionId").where("foo", "==", "bar")
assert query._all_descendants
assert query._field_filters[0].field.field_path == "foo"
assert query._field_filters[0].value.string_value == "bar"
assert query._field_filters[0].op == query._field_filters[0].Operator.EQUAL
assert query._parent.id == "collectionId"
def test_asyncclient_collection_group_no_slashes():
client = _make_default_async_client()
with pytest.raises(ValueError):
client.collection_group("foo/bar")
def test_asyncclient_document_factory():
from google.cloud.firestore_v1.async_document import AsyncDocumentReference
parts = ("rooms", "roomA")
client = _make_default_async_client()
doc_path = "/".join(parts)
document1 = client.document(doc_path)
assert document1._path == parts
assert document1._client is client
assert isinstance(document1, AsyncDocumentReference)
# Make sure using segments gives the same result.
document2 = client.document(*parts)
assert document2._path == parts
assert document2._client is client
assert isinstance(document2, AsyncDocumentReference)
def test_asyncclient_document_factory_w_absolute_path():
from google.cloud.firestore_v1.async_document import AsyncDocumentReference
parts = ("rooms", "roomA")
client = _make_default_async_client()
doc_path = "/".join(parts)
to_match = client.document(doc_path)
document1 = client.document(to_match._document_path)
assert document1._path == parts
assert document1._client is client
assert isinstance(document1, AsyncDocumentReference)
def test_asyncclient_document_factory_w_nested_path():
from google.cloud.firestore_v1.async_document import AsyncDocumentReference
client = _make_default_async_client()
parts = ("rooms", "roomA", "shoes", "dressy")
doc_path = "/".join(parts)
document1 = client.document(doc_path)
assert document1._path == parts
assert document1._client is client
assert isinstance(document1, AsyncDocumentReference)
# Make sure using segments gives the same result.
document2 = client.document(*parts)
assert document2._path == parts
assert document2._client is client
assert isinstance(document2, AsyncDocumentReference)
async def _collections_helper(retry=None, timeout=None):
from google.cloud.firestore_v1.async_collection import AsyncCollectionReference
from google.cloud.firestore_v1 import _helpers
collection_ids = ["users", "projects"]
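    # Minimal async-iterable stand-in for the GAPIC pager returned by
    # list_collection_ids: defining __aiter__ as an async generator lets
    # `async for` yield the collection ids directly.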
class Pager(object):
async def __aiter__(self, **_):
for collection_id in collection_ids:
yield collection_id
firestore_api = AsyncMock()
firestore_api.mock_add_spec(spec=["list_collection_ids"])
firestore_api.list_collection_ids.return_value = Pager()
client = _make_default_async_client()
client._firestore_api_internal = firestore_api
kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
collections = [c async for c in client.collections(**kwargs)]
assert len(collections) == len(collection_ids)
for collection, collection_id in zip(collections, collection_ids):
assert isinstance(collection, AsyncCollectionReference)
assert collection.parent is None
assert collection.id == collection_id
base_path = client._database_string + "/documents"
firestore_api.list_collection_ids.assert_called_once_with(
request={"parent": base_path},
metadata=client._rpc_metadata,
**kwargs,
)
@pytest.mark.asyncio
async def test_asyncclient_collections():
await _collections_helper()
@pytest.mark.asyncio
async def test_asyncclient_collections_w_retry_timeout():
from google.api_core.retry import Retry
retry = Retry(predicate=object())
timeout = 123.0
await _collections_helper(retry=retry, timeout=timeout)
async def _invoke_get_all(client, references, document_pbs, **kwargs):
# Create a minimal fake GAPIC with a dummy response.
firestore_api = AsyncMock(spec=["batch_get_documents"])
response_iterator = AsyncIter(document_pbs)
firestore_api.batch_get_documents.return_value = response_iterator
# Attach the fake GAPIC to a real client.
client._firestore_api_internal = firestore_api
# Actually call get_all().
snapshots = client.get_all(references, **kwargs)
assert isinstance(snapshots, types.AsyncGeneratorType)
return [s async for s in snapshots]
async def _get_all_helper(num_snapshots=2, txn_id=None, retry=None, timeout=None):
from google.cloud.firestore_v1 import _helpers
from google.cloud.firestore_v1.types import common
from google.cloud.firestore_v1.async_document import DocumentSnapshot
client = _make_default_async_client()
data1 = {"a": "cheese"}
document1 = client.document("pineapple", "lamp1")
document_pb1, read_time = _doc_get_info(document1._document_path, data1)
response1 = _make_batch_response(found=document_pb1, read_time=read_time)
data2 = {"b": True, "c": 18}
document2 = client.document("pineapple", "lamp2")
document, read_time = _doc_get_info(document2._document_path, data2)
response2 = _make_batch_response(found=document, read_time=read_time)
document3 = client.document("pineapple", "lamp3")
response3 = _make_batch_response(missing=document3._document_path)
expected_data = [data1, data2, None][:num_snapshots]
documents = [document1, document2, document3][:num_snapshots]
responses = [response1, response2, response3][:num_snapshots]
field_paths = [
field_path for field_path in ["a", "b", None][:num_snapshots] if field_path
]
kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
if txn_id is not None:
transaction = client.transaction()
transaction._id = txn_id
kwargs["transaction"] = transaction
snapshots = await _invoke_get_all(
client,
documents,
responses,
field_paths=field_paths,
**kwargs,
)
assert len(snapshots) == num_snapshots
for data, document, snapshot in zip(expected_data, documents, snapshots):
assert isinstance(snapshot, DocumentSnapshot)
assert snapshot._reference is document
if data is None:
assert not snapshot.exists
else:
assert snapshot._data == data
# Verify the call to the mock.
doc_paths = [document._document_path for document in documents]
mask = common.DocumentMask(field_paths=field_paths)
kwargs.pop("transaction", None)
client._firestore_api.batch_get_documents.assert_called_once_with(
request={
"database": client._database_string,
"documents": doc_paths,
"mask": mask,
"transaction": txn_id,
},
metadata=client._rpc_metadata,
**kwargs,
)
@pytest.mark.asyncio
async def test_asyncclient_get_all():
await _get_all_helper()
@pytest.mark.asyncio
async def test_asyncclient_get_all_with_transaction():
txn_id = b"the-man-is-non-stop"
await _get_all_helper(num_snapshots=1, txn_id=txn_id)
@pytest.mark.asyncio
async def test_asyncclient_get_all_w_retry_timeout():
from google.api_core.retry import Retry
retry = Retry(predicate=object())
timeout = 123.0
await _get_all_helper(retry=retry, timeout=timeout)
@pytest.mark.asyncio
async def test_asyncclient_get_all_wrong_order():
await _get_all_helper(num_snapshots=3)
@pytest.mark.asyncio
async def test_asyncclient_get_all_unknown_result():
from google.cloud.firestore_v1.base_client import _BAD_DOC_TEMPLATE
client = _make_default_async_client()
expected_document = client.document("pineapple", "lamp1")
data = {"z": 28.5}
wrong_document = client.document("pineapple", "lamp2")
document_pb, read_time = _doc_get_info(wrong_document._document_path, data)
response = _make_batch_response(found=document_pb, read_time=read_time)
# Exercise the mocked ``batch_get_documents``.
with pytest.raises(ValueError) as exc_info:
await _invoke_get_all(client, [expected_document], [response])
err_msg = _BAD_DOC_TEMPLATE.format(response.found.name)
assert exc_info.value.args == (err_msg,)
# Verify the call to the mock.
doc_paths = [expected_document._document_path]
client._firestore_api.batch_get_documents.assert_called_once_with(
request={
"database": client._database_string,
"documents": doc_paths,
"mask": None,
"transaction": None,
},
metadata=client._rpc_metadata,
)
def test_asyncclient_bulk_writer():
"""BulkWriter is opaquely async and thus does not have a dedicated
async variant."""
from google.cloud.firestore_v1.bulk_writer import BulkWriter
client = _make_default_async_client()
bulk_writer = client.bulk_writer()
assert isinstance(bulk_writer, BulkWriter)
assert bulk_writer._client is client._sync_copy
def test_asyncclient_sync_copy():
client = _make_default_async_client()
# Multiple calls to this method should return the same cached instance.
assert client._to_sync_copy() is client._to_sync_copy()
@pytest.mark.asyncio
async def test_asyncclient_recursive_delete():
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import firestore
client = _make_default_async_client()
client._firestore_api_internal = AsyncMock(spec=["run_query"])
collection_ref = client.collection("my_collection")
results = []
for index in range(10):
results.append(
firestore.RunQueryResponse(
document=document.Document(name=f"{collection_ref.id}/{index}")
)
)
chunks = [
results[:3],
results[3:6],
results[6:9],
results[9:],
]
def _get_chunk(*args, **kwargs):
return AsyncIter(items=chunks.pop(0))
client._firestore_api_internal.run_query.side_effect = _get_chunk
bulk_writer = mock.MagicMock()
bulk_writer.mock_add_spec(spec=["delete", "close"])
num_deleted = await client.recursive_delete(
collection_ref, bulk_writer=bulk_writer, chunk_size=3
)
assert num_deleted == len(results)
@pytest.mark.asyncio
async def test_asyncclient_recursive_delete_from_document():
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import firestore
client = _make_default_async_client()
client._firestore_api_internal = mock.Mock(
spec=["run_query", "list_collection_ids"]
)
collection_ref = client.collection("my_collection")
collection_1_id: str = "collection_1_id"
collection_2_id: str = "collection_2_id"
parent_doc = collection_ref.document("parent")
collection_1_results = []
collection_2_results = []
for index in range(10):
collection_1_results.append(
firestore.RunQueryResponse(
document=document.Document(name=f"{collection_1_id}/{index}"),
),
)
collection_2_results.append(
firestore.RunQueryResponse(
document=document.Document(name=f"{collection_2_id}/{index}"),
),
)
col_1_chunks = [
collection_1_results[:3],
collection_1_results[3:6],
collection_1_results[6:9],
collection_1_results[9:],
]
col_2_chunks = [
collection_2_results[:3],
collection_2_results[3:6],
collection_2_results[6:9],
collection_2_results[9:],
]
async def _get_chunk(*args, **kwargs):
start_at = (
kwargs["request"]["structured_query"].start_at.values[0].reference_value
)
if collection_1_id in start_at:
return AsyncIter(col_1_chunks.pop(0))
return AsyncIter(col_2_chunks.pop(0))
async def _get_collections(*args, **kwargs):
return AsyncIter([collection_1_id, collection_2_id])
client._firestore_api_internal.run_query.side_effect = _get_chunk
client._firestore_api_internal.list_collection_ids.side_effect = _get_collections
bulk_writer = mock.MagicMock()
bulk_writer.mock_add_spec(spec=["delete", "close"])
num_deleted = await client.recursive_delete(
parent_doc, bulk_writer=bulk_writer, chunk_size=3
)
expected_len = len(collection_1_results) + len(collection_2_results) + 1
assert num_deleted == expected_len
@pytest.mark.asyncio
async def test_asyncclient_recursive_delete_raises():
client = _make_default_async_client()
with pytest.raises(TypeError):
await client.recursive_delete(object())
def test_asyncclient_batch():
from google.cloud.firestore_v1.async_batch import AsyncWriteBatch
client = _make_default_async_client()
batch = client.batch()
assert isinstance(batch, AsyncWriteBatch)
assert batch._client is client
assert batch._write_pbs == []
def test_asyncclient_transaction():
from google.cloud.firestore_v1.async_transaction import AsyncTransaction
client = _make_default_async_client()
transaction = client.transaction(max_attempts=3, read_only=True)
assert isinstance(transaction, AsyncTransaction)
assert transaction._write_pbs == []
assert transaction._max_attempts == 3
assert transaction._read_only
assert transaction._id is None
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
def _make_batch_response(**kwargs):
from google.cloud.firestore_v1.types import firestore
return firestore.BatchGetDocumentsResponse(**kwargs)
def _doc_get_info(ref_string, values):
from google.cloud.firestore_v1.types import document
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.cloud.firestore_v1 import _helpers
now = datetime.datetime.utcnow()
read_time = _datetime_to_pb_timestamp(now)
delta = datetime.timedelta(seconds=100)
update_time = _datetime_to_pb_timestamp(now - delta)
create_time = _datetime_to_pb_timestamp(now - 2 * delta)
document_pb = document.Document(
name=ref_string,
fields=_helpers.encode_dict(values),
create_time=create_time,
update_time=update_time,
)
return document_pb, read_time
| [
"[email protected]"
] | |
08a772274dc0b7588e67be727f019c4b0572db37 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03146/s635038540.py | ffcd848d7b53f891a4f49a1d39ab65423805b702 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | s = int(input())
i = 1
a_set = {s}
a_prev = s
while True:
i += 1
if a_prev % 2 == 0:
a = a_prev // 2
else:
a = 3 * a_prev + 1
if a in a_set:
ans = i
break
a_set.add(a)
a_prev = a
print(ans) | [
"[email protected]"
] | |
3e13a374dd395bb496b4156d4850e4514534773d | b56ca08eb67163d3ccb02ff0775f59a2d971d910 | /backend/settings/migrations/0006_replanishmentplan.py | 694fa4a6750e7c6b69b45668571ca37920eab849 | [] | no_license | globax89/dating-work | f23d07f98dcb5efad62a1c91cdb04b1a8ef021f7 | bb3d09c4e2f48ecd3d73e664ab8e3982fc97b534 | refs/heads/master | 2022-12-11T22:45:19.360096 | 2019-10-16T07:01:40 | 2019-10-16T07:01:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | # Generated by Django 2.2.4 on 2019-10-03 12:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('settings', '0005_auto_20190927_1426'),
]
operations = [
migrations.CreateModel(
name='ReplanishmentPlan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('dollar', models.IntegerField(default=0)),
('credit', models.IntegerField(default=0)),
],
),
]
| [
"[email protected]"
] | |
60b986e42773c78b6920270767e1061ac93a28aa | 07b22625827cbcf86440115c438768b2d6c0e28b | /Chapter03/email_spam.py | 4f09789a55a58a9dcc929b14ab0d4625fb7ad410 | [
"MIT"
] | permissive | PacktPublishing/Python-Machine-Learning-By-Example | 8892b262c4010972f8a1dca6c793f9216fd575be | 6ee2be561e511bd0a1c0b3d481ad3950ea3f1815 | refs/heads/master | 2023-01-28T18:33:27.857048 | 2023-01-18T09:17:08 | 2023-01-18T09:17:08 | 92,726,056 | 125 | 94 | null | null | null | null | UTF-8 | Python | false | false | 10,530 | py | from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import names
from nltk.stem import WordNetLemmatizer
import glob
import os
import numpy as np
file_path = 'enron1/ham/0007.1999-12-14.farmer.ham.txt'
with open(file_path, 'r') as infile:
ham_sample = infile.read()
print(ham_sample)
file_path = 'enron1/spam/0058.2003-12-21.GP.spam.txt'
with open(file_path, 'r') as infile:
spam_sample = infile.read()
print(spam_sample)
cv = CountVectorizer(stop_words="english", max_features=500)
emails, labels = [], []
file_path = 'enron1/spam/'
for filename in glob.glob(os.path.join(file_path, '*.txt')):
    with open(filename, 'r', encoding="ISO-8859-1") as infile:
emails.append(infile.read())
labels.append(1)
file_path = 'enron1/ham/'
for filename in glob.glob(os.path.join(file_path, '*.txt')):
    with open(filename, 'r', encoding="ISO-8859-1") as infile:
emails.append(infile.read())
labels.append(0)
def letters_only(astr):
return astr.isalpha()
all_names = set(names.words())
lemmatizer = WordNetLemmatizer()
def clean_text(docs):
cleaned_docs = []
for doc in docs:
cleaned_docs.append(' '.join([lemmatizer.lemmatize(word.lower())
for word in doc.split()
if letters_only(word)
and word not in all_names]))
return cleaned_docs
cleaned_emails = clean_text(emails)
term_docs = cv.fit_transform(cleaned_emails)
print(term_docs[0])
feature_mapping = cv.vocabulary_
feature_names = cv.get_feature_names()
def get_label_index(labels):
from collections import defaultdict
label_index = defaultdict(list)
for index, label in enumerate(labels):
label_index[label].append(index)
return label_index
def get_prior(label_index):
""" Compute prior based on training samples
Args:
label_index (grouped sample indices by class)
Returns:
dictionary, with class label as key, corresponding prior as the value
"""
prior = {label: len(index) for label, index in label_index.items()}
total_count = sum(prior.values())
for label in prior:
prior[label] /= float(total_count)
return prior
def get_likelihood(term_document_matrix, label_index, smoothing=0):
""" Compute likelihood based on training samples
Args:
term_document_matrix (sparse matrix)
label_index (grouped sample indices by class)
smoothing (integer, additive Laplace smoothing parameter)
Returns:
dictionary, with class as key, corresponding conditional probability P(feature|class) vector as value
"""
likelihood = {}
for label, index in label_index.items():
likelihood[label] = term_document_matrix[index, :].sum(axis=0) + smoothing
likelihood[label] = np.asarray(likelihood[label])[0]
total_count = likelihood[label].sum()
likelihood[label] = likelihood[label] / float(total_count)
return likelihood
print(feature_names[:5])
def get_posterior(term_document_matrix, prior, likelihood):
""" Compute posterior of testing samples, based on prior and likelihood
Args:
term_document_matrix (sparse matrix)
prior (dictionary, with class label as key, corresponding prior as the value)
likelihood (dictionary, with class label as key, corresponding conditional probability vector as value)
Returns:
dictionary, with class label as key, corresponding posterior as value
"""
num_docs = term_document_matrix.shape[0]
posteriors = []
for i in range(num_docs):
# posterior is proportional to prior * likelihood
# = exp(log(prior * likelihood))
# = exp(log(prior) + log(likelihood))
posterior = {key: np.log(prior_label) for key, prior_label in prior.items()}
for label, likelihood_label in likelihood.items():
term_document_vector = term_document_matrix.getrow(i)
counts = term_document_vector.data
indices = term_document_vector.indices
for count, index in zip(counts, indices):
posterior[label] += np.log(likelihood_label[index]) * count
# exp(-1000):exp(-999) will cause zero division error,
# however it equates to exp(0):exp(1)
min_log_posterior = min(posterior.values())
for label in posterior:
try:
posterior[label] = np.exp(posterior[label] - min_log_posterior)
except:
# if one's log value is excessively large, assign it infinity
posterior[label] = float('inf')
# normalize so that all sums up to 1
sum_posterior = sum(posterior.values())
for label in posterior:
if posterior[label] == float('inf'):
posterior[label] = 1.0
else:
posterior[label] /= sum_posterior
posteriors.append(posterior.copy())
return posteriors
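# Quick sanity check of the three helpers above on a toy two-document,
# three-term corpus; the counts and labels below are made up for illustration.
from scipy.sparse import csr_matrix
toy_matrix = csr_matrix([[2, 0, 1], [0, 3, 0]])
toy_index = get_label_index([0, 1])
toy_prior = get_prior(toy_index)
toy_likelihood = get_likelihood(toy_matrix, toy_index, smoothing=1)
print(get_posterior(toy_matrix, toy_prior, toy_likelihood))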
label_index = get_label_index(labels)
prior = get_prior(label_index)
smoothing = 1
likelihood = get_likelihood(term_docs, label_index, smoothing)
emails_test = [
'''Subject: flat screens
hello ,
please call or contact regarding the other flat screens requested .
trisha tlapek - eb 3132 b
michael sergeev - eb 3132 a
also the sun blocker that was taken away from eb 3131 a .
trisha should two monitors also michael .
thanks
kevin moore''',
'''Subject: having problems in bed ? we can help !
cialis allows men to enjoy a fully normal sex life without having to plan the sexual act .
if we let things terrify us , life will not be worth living .
brevity is the soul of lingerie .
suspicion always haunts the guilty mind .''',
]
cleaned_test = clean_text(emails_test)
term_docs_test = cv.transform(cleaned_test)
posterior = get_posterior(term_docs_test, prior, likelihood)
print(posterior)
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(cleaned_emails, labels, test_size=0.33, random_state=42)
print(len(X_train), len(Y_train))
print(len(X_test), len(Y_test))
term_docs_train = cv.fit_transform(X_train)
label_index = get_label_index(Y_train)
prior = get_prior(label_index)
likelihood = get_likelihood(term_docs_train, label_index, smoothing)
term_docs_test = cv.transform(X_test)
posterior = get_posterior(term_docs_test, prior, likelihood)
correct = 0.0
for pred, actual in zip(posterior, Y_test):
if actual == 1:
if pred[1] >= 0.5:
correct += 1
elif pred[0] > 0.5:
correct += 1
print('The accuracy on {0} testing samples is: {1:.1f}%'.format(len(Y_test), correct/len(Y_test)*100))
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB(alpha=1.0, fit_prior=True)
clf.fit(term_docs_train, Y_train)
prediction_prob = clf.predict_proba(term_docs_test)
print(prediction_prob[0:10])
prediction = clf.predict(term_docs_test)
print(prediction[:10])
accuracy = clf.score(term_docs_test, Y_test)
print('The accuracy using MultinomialNB is: {0:.1f}%'.format(accuracy*100))
from sklearn.metrics import confusion_matrix
print(confusion_matrix(Y_test, prediction, labels=[0, 1]))
from sklearn.metrics import precision_score, recall_score, f1_score
print(precision_score(Y_test, prediction, pos_label=1))
print(recall_score(Y_test, prediction, pos_label=1))
print(f1_score(Y_test, prediction, pos_label=1))
print(f1_score(Y_test, prediction, pos_label=0))
from sklearn.metrics import classification_report
report = classification_report(Y_test, prediction)
print(report)
pos_prob = prediction_prob[:, 1]
thresholds = np.arange(0.0, 1.2, 0.1)
true_pos, false_pos = [0]*len(thresholds), [0]*len(thresholds)
for pred, y in zip(pos_prob, Y_test):
for i, threshold in enumerate(thresholds):
if pred >= threshold:
if y == 1:
true_pos[i] += 1
else:
false_pos[i] += 1
else:
break
true_pos_rate = [tp / 516.0 for tp in true_pos]
false_pos_rate = [fp / 1191.0 for fp in false_pos]
import matplotlib.pyplot as plt
plt.figure()
lw = 2
plt.plot(false_pos_rate, true_pos_rate, color='darkorange',
lw=lw)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
plt.show()
from sklearn.metrics import roc_auc_score
print(roc_auc_score(Y_test, pos_prob))
from sklearn.model_selection import StratifiedKFold
k = 10
k_fold = StratifiedKFold(n_splits=k)
# convert to numpy array for more efficient slicing
cleaned_emails_np = np.array(cleaned_emails)
labels_np = np.array(labels)
max_features_option = [2000, 4000, 8000]
smoothing_factor_option = [0.5, 1.0, 1.5, 2.0]
fit_prior_option = [True, False]
auc_record = {}
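# Grid sweep: for every (max_features, smoothing, fit_prior) combination,
# accumulate the AUC across the k folds; per-combination averages are
# printed after the loop.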
for train_indices, test_indices in k_fold.split(cleaned_emails, labels):
X_train, X_test = cleaned_emails_np[train_indices], cleaned_emails_np[test_indices]
Y_train, Y_test = labels_np[train_indices], labels_np[test_indices]
for max_features in max_features_option:
if max_features not in auc_record:
auc_record[max_features] = {}
cv = CountVectorizer(stop_words="english", max_features=max_features)
term_docs_train = cv.fit_transform(X_train)
term_docs_test = cv.transform(X_test)
for smoothing_factor in smoothing_factor_option:
if smoothing_factor not in auc_record[max_features]:
auc_record[max_features][smoothing_factor] = {}
for fit_prior in fit_prior_option:
clf = MultinomialNB(alpha=smoothing_factor, fit_prior=fit_prior)
clf.fit(term_docs_train, Y_train)
prediction_prob = clf.predict_proba(term_docs_test)
pos_prob = prediction_prob[:, 1]
auc = roc_auc_score(Y_test, pos_prob)
auc_record[max_features][smoothing_factor][fit_prior] \
= auc + auc_record[max_features][smoothing_factor].get(fit_prior, 0.0)
print(auc_record)
print('max features smoothing fit prior auc')
for max_features, max_feature_record in auc_record.items():
for smoothing, smoothing_record in max_feature_record.items():
for fit_prior, auc in smoothing_record.items():
print(' {0} {1} {2} {3:.4f}'.format(max_features, smoothing, fit_prior, auc/k))
| [
"[email protected]"
] | |
3422a51a83dc62f586c3b410c2855a1a571865e7 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/mornington.py | a03be2e24b8431ad233a6f58e1d012ac0ef38b44 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 63 | py | ii = [('WilbRLW.py', 3), ('AubePRP2.py', 6), ('WadeJEB.py', 7)] | [
"[email protected]"
] | |
ea099bf25701e772952a954d522c781a406a6161 | d37277c61facf70dae7d74c82e5b14826d0f7029 | /task1_AdvancedModels/task1/advanced_model/migrations/0002_employee.py | d1a9d6aeb02a92f83b929f9432c778dd9e45dff9 | [] | no_license | ProgMmgGhoneim/Django-Tasks | d8d53acbec6e042261ee28ef0e5931fb40e93fd7 | 2288c1a9c3d1348897f5fb7be42bc807719aacb4 | refs/heads/master | 2020-03-30T08:41:10.720962 | 2018-10-01T23:27:59 | 2018-10-01T23:27:59 | 151,031,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | # Generated by Django 2.0.7 on 2018-07-22 12:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('advanced_model', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=300)),
('last_name', models.CharField(max_length=200)),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='employees', related_query_name='person', to='advanced_model.Company')),
],
),
]
| [
"[email protected]"
] | |
c0638bf57936d184b304610bc685f889609e27a4 | 59166105545cdd87626d15bf42e60a9ee1ef2413 | /dbpedia/api/cultivated_variety_api.py | 12087e5025ead772d770219f659ece42c3db2db5 | [] | no_license | mosoriob/dbpedia_api_client | 8c594fc115ce75235315e890d55fbf6bd555fa85 | 8d6f0d04a3a30a82ce0e9277e4c9ce00ecd0c0cc | refs/heads/master | 2022-11-20T01:42:33.481024 | 2020-05-12T23:22:54 | 2020-05-12T23:22:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,095 | py | # coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from dbpedia.api_client import ApiClient
from dbpedia.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class CultivatedVarietyApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def cultivatedvarietys_get(self, **kwargs): # noqa: E501
"""List all instances of CultivatedVariety # noqa: E501
Gets a list of all instances of CultivatedVariety (more information in http://dbpedia.org/ontology/CultivatedVariety) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cultivatedvarietys_get(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str label: Filter by label
:param int page: Page number
:param int per_page: Items per page
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[CultivatedVariety]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.cultivatedvarietys_get_with_http_info(**kwargs) # noqa: E501
def cultivatedvarietys_get_with_http_info(self, **kwargs): # noqa: E501
"""List all instances of CultivatedVariety # noqa: E501
Gets a list of all instances of CultivatedVariety (more information in http://dbpedia.org/ontology/CultivatedVariety) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cultivatedvarietys_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str label: Filter by label
:param int page: Page number
:param int per_page: Items per page
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(list[CultivatedVariety], status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'label',
'page',
'per_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method cultivatedvarietys_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and 'per_page' in local_var_params and local_var_params['per_page'] > 200: # noqa: E501
raise ApiValueError("Invalid value for parameter `per_page` when calling `cultivatedvarietys_get`, must be a value less than or equal to `200`") # noqa: E501
if self.api_client.client_side_validation and 'per_page' in local_var_params and local_var_params['per_page'] < 1: # noqa: E501
raise ApiValueError("Invalid value for parameter `per_page` when calling `cultivatedvarietys_get`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'label' in local_var_params and local_var_params['label'] is not None: # noqa: E501
query_params.append(('label', local_var_params['label'])) # noqa: E501
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'per_page' in local_var_params and local_var_params['per_page'] is not None: # noqa: E501
query_params.append(('per_page', local_var_params['per_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/cultivatedvarietys', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[CultivatedVariety]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def cultivatedvarietys_id_get(self, id, **kwargs): # noqa: E501
"""Get a single CultivatedVariety by its id # noqa: E501
Gets the details of a given CultivatedVariety (more information in http://dbpedia.org/ontology/CultivatedVariety) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cultivatedvarietys_id_get(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The ID of the CultivatedVariety to be retrieved (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: CultivatedVariety
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.cultivatedvarietys_id_get_with_http_info(id, **kwargs) # noqa: E501
def cultivatedvarietys_id_get_with_http_info(self, id, **kwargs): # noqa: E501
"""Get a single CultivatedVariety by its id # noqa: E501
Gets the details of a given CultivatedVariety (more information in http://dbpedia.org/ontology/CultivatedVariety) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cultivatedvarietys_id_get_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The ID of the CultivatedVariety to be retrieved (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(CultivatedVariety, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method cultivatedvarietys_id_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `cultivatedvarietys_id_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/cultivatedvarietys/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CultivatedVariety', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
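

# Usage sketch (an assumption: the default ApiClient is configured against a
# live DBpedia-compatible endpoint; the paging values are illustrative):
if __name__ == '__main__':
    api = CultivatedVarietyApi()
    for variety in api.cultivatedvarietys_get(page=1, per_page=10):
        print(variety)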
| [
"[email protected]"
] | |
60d41789f0b6c11b0a220611f79880209d81a28c | 39beeca8b6862adfb7f1a55b9f5308b20cd64395 | /reports_tex/models/__init__.py | 01321daae12a940393dd8ab5827c5ec186bb0474 | [] | no_license | Ibrahimmardini/texmar | 98c34469618b72648686c3252b6053f1dd07e9a0 | d31416df8feb87b93d757b1451be5f870d3ca867 | refs/heads/master | 2023-08-15T20:20:57.520164 | 2021-02-02T12:02:17 | 2021-02-02T12:02:17 | 255,095,450 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | # -*- coding: utf-8 -*-
from . import account_bank_statement
from . import account_payment | [
"[email protected]"
] | |
d588d68aeb430577ac4064e7a02be539d12d03ea | a2c74fac6c18b998a41c47f9b6a87885e6166f4e | /controllers/ventas/PlanPagosController.py | c82cfd711718c20a6fabc814bd2090ddb6d005b5 | [] | no_license | alanclaros/salesfoodv20 | 1bd6c19a4315b425f61ab8340f81478411ec765d | 2a710142dc324d89843a4e16b40a34b1b50ff925 | refs/heads/master | 2023-08-29T04:26:25.886154 | 2021-10-05T15:11:51 | 2021-10-05T15:11:51 | 413,471,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,891 | py | from controllers.DefaultValues import DefaultValues
from django.conf import settings
from django.apps import apps
from inventarios.models import PlanPagos, PlanPagosDetalles, PlanPagosPagos
from permisos.models import UsersPerfiles
from cajas.models import Cajas, CajasIngresos
from configuraciones.models import Puntos
from status.models import Status
from decimal import Decimal
from django.db import transaction
from decimal import Decimal
# date helpers
from utils.dates_functions import get_date_show, get_date_system, get_seconds_date1_sub_date2, get_day_from_date
from utils.permissions import get_permissions_user, get_system_settings
from controllers.cajas.CajasIngresosController import CajasIngresosController
from utils.validators import validate_string, validate_number_int, validate_number_decimal
# direct database connection
from django.db import connection
class PlanPagosController(DefaultValues):
def __init__(self):
DefaultValues.__init__(self)
self.modelo_name = 'PlanPagos'
self.modelo_id = 'plan_pago_id'
self.modelo_app = 'ventas'
self.modulo_id = settings.MOD_PLAN_PAGOS
        # session variables
self.modulo_session = "plan_pagos"
self.columnas.append('fecha')
self.columnas.append('venta')
self.columnas.append('total')
self.columnas.append('saldo')
self.variables_filtros.append('search_tipo_plan_pago')
self.variables_filtros.append('search_concepto')
self.variables_filtros.append('search_apellidos')
self.variables_filtros.append('search_nombres')
self.variables_filtros.append('search_ci_nit')
self.variables_filtros.append('search_codigo')
self.variables_filtros.append('search_activos')
self.variables_filtros.append('search_anulados')
self.variables_filtros.append('search_almacen2')
self.variables_filtros_defecto['search_tipo_plan_pago'] = 'venta'
self.variables_filtros_defecto['search_concepto'] = ''
self.variables_filtros_defecto['search_apellidos'] = ''
self.variables_filtros_defecto['search_nombres'] = ''
self.variables_filtros_defecto['search_ci_nit'] = ''
self.variables_filtros_defecto['search_codigo'] = ''
self.variables_filtros_defecto['search_activos'] = '1'
self.variables_filtros_defecto['search_anulados'] = '0'
self.variables_filtros_defecto['search_almacen2'] = '0'
self.variable_page = "page"
self.variable_page_defecto = "1"
self.variable_order = "search_order"
self.variable_order_value = self.columnas[0]
self.variable_order_type = "search_order_type"
self.variable_order_type_value = 'DESC'
        # tables that must be checked before deleting
self.modelos_eliminar = {}
        # form control
self.control_form = ""
# sql_add
self.sql_venta = ""
self.sql_inventario = ""
self.sql_cantidad = ''
self.sql_add = ''
def index(self, request):
DefaultValues.index(self, request)
self.filtros_modulo.clear()
        # query
self.sql_venta = ''
self.sql_inventario = ''
self.sql_add = ''
if self.variables_filtros_values['search_codigo'].strip() != '':
self.sql_add += f"AND p.plan_pago_id='{self.variables_filtros_values['search_codigo'].strip()}' "
else:
            # voided records
if 'search_anulados' in request.POST.keys() or self.variables_filtros_values['search_anulados'] == '1':
self.variables_filtros_values['search_anulados'] = '1'
self.sql_add += f"AND p.status_id='{self.anulado}' "
else:
self.variables_filtros_values['search_anulados'] = '0'
self.sql_add += f"AND p.status_id='{self.activo}' "
            # active records
if 'search_activos' in request.POST.keys() or self.variables_filtros_values['search_activos'] == '1':
self.variables_filtros_values['search_activos'] = '1'
self.sql_add += "AND p.saldo>0 "
else:
self.variables_filtros_values['search_activos'] = '0'
self.sql_add += "AND p.saldo=0 "
if self.variables_filtros_values['search_tipo_plan_pago'].strip() == 'venta':
                # sales payment plans: filter by client fields
                # last name
if self.variables_filtros_values['search_apellidos'].strip() != "":
self.sql_add += f"AND c.apellidos LIKE '%{self.variables_filtros_values['search_apellidos'].strip()}%' "
                # first name
if self.variables_filtros_values['search_nombres'].strip() != "":
self.sql_add += f"AND c.nombres LIKE '%{self.variables_filtros_values['search_nombres'].strip()}%' "
# ci_nit
if self.variables_filtros_values['search_ci_nit'].strip() != "":
self.sql_add += f"AND c.ci_nit LIKE '%{self.variables_filtros_values['search_ci_nit'].strip()}%' "
else:
                # inventory payment plans
self.sql_add += f"AND r.almacen2_id='{self.variables_filtros_values['search_almacen2']}' "
if self.variables_filtros_values['search_concepto'].strip() != '':
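                # build LIKE patterns for every ordering of the (up to three)
                # space-separated search words, so word order does not matter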
division = self.variables_filtros_values['search_concepto'].strip().split(' ')
if len(division) == 1:
self.sql_add += f"AND r.concepto LIKE '%{self.variables_filtros_values['search_concepto'].strip()}%' "
elif len(division) == 2:
self.sql_add += f"AND (r.concepto LIKE '%{division[0]}%{division[1]}%' OR r.concepto LIKE '%{division[1]}%{division[0]}%' "
self.sql_add += ') '
# if len(division) == 3:
else:
self.sql_add += f"AND (r.concepto LIKE '%{division[0]}%{division[1]}%{division[2]}' "
self.sql_add += f"OR r.concepto LIKE '%{division[0]}%{division[2]}%{division[1]}' "
self.sql_add += f"OR r.concepto LIKE '%{division[1]}%{division[0]}%{division[2]}' "
self.sql_add += f"OR r.concepto LIKE '%{division[1]}%{division[2]}%{division[0]}' "
self.sql_add += f"OR r.concepto LIKE '%{division[2]}%{division[0]}%{division[1]}' "
self.sql_add += f"OR r.concepto LIKE '%{division[2]}%{division[1]}%{division[0]}' "
self.sql_add += ') '
        # payment plan type
if self.variables_filtros_values['search_tipo_plan_pago'].strip() == 'venta':
self.sql_venta = "SELECT p.fecha, p.concepto, p.numero_cuotas, p.monto_total, p.saldo, p.mensual_dias, p.dia_mensual, p.tiempo_dias, p.user_perfil_id_anula, p.motivo_anula, "
self.sql_venta += "c.apellidos, c.nombres, c.ci_nit, v.numero_venta, p.plan_pago_id, p.status_id "
self.sql_cantidad = "SELECT COUNT(*) AS cantidad "
aux = ''
aux += "FROM plan_pagos p, ventas v, clientes c "
aux += "WHERE p.venta_id=v.venta_id AND v.cliente_id=c.cliente_id "
aux += self.sql_add
self.sql_venta += aux
self.sql_cantidad += aux
self.sql_venta += "ORDER BY p.fecha, c.apellidos, c.nombres "
#print('venta: ', self.sql_venta)
else:
            # inventory payment plan
self.sql_inventario = "SELECT p.fecha, p.concepto, p.numero_cuotas, p.monto_total, p.saldo, p.mensual_dias, p.dia_mensual, p.tiempo_dias, p.user_perfil_id_anula, p.motivo_anula, "
self.sql_inventario += "r.concepto, a.almacen, r.numero_registro, p.plan_pago_id, p.status_id "
self.sql_cantidad = "SELECT COUNT(*) AS cantidad "
aux = ''
aux += "FROM plan_pagos p, registros r, almacenes a "
aux += "WHERE p.registro_id=r.registro_id AND r.almacen2_id=a.almacen_id "
aux += self.sql_add
self.sql_inventario += aux
self.sql_cantidad += aux
self.sql_inventario += "ORDER BY p.fecha, r.concepto "
#print('inventario: ', self.sql_inventario)
        # pagination: build the page list and the LIMIT bounds
self.pagination()
        # store the pagination in the session
request.session[self.modulo_session]['pages_list'] = self.pages_list
        # fetch the records
return self.get_list()
def records_count(self):
"""cantidad de registros del modulo"""
cantidad = 0
with connection.cursor() as cursor:
cursor.execute(self.sql_cantidad)
row = cursor.fetchone()
if row:
cantidad = row[0]
return cantidad
def pagination(self):
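        """Build the page-number list (capped at 15 entries) and compute the
        LIMIT bounds for the current page from the per-page setting."""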
settings_sistema = get_system_settings()
cant_per_page = settings_sistema['cant_per_page']
self.pages_list = []
cant_total = self.records_count()
j = 1
i = 0
while i < cant_total:
self.pages_list.append(j)
i = i + cant_per_page
j += 1
if j > 15:
break
self.pages_limit_botton = (int(self.variable_page_val) - 1) * cant_per_page
self.pages_limit_top = self.pages_limit_botton + cant_per_page
def get_list(self):
retorno = []
if self.variables_filtros_values['search_tipo_plan_pago'] == 'venta':
sql_mandar = self.sql_venta
sql_mandar += f"LIMIT {self.pages_limit_botton},{self.pages_limit_top} "
with connection.cursor() as cursor:
cursor.execute(sql_mandar)
rows = cursor.fetchall()
for row in rows:
datos = {}
datos['fecha'] = row[0]
datos['concepto'] = row[1]
datos['numero_cuotas'] = row[2]
datos['monto_total'] = row[3]
datos['saldo'] = row[4]
datos['mensual_dias'] = row[5]
datos['dia_mensual'] = row[6]
datos['tiempo_dias'] = row[7]
datos['user_id_anula'] = row[8]
datos['motivo_anula'] = row[9]
datos['detalle'] = row[10] + ' ' + row[11] + ', CI/NIT: ' + row[12] + f" (V:{row[13]})"
datos['plan_pago_id'] = row[14]
datos['status_id'] = row[15]
retorno.append(datos)
else:
sql_mandar = self.sql_inventario
sql_mandar += f"LIMIT {self.pages_limit_botton},{self.pages_limit_top} "
with connection.cursor() as cursor:
cursor.execute(sql_mandar)
rows = cursor.fetchall()
for row in rows:
datos = {}
datos['fecha'] = row[0]
datos['concepto'] = row[1]
datos['numero_cuotas'] = row[2]
datos['monto_total'] = row[3]
datos['saldo'] = row[4]
datos['mensual_dias'] = row[5]
datos['dia_mensual'] = row[6]
datos['tiempo_dias'] = row[7]
datos['user_id_anula'] = row[8]
datos['motivo_anula'] = row[9]
datos['detalle'] = row[10] + f" A:{row[11]} (I:{row[12]}) "
datos['plan_pago_id'] = row[13]
datos['status_id'] = row[14]
retorno.append(datos)
return retorno
def add_pago(self, request, plan_pago_id):
"""aniadimos un nuevo pago"""
try:
            # validate the inputs
monto = validate_number_decimal('monto', request.POST['monto'])
observacion = validate_string('observacion', request.POST['observacion'], remove_specials='yes')
aux_caja = validate_number_int('caja', request.POST['caja'])
if monto <= 0:
self.error_operation = 'Debe ingresar un monto valido'
return False
            # cash register
caja_id = Cajas.objects.get(pk=aux_caja)
            # status
status_cuota = self.status_cuota_pagada
            # user
usuario = request.user
user_perfil = UsersPerfiles.objects.get(user_id=usuario)
punto = Puntos.objects.get(pk=user_perfil.punto_id)
            # payment plan
plan_pago = PlanPagos.objects.get(pk=int(plan_pago_id))
datos = {}
datos['monto'] = monto
datos['observacion'] = observacion
datos['caja_id'] = caja_id
datos['punto_id'] = punto
datos['plan_pago'] = plan_pago
datos['status_id'] = status_cuota
datos['user_perfil_id'] = user_perfil
datos['fecha'] = 'now'
datos['created_at'] = 'now'
datos['updated_at'] = 'now'
if self.add_pago_db(**datos):
self.error_operation = ""
return True
else:
return False
except Exception as ex:
self.error_operation = "Error al agregar el pago, " + str(ex)
return False
def add_pago_db(self, **datos):
"""aniadimos a la base de datos"""
try:
            # transaction
with transaction.atomic():
                # update the payment plan balance
#plan_pago= PlanPagos.objects.get(pk=datos['plan'])
if datos['plan_pago'].saldo - datos['monto'] < 0:
datos['plan_pago'].saldo = 0
datos['monto'] = datos['plan_pago'].saldo
else:
datos['plan_pago'].saldo = datos['plan_pago'].saldo - datos['monto']
datos['plan_pago'].updated_at = datos['updated_at']
datos['plan_pago'].save()
campos_add = {}
campos_add['monto'] = datos['monto']
campos_add['saldo'] = datos['plan_pago'].saldo
campos_add['persona_paga'] = datos['observacion']
campos_add['fecha'] = datos['fecha']
campos_add['numero_cuota'] = self.get_numero_cuota(datos['plan_pago'].plan_pago_id)
                campos_add['user_perfil_id_paga'] = 0  # user of the warehouse being sold to
campos_add['cliente_id_paga'] = datos['plan_pago'].cliente_id
campos_add['created_at'] = datos['created_at']
campos_add['updated_at'] = datos['updated_at']
campos_add['plan_pago_id'] = datos['plan_pago']
campos_add['user_perfil_id'] = datos['user_perfil_id']
campos_add['status_id'] = datos['status_id']
                # new record
pp_add = PlanPagosPagos.objects.create(**campos_add)
pp_add.save()
                # cash register income entry
status_activo = self.status_activo
ci_controller = CajasIngresosController()
campos_ingreso = {}
campos_ingreso['caja_id'] = datos['caja_id']
campos_ingreso['punto_id'] = datos['punto_id']
campos_ingreso['user_perfil_id'] = datos['user_perfil_id']
campos_ingreso['status_id'] = status_activo
campos_ingreso['fecha'] = datos['fecha']
campos_ingreso['concepto'] = 'ingreso de efectivo, plan pago: ' + str(datos['plan_pago'].plan_pago_id)
campos_ingreso['monto'] = pp_add.monto
campos_ingreso['created_at'] = datos['created_at']
campos_ingreso['updated_at'] = datos['updated_at']
campos_ingreso['venta_plan_pago_id'] = pp_add.plan_pago_pago_id
                # save it
ci_controller.add_db(**campos_ingreso)
self.error_operation = ''
return True
except Exception as ex:
self.error_operation = 'error de argumentos, ' + str(ex)
print('ERROR registros add pago de plan de pago, '+str(ex))
return False
def can_anular(self, id, user):
"""verificando si se puede eliminar o no la tabla"""
        # only a user with permission in the same branch may void
usuario_perfil = UsersPerfiles.objects.get(user_id=user)
punto = Puntos.objects.get(pk=usuario_perfil.punto_id)
permisos = get_permissions_user(user, settings.MOD_PLAN_PAGOS)
        # the payment record
plan_pago_pago = PlanPagosPagos.objects.get(pk=id)
if plan_pago_pago.status_id.status_id == self.anulado:
self.error_operation = 'el registro ya esta anulado'
return False
        # the record must belong to the same branch
plan_pago = PlanPagos.objects.get(pk=plan_pago_pago.plan_pago_id.plan_pago_id)
plan_pago_punto = Puntos.objects.get(pk=plan_pago.punto_id)
if plan_pago_punto.sucursal_id == punto.sucursal_id:
            # check it is a payment plan and no installment has been paid
if permisos.anular:
return True
return False
def anular(self, request, id):
"""anulando el registro"""
try:
if self.can_anular(id, request.user):
status_anular = self.status_anulado
motivo_a = validate_string('motivo anula', request.POST['motivo_anula'], remove_specials='yes')
campos_update = {}
                # needed to update the stock
user_perfil = UsersPerfiles.objects.get(user_id=request.user)
campos_update['user_perfil_id'] = user_perfil
campos_update['user_perfil_id_anula'] = user_perfil.user_perfil_id
campos_update['motivo_anula'] = motivo_a
campos_update['status_id'] = status_anular
campos_update['deleted_at'] = 'now'
if self.anular_db(id, **campos_update):
self.error_operation = ''
return True
else:
return False
else:
self.error_operation = 'No tiene permiso para anular este pago'
return False
except Exception as ex:
print('Error anular pago: ' + str(ex))
self.error_operation = 'Error al anular el pago, ' + str(ex)
return False
def anular_db(self, id, **datos):
""" anulamos en la bd """
try:
with transaction.atomic():
campos_update = {}
campos_update['user_perfil_id_anula'] = datos['user_perfil_id_anula']
campos_update['motivo_anula'] = datos['motivo_anula']
campos_update['status_id'] = datos['status_id']
campos_update['deleted_at'] = datos['deleted_at']
                # apply the update
pp_pago_update = PlanPagosPagos.objects.filter(pk=id)
pp_pago_update.update(**campos_update)
                # void the matching cash-register entry
ci_controller = CajasIngresosController()
status_activo = self.status_activo
caja_ingreso = CajasIngresos.objects.get(venta_plan_pago_id=id, status_id=status_activo)
ci_controller.delete_db(caja_ingreso.caja_ingreso_id, **campos_update)
                # update the payment plan: restore the voided amount to the balance
pp_pago = PlanPagosPagos.objects.get(pk=id)
plan_pago = PlanPagos.objects.get(pk=pp_pago.plan_pago_id.plan_pago_id)
plan_pago.saldo = plan_pago.saldo + pp_pago.monto
plan_pago.updated_at = 'now'
plan_pago.save()
self.error_operation = ''
return True
except Exception as ex:
print('Error anular pago de plan pago db: ' + str(ex))
self.error_operation = 'Error de argumentos, ' + str(ex)
            return False  # keep the same contract as the success path
def get_detalles(self, plan_pago_id):
"""devolvemos los detalles del plan de pagos"""
plan_pagos_detalles = []
try:
plan_pago = PlanPagos.objects.get(pk=int(plan_pago_id))
status_cuota_pendiente = self.status_cuota_pendiente
plan_pagos_detalles = PlanPagosDetalles.objects.filter(plan_pago_id=plan_pago, status_id=status_cuota_pendiente).order_by('fecha')
return plan_pagos_detalles
except Exception as e:
print('error al recuperar plan pagos detalles: ' + str(plan_pago_id) + ', ' + str(e))
return plan_pagos_detalles
def get_pagos_realizados(self, plan_pago_id):
"""devolvemos los pagos realizados del plan de pagos"""
pagos = []
try:
plan_pago = PlanPagos.objects.get(pk=int(plan_pago_id))
#status_activo = Status.objects.get(pk=self.activo)
status_cuota_pagada = Status.objects.get(pk=self.cuota_pagada)
pagos = PlanPagosPagos.objects.filter(plan_pago_id=plan_pago).order_by('fecha')
return pagos
except Exception as e:
print('error al recuperar los pagos del plan pagos: ' + str(plan_pago_id) + ', ' + str(e))
return pagos
def get_numero_cuota(self, plan_pago_id):
"""devuelve el numero de cuota que esta pagando"""
plan_pago = PlanPagos.objects.get(pk=int(plan_pago_id))
status_cuota_pagada = Status.objects.get(pk=self.cuota_pagada)
listado = PlanPagosPagos.objects.filter(plan_pago_id=plan_pago, status_id=status_cuota_pagada)
        # installments already paid, plus one for the payment in progress
        # (an empty queryset gives 0 + 1 = 1, so no branch is needed)
        return listado.count() + 1
| [
"[email protected]"
] | |
d597659a2088ec54a3ecda166c0eeca50dc549df | 42ed6d4e67172522f79ab6f3c8cb650f4234be90 | /zjgg_project/zjgg_main_thread.py | 7dcb5cb76edbc368a89f0eb5e87fd46f4fbfc044 | [] | no_license | Hylan129/Self-Learning | 81a5143015850c33d5226c4da43d110150661dc7 | 06ccdc202f62629395900658909e1363a32632fd | refs/heads/master | 2020-06-12T19:45:50.479677 | 2020-05-26T15:38:26 | 2020-05-26T15:38:26 | 194,405,633 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,134 | py | #!/usr/bin/env python
# coding=utf-8
import time,threading
import encodings.idna
import navigation_model_thread,sixmic_control,human_sensor
#position_list = ['11','12','13','14','15','16','17','18']
#position_list = ['4','56','2']
position_list = ['890','test','2','zd','zd2','sys']
"""
"LOCALIZATION_FAILED":"定位失败",
"GOAL_NOT_SAFE","目的地有障碍物"
"TOO_CLOSE_TO_OBSTACLES":"离障碍物太近",
"UNREACHABLE":"目的地无法到达",
"REACHED":"已到达目的地",
"HEADING":"正在前往目的地",
"PLANNING":"正在规划路径",
"UNREACHED":"到达目的地附近,目的地有障碍物"
"""
text_list = ['呈现在您面前的,是本馆的重点展品之一——在2001年9月十一日恐怖袭击中倒塌的美国纽约世界贸易中心钢构件残骸。本展品长二点五米,宽一点七米,高零点八五米,重约3吨,属于世贸北塔顶部天线八边形部分,位于铭牌所指示的红圈位置,由美国纽约与新泽西港务局无偿捐赠本馆。世贸中心的倒塌一度让人们对钢结构的安全性和可靠性产生怀疑,我们展示这件钢构件的目的,一是谴责恐怖主义,二是要澄清人们对钢结构认识上的误区。因为“9·11恐怖袭击”是一次极端事件,事后的调查表明,无论建筑使用的是钢结构还是其他材料,均难以承受如此猛烈的撞击和如此高强度的燃烧,恰恰因为钢结构的良好表现,为撞击部位以下的人员逃生争取到更多的时间,北塔和南塔在遭受撞击后仍然分别坚持了一百零三分钟和五十七分钟。2014年在世贸中心原址附近落成的世贸中心1号楼,主体仍然采用钢结构,再次证明了人们对钢结构的信心。',
'您背后的展墙,是新的里程碑板块,讲述第二次世界大战到20世纪末钢结构在世界各国的普遍应用。如美国的圣路易斯拱门、加拿大蒙特利尔世博会的美国馆、澳大利亚的悉尼歌剧院、法国的蓬皮杜国家艺术文化中心、日本的福冈体育馆等,当然也包括纽约世界贸易中心。这些地标建筑,展示着钢结构在人类生活中越来越广泛的应用,印证着世界工业文明发展的新的辉煌成就。',
'讲解完毕,小派在这停留三分钟,三分钟之后小派将带大家去下一个讲解点呢。',
'新中国成立以后,中国的钢铁工业从废墟中起步,但由于钢铁资源的短缺,仅在一些重大工程上,如武汉长江大桥、人民大会堂等使用了钢结构。改革开放以后,中国的钢结构产业进入逐渐发展期,截至二十世纪末,中国陆续建成了深圳发展中心、深圳地王大厦、上海金茂大厦等标志性钢结构建筑。最初,这些建筑由外国人设计,用外国的钢材,在外国加工,中国的企业只是承担施工,到后来,越来越多的钢结构建筑由中国人设计,用国产钢材,在国内加工。中国的钢结构产业沿着正确的轨道奋起直追。',
'二十一世纪堪称钢结构的世纪,新千年以来,世界各地不断涌现出新的钢结构建筑和桥梁,钢结构高度、跨度和精度的纪录不断刷新。在您右侧,通过三个屏幕展示这一时期的钢结构建设成就。左侧屏介绍的是2000年以来世界范围内钢结构经典建筑,如目前世界最高的哈利法塔,高度达到八百二十八米;中间屏介绍的是本世纪前十年中国的钢结构建设成就,包括上海环球金融中心、北京国家体育场和中央电视台、武汉火车站等;右侧屏则是2010年以来中国建成的钢结构建筑和桥梁,如深圳宝安国际机场T3航站楼、上海中心大厦、深圳平安金融中心等。中国的高端钢结构工程从设计到钢材供应、构件加工、现场施工已全部实现国产化,而且,钢结构乃至整个建筑业的技术水平已进入世界前列。',
'您现在进入本馆的科技厅。在这一部分,我们以科技为主线,介绍钢结构体系、设计、制造、安装、防腐、防火、防震、检测、监测等内容,同时也追溯这些技术的演进过程。您现在穿行在一座钢桥上,它是不是有点像上海的外白渡桥?在钢桥的两侧,我们以多媒体搭配模型的方式,重点介绍8种重要的结构体系。它们是:立体桁架结构、单层刚架、框架结构、框架-支撑结构、框架-筒体结构、巨型框架-筒体-支撑结构、索结构、网架结构。',
]
time_list = [0.3 * len(time_) for time_ in text_list]  # dwell time: roughly 0.3 s per narrated character
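# Pacing sketch (the 0.3 s/character speech rate is an assumption of this script):
# a 500-character passage yields 0.3 * 500 = 150 s of narration time before the
# robot moves on to the next point.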
def zjgg_xunhang():
    # patrol loop: drive to each point, then play the matching narration
    try:
while True:
for go_point,text_point,time_point in zip(position_list,text_list,time_list):
navigation_model_thread.navigation_position(go_point)
while True:
if(navigation_model_thread.navigation_value =='REACHED'):
break
#if(navigation_model_thread.navigation_value =='UNREACHED'):
# navigation_model_thread.navigation_position(go_point)
time.sleep(1)
print(navigation_model_thread.navigation_value,navigation_model_thread.statuscode_value)
time.sleep(2)
sixmic_control.send(sixmic_control.text_broadcast(text_point))
time.sleep(time_point)
except Exception as e:
with open('err.txt','a') as code:
code.write(str(e) + '\n')
def monitor_notice():
    # ask blocking pedestrians to step aside while the robot is navigating
    while True:
        if(navigation_model_thread.navigation_value in ["HEADING","UNREACHABLE", "PLANNING"]):
            if (navigation_model_thread.statuscode_value == 701):
                if(human_sensor.humansensor_value == human_sensor.human):
                    sixmic_control.send(sixmic_control.text_broadcast('您好!请借过一下!'))
                    human_sensor.red_shanshuo()
        time.sleep(0.5)  # throttle the polling loop instead of busy-waiting
if __name__ == '__main__':
try:
sixmic_control.port_open()
human_sensor.port_open()
        i = 3  # send the wake-up handshake packet three times
while(i):
sixmic_control.send(sixmic_control.buildShakePacket())
i -= 1
t1 = threading.Thread(target = zjgg_xunhang)
t2 = threading.Thread(target = human_sensor.humansensor_status)
t3 = threading.Thread(target = navigation_model_thread.status_status_monitor,args = (navigation_model_thread.url[1],))
t4 = threading.Thread(target = navigation_model_thread.status_navigtion_monitor,args = (navigation_model_thread.url[3],))
t5 = threading.Thread(target = monitor_notice)
#t6 = threading.Thread(target = human_coming_notice)
Threads = [t1,t2,t3,t4,t5]
for t in Threads:
t.start()
except Exception as e:
with open('zjgg_err.txt','a') as code:
code.write(str(e) + 'zjgg_err \n')
| [
"[email protected]"
] | |
a4c4a72eccc102761fa23a6b05f2d184b7d7e6bd | a7058080e41af37eb77c146fc09a5e4db57f7ec6 | /Solved/03955/03955.py | d1035b0c9ea3a2a50d96aab7660b7c8fea1e9062 | [] | no_license | Jinmin-Goh/BOJ_PS | bec0922c01fbf6e440589cc684d0cd736e775066 | 09a285bd1369bd0d73f86386b343d271dc08a67d | refs/heads/master | 2022-09-24T02:24:50.823834 | 2022-09-21T02:16:22 | 2022-09-21T02:16:22 | 223,768,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | # Problem No.: 3955
# Solver: Jinmin Goh
# Date: 20200611
# URL: https://www.acmicpc.net/problem/3955
import sys
import math
# extended Euclidean algorithm: returns (g, s, t) with a*s + b*t == g == gcd(a, b)
def expGCD(a: int, b: int) -> tuple:
if b == 0:
return (a, 1, 0)
temp = expGCD(b, a % b)
#print(a, b, temp)
x, y = temp[1], temp[2]
return (temp[0], y, x - (y * (a // b)))
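# Quick check of the identity (illustrative example, not part of the judge I/O):
# expGCD(3, 5) == (1, 2, -1), since 3*2 + 5*(-1) == 1 == gcd(3, 5)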
# find solution of kx + 1 = cy, (k, c, x, y are all positive int)
# -kx + cy = 1 or kx + cy = 1 when x is negative int
def main():
t = int(input())
for _ in range(t):
k, c = map(int, sys.stdin.readline().split())
# exception for c = 1 case
if c == 1:
if k + 1 > 10 ** 9:
print("IMPOSSIBLE")
else:
print(k + 1)
continue
ans = expGCD(k, c)
# if gcd(k, c) != 1
if ans[0] != 1:
print("IMPOSSIBLE")
continue
# general solution: x = x0 + c * t / y = y0 - k * t
# 0 > x and y > 0; x0 + c * t < 0 and y0 - k * t > 0
# t < min(-x0 / c, y0 / k)
# y <= 10 ** 9, k * t >= y0 - 10 ** 9
x0 = ans[1]
y0 = ans[2]
maxVal = math.floor(min(-(x0 / c), y0 / k))
minVal = y0 - 10 ** 9
if minVal > (maxVal * k):
print("IMPOSSIBLE")
else:
print(y0 - k * maxVal)
return
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
424f1ec6d08235b7758bbc7d66f4b0c9f69eac2f | 7da5ac719e4c9ca9cb3735d0ade3106183d96ffe | /Projeto/IoTcity_services/server/server/mainserver/forms.py | 1f441056b6edc243773d544b0f8e9e7759395fbb | [] | no_license | shanexia1818/IoTCity | a405c0921b417e5bb0a61966f9ca03a1f87147a7 | 3fe14b6918275684291f969fd6c3f69a7ee14a4c | refs/heads/master | 2020-08-07T21:08:38.811470 | 2018-09-10T11:10:56 | 2018-09-10T11:10:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,687 | py | from django.forms.extras.widgets import SelectDateWidget
import datetime
from django import forms
from models import Alarm
class ChoiceFieldNoValidation(forms.MultipleChoiceField):
def validate(self, value):
pass
class ActuatorForm(forms.Form):
def __init__(self, *args, **kwargs):
try:
senders = kwargs.pop('senders')
super(forms.Form, self).__init__(*args, **kwargs)
self.fields['streams'].choices = senders
super(ActuatorForm, self).full_clean()
except Exception as e:
super(forms.Form, self).__init__(*args, **kwargs)
streams = ChoiceFieldNoValidation(widget=forms.CheckboxSelectMultiple)
value = forms.FloatField(initial=0, required=True)
def clean(self):
cleaned_data = super(ActuatorForm, self).clean()
if len(cleaned_data['streams'])==0:
raise forms.ValidationError("Select at least one stream")
return cleaned_data
class RuleForm(forms.Form):
def __init__(self, *args, **kwargs):
try:
senders = kwargs.pop('senders')
super(forms.Form, self).__init__(*args, **kwargs)
self.fields['streams'].choices = senders
super(RuleForm, self).full_clean()
except Exception as e:
super(forms.Form, self).__init__(*args, **kwargs)
beg_date = forms.DateField(widget=SelectDateWidget, initial=datetime.date.today)
end_date = forms.DateField(widget=SelectDateWidget, initial=datetime.date.today)
name = forms.CharField(max_length=50, required=True)
mo = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-mon2','type':'checkbox'}), initial=False, required=False)
tu = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-tue2','type':'checkbox'}), initial=False, required=False)
we = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-wed2','type':'checkbox'}), initial=False, required=False)
th = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-thu2','type':'checkbox'}), initial=False, required=False)
fr = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-fri2','type':'checkbox'}), initial=False, required=False)
sa = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-sat2','type':'checkbox'}), initial=False, required=False)
su = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-sun2','type':'checkbox'}), initial=False, required=False)
streams = ChoiceFieldNoValidation(widget=forms.CheckboxSelectMultiple)
value = forms.FloatField(initial=0, required=True)
beg_hour = forms.IntegerField(max_value=23, min_value=0)
beg_min = forms.IntegerField(max_value=59, min_value=0)
end_hour = forms.IntegerField(max_value=23, min_value=0)
end_min = forms.IntegerField(max_value=59, min_value=0)
hours_active_beg = forms.IntegerField(max_value=23, min_value=0)
minutes_active_beg = forms.IntegerField(max_value=59, min_value=0)
def clean(self):
cleaned_data = super(RuleForm, self).clean()
beg_date = cleaned_data['beg_date']
end_date = cleaned_data['end_date']
beg_hour = cleaned_data['beg_hour']
end_hour = cleaned_data['end_hour']
beg_min = cleaned_data['beg_min']
end_min = cleaned_data['end_min']
if beg_date > end_date or (beg_date == end_date and beg_hour > end_hour) or (beg_date == end_date and beg_hour == end_hour and beg_min>end_min):
raise forms.ValidationError("Turn on date should be before turn off date.")
if len(cleaned_data['streams'])==0:
raise forms.ValidationError("Select at least one stream")
return cleaned_data
class AlarmForm(forms.Form):
def __init__(self, *args, **kwargs):
try:
subscription_list = kwargs.pop('subscriptions')
super(forms.Form, self).__init__(*args, **kwargs)
self.fields['subscriptions'].choices = subscription_list
super(AlarmForm, self).full_clean()
except Exception as e:
super(forms.Form, self).__init__(*args, **kwargs)
beg_date = forms.DateField(widget=SelectDateWidget, initial=datetime.date.today)
end_date = forms.DateField(widget=SelectDateWidget, initial=datetime.date.today)
name = forms.CharField(max_length=50, required=True)
mo = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-mon','type':'checkbox'}), initial=False, required=False)
tu = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-tue','type':'checkbox'}), initial=False, required=False)
we = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-wed','type':'checkbox'}), initial=False, required=False)
th = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-thu','type':'checkbox'}), initial=False, required=False)
fr = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-fri','type':'checkbox'}), initial=False, required=False)
sa = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-sat','type':'checkbox'}), initial=False, required=False)
su = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-sun','type':'checkbox'}), initial=False, required=False)
threshold = forms.FloatField()
beg_hour = forms.IntegerField(max_value=23, min_value=0)
beg_min = forms.IntegerField(max_value=59, min_value=0)
end_hour = forms.IntegerField(max_value=23, min_value=0)
end_min = forms.IntegerField(max_value=59, min_value=0)
hours_active_beg = forms.IntegerField(max_value=23, min_value=0)
minutes_active_beg = forms.IntegerField(max_value=59, min_value=0)
hours_active_end = forms.IntegerField(max_value=23, min_value=0)
minutes_active_end = forms.IntegerField(max_value=59, min_value=0)
subscriptions = ChoiceFieldNoValidation(widget=forms.CheckboxSelectMultiple, required=True)
type_alarm = forms.ChoiceField(choices=(('MAX', 'Maximum'), ('MIN', 'Minimum'), ), widget=forms.RadioSelect)
def clean(self):
cleaned_data = super(AlarmForm, self).clean()
beg_date = cleaned_data['beg_date']
end_date = cleaned_data['end_date']
beg_hour = cleaned_data['beg_hour']
end_hour = cleaned_data['end_hour']
beg_min = cleaned_data['beg_min']
end_min = cleaned_data['end_min']
if len(cleaned_data['subscriptions'])==0:
raise forms.ValidationError("Select at least one subscription")
if beg_date > end_date or (beg_date == end_date and beg_hour > end_hour) or (beg_date == end_date and beg_hour == end_hour and beg_min>end_min):
raise forms.ValidationError("Turn on date should be before turn off date.")
return cleaned_data
class NoteForm(forms.Form):
title = forms.CharField()
message = forms.CharField(widget=forms.Textarea, max_length=250)
beg_date = forms.DateField(widget=SelectDateWidget, initial=datetime.date.today)
hours_beg = forms.IntegerField(max_value=23, min_value=0)
minutes_beg = forms.IntegerField(max_value=59, min_value=0)
end_date = forms.DateField(widget=SelectDateWidget, initial=datetime.date.today)
hours_end = forms.IntegerField(max_value=23, min_value=0)
minutes_end = forms.IntegerField(max_value=59, min_value=0)
def clean(self):
cleaned_data = super(NoteForm, self).clean()
beg_date = cleaned_data['beg_date']
end_date = cleaned_data['end_date']
beg_hour = cleaned_data['hours_beg']
end_hour = cleaned_data['minutes_beg']
beg_min = cleaned_data['hours_end']
end_min = cleaned_data['minutes_end']
if beg_date > end_date or (beg_date == end_date and beg_hour > end_hour) or (beg_date == end_date and beg_hour == end_hour and beg_min>end_min):
raise forms.ValidationError("Turn on date should be before turn off date.")
return cleaned_data
| [
"[email protected]"
] | |
82a5cad0e8235b738e0ae0c95dbc93eac5cf2b79 | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-tpu/samples/generated_samples/tpu_v2alpha1_generated_tpu_list_nodes_sync.py | 68731aff1186d3aae6599641746cbe2858fc12db | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 1,822 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListNodes
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-tpu
# [START tpu_v2alpha1_generated_Tpu_ListNodes_sync]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import tpu_v2alpha1
def sample_list_nodes():
# Create a client
client = tpu_v2alpha1.TpuClient()
# Initialize request argument(s)
request = tpu_v2alpha1.ListNodesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_nodes(request=request)
# Handle the response
for response in page_result:
print(response)
# [END tpu_v2alpha1_generated_Tpu_ListNodes_sync]
| [
"[email protected]"
] | |
cbcf2654adc49ace4c41edf30bb75817dccb968a | e13bccceb4c2fefbf8000f9b34195ab434cf1656 | /build/rosmsg/catkin_generated/generate_cached_setup.py | ad97943d630b6f759446bf143ff4e688185e10d0 | [] | no_license | QuiN-cy/vacuum-en-band | ab59b718f289ad4e8a1f29e96724250b00bd894d | 48c296199b4a6ade40e084c9980d53ba1611a344 | refs/heads/master | 2023-06-01T12:13:38.664849 | 2021-06-11T15:42:42 | 2021-06-11T15:42:42 | 376,071,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/student/rosws/devel;/opt/ros/melodic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/student/rosws/devel/.private/rosmsg/env.sh')
output_filename = '/home/student/rosws/build/rosmsg/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
] | |
826c8d7292e9111db0edd35e191bb14eaab8fb39 | 87bfb9fb074b9b0dcbfeb09a6ab4db3aef813114 | /docs/source/conf.py | eef313a8f41fed1affcc7955073a69c9b5f200b8 | [
"MIT"
] | permissive | djones1040/panstamps | 931005219954690f8e7b9bf3c17bc191a22bfac6 | b9e67b4dc168846ddb36e4b5f143c136660a0535 | refs/heads/master | 2020-12-09T13:26:43.259596 | 2020-01-22T19:19:26 | 2020-01-22T19:19:26 | 233,318,818 | 0 | 0 | MIT | 2020-01-12T00:52:14 | 2020-01-12T00:52:13 | null | UTF-8 | Python | false | false | 14,345 | py | # -*- coding: utf-8 -*-
#
# panstamps documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 29 15:00:29 2016.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# -- Allow Markdown -----------------------------------------------------
# source_suffix = ['.rst', '.md']
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
moduleDirectory = os.path.dirname(os.path.realpath(__file__))
exec(open(moduleDirectory + "/../../panstamps/__version__.py").read())
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinx.ext.autosummary', 'sphinx.ext.graphviz']
# Generate Summaries
autosummary_generate = True
# Show Todos
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
from datetime import datetime, date, time
now = datetime.now()
now = now.strftime("%Y")
project = u'panstamps'
copyright = u'%(now)s, Dave Young' % locals()
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "v" + __version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_templates',
'**__version__.py', '**setup.py', 'api/panstamps.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'monokai'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ["panstamps."]
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_images/thespacedoctor_icon_white_circle.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_images/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
html_add_permalinks = u" ∞"
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
html_help_basename = 'panstampsdoc'
# -- Options for LaTeX output --------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'panstamps.tex', u'panstamps Documentation',
u'Dave Young', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_images/thespacedoctor_icon_dark.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'panstamps', u'panstamps Documentation',
[u'Dave Young'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'panstamps', u'panstamps Documentation',
u'Dave Young', 'panstamps', 'A CL-Util to download stacked and/or warp image stamps from the STScI PanSTARRS image server',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# Added to the start of every source file
# rst_prolog = """
# """
# The name of the default domain
primary_domain = "py"
trim_footnote_reference_space = True
def updateUsageRST():
from panstamps import cl_utils
usage = cl_utils.__doc__
if not "Usage:" in usage or "todo:" in usage:
return None
usageString = ""
for l in usage.split("\n"):
usageString += " " + l + "\n"
usage = """Command-Line Usage
==================
.. code-block:: bash
%(usageString)s""" % locals()
moduleDirectory = os.path.dirname(__file__)
uFile = moduleDirectory + "/_includes/usage.rst"
exists = os.path.exists(uFile)
if exists:
import codecs
writeFile = codecs.open(uFile, encoding='utf-8', mode='w')
writeFile.write(usage)
writeFile.close()
return None
updateUsageRST()
def generateAutosummaryIndex():
import panstamps
import inspect
import os.path
import time
# CHECK FOR LAST MODIFIED TIME - DON'T UPDATE IF < 5 SEC
# autobuild GOES INTO INFINITE LOOP OTHERWISE
moduleDirectory = os.path.dirname(__file__)
file = moduleDirectory + "/autosummary.rst"
exists = os.path.exists(file)
if not exists:
pathToWriteFile = file
try:
writeFile = open(pathToWriteFile, 'w')
writeFile.write("")
writeFile.close()
        except IOError as e:
message = 'could not open the file %s' % (pathToWriteFile,)
raise IOError(message)
now = time.time()
delta = now - os.path.getmtime(file)
if delta < 5:
return None
# GET ALL SUBPACKAGES
allSubpackages = ["panstamps"]
allSubpackages += findAllSubpackges(
pathToPackage="panstamps"
)
# INSPECT TO FIND ALL MODULES, CLASSES AND FUNCTIONS
allModules = []
allClasses = []
allFunctions = []
for sp in allSubpackages:
for name, obj in inspect.getmembers(__import__(sp, fromlist=[''])):
if inspect.ismodule(obj):
if name in ["numpy"]:
continue
thisMod = sp + "." + name
if thisMod not in allSubpackages and len(name) and name[0:2] != "__" and name[-5:] != "tests" and name != "cl_utils" and name != "utKit":
allModules.append(sp + "." + name)
for spm in allSubpackages + allModules:
for name, obj in inspect.getmembers(__import__(spm, fromlist=[''])):
if inspect.isclass(obj):
thisClass = spm + "." + name
if (thisClass == obj.__module__ or spm == obj.__module__) and len(name) and name[0:2] != "__":
allClasses.append(thisClass)
if inspect.isfunction(obj):
thisFunction = spm + "." + name
if (spm == obj.__module__ or obj.__module__ == thisFunction) and len(name) and name != "main" and name[0:2] != "__":
allFunctions.append(thisFunction)
allSubpackages = allSubpackages[1:]
allSubpackages.sort(reverse=False)
allModules.sort()
allClasses.sort()
allFunctions.sort()
allSubpackages = ("\n ").join(allSubpackages)
allModules = ("\n ").join(allModules)
allClasses = ("\n ").join(allClasses)
allFunctions = ("\n ").join(allFunctions)
# FOR SUBPACKAGES USE THE SUBPACKAGE TEMPLATE INSTEAD OF DEFAULT MODULE
# TEMPLATE
thisText = u""
if len(allSubpackages):
thisText += """
Subpackages
-----------
.. autosummary::
:toctree: _autosummary
:nosignatures:
:template: autosummary/subpackage.rst
%(allSubpackages)s
""" % locals()
if len(allModules):
thisText += """
Modules
-------
.. autosummary::
:toctree: _autosummary
:nosignatures:
%(allModules)s
""" % locals()
if len(allClasses):
thisText += """
Classes
-------
.. autosummary::
:toctree: _autosummary
:nosignatures:
%(allClasses)s
""" % locals()
if len(allFunctions):
thisText += """
Functions
---------
.. autosummary::
:toctree: _autosummary
:nosignatures:
%(allFunctions)s
""" % locals()
import codecs
moduleDirectory = os.path.dirname(__file__)
writeFile = codecs.open(
moduleDirectory + "/autosummary.rst", encoding='utf-8', mode='w')
writeFile.write(thisText)
writeFile.close()
import re
regex = re.compile(r'\n\s*.*?utKit\.utKit(\n|$)', re.I)
allClasses = regex.sub("\n", allClasses)
classAndFunctions = u"""
**Classes**
.. autosummary::
:nosignatures:
%(allClasses)s
**Functions**
.. autosummary::
:nosignatures:
%(allFunctions)s
""" % locals()
moduleDirectory = os.path.dirname(__file__)
writeFile = codecs.open(
moduleDirectory + "/classes_and_functions.rst", encoding='utf-8', mode='w')
writeFile.write(classAndFunctions)
writeFile.close()
return thisText
def findAllSubpackges(
pathToPackage
):
import pkgutil
importedPackage = __import__(
pathToPackage, fromlist=[''])
subPackages = []
for importer, modname, ispkg in pkgutil.walk_packages(importedPackage.__path__, prefix=importedPackage.__name__ + '.',
onerror=lambda x: None):
if ispkg and "tests" != modname[-5:] and "._" not in modname and ".tests." not in modname:
subPackages.append(modname)
return subPackages
autosummaryText = generateAutosummaryIndex()
# use the tab-trigger below for new function
# xt-def-with-logger
# Add substitutions here
rst_epilog = u"""
.. |tsd| replace:: thespacedoctor
""" % locals()
| [
"[email protected]"
] | |
79fa9bcaa7dd16ce5f84b87faa09734698925d58 | 9f53fdce8e10d648776719eec72d99b140343fff | /Section_1_Creating_GUIs_in_Python_with_Tkinter/Video2_5_GUI_tkinter_another_button.py | 1ffbd17db377c4915a825bef6d07e4d6f7ec376a | [] | no_license | syurskyi/Hands-On_Python_3_x_GUI_Programming | 9691d3ccbb3c3d3ffdec2184789cb62753e840d1 | c5144a5b90a036992e56de51c3d61d8c8f3cd2c1 | refs/heads/master | 2020-12-05T05:38:14.441849 | 2020-01-06T04:13:52 | 2020-01-06T04:13:52 | 232,022,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | import tkinter as tk # alias as tk
from tkinter import ttk # themed tk
gui = tk.Tk() # create class instance
gui.geometry('400x200+300+300') # specify window width, height and position
gui.title('GUI written in tkinter') # give the GUI a window title
gui.iconbitmap('py.ico') # icon expected inside the same folder
def click_event(): # call back function
gui.title('Button has been clicked') # update window title
button_one.config(text='I have been clicked!') # update button text
another_button = ttk.Button(gui, text="Another") # create another button
another_button.pack()
button_one = ttk.Button(gui, text="Click Me", command=click_event) # define command
button_one.pack()
gui.mainloop() # run main event loop
| [
"[email protected]"
] | |
57c54c1e797cfc9801c23e3f63fd88a11b98ede7 | 39bef50ed12468e57ad94a8e2551da6c7c45c8ed | /networkx/__init__.py | 4fad5ac5632b45550b2f08346b5bbf9e1fce22e0 | [] | no_license | biancini/Rorschach-Test-Platform | b1a5dfdbe5a15a68ce4dcf66887346fbf2e94169 | 7ae68e1054637046278325eaa419b23f09b420d3 | refs/heads/master | 2020-05-17T11:00:13.889678 | 2012-04-11T16:31:19 | 2012-04-11T16:31:19 | 3,789,381 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | """
NetworkX
========
NetworkX (NX) is a Python package for the creation, manipulation, and
study of the structure, dynamics, and functions of complex networks.
https://networkx.lanl.gov/
Using
-----
Just write in Python
>>> import networkx as nx
>>> G=nx.Graph()
>>> G.add_edge(1,2)
>>> G.add_node("spam")
>>> print(G.nodes())
[1, 2, 'spam']
>>> print(G.edges())
[(1, 2)]
"""
# Copyright (C) 2004-2010 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
#
# Add platform dependent shared library path to sys.path
#
from __future__ import absolute_import
import sys
if sys.version_info[:2] < (2, 6):
m = "Python version 2.6 or later is required for NetworkX (%d.%d detected)."
raise ImportError(m % sys.version_info[:2])
del sys
#These are import orderwise
from networkx.exception import *
from networkx import externalnx
from networkx import utils
# these packages work with Python >= 2.6
from networkx import classes
from networkx.classes import *
from networkx import convert
from networkx.convert import *
from networkx import relabel
from networkx.relabel import *
from networkx import generators
from networkx.generators import *
from networkx import readwrite
from networkx.readwrite import *
#Need to test with SciPy, when available
from networkx import algorithms
from networkx.algorithms import *
from networkx import linalg
from networkx.linalg import *
from networkx import drawing
from networkx.drawing import *
| [
"[email protected]"
] | |
1e33f94cd5e9126f1ef3513c2f0ad2a8358e625a | 0db67bff1f2dcdadecf635ae535add91cb54c4f3 | /PythonBasis/week01/task09.py | 85b6f0425d5ec02973a882d9ad950f451b84752b | [] | no_license | pavelbrnv/Coursera | 713fdb79dbf6fbde405fc991bd67db0cab30da00 | cc568f79229147866ff1df8539cf8ea66dc9ccca | refs/heads/master | 2023-03-07T23:21:09.685318 | 2021-02-22T15:08:27 | 2021-02-22T15:08:27 | 336,600,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | n = int(input())
v1 = n // 100        # hundreds digit
v2 = (n // 10) % 10  # tens digit
v3 = n % 10          # units digit
print(v1 + v2 + v3)  # digit sum of a three-digit number
| [
"[email protected]"
] | |
8872d3ac88ca46efd100864bc26ca5e79959ead5 | e425b9d1e837e39a2e73b7e8d18452deb903ce10 | /01_Fundamentals/Loops/EnumeratedList.py | 1f98d917e4633b3577d2d3aea2c60155da529f6e | [] | no_license | sokuro/PythonBFH | 1491a398c5a9930c454e96ad8834dea066bf82bf | 595ea77712c2397d9d92b1e21841e917d0a0c24d | refs/heads/master | 2021-09-15T21:47:13.613134 | 2018-06-11T13:39:58 | 2018-06-11T13:39:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | class EnumeratedList:
values = [1, 2, 3, 4, 5, 6]
# inputValue = input("Enter a value: ")
found_index = None
    for index, value in enumerate(values):
        if value == 5:
            found_index = index
            print('The value is in the Array')
            break
    else:  # no break: the value was never found
        print('The value is not in the array!')
print('The value\'s index is: ', found_index) | [
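# For/else recap (illustrative sketch, not part of the original lesson file):
# the else branch runs only when the loop finishes without hitting break.
# for x in [1, 2, 3]:
#     if x == 99:
#         break
# else:
#     print('99 is not in the list')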
"[email protected]"
] | |
354e712282e44463be244eef28d59e535d34af94 | e01c5d1ee81cc4104b248be375e93ae29c4b3572 | /Sequence4/DS/Week4/phone-store-1.py | 0ee5e7dbd3e5090265334e2f9ae5dc50d307def2 | [] | no_license | lalitzz/DS | 7de54281a34814601f26ee826c722d123ee8bd99 | 66272a7a8c20c0c3e85aa5f9d19f29e0a3e11db1 | refs/heads/master | 2021-10-14T09:47:08.754570 | 2018-12-29T11:00:25 | 2018-12-29T11:00:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,664 | py | # python3
class Node:
def __init__(self, key, value):
self.key = key
self.value = value
self.next = None
self.prev = None
class HashNode:
def __init__(self):
self.head = None
self.tail = None
class HashMap:
def __init__(self, size=16):
self.size = size
self.hash = [HashNode()] * size
def add(self, key, value):
node = Node(key, value)
if type(key) is str:
index = self.hash_str_fn(key)
elif type(key) is int:
index = self.hash_function(key)
head = self.hash[index].head
if head is None:
self.hash[index].head = node
else:
prev = None
while head is not None:
if head.key == key:
head.value = value
break
prev = head
head = head.next
if head is None:
prev.next = node
def get(self, key):
if type(key) is str:
index = self.hash_str_fn(key)
elif type(key) is int:
index = self.hash_function(key)
head = self.hash[index].head
while head is not None:
if head.key == key:
return head.value
head = head.next
return "not found"
def delete(self, key):
index = self.hash_function(key)
curr = self.hash[index].head
prev = None
while curr is not None:
if curr.key == key:
if prev is None:
self.hash[index].head = curr.next
else:
prev.next = curr.next
break
prev = curr
curr = curr.next
def hash_function(self, data):
a = 34
b = 2
index = (a * data + b)
p = len(str(index)) - 1
p = 10 ** p + 19
index %= p
return index % self.size
def hash_str_fn(self, data):
h = 0
n = len(data)
x = 31
p = 119
for i in range(n-1, -1, -1):
h += ((h * x) + ord(data[i]))
h %= p
return h % self.size
class Query:
def __init__(self, query):
self.type = query[0]
self.number = int(query[1])
if self.type == 'add':
self.name = query[2]
def read_queries():
n = int(input())
return [Query(input().split()) for i in range(n)]
def write_responses(result):
print('\n'.join(result))
def process_queries_naive(queries):
result = []
# Keep list of all existing (i.e. not deleted yet) contacts.
contacts = []
for cur_query in queries:
if cur_query.type == 'add':
# if we already have contact with such number,
# we should rewrite contact's name
for contact in contacts:
if contact.number == cur_query.number:
contact.name = cur_query.name
break
else: # otherwise, just add it
contacts.append(cur_query)
elif cur_query.type == 'del':
for j in range(len(contacts)):
if contacts[j].number == cur_query.number:
contacts.pop(j)
break
else:
response = 'not found'
for contact in contacts:
if contact.number == cur_query.number:
response = contact.name
break
result.append(response)
return result
def process_queries(queries):
# for cur_query in queries:
n = len(queries)
H = HashMap(n)
result = []
for cur_query in queries:
if cur_query.type == 'add':
H.add(cur_query.number, cur_query.name)
elif cur_query.type == 'del':
H.delete(cur_query.number)
elif cur_query.type == 'find':
result.append(H.get(cur_query.number))
return result
if __name__ == '__main__':
write_responses(process_queries(read_queries()))
| [
"[email protected]"
] | |
d731f8d490604c02544f4dd74a06e34559eac977 | 5effafa500f8cf8fb262eb4cfec90dcec3190c52 | /WritingTestFunctions/setup.py | 008dd143db31f0476f56ea6e02b223293f4dd9a0 | [] | no_license | Twishar/PythonQA | d5eaf733caac42dc99492a4c92745f478850f5fb | cd25792934c5ad32d89ec08d3b6cdec714327a40 | refs/heads/master | 2020-03-23T18:30:07.460168 | 2018-11-06T19:41:20 | 2018-11-06T19:41:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py |
from setuptools import setup
setup(
name='tasks',
py_modules=['tasks']
)
| [
"[email protected]"
] | |
4f636103e9a8e14b99aa497586bf76c87afb7f13 | 1af6958461af6257264ace2a6d13385b47104606 | /pyscf/cc/ccsd_rdm.py | a7194be9df572fba1adb568c5b1e75e070f45624 | [
"Apache-2.0"
] | permissive | tmash/pyscf | ac9a86c078170044b52be71e5d00fa5f680f55af | 89c101c1c963e8247808635c61cd165bffab42d6 | refs/heads/master | 2020-12-04T04:41:23.456744 | 2020-01-02T18:05:16 | 2020-01-02T18:05:16 | 231,615,690 | 1 | 0 | Apache-2.0 | 2020-01-03T15:33:33 | 2020-01-03T15:33:32 | null | UTF-8 | Python | false | false | 20,803 | py | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
import time
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.cc import ccsd
#
# JCP, 95, 2623
# JCP, 95, 2639
#
def _gamma1_intermediates(mycc, t1, t2, l1, l2):
nocc, nvir = t1.shape
doo =-numpy.einsum('ja,ia->ij', t1, l1)
dvv = numpy.einsum('ia,ib->ab', t1, l1)
xtv = numpy.einsum('ie,me->im', t1, l1)
dvo = t1.T - numpy.einsum('im,ma->ai', xtv, t1)
theta = t2 * 2 - t2.transpose(0,1,3,2)
doo -= lib.einsum('jkab,ikab->ij', theta, l2)
dvv += lib.einsum('jica,jicb->ab', theta, l2)
xt1 = lib.einsum('mnef,inef->mi', l2, theta)
xt2 = lib.einsum('mnaf,mnef->ea', l2, theta)
dvo += numpy.einsum('imae,me->ai', theta, l1)
dvo -= numpy.einsum('mi,ma->ai', xt1, t1)
dvo -= numpy.einsum('ie,ae->ai', t1, xt2)
dov = l1
return doo, dov, dvo, dvv
# gamma2 intermediates in Chemist's notation
def _gamma2_intermediates(mycc, t1, t2, l1, l2, compress_vvvv=False):
f = lib.H5TmpFile()
_gamma2_outcore(mycc, t1, t2, l1, l2, f, compress_vvvv)
d2 = (f['dovov'].value, f['dvvvv'].value, f['doooo'].value, f['doovv'].value,
f['dovvo'].value, None, f['dovvv'].value, f['dooov'].value)
return d2
def _gamma2_outcore(mycc, t1, t2, l1, l2, h5fobj, compress_vvvv=False):
log = logger.Logger(mycc.stdout, mycc.verbose)
nocc, nvir = t1.shape
nov = nocc * nvir
nvir_pair = nvir * (nvir+1) //2
dtype = numpy.result_type(t1, t2, l1, l2).char
if compress_vvvv:
dvvvv = h5fobj.create_dataset('dvvvv', (nvir_pair,nvir_pair), dtype)
else:
dvvvv = h5fobj.create_dataset('dvvvv', (nvir,nvir,nvir,nvir), dtype)
dovvo = h5fobj.create_dataset('dovvo', (nocc,nvir,nvir,nocc), dtype,
chunks=(nocc,1,nvir,nocc))
fswap = lib.H5TmpFile()
time1 = time.clock(), time.time()
pvOOv = lib.einsum('ikca,jkcb->aijb', l2, t2)
moo = numpy.einsum('dljd->jl', pvOOv) * 2
mvv = numpy.einsum('blld->db', pvOOv) * 2
gooov = lib.einsum('kc,cija->jkia', t1, pvOOv)
fswap['mvOOv'] = pvOOv
pvOOv = None
pvoOV = -lib.einsum('ikca,jkbc->aijb', l2, t2)
theta = t2 * 2 - t2.transpose(0,1,3,2)
pvoOV += lib.einsum('ikac,jkbc->aijb', l2, theta)
moo += numpy.einsum('dljd->jl', pvoOV)
mvv += numpy.einsum('blld->db', pvoOV)
gooov -= lib.einsum('jc,cika->jkia', t1, pvoOV)
fswap['mvoOV'] = pvoOV
pvoOV = None
mia =(numpy.einsum('kc,ikac->ia', l1, t2) * 2
- numpy.einsum('kc,ikca->ia', l1, t2))
mab = numpy.einsum('kc,kb->cb', l1, t1)
mij = numpy.einsum('kc,jc->jk', l1, t1) + moo*.5
tau = numpy.einsum('ia,jb->ijab', t1, t1)
tau += t2
goooo = lib.einsum('ijab,klab->ijkl', tau, l2)*.5
h5fobj['doooo'] = (goooo.transpose(0,2,1,3)*2 -
goooo.transpose(0,3,1,2)).conj()
gooov += numpy.einsum('ji,ka->jkia', -.5*moo, t1)
gooov += lib.einsum('la,jkil->jkia', 2*t1, goooo)
gooov -= lib.einsum('ib,jkba->jkia', l1, tau)
gooov = gooov.conj()
gooov -= lib.einsum('jkba,ib->jkia', l2, t1)
h5fobj['dooov'] = gooov.transpose(0,2,1,3)*2 - gooov.transpose(1,2,0,3)
tau = goovo = None
time1 = log.timer_debug1('rdm intermediates pass1', *time1)
goovv = numpy.einsum('ia,jb->ijab', mia.conj(), t1.conj())
max_memory = max(0, mycc.max_memory - lib.current_memory()[0])
unit = nocc**2*nvir*6
blksize = min(nocc, nvir, max(ccsd.BLKMIN, int(max_memory*.95e6/8/unit)))
doovv = h5fobj.create_dataset('doovv', (nocc,nocc,nvir,nvir), dtype,
chunks=(nocc,nocc,1,nvir))
log.debug1('rdm intermediates pass 2: block size = %d, nvir = %d in %d blocks',
blksize, nvir, int((nvir+blksize-1)/blksize))
for p0, p1 in lib.prange(0, nvir, blksize):
tau = numpy.einsum('ia,jb->ijab', t1[:,p0:p1], t1)
tau += t2[:,:,p0:p1]
tmpoovv = lib.einsum('ijkl,klab->ijab', goooo, tau)
tmpoovv -= lib.einsum('jk,ikab->ijab', mij, tau)
tmpoovv -= lib.einsum('cb,ijac->ijab', mab, t2[:,:,p0:p1])
tmpoovv -= lib.einsum('bd,ijad->ijab', mvv*.5, tau)
tmpoovv += .5 * tau
tmpoovv = tmpoovv.conj()
tmpoovv += .5 * l2[:,:,p0:p1]
goovv[:,:,p0:p1] += tmpoovv
pvOOv = fswap['mvOOv'][p0:p1]
pvoOV = fswap['mvoOV'][p0:p1]
gOvvO = lib.einsum('kiac,jc,kb->iabj', l2[:,:,p0:p1], t1, t1)
gOvvO += numpy.einsum('aijb->iabj', pvOOv)
govVO = numpy.einsum('ia,jb->iabj', l1[:,p0:p1], t1)
govVO -= lib.einsum('ikac,jc,kb->iabj', l2[:,:,p0:p1], t1, t1)
govVO += numpy.einsum('aijb->iabj', pvoOV)
dovvo[:,p0:p1] = 2*govVO + gOvvO
doovv[:,:,p0:p1] = (-2*gOvvO - govVO).transpose(3,0,1,2).conj()
gOvvO = govVO = None
tau -= t2[:,:,p0:p1] * .5
for q0, q1 in lib.prange(0, nvir, blksize):
goovv[:,:,q0:q1,:] += lib.einsum('dlib,jlda->ijab', pvOOv, tau[:,:,:,q0:q1]).conj()
goovv[:,:,:,q0:q1] -= lib.einsum('dlia,jldb->ijab', pvoOV, tau[:,:,:,q0:q1]).conj()
tmp = pvoOV[:,:,:,q0:q1] + pvOOv[:,:,:,q0:q1]*.5
goovv[:,:,q0:q1,:] += lib.einsum('dlia,jlbd->ijab', tmp, t2[:,:,:,p0:p1]).conj()
pvOOv = pvoOV = tau = None
time1 = log.timer_debug1('rdm intermediates pass2 [%d:%d]'%(p0, p1), *time1)
h5fobj['dovov'] = goovv.transpose(0,2,1,3) * 2 - goovv.transpose(1,2,0,3)
goovv = goooo = None
max_memory = max(0, mycc.max_memory - lib.current_memory()[0])
unit = max(nocc**2*nvir*2+nocc*nvir**2*3,
nvir**3*2+nocc*nvir**2*2+nocc**2*nvir*2)
blksize = min(nvir, max(ccsd.BLKMIN, int(max_memory*.9e6/8/unit)))
iobuflen = int(256e6/8/blksize)
log.debug1('rdm intermediates pass 3: block size = %d, nvir = %d in %d blocks',
blksize, nocc, int((nvir+blksize-1)/blksize))
dovvv = h5fobj.create_dataset('dovvv', (nocc,nvir,nvir,nvir), dtype,
chunks=(nocc,min(nocc,nvir),1,nvir))
time1 = time.clock(), time.time()
for istep, (p0, p1) in enumerate(lib.prange(0, nvir, blksize)):
l2tmp = l2[:,:,p0:p1]
gvvvv = lib.einsum('ijab,ijcd->abcd', l2tmp, t2)
jabc = lib.einsum('ijab,ic->jabc', l2tmp, t1)
gvvvv += lib.einsum('jabc,jd->abcd', jabc, t1)
l2tmp = jabc = None
if compress_vvvv:
# symmetrize dvvvv because it does not affect the results of ccsd_grad
# dvvvv = gvvvv.transpose(0,2,1,3)-gvvvv.transpose(0,3,1,2)*.5
# dvvvv = (dvvvv+dvvvv.transpose(0,1,3,2)) * .5
# dvvvv = (dvvvv+dvvvv.transpose(1,0,2,3)) * .5
# now dvvvv == dvvvv.transpose(0,1,3,2) == dvvvv.transpose(1,0,3,2)
tmp = numpy.empty((nvir,nvir,nvir))
tmpvvvv = numpy.empty((p1-p0,nvir,nvir_pair))
for i in range(p1-p0):
vvv = gvvvv[i].conj().transpose(1,0,2)
tmp[:] = vvv - vvv.transpose(2,1,0)*.5
lib.pack_tril(tmp+tmp.transpose(0,2,1), out=tmpvvvv[i])
# tril of (dvvvv[p0:p1,p0:p1]+dvvvv[p0:p1,p0:p1].T)
for i in range(p0, p1):
for j in range(p0, i):
tmpvvvv[i-p0,j] += tmpvvvv[j-p0,i]
tmpvvvv[i-p0,i] *= 2
for i in range(p1, nvir):
off = i * (i+1) // 2
dvvvv[off+p0:off+p1] = tmpvvvv[:,i]
for i in range(p0, p1):
off = i * (i+1) // 2
if p0 > 0:
tmpvvvv[i-p0,:p0] += dvvvv[off:off+p0]
dvvvv[off:off+i+1] = tmpvvvv[i-p0,:i+1] * .25
tmp = tmpvvvv = None
else:
for i in range(p0, p1):
vvv = gvvvv[i-p0].conj().transpose(1,0,2)
dvvvv[i] = vvv - vvv.transpose(2,1,0)*.5
gvovv = lib.einsum('adbc,id->aibc', gvvvv, -t1)
gvvvv = None
gvovv += lib.einsum('akic,kb->aibc', fswap['mvoOV'][p0:p1], t1)
gvovv -= lib.einsum('akib,kc->aibc', fswap['mvOOv'][p0:p1], t1)
gvovv += lib.einsum('ja,jibc->aibc', l1[:,p0:p1], t2)
gvovv += lib.einsum('ja,jb,ic->aibc', l1[:,p0:p1], t1, t1)
gvovv += numpy.einsum('ba,ic->aibc', mvv[:,p0:p1]*.5, t1)
gvovv = gvovv.conj()
gvovv += lib.einsum('ja,jibc->aibc', t1[:,p0:p1], l2)
dovvv[:,:,p0:p1] = gvovv.transpose(1,3,0,2)*2 - gvovv.transpose(1,2,0,3)
gvvov = None
time1 = log.timer_debug1('rdm intermediates pass3 [%d:%d]'%(p0, p1), *time1)
fswap = None
dvvov = None
return (h5fobj['dovov'], h5fobj['dvvvv'], h5fobj['doooo'], h5fobj['doovv'],
h5fobj['dovvo'], dvvov , h5fobj['dovvv'], h5fobj['dooov'])
def make_rdm1(mycc, t1, t2, l1, l2, ao_repr=False):
'''
Spin-traced one-particle density matrix in MO basis (the occupied-virtual
blocks from the orbital response contribution are not included).
dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>
The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
d1 = _gamma1_intermediates(mycc, t1, t2, l1, l2)
return _make_rdm1(mycc, d1, with_frozen=True, ao_repr=ao_repr)
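# Usage sketch (hypothetical names): the docstring convention means the
# one-electron energy contribution is recovered as
#   dm1 = make_rdm1(mycc, t1, t2, l1, l2)
#   e1 = numpy.einsum('pq,qp', h1_mo, dm1)   # h1_mo: one-electron Hamiltonian in the same MO basis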
def make_rdm2(mycc, t1, t2, l1, l2):
r'''
Spin-traced two-particle density matrix in MO basis
dm2[p,q,r,s] = \sum_{sigma,tau} <p_sigma^\dagger r_tau^\dagger s_tau q_sigma>
Note the contraction between ERIs (in Chemist's notation) and rdm2 is
E = einsum('pqrs,pqrs', eri, rdm2)
'''
d1 = _gamma1_intermediates(mycc, t1, t2, l1, l2)
f = lib.H5TmpFile()
d2 = _gamma2_outcore(mycc, t1, t2, l1, l2, f, False)
return _make_rdm2(mycc, d1, d2, with_dm1=True, with_frozen=True)
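# Matching 2-RDM sketch (hypothetical eri_mo in Chemist's notation), following
# the contraction convention stated in the docstring:
#   dm2 = make_rdm2(mycc, t1, t2, l1, l2)
#   e2 = numpy.einsum('pqrs,pqrs', eri_mo, dm2)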
def _make_rdm1(mycc, d1, with_frozen=True, ao_repr=False):
'''dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>
The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
doo, dov, dvo, dvv = d1
nocc, nvir = dov.shape
nmo = nocc + nvir
dm1 = numpy.empty((nmo,nmo), dtype=doo.dtype)
dm1[:nocc,:nocc] = doo + doo.conj().T
dm1[:nocc,nocc:] = dov + dvo.conj().T
dm1[nocc:,:nocc] = dm1[:nocc,nocc:].conj().T
dm1[nocc:,nocc:] = dvv + dvv.conj().T
dm1[numpy.diag_indices(nocc)] += 2
    if with_frozen and not (mycc.frozen == 0 or mycc.frozen is None):
nmo = mycc.mo_occ.size
nocc = numpy.count_nonzero(mycc.mo_occ > 0)
rdm1 = numpy.zeros((nmo,nmo), dtype=dm1.dtype)
rdm1[numpy.diag_indices(nocc)] = 2
moidx = numpy.where(mycc.get_frozen_mask())[0]
rdm1[moidx[:,None],moidx] = dm1
dm1 = rdm1
if ao_repr:
mo = mycc.mo_coeff
dm1 = lib.einsum('pi,ij,qj->pq', mo, dm1, mo.conj())
return dm1
# Note vvvv part of 2pdm have been symmetrized. It does not correspond to
# vvvv part of CI 2pdm
def _make_rdm2(mycc, d1, d2, with_dm1=True, with_frozen=True):
r'''
dm2[p,q,r,s] = \sum_{sigma,tau} <p_sigma^\dagger r_tau^\dagger s_tau q_sigma>
Note the contraction between ERIs (in Chemist's notation) and rdm2 is
E = einsum('pqrs,pqrs', eri, rdm2)
'''
dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov = d2
nocc, nvir = dovov.shape[:2]
nmo = nocc + nvir
dm2 = numpy.empty((nmo,nmo,nmo,nmo), dtype=doovv.dtype)
dovov = numpy.asarray(dovov)
dm2[:nocc,nocc:,:nocc,nocc:] = dovov
dm2[:nocc,nocc:,:nocc,nocc:]+= dovov.transpose(2,3,0,1)
dm2[nocc:,:nocc,nocc:,:nocc] = dm2[:nocc,nocc:,:nocc,nocc:].transpose(1,0,3,2).conj()
dovov = None
doovv = numpy.asarray(doovv)
dm2[:nocc,:nocc,nocc:,nocc:] = doovv
dm2[:nocc,:nocc,nocc:,nocc:]+= doovv.transpose(1,0,3,2).conj()
dm2[nocc:,nocc:,:nocc,:nocc] = dm2[:nocc,:nocc,nocc:,nocc:].transpose(2,3,0,1)
doovv = None
dovvo = numpy.asarray(dovvo)
dm2[:nocc,nocc:,nocc:,:nocc] = dovvo
dm2[:nocc,nocc:,nocc:,:nocc]+= dovvo.transpose(3,2,1,0).conj()
dm2[nocc:,:nocc,:nocc,nocc:] = dm2[:nocc,nocc:,nocc:,:nocc].transpose(1,0,3,2).conj()
dovvo = None
if len(dvvvv.shape) == 2:
# To handle the case of compressed vvvv, which is used in nuclear gradients
dvvvv = ao2mo.restore(1, dvvvv, nvir)
dm2[nocc:,nocc:,nocc:,nocc:] = dvvvv
dm2[nocc:,nocc:,nocc:,nocc:]*= 4
else:
dvvvv = numpy.asarray(dvvvv)
dm2[nocc:,nocc:,nocc:,nocc:] = dvvvv
dm2[nocc:,nocc:,nocc:,nocc:]+= dvvvv.transpose(1,0,3,2).conj()
dm2[nocc:,nocc:,nocc:,nocc:]*= 2
dvvvv = None
doooo = numpy.asarray(doooo)
dm2[:nocc,:nocc,:nocc,:nocc] = doooo
dm2[:nocc,:nocc,:nocc,:nocc]+= doooo.transpose(1,0,3,2).conj()
dm2[:nocc,:nocc,:nocc,:nocc]*= 2
doooo = None
dovvv = numpy.asarray(dovvv)
dm2[:nocc,nocc:,nocc:,nocc:] = dovvv
dm2[nocc:,nocc:,:nocc,nocc:] = dovvv.transpose(2,3,0,1)
dm2[nocc:,nocc:,nocc:,:nocc] = dovvv.transpose(3,2,1,0).conj()
dm2[nocc:,:nocc,nocc:,nocc:] = dovvv.transpose(1,0,3,2).conj()
dovvv = None
dooov = numpy.asarray(dooov)
dm2[:nocc,:nocc,:nocc,nocc:] = dooov
dm2[:nocc,nocc:,:nocc,:nocc] = dooov.transpose(2,3,0,1)
dm2[:nocc,:nocc,nocc:,:nocc] = dooov.transpose(1,0,3,2).conj()
dm2[nocc:,:nocc,:nocc,:nocc] = dooov.transpose(3,2,1,0).conj()
    if with_frozen and not (mycc.frozen == 0 or mycc.frozen is None):
nmo, nmo0 = mycc.mo_occ.size, nmo
nocc = numpy.count_nonzero(mycc.mo_occ > 0)
rdm2 = numpy.zeros((nmo,nmo,nmo,nmo), dtype=dm2.dtype)
moidx = numpy.where(mycc.get_frozen_mask())[0]
idx = (moidx.reshape(-1,1) * nmo + moidx).ravel()
lib.takebak_2d(rdm2.reshape(nmo**2,nmo**2),
dm2.reshape(nmo0**2,nmo0**2), idx, idx)
dm2 = rdm2
if with_dm1:
dm1 = _make_rdm1(mycc, d1, with_frozen)
dm1[numpy.diag_indices(nocc)] -= 2
for i in range(nocc):
dm2[i,i,:,:] += dm1 * 2
dm2[:,:,i,i] += dm1 * 2
dm2[:,i,i,:] -= dm1
dm2[i,:,:,i] -= dm1.T
for i in range(nocc):
for j in range(nocc):
dm2[i,i,j,j] += 4
dm2[i,j,j,i] -= 2
# dm2 was computed as dm2[p,q,r,s] = < p^\dagger r^\dagger s q > in the
# above. Transposing it so that it be contracted with ERIs (in Chemist's
# notation):
# E = einsum('pqrs,pqrs', eri, rdm2)
return dm2.transpose(1,0,3,2)
if __name__ == '__main__':
from functools import reduce
from pyscf import gto
from pyscf import scf
from pyscf.cc import ccsd
from pyscf import ao2mo
mol = gto.M()
mf = scf.RHF(mol)
mcc = ccsd.CCSD(mf)
numpy.random.seed(2)
nocc = 5
nmo = 12
nvir = nmo - nocc
eri0 = numpy.random.random((nmo,nmo,nmo,nmo))
eri0 = ao2mo.restore(1, ao2mo.restore(8, eri0, nmo), nmo)
fock0 = numpy.random.random((nmo,nmo))
fock0 = fock0 + fock0.T + numpy.diag(range(nmo))*2
t1 = numpy.random.random((nocc,nvir))
t2 = numpy.random.random((nocc,nocc,nvir,nvir))
t2 = t2 + t2.transpose(1,0,3,2)
l1 = numpy.random.random((nocc,nvir))
l2 = numpy.random.random((nocc,nocc,nvir,nvir))
l2 = l2 + l2.transpose(1,0,3,2)
h1 = fock0 - (numpy.einsum('kkpq->pq', eri0[:nocc,:nocc])*2
- numpy.einsum('pkkq->pq', eri0[:,:nocc,:nocc]))
eris = lambda:None
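    # bare namespace object standing in for the usual ERI container; the
    # orbital blocks of eri0 are attached as attributes below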
eris.oooo = eri0[:nocc,:nocc,:nocc,:nocc].copy()
eris.ooov = eri0[:nocc,:nocc,:nocc,nocc:].copy()
eris.ovoo = eri0[:nocc,nocc:,:nocc,:nocc].copy()
eris.oovv = eri0[:nocc,:nocc,nocc:,nocc:].copy()
eris.ovov = eri0[:nocc,nocc:,:nocc,nocc:].copy()
eris.ovvo = eri0[:nocc,nocc:,nocc:,:nocc].copy()
eris.ovvv = eri0[:nocc,nocc:,nocc:,nocc:].copy()
eris.vvvv = eri0[nocc:,nocc:,nocc:,nocc:].copy()
eris.fock = fock0
doo, dov, dvo, dvv = _gamma1_intermediates(mcc, t1, t2, l1, l2)
print((numpy.einsum('ij,ij', doo, fock0[:nocc,:nocc]))*2+20166.329861034799)
print((numpy.einsum('ab,ab', dvv, fock0[nocc:,nocc:]))*2-58078.964019246778)
print((numpy.einsum('ai,ia', dvo, fock0[:nocc,nocc:]))*2+74994.356886784764)
print((numpy.einsum('ia,ai', dov, fock0[nocc:,:nocc]))*2-34.010188025702391)
fdm2 = lib.H5TmpFile()
dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov = \
_gamma2_outcore(mcc, t1, t2, l1, l2, fdm2, True)
print('dovov', lib.finger(numpy.array(dovov)) - -14384.907042073517)
print('dvvvv', lib.finger(numpy.array(dvvvv)) - -25.374007033024839)
print('doooo', lib.finger(numpy.array(doooo)) - 60.114594698129963)
print('doovv', lib.finger(numpy.array(doovv)) - -79.176348067958401)
print('dovvo', lib.finger(numpy.array(dovvo)) - 9.864134457251815)
print('dovvv', lib.finger(numpy.array(dovvv)) - -421.90333700061342)
print('dooov', lib.finger(numpy.array(dooov)) - -592.66863759586136)
fdm2 = None
dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov = \
_gamma2_intermediates(mcc, t1, t2, l1, l2)
print('dovov', lib.finger(numpy.array(dovov)) - -14384.907042073517)
print('dvvvv', lib.finger(numpy.array(dvvvv)) - 45.872344902116758)
print('doooo', lib.finger(numpy.array(doooo)) - 60.114594698129963)
print('doovv', lib.finger(numpy.array(doovv)) - -79.176348067958401)
print('dovvo', lib.finger(numpy.array(dovvo)) - 9.864134457251815)
print('dovvv', lib.finger(numpy.array(dovvv)) - -421.90333700061342)
print('dooov', lib.finger(numpy.array(dooov)) - -592.66863759586136)
print('doooo',numpy.einsum('kilj,kilj', doooo, eris.oooo)*2-15939.9007625418)
print('dvvvv',numpy.einsum('acbd,acbd', dvvvv, eris.vvvv)*2-37581.823919588 )
print('dooov',numpy.einsum('jkia,jkia', dooov, eris.ooov)*2-128470.009687716)
print('dovvv',numpy.einsum('icba,icba', dovvv, eris.ovvv)*2+166794.225195056)
print('dovov',numpy.einsum('iajb,iajb', dovov, eris.ovov)*2+719279.812916893)
print('dovvo',numpy.einsum('jbai,jbia', dovvo, eris.ovov)*2
+numpy.einsum('jiab,jiba', doovv, eris.oovv)*2+53634.0012286654)
dm1 = make_rdm1(mcc, t1, t2, l1, l2)
dm2 = make_rdm2(mcc, t1, t2, l1, l2)
e2 =(numpy.einsum('ijkl,ijkl', doooo, eris.oooo)*2
+numpy.einsum('acbd,acbd', dvvvv, eris.vvvv)*2
+numpy.einsum('jkia,jkia', dooov, eris.ooov)*2
+numpy.einsum('icba,icba', dovvv, eris.ovvv)*2
+numpy.einsum('iajb,iajb', dovov, eris.ovov)*2
+numpy.einsum('jbai,jbia', dovvo, eris.ovov)*2
+numpy.einsum('ijab,ijab', doovv, eris.oovv)*2
+numpy.einsum('ij,ij', doo, fock0[:nocc,:nocc])*2
+numpy.einsum('ia,ia', dov, fock0[:nocc,nocc:])*2
+numpy.einsum('ai,ai', dvo, fock0[nocc:,:nocc])*2
+numpy.einsum('ab,ab', dvv, fock0[nocc:,nocc:])*2
+fock0[:nocc].trace()*2
-numpy.einsum('kkpq->pq', eri0[:nocc,:nocc,:nocc,:nocc]).trace()*2
+numpy.einsum('pkkq->pq', eri0[:nocc,:nocc,:nocc,:nocc]).trace())
print(e2+794721.197459942)
print(numpy.einsum('pqrs,pqrs', dm2, eri0)*.5 +
numpy.einsum('pq,qp', dm1, h1) - e2)
print(numpy.allclose(dm2, dm2.transpose(1,0,3,2)))
print(numpy.allclose(dm2, dm2.transpose(2,3,0,1)))
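    # partial-trace check: summing the 2-RDM over one particle should recover
    # (N-1) * dm1, hence the division by 9 for N = 2*nocc = 10 electrons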
d1 = numpy.einsum('kkpq->qp', dm2) / 9
print(numpy.allclose(d1, dm1))
mol = gto.Mole()
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = '631g'
mol.build()
mf = scf.RHF(mol).run()
mycc = ccsd.CCSD(mf)
mycc.frozen = 2
ecc, t1, t2 = mycc.kernel()
l1, l2 = mycc.solve_lambda()
dm1 = make_rdm1(mycc, t1, t2, l1, l2)
dm2 = make_rdm2(mycc, t1, t2, l1, l2)
nmo = mf.mo_coeff.shape[1]
eri = ao2mo.kernel(mf._eri, mf.mo_coeff, compact=False).reshape([nmo]*4)
hcore = mf.get_hcore()
h1 = reduce(numpy.dot, (mf.mo_coeff.T, hcore, mf.mo_coeff))
e1 = numpy.einsum('ij,ji', h1, dm1)
e1+= numpy.einsum('ijkl,ijkl', eri, dm2) * .5
e1+= mol.energy_nuc()
print(e1 - mycc.e_tot)
| [
"[email protected]"
] | |
4217f994c31b7b12ff06588625da2958e56654c7 | 4e62fcb385d9e8a6af0c6c9ec315f803d6ea190b | /testsuite/modulegraph-dir/pkg_a/__init__.py | 92d13dbaf80c3574728b3e5bc549e10a72675df8 | [
"MIT"
] | permissive | ronaldoussoren/modulegraph2 | 8d8a18b472574acc158c5c293ae4ed7b88f06ba9 | 227954f5037e291edc91e666f21bda44fd66fcb2 | refs/heads/master | 2023-09-01T05:16:44.873049 | 2023-04-09T10:28:19 | 2023-04-09T10:28:19 | 231,953,118 | 12 | 7 | MIT | 2023-04-09T10:29:06 | 2020-01-05T17:36:35 | C | UTF-8 | Python | false | false | 20 | py | from pkg_b import *
| [
"[email protected]"
] | |
df877881b18b0ebf0f407c54d2688ad61f7978b0 | babc3e26d66a8084c9f84a0431338bafabae6ffd | /TaeJuneJoung/PGM/LV2/lv2.스킬트리.py | 1a01d1426dfd38787b4de87af83c634a98096016 | [] | no_license | hoteldelluna/AlgoStudy | 5c23a1bfb07dbfbabc5bedd541d61784d58d3edc | 49ec098cecf2b775727d5648161f773e5488089b | refs/heads/dev | 2022-10-09T14:29:00.580834 | 2020-01-25T14:40:55 | 2020-01-25T14:40:55 | 201,632,052 | 5 | 0 | null | 2020-01-25T14:40:57 | 2019-08-10T13:11:41 | Python | UTF-8 | Python | false | false | 947 | py | def solution(skill, skill_trees):
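    # Programmers "Skill Tree": a tree is valid when the prerequisite skills
    # from `skill` that it contains appear in the same relative order; count
    # the valid trees.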
answer = 0
for skill_tree in skill_trees:
stack = []
for tree in skill_tree:
if tree in set(skill):
idx = skill.index(tree)
if idx not in set(stack):
stack.append(idx)
isPlus = True
check = [False] * len(skill)
for i in stack:
check[i] = True
if check[:i].count(False):
isPlus = False
break
if isPlus:
answer += 1
return answer
"""
다른 사람 풀이]
Python의 `for~else`문 사용
"""
def solution(skill, skill_trees):
answer = 0
for skills in skill_trees:
skill_list = list(skill)
for s in skills:
if s in skill:
if s != skill_list.pop(0):
break
else:
answer += 1
return answer | [
"[email protected]"
] | |
a26ac469f2e087d4ceb54f3a8c82131f7bb8ad8c | da1a2d3b92e3cf8720712c82089cbc665087e355 | /test/functional/tool_wallet.py | 7baf87adb65072eef96f33c23d9a49b7e188dea5 | [
"MIT"
] | permissive | minblock/cureoptedcoin | a60e961cef536704023ff962a09c56fe25eee66e | 72909b7b1df261f840d24ecd8aa083fc9b7b7b49 | refs/heads/master | 2021-05-20T23:55:51.062222 | 2020-04-23T06:55:30 | 2020-04-23T06:55:30 | 252,460,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,837 | py | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoin-wallet."""
import subprocess
import textwrap
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class ToolWalletTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def bitcoin_wallet_process(self, *args):
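        # Launch the offline wallet tool against this test node's datadir on
        # regtest, capturing stdout/stderr as text for the assertions below.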
binary = self.config["environment"]["BUILDDIR"] + '/src/cureoptedcoin-wallet' + self.config["environment"]["EXEEXT"]
args = ['-datadir={}'.format(self.nodes[0].datadir), '-regtest'] + list(args)
return subprocess.Popen([binary] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
def assert_raises_tool_error(self, error, *args):
p = self.bitcoin_wallet_process(*args)
stdout, stderr = p.communicate()
assert_equal(p.poll(), 1)
assert_equal(stdout, '')
assert_equal(stderr.strip(), error)
def assert_tool_output(self, output, *args):
p = self.bitcoin_wallet_process(*args)
stdout, stderr = p.communicate()
assert_equal(p.poll(), 0)
assert_equal(stderr, '')
assert_equal(stdout, output)
def run_test(self):
self.assert_raises_tool_error('Invalid command: foo', 'foo')
# `bitcoin-wallet help` is an error. Use `bitcoin-wallet -help`
self.assert_raises_tool_error('Invalid command: help', 'help')
self.assert_raises_tool_error('Error: two methods provided (info and create). Only one method should be provided.', 'info', 'create')
self.assert_raises_tool_error('Error parsing command line arguments: Invalid parameter -foo', '-foo')
self.assert_raises_tool_error('Error loading wallet.dat. Is wallet being used by other process?', '-wallet=wallet.dat', 'info')
self.assert_raises_tool_error('Error: no wallet file at nonexistent.dat', '-wallet=nonexistent.dat', 'info')
# stop the node to close the wallet to call info command
self.stop_node(0)
out = textwrap.dedent('''\
Wallet info
===========
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2
Transactions: 0
Address Book: 3
''')
self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
# mutate the wallet to check the info command output changes accordingly
self.start_node(0)
self.nodes[0].generate(1)
self.stop_node(0)
out = textwrap.dedent('''\
Wallet info
===========
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2
Transactions: 1
Address Book: 3
''')
self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
out = textwrap.dedent('''\
Topping up keypool...
Wallet info
===========
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2000
Transactions: 0
Address Book: 0
''')
self.assert_tool_output(out, '-wallet=foo', 'create')
self.start_node(0, ['-wallet=foo'])
out = self.nodes[0].getwalletinfo()
self.stop_node(0)
assert_equal(0, out['txcount'])
assert_equal(1000, out['keypoolsize'])
assert_equal(1000, out['keypoolsize_hd_internal'])
assert_equal(True, 'hdseedid' in out)
if __name__ == '__main__':
ToolWalletTest().main()
| [
"[email protected]"
] | |
8e5a14bb33047f99ee33e2a1ebb2ca9463f8df99 | 9a9d6052f8cf91dd57be9a9b6564290b0fac9e52 | /Algorithm/BOJ/2578_빙고.py | ec26caeef3194e552ab218e9a14f36e953527244 | [] | no_license | Gyeong-Yeon/TIL | 596ec6a093eec34a17dad68bcd91fa9dd08690e8 | eb1f43ee0525da93233b70716cd35caab8d82bda | refs/heads/master | 2023-03-31T19:56:30.979062 | 2021-03-28T13:09:27 | 2021-03-28T13:09:27 | 280,307,737 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | def game():
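    # BOJ 2578 (Bingo): cross numbers off the 5x5 board in the order they are
    # called and return how many calls it takes until three or more lines
    # (rows, columns, diagonals) are complete.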
cnt = 0
for i in range(5):
for j in range(5):
for t in range(5):
for b in range(5):
if call[i][j] == bingo[t][b]:
bingo[t][b] = 0
cnt += 1
if count() >= 3:
return cnt
def count():
bingo_cnt = 0
    for i in range(5):  # horizontal (row) bingo lines
zero_cnt = 0
for j in range(5):
if bingo[i][j] == 0:
zero_cnt += 1
if zero_cnt == 5:
bingo_cnt += 1
    for i in range(5):  # vertical (column) bingo lines
zero_cnt = 0
for j in range(5):
if bingo[j][i] == 0:
zero_cnt += 1
if zero_cnt == 5:
bingo_cnt += 1
zero_cnt = 0
    for i in range(5):  # anti-diagonal (/) bingo line
if bingo[i][4-i] == 0:
zero_cnt += 1
if zero_cnt == 5:
bingo_cnt += 1
zero_cnt = 0
    for i in range(5):  # main-diagonal (\) bingo line
if bingo[i][i] == 0:
zero_cnt += 1
if zero_cnt == 5:
bingo_cnt += 1
return bingo_cnt
bingo = [list(map(int,input().split())) for _ in range(5)]
call = [list(map(int,input().split())) for _ in range(5)]
result = game()
print(result) | [
"[email protected]"
] | |
835e103ddf2573f4b477b0c6d50490420a6cebea | dadef11410227993876f4019868587cde2716b53 | /crawling5.py | eb2f819303851f9827b640d666b088daa78bd483 | [] | no_license | ss820938ss/pythonProject_pandas | 6185f3e3af5402202ee80c8d941d6c5afd8c1298 | aa076f4c74ad11ceac90f0f85be8ea24a53f71f0 | refs/heads/master | 2023-07-07T05:11:06.565796 | 2021-07-30T07:51:58 | 2021-07-30T07:51:58 | 389,851,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,644 | py | import requests
from bs4 import BeautifulSoup
import time
from urllib.request import urlretrieve
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
test = input('Enter a name to search for: ')
path = 'C:/chromedriver_win32/chromedriver'
driver = webdriver.Chrome(path)
driver.get('https://unsplash.com/')
time.sleep(1)
element = driver.find_element_by_name('searchKeyword')
element.send_keys(test, Keys.ENTER)
# image_link = driver.find_element_by_link_text('이미지')  # for Google/Naver ('이미지' = "Images")
# image_link.click()  # for Google/Naver
# for Google:
# image_tag = driver.find_elements_by_tag_name('span > div > div > div > a > div > img')
# num = 10,000,000
# x = driver.find_elements_by_class_name('xLon9')
time.sleep(5)
driver.find_element_by_class_name('_2Mc8_').send_keys(Keys.ENTER)
# test: grab a photo detail link, then fetch and parse the page
# link = data.select_one('li.detail > a').attrs['href']
# NOTE: 'href' is not a valid CSS selector and find_elements_* returns a
# list, so the original string concatenation would crash; the anchor selector
# below is a best guess and may need updating if Unsplash changes its markup.
link_elements = driver.find_elements_by_css_selector('a[href*="/photos/"]')
link = link_elements[0].get_attribute('href') if link_elements else 'https://unsplash.com/photos/'
webpage = requests.get(link)
soup = BeautifulSoup(webpage.content, "html.parser")
time.sleep(10)
driver.find_element_by_xpath('/html/body/div[4]/div/div/div[1]/button').send_keys(Keys.ENTER)
# image_tag = driver.find_elements_by_class_name('oCCRx')
# # viewer class codes: xLon9 / oCCRx _2Mc8_ / /html/body/div[4]/div/div/div[4]/div/div/div[1]/div[4]/div[1]/div[1]/span
#
# time.sleep(1)
#
# image_list = []
#
# for i in range(len(image_tag)):
# image_list.append(image_tag[i].get_attribute('src'))
# print(image_list)
#
# for i, link in enumerate(image_list):
# urlretrieve(link, './images/{}{}.jpg'.format(test, i + 1))
| [
"[email protected]"
] | |
306fd591700f130d7b6b11935a1b7b57f6924123 | 3e14ded9e00d482a5bbfee039b9d740bd0c44f08 | /venv/bin/easy_install-3.9 | 4eaef0f12e22d9b328f5fc4ebda52ecf93b0b2de | [
"MIT"
] | permissive | Rubyroy12/neighbourinfo | 7ebeb6d3ae12711c755baa0ea7adce8b58a28b55 | 619e6e7b20c7f06310d55c3922d9372c0e2455f7 | refs/heads/master | 2023-06-25T12:41:30.685708 | 2021-07-26T23:16:59 | 2021-07-26T23:16:59 | 388,932,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | 9 | #!/home/ibrahim/Documents/python/django-projects/mysource/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
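    # console-script shim: strip a trailing "-script.py(w)"/".exe" suffix from
    # argv[0] so setuptools sees the bare command name, then delegate to
    # easy_install's main()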
sys.exit(main())
| [
"[email protected]"
] | |
f7fdabe13a91972e0ba4375d7e08dc1b9be65333 | 2bdedcda705f6dcf45a1e9a090377f892bcb58bb | /src/main/output/system_body/java/body/father_temp.py | a6eb560f9ba8cf2cebc95d13377303821828e3ce | [] | no_license | matkosoric/GenericNameTesting | 860a22af1098dda9ea9e24a1fc681bb728aa2d69 | 03f4a38229c28bc6d83258e5a84fce4b189d5f00 | refs/heads/master | 2021-01-08T22:35:20.022350 | 2020-02-21T11:28:21 | 2020-02-21T11:28:21 | 242,123,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,945 | py | using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Text;
using System.Threading.Tasks;
namespace CafeT.Azures
{
public static class AzureTranslator
{
/// Demonstrates getting an access token and using the token to translate.
///
        //Enter here the Key from your Microsoft Translator Text subscription on http://portal.azure.com
        private const string SubscriptionKey = "11785aecda97606d15245d044954311a";
        public static async Task<string> TranslateAsync(string text)
{
var translatorService = new TranslatorService.LanguageServiceClient();
var authTokenSource = new AzureAuthToken(SubscriptionKey);
var token = string.Empty;
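            // Exchange the subscription key for a short-lived access token;
            // failures from the token service are mapped to hints below.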
try
{
token = await authTokenSource.GetAccessTokenAsync();
}
catch (HttpRequestException)
{
switch (authTokenSource.RequestStatusCode)
{
case HttpStatusCode.Unauthorized:
Console.WriteLine("Request to token service is not authorized (401). Check that the Azure subscription key is valid.");
break;
case HttpStatusCode.Forbidden:
Console.WriteLine("Request to token service is not authorized (403). For accounts in the free-tier, check that the account quota is not exceeded.");
break;
}
throw;
}
if(text.Contains("?vn"))
{
return translatorService.Translate(token, text, "en", "vi", "text/plain", "general", string.Empty);
}
else
{
return translatorService.Translate(token, text, "vi", "en", "text/plain", "general", string.Empty);
}
}
}
}
| [
"[email protected]"
] | |
af63fff10857b872190df0cceb777ccee45b30e3 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-gaussdbfornosql/huaweicloudsdkgaussdbfornosql/v3/model/show_applicable_instances_response.py | 93201cb81aa67c68a0fbe46d6680ca8486bcf088 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,337 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowApplicableInstancesResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instances': 'list[ApplicableInstanceRsp]',
'count': 'int'
}
attribute_map = {
'instances': 'instances',
'count': 'count'
}
def __init__(self, instances=None, count=None):
"""ShowApplicableInstancesResponse
The model defined in huaweicloud sdk
        :param instances: Instance list
        :type instances: list[:class:`huaweicloudsdkgaussdbfornosql.v3.ApplicableInstanceRsp`]
        :param count: The maximum number of instances to which the parameter can be applied.
:type count: int
"""
super(ShowApplicableInstancesResponse, self).__init__()
self._instances = None
self._count = None
self.discriminator = None
if instances is not None:
self.instances = instances
if count is not None:
self.count = count
@property
def instances(self):
"""Gets the instances of this ShowApplicableInstancesResponse.
        Instance list
:return: The instances of this ShowApplicableInstancesResponse.
:rtype: list[:class:`huaweicloudsdkgaussdbfornosql.v3.ApplicableInstanceRsp`]
"""
return self._instances
@instances.setter
def instances(self, instances):
"""Sets the instances of this ShowApplicableInstancesResponse.
        Instance list
:param instances: The instances of this ShowApplicableInstancesResponse.
:type instances: list[:class:`huaweicloudsdkgaussdbfornosql.v3.ApplicableInstanceRsp`]
"""
self._instances = instances
@property
def count(self):
"""Gets the count of this ShowApplicableInstancesResponse.
        The maximum number of instances to which the parameter can be applied.
:return: The count of this ShowApplicableInstancesResponse.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this ShowApplicableInstancesResponse.
        The maximum number of instances to which the parameter can be applied.
:param count: The count of this ShowApplicableInstancesResponse.
:type count: int
"""
self._count = count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowApplicableInstancesResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
672aa5c05e36cc0c720a7e2b514fab148751f8c8 | 633eadb2ff150378fa44c14c010d5b657643ac55 | /utils/utils_glue.py | bdb509d788b0c523437391140e1e2eb8098de6cc | [
"MIT"
] | permissive | ankitvad/control-sum-cmdp | 4666e6bd0965de35fea6291ca8d01ff8bd860a96 | 5181e8e0c9bf6bef48f66457e06d3f398f4a428a | refs/heads/main | 2023-07-06T12:32:44.251961 | 2021-08-10T02:21:03 | 2021-08-10T02:21:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,262 | py | # This code is adapted from https://github.com/huggingface/pytorch-transformers
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT classification fine-tuning: utilities to work with GLUE tasks """
from __future__ import absolute_import, division, print_function
import csv
import logging
import os
import sys
from io import open
import re
import json
from os.path import join
import torch
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class JsonDataProcessor(object):
    """Builds InputExamples from numbered ``{i}.json`` files in a split dir.
    NOTE: this class is unfinished in the original source (the call in
    _create_examples was missing its arguments); the label/set_type
    plumbing below is a best-guess completion.
    """
    def _read_jsons_from_split(self, split_dir, label, set_type):
        n_data = _count_data(split_dir)  # module-level helper defined below
        examples = []
        for i in range(n_data):
            with open(join(split_dir, '{}.json'.format(i))) as f:
                js = json.load(f)
            if js['abstract']:
                guid = "%s-%s" % (set_type, i)
                text_a = ' '.join(js['abstract']).lower()
                examples.append(InputExample(guid=guid, text_a=text_a,
                                             text_b=None, label=label))
        return examples
    def _create_examples(self, split_dir, label, set_type):
        return self._read_jsons_from_split(split_dir, label, set_type)
def _count_data(path):
""" count number of data in the given path"""
matcher = re.compile(r'[0-9]+\.json')
match = lambda name: bool(matcher.match(name))
names = os.listdir(path)
n_data = len(list(filter(match, names)))
return n_data
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = line[0]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliMismatchedProcessor(MnliProcessor):
"""Processor for the MultiNLI Mismatched data set (GLUE version)."""
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
"dev_matched")
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class StsbProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return [None]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[7]
text_b = line[8]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QqpProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
try:
text_a = line[3]
text_b = line[4]
label = line[5]
except IndexError:
continue
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QnliProcessor(DataProcessor):
"""Processor for the QNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev_matched")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class RteProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WnliProcessor(DataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode,
cls_token_at_end=False, pad_on_left=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=1, pad_token_segment_id=0,
mask_padding_with_zero=True):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = tokens_a + [sep_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
if tokens_b:
tokens += tokens_b + [sep_token]
segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
if cls_token_at_end:
tokens = tokens + [cls_token]
segment_ids = segment_ids + [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
def convert_examples_to_tensors_for_bert_seq_classify(examples, max_seq_length,
tokenizer,
cls_token_at_end=False, pad_on_left=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=1, pad_token_segment_id=0,
mask_padding_with_zero=True):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = tokens_a + [sep_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
if tokens_b:
tokens += tokens_b + [sep_token]
segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
if cls_token_at_end:
tokens = tokens + [cls_token]
segment_ids = segment_ids + [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=None))
all_input_ids_tensor = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask_tensor = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids_tensor = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
return all_input_ids_tensor, all_input_mask_tensor, all_segment_ids_tensor
def convert_examples_to_tensors_for_bert_qa(examples, max_seq_length,
tokenizer,
cls_token_at_end=False, pad_on_left=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=1, pad_token_segment_id=0,
mask_padding_with_zero=True):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = tokens_a + [sep_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
if tokens_b:
tokens += tokens_b + [sep_token]
segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
if cls_token_at_end:
tokens = tokens + [cls_token]
segment_ids = segment_ids + [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=None))
all_input_ids_list = [f.input_ids for f in features]
all_input_ids_tensor = torch.tensor(all_input_ids_list, dtype=torch.long)
all_input_mask_tensor = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids_tensor = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
return all_input_ids_tensor, all_input_mask_tensor, all_segment_ids_tensor, all_input_ids_list
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def compute_metrics(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "cola":
return {"mcc": matthews_corrcoef(labels, preds)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mrpc":
return acc_and_f1(preds, labels)
elif task_name == "sts-b":
return pearson_and_spearman(preds, labels)
elif task_name == "qqp":
return acc_and_f1(preds, labels)
elif task_name == "mnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "qnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "rte":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "wnli":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"mrpc": MrpcProcessor,
"sst-2": Sst2Processor,
"sts-b": StsbProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"rte": RteProcessor,
"wnli": WnliProcessor,
}
output_modes = {
"cola": "classification",
"mnli": "classification",
"mnli-mm": "classification",
"mrpc": "classification",
"sst-2": "classification",
"sts-b": "regression",
"qqp": "classification",
"qnli": "classification",
"rte": "classification",
"wnli": "classification",
}
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
| [
"[email protected]"
] | |
4db1ec489f51cd3f3ea0f26805ae9a0150a40fc4 | e61e664d95af3b93150cda5b92695be6551d2a7c | /vega/core/pipeline/pipe_step.py | bd7fcb560b23ca6db99fa3f70a0385e5a571bdda | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | huawei-noah/vega | 44aaf8bb28b45f707ed6cd4e871ba70fc0c04846 | 12e37a1991eb6771a2999fe0a46ddda920c47948 | refs/heads/master | 2023-09-01T20:16:28.746745 | 2023-02-15T09:36:59 | 2023-02-15T09:36:59 | 273,667,533 | 850 | 184 | NOASSERTION | 2023-02-15T09:37:01 | 2020-06-20T08:20:06 | Python | UTF-8 | Python | false | false | 2,329 | py | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PipeStep that used in Pipeline."""
import logging
from datetime import datetime
from vega.common import TaskOps, Status
from vega.common import ClassFactory, ClassType
from vega.core.pipeline.conf import PipeStepConfig
from vega.report import ReportServer
__all__ = ["PipeStep"]
logger = logging.getLogger(__name__)
class PipeStep(object):
"""PipeStep is the base components class that can be added in Pipeline."""
def __init__(self, name=None, **kwargs):
"""Initialize pipestep."""
self.task = TaskOps()
self.name = name if name else "pipestep"
self.start_time = datetime.now()
self.status = Status.unstarted
self.message = None
self.end_time = None
self.num_epochs = None
self.num_models = None
def __new__(cls, *args, **kwargs):
"""Create pipe step instance by ClassFactory."""
t_cls = ClassFactory.get_cls(ClassType.PIPE_STEP, PipeStepConfig.type)
return super().__new__(t_cls)
def do(self, *args, **kwargs):
"""Do the main task in this pipe step."""
pass
def save_info(self):
"""Save step info to report serve."""
info = {"step_name": self.name}
for attr in dir(self):
if attr in ["start_time", "end_time", "status", "message", "num_epochs", "num_models"]:
info[attr] = getattr(self, attr)
ReportServer().update_step_info(**info)
def update_status(self, status, desc=None):
"""Update step status."""
if status == Status.finished:
self.end_time = datetime.now()
self.status = status
self.message = desc
self.save_info()
| [
"[email protected]"
] | |
5eb0ccd5fb5a39b7282d2f257ac24c4ca8adeb32 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/earthborn.py | 6aeec1e8898441c7bd480aeb68160d440923dec9 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 81 | py | ii = [('RennJIT.py', 1), ('BailJD1.py', 1), ('NortSTC.py', 1), ('WordWYR.py', 1)] | [
"[email protected]"
] | |
c3ec46d15bd7840421b521daa2c180b6373eb77e | 05bdaafff13ec39f6120f4da5e09ffbb58505e85 | /main.py | 1ecb20e40e62d361d41c2d9c9262a50f8b2c8028 | [] | no_license | mcfletch/pyconca-tictactoe | 852c9d42283cfda3eaf25b0445584a35d854f241 | c14dc7cbff5c87f78edeff551d7a47ff9738b7dc | refs/heads/master | 2020-04-05T21:43:16.050727 | 2018-12-24T02:17:32 | 2018-12-24T02:17:32 | 157,230,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,402 | py | #! /usr/bin/env python
import gym
import numpy as np
import bisect
import random
import os
import argparse
from collections import deque
from keras.models import Model
from keras.layers import (
Dense,
Input,
Dropout,
Activation,
)
def predict(model, state):
"""Predict a single state's future reward"""
state = np.array(state,'f').reshape((1,-1))
action_weights = model.predict(state)
return action_weights[0]
def build_model( env ):
"""Build a Q function that predicts reward for a given state
Note here that we see two *different* values showing up in the
result of the Q function. The argmax (highest value's index)
is the "action to take to maximize expected reward" while the
max (highest value) is loosely corresponding to "expected reward"
for the given state.
"""
initial = layer = Input(env.observation_space.shape)
for size in [63,15,]:
layer = Dense(size)(layer)
layer = Activation('relu')(layer)
layer = Dense(env.action_space.n)(layer)
layer = Activation('linear')(layer)
model = Model(initial,layer)
model.compile(
'adam',
'mse'
)
return model
def run_game( env, model, epoch=0, exploit=.9 ):
done = False
state = env.reset()
history = []
overall_reward = 0
choices = []
while not done:
if not epoch % 100:
env.render()
if np.random.random() > exploit:
action = env.action_space.sample()
random_trial = True
else:
            # predict() reshapes internally, so `state` can stay unmodified
            # for the transition record appended below
            action_weights = predict(model, state)
            action = int(np.argmax(action_weights))
random_trial = False
choices.append(action)
new_state,reward,done,_ = env.step(action)
overall_reward += reward
history.append({
'state': state,
'new_state': new_state,
'action': action,
'random_trial': random_trial,
'overall_reward': overall_reward,
'reward': reward,
'done': done,
})
state = new_state
# exploit *= max((.995,exploit*1.1))
# print('%s/%s chose 0'%(choices.count(0), len(choices)))
return history
def generate_batches(epoch_history, batch_size):
"""Key insight here:
Deep RL seems to want to have lots of very rapid feedback at the start
of the process, so during completely random search, we're looking to
push the weights around immediately, so while we normally (supervised
learning, etc) want to process big batches of lots of data, here we're
doing very small batches that *sample* across the whole data-set.
As we progress, we include the early trials in the set of sampled
data, so they will be sampled more frequently than the current values,
but they are not all sampled N times, they just have a higher sampling
frequency than the latest/most recent trials.
"""
yield random.sample(epoch_history, min([len(epoch_history),batch_size]))
def train_model( model, epoch_history, env, batch_size=64):
states = np.zeros((batch_size,)+env.observation_space.shape,'f')
actions = np.zeros((batch_size,env.action_space.n),'f')
for batch in generate_batches(epoch_history, batch_size):
if len(batch) < batch_size:
break
for index,record in enumerate(batch):
states[index] = record['state']
action_reward = predict(model,record['state'])
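            # Bellman backup with discount gamma = 1.0:
            #   Q(s, a) <- r + gamma * max_a' Q(s', a')
            # terminal transitions keep just the observed reward.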
if not record['done']:
action_reward[record['action']] = record['reward'] + 1.0 * np.max(
predict(model,record['new_state'])
)
else:
# assert not np.max(action_reward) > 1.0, action_reward
action_reward[record['action']] = record['reward']
actions[index] = action_reward
model.fit(
states,
actions,
verbose=0
)
def verify(env, model):
history = run_game(env, model, epoch=0, exploit=1.0)
score = history[-1]['overall_reward']
return score
def run(env_name='CartPole-v1',initial_epsilon=0.995):
env = gym.make(env_name)
model = build_model( env )
filename = '%s-weights.hd5'%(env_name)
if os.path.exists(filename):
model.load_weights(filename)
scores = deque(maxlen=100)
overall_history = []
epsilon_decay = .02
epsilon_min = 0.05
epsilon_max = .995
epsilon = initial_epsilon
for epoch in range(10000):
epoch_scores = []
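        # epsilon-greedy schedule: exploration decays logarithmically with the
        # epoch index, clamped to [epsilon_min, epsilon_max]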
epsilon = np.max([
epsilon_min, np.min([
epsilon,
1.0 - np.log10((epoch + 1) * epsilon_decay ),
epsilon_max,
]),
])
exploit = 1.0- epsilon
# while len(overall_history) < :
history = run_game( env, model, epoch, exploit )
score = history[-1]['overall_reward']
scores.append(score)
overall_history.extend( history )
train_model( model, overall_history, env, batch_size=64 )
        if not epoch % 100:
            avg = np.mean(scores)
            print('Avg Score on last 100 tests: ', avg)
            # only check success when the average is freshly computed
            if avg > 195:
                print('Success at epoch %s' % (epoch,))
model.save_weights(filename)
verification = [
verify(env, model)
for i in range(20)
]
print('Verification: mean %s stddev=%s'%(
np.mean(verification),
np.std(verification),
))
return verification
def get_options():
parser = argparse.ArgumentParser(
description = 'Run a deep reinforcement learning task on an OpenAI environment',
)
parser.add_argument(
'-e','--environment',
default = 'CartPole-v1',
help = 'OpenAI Gym environment to run'
)
parser.add_argument(
'--epsilon',
default=.995,
help = 'Initial epsilon value (1 meaning "explore on every step" and 0 meaning "just exploit your knowledge")',
type=float,
)
return parser
def main():
parser = get_options()
options = parser.parse_args()
return run(options.environment,initial_epsilon=options.epsilon)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
5e7a0532d00a852b74bc781bd6336d237945b66a | 4fc1c45a7e570cc1204d4b5f21150f0771d34ea5 | /quan_table/insightface_v2/model/mobilefacenetv2/mobilefacenetv2.py | 2b3de2bf5bc86f118bd6e4f60d1870d2ff1e9795 | [] | no_license | CN1Ember/feathernet_mine | 77d29576e4ecb4f85626b94e6ff5884216af3098 | ac0351f59a1ed30abecd1088a46c7af01afa29d5 | refs/heads/main | 2023-05-28T17:19:06.624448 | 2021-06-17T04:39:09 | 2021-06-17T04:39:09 | 374,603,757 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,110 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/5/27 16:00
# @Author : xiezheng
# @Site :
# @File : insightface_mobilefacenet.py
import math
from torch import nn
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchsummary import summary
from torch.nn import Parameter
from insightface_v2.utils.model_analyse import ModelAnalyse
from insightface_v2.utils.logger import get_logger
import os
class Bottleneck_mobilefacenet(nn.Module):
def __init__(self, in_planes, out_planes, stride, expansion):
super(Bottleneck_mobilefacenet, self).__init__()
self.connect = stride == 1 and in_planes == out_planes
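        # MobileNetV2-style inverted residual: 1x1 expansion -> 3x3 depthwise
        # -> 1x1 linear projection; the residual shortcut is used only when
        # the block preserves both spatial size and channel count.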
planes = in_planes * expansion
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.prelu1 = nn.PReLU(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, groups=planes, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.prelu2 = nn.PReLU(planes)
self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes)
def forward(self, x):
out = self.prelu1(self.bn1(self.conv1(x)))
out = self.prelu2(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
if self.connect:
return x + out
else:
return out
class Mobilefacenetv2(nn.Module):
Mobilefacenet_bottleneck_setting = [
# [t, c , n ,s] = [expansion, out_planes, num_blocks, stride]
[2, 64, 5, 2],
[4, 128, 1, 2],
[2, 128, 6, 1],
[4, 128, 1, 2],
[2, 128, 2, 1]
]
def __init__(self, bottleneck_setting=Mobilefacenet_bottleneck_setting, embedding_size=512):
super(Mobilefacenetv2, self).__init__()
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.prelu1 = nn.PReLU(64)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, groups=64, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(64)
self.prelu2 = nn.PReLU(64)
self.layers = self._make_layer(Bottleneck_mobilefacenet, bottleneck_setting)
self.conv3 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(512)
self.prelu3 = nn.PReLU(512)
self.conv4 = nn.Conv2d(512, 512, kernel_size=7, groups=512, stride=1, padding=0, bias=False)
self.bn4 = nn.BatchNorm2d(512)
self.linear = nn.Linear(512, embedding_size)
# self.bn5 = nn.BatchNorm1d(128, affine=False)
self.bn5 = nn.BatchNorm1d(embedding_size, affine=False)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
if m.affine:
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
# nn.init.constant_(m.bias, 0)
def _make_layer(self, block, setting):
layers = []
for t, c, n, s in setting:
for i in range(n):
if i == 0:
layers.append(block(self.inplanes, c, s, t))
else:
layers.append(block(self.inplanes, c, 1, t))
self.inplanes = c
return nn.Sequential(*layers)
def forward(self, x):
out = self.prelu1(self.bn1(self.conv1(x)))
out = self.prelu2(self.bn2(self.conv2(out)))
out = self.layers(out)
out = self.prelu3(self.bn3(self.conv3(out)))
out = self.bn4(self.conv4(out))
out = out.view(out.size(0), -1)
out = self.bn5(self.linear(out))
return out
if __name__ == "__main__":
model = Mobilefacenetv2(embedding_size=512)
# print(model.state_dict())
# print("---------------------")
# for key in model.state_dict().keys():
# print(key)
print(model)
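    # Hedged sanity check: a 112x112 RGB batch should map to a 512-d
    # embedding (spatial dims collapse to 1x1 after the 7x7 depthwise conv).
    emb = model(torch.randn(2, 3, 112, 112))
    print("embedding shape:", emb.shape)  # expected: torch.Size([2, 512])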
# summary(model, (3, 112, 112))
save_path = './finetune-test'
if not os.path.exists(save_path):
os.makedirs(save_path)
logger = get_logger(save_path, "finetune-test")
test_input = torch.randn(1, 3, 112, 112)
model_analyse = ModelAnalyse(model, logger)
params_num = model_analyse.params_count()
flops = model_analyse.flops_compute(test_input)
count = 0
for module in model.modules():
if isinstance(module, nn.Conv2d):
count = count + 1
print("\nmodel layers_num = {}".format(count))
print("model size={} MB".format(params_num * 4 / 1024 / 1024))
print("model flops={} M".format(sum(flops) / (10 ** 6))) | [
"[email protected]"
] | |
05001ef31b85c2352c6b5b6b7b127b21b7df3d46 | 09a8d6e247e4575f09d68b0dae3cf0b94255717b | /Regex/Repetitions/MatchingEndingItems.py | fce374c054c87cd44c246818f371662993e789ec | [] | no_license | ShihabAhmed09/HackerRank-Solutions-Python | 5e648eac81b70b502d77aafb7cf676a72f2583c3 | dc1e25d726ed13e12da7f767e4b4acbadef2bd2a | refs/heads/main | 2023-02-15T05:51:47.753723 | 2021-01-05T18:02:14 | 2021-01-05T18:02:14 | 300,979,036 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | import re
Regex_Pattern = r'^[a-zA-Z]*s$'
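# The anchored pattern accepts strings made up solely of ASCII letters that
# end in a literal 's' (a lone 's' matches; 'cat' does not).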
print(str(bool(re.search(Regex_Pattern, input()))).lower())
| [
"[email protected]"
] | |
bf5f46916c88603682168907b9949179d8eb2f9a | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/KoubeiTradeVoucherItemTemplete.py | 2b672dc3dade37e8142b0e107898540b8ee370a2 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 8,921 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AvailableTimeInfo import AvailableTimeInfo
from alipay.aop.api.domain.KoubeiItemDescription import KoubeiItemDescription
from alipay.aop.api.domain.UnAvailableTimeInfo import UnAvailableTimeInfo
class KoubeiTradeVoucherItemTemplete(object):
def __init__(self):
self._available_time_info_list = None
self._buyer_notes = None
self._support_book = None
self._un_available_time_info_list = None
self._validity_period = None
self._validity_period_range_from = None
self._validity_period_range_to = None
self._validity_period_type = None
self._verify_enable_times = None
self._verify_frequency = None
@property
def available_time_info_list(self):
return self._available_time_info_list
@available_time_info_list.setter
def available_time_info_list(self, value):
if isinstance(value, list):
self._available_time_info_list = list()
for i in value:
if isinstance(i, AvailableTimeInfo):
self._available_time_info_list.append(i)
else:
self._available_time_info_list.append(AvailableTimeInfo.from_alipay_dict(i))
@property
def buyer_notes(self):
return self._buyer_notes
@buyer_notes.setter
def buyer_notes(self, value):
if isinstance(value, list):
self._buyer_notes = list()
for i in value:
if isinstance(i, KoubeiItemDescription):
self._buyer_notes.append(i)
else:
self._buyer_notes.append(KoubeiItemDescription.from_alipay_dict(i))
@property
def support_book(self):
return self._support_book
@support_book.setter
def support_book(self, value):
self._support_book = value
@property
def un_available_time_info_list(self):
return self._un_available_time_info_list
@un_available_time_info_list.setter
def un_available_time_info_list(self, value):
if isinstance(value, list):
self._un_available_time_info_list = list()
for i in value:
if isinstance(i, UnAvailableTimeInfo):
self._un_available_time_info_list.append(i)
else:
self._un_available_time_info_list.append(UnAvailableTimeInfo.from_alipay_dict(i))
@property
def validity_period(self):
return self._validity_period
@validity_period.setter
def validity_period(self, value):
self._validity_period = value
@property
def validity_period_range_from(self):
return self._validity_period_range_from
@validity_period_range_from.setter
def validity_period_range_from(self, value):
self._validity_period_range_from = value
@property
def validity_period_range_to(self):
return self._validity_period_range_to
@validity_period_range_to.setter
def validity_period_range_to(self, value):
self._validity_period_range_to = value
@property
def validity_period_type(self):
return self._validity_period_type
@validity_period_type.setter
def validity_period_type(self, value):
self._validity_period_type = value
@property
def verify_enable_times(self):
return self._verify_enable_times
@verify_enable_times.setter
def verify_enable_times(self, value):
self._verify_enable_times = value
@property
def verify_frequency(self):
return self._verify_frequency
@verify_frequency.setter
def verify_frequency(self, value):
self._verify_frequency = value
def to_alipay_dict(self):
params = dict()
if self.available_time_info_list:
if isinstance(self.available_time_info_list, list):
for i in range(0, len(self.available_time_info_list)):
element = self.available_time_info_list[i]
if hasattr(element, 'to_alipay_dict'):
self.available_time_info_list[i] = element.to_alipay_dict()
if hasattr(self.available_time_info_list, 'to_alipay_dict'):
params['available_time_info_list'] = self.available_time_info_list.to_alipay_dict()
else:
params['available_time_info_list'] = self.available_time_info_list
if self.buyer_notes:
if isinstance(self.buyer_notes, list):
for i in range(0, len(self.buyer_notes)):
element = self.buyer_notes[i]
if hasattr(element, 'to_alipay_dict'):
self.buyer_notes[i] = element.to_alipay_dict()
if hasattr(self.buyer_notes, 'to_alipay_dict'):
params['buyer_notes'] = self.buyer_notes.to_alipay_dict()
else:
params['buyer_notes'] = self.buyer_notes
if self.support_book:
if hasattr(self.support_book, 'to_alipay_dict'):
params['support_book'] = self.support_book.to_alipay_dict()
else:
params['support_book'] = self.support_book
if self.un_available_time_info_list:
if isinstance(self.un_available_time_info_list, list):
for i in range(0, len(self.un_available_time_info_list)):
element = self.un_available_time_info_list[i]
if hasattr(element, 'to_alipay_dict'):
self.un_available_time_info_list[i] = element.to_alipay_dict()
if hasattr(self.un_available_time_info_list, 'to_alipay_dict'):
params['un_available_time_info_list'] = self.un_available_time_info_list.to_alipay_dict()
else:
params['un_available_time_info_list'] = self.un_available_time_info_list
if self.validity_period:
if hasattr(self.validity_period, 'to_alipay_dict'):
params['validity_period'] = self.validity_period.to_alipay_dict()
else:
params['validity_period'] = self.validity_period
if self.validity_period_range_from:
if hasattr(self.validity_period_range_from, 'to_alipay_dict'):
params['validity_period_range_from'] = self.validity_period_range_from.to_alipay_dict()
else:
params['validity_period_range_from'] = self.validity_period_range_from
if self.validity_period_range_to:
if hasattr(self.validity_period_range_to, 'to_alipay_dict'):
params['validity_period_range_to'] = self.validity_period_range_to.to_alipay_dict()
else:
params['validity_period_range_to'] = self.validity_period_range_to
if self.validity_period_type:
if hasattr(self.validity_period_type, 'to_alipay_dict'):
params['validity_period_type'] = self.validity_period_type.to_alipay_dict()
else:
params['validity_period_type'] = self.validity_period_type
if self.verify_enable_times:
if hasattr(self.verify_enable_times, 'to_alipay_dict'):
params['verify_enable_times'] = self.verify_enable_times.to_alipay_dict()
else:
params['verify_enable_times'] = self.verify_enable_times
if self.verify_frequency:
if hasattr(self.verify_frequency, 'to_alipay_dict'):
params['verify_frequency'] = self.verify_frequency.to_alipay_dict()
else:
params['verify_frequency'] = self.verify_frequency
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiTradeVoucherItemTemplete()
if 'available_time_info_list' in d:
o.available_time_info_list = d['available_time_info_list']
if 'buyer_notes' in d:
o.buyer_notes = d['buyer_notes']
if 'support_book' in d:
o.support_book = d['support_book']
if 'un_available_time_info_list' in d:
o.un_available_time_info_list = d['un_available_time_info_list']
if 'validity_period' in d:
o.validity_period = d['validity_period']
if 'validity_period_range_from' in d:
o.validity_period_range_from = d['validity_period_range_from']
if 'validity_period_range_to' in d:
o.validity_period_range_to = d['validity_period_range_to']
if 'validity_period_type' in d:
o.validity_period_type = d['validity_period_type']
if 'verify_enable_times' in d:
o.verify_enable_times = d['verify_enable_times']
if 'verify_frequency' in d:
o.verify_frequency = d['verify_frequency']
return o
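# Round-trip sketch: build a templete from a plain dict and serialize it back
# with to_alipay_dict(). The field values below are illustrative assumptions,
# not documented Alipay values.
if __name__ == '__main__':
    demo = KoubeiTradeVoucherItemTemplete.from_alipay_dict({
        'support_book': 'T',
        'validity_period_type': 'RELATIVE',
        'verify_enable_times': 1,
    })
    print(json.dumps(demo.to_alipay_dict(), ensure_ascii=False))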
| [
"[email protected]"
] | |
c67e116ea2e57bfe9d9df24ae5c86d8b5df7ba4b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02933/s590503236.py | f98f40ee1248ff65fae4dca98193224b194ce15f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | if(int(input())>3199):
print(input())
else:
print("red")
| [
"[email protected]"
] | |
607f59255088fbb01756be227cbf38e9c8055832 | 6630694f401f6f475dd81bb01ff9368db844ccff | /mmpretrain/configs/mae/mae_hivit_base_p16_8xb512_amp_coslr_400e_in1k.py | 6ffcf6d13c049fa8802766d74f7e5c9a803b706e | [
"Apache-2.0"
] | permissive | open-mmlab/mmpretrain | 98a4d6b3bb747efc3d50decebf84fc3ffa41076a | d2ccc44a2c8e5d49bb26187aff42f2abc90aee28 | refs/heads/main | 2023-08-30T19:11:24.771498 | 2023-08-23T02:45:18 | 2023-08-23T02:45:18 | 278,415,292 | 652 | 186 | Apache-2.0 | 2023-09-08T08:01:40 | 2020-07-09T16:25:04 | Python | UTF-8 | Python | false | false | 1,905 | py | # Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base
with read_base():
from .._base_.models.mae_hivit_base_p16 import *
from .._base_.datasets.imagenet_bs512_mae import *
from .._base_.default_runtime import *
from mmengine.hooks.checkpoint_hook import CheckpointHook
from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper
from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR
from mmengine.runner.loops import EpochBasedTrainLoop
from torch.optim.adamw import AdamW
# optimizer wrapper
optim_wrapper = dict(
type=AmpOptimWrapper,
loss_scale='dynamic',
optimizer=dict(
type=AdamW,
lr=1.5e-4 * 4096 / 256,
betas=(0.9, 0.95),
weight_decay=0.05),
paramwise_cfg=dict(
custom_keys={
'norm': dict(decay_mult=0.0),
'bias': dict(decay_mult=0.0),
'pos_embed': dict(decay_mult=0.),
'mask_token': dict(decay_mult=0.),
}))
# learning rate scheduler
param_scheduler = [
dict(
type=LinearLR,
start_factor=0.0001,
by_epoch=True,
begin=0,
end=40,
convert_to_iter_based=True),
dict(
type=CosineAnnealingLR,
T_max=360,
by_epoch=True,
begin=40,
end=400,
convert_to_iter_based=True)
]
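# The two schedulers above implement a 40-epoch linear warmup followed by
# cosine annealing over the remaining 360 epochs (400 total).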
# runtime settings
train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=400)
# only keeps the latest 3 checkpoints
default_hooks.checkpoint = dict(
type=CheckpointHook, interval=1, max_keep_ckpts=3)
randomness.update(seed=0, diff_rank_seed=True)
# auto resume
resume = True
find_unused_parameters = True
# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
auto_scale_lr = dict(base_batch_size=4096)
| [
"[email protected]"
] | |
2a5fb71b730f244801d591ed74c96803ab7eccd9 | e0d16d2dd3bf8490d660fc5ba5ce789bd4f20384 | /temperature_converter/simple.py | d2123c90ef5fccb38def5255d1816123bd236af0 | [] | no_license | tt-n-walters/python-course | 9be8344f4e40f6abe2f8c6355117e8ea1891c7cb | 255dbcddf1f4bd258474df04f3a3a9209c74c01f | refs/heads/master | 2023-06-04T05:57:08.215733 | 2021-06-25T15:25:24 | 2021-06-25T15:25:24 | 380,279,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | print("Enter a temperature in Celcius:")
celcius = input("> ")
celcius = int(celcius)
fahrenheit = celcius * (9 / 5) + 32
print(celcius, "ºC is", fahrenheit, "ºF")
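# Reverse conversion sketch: Fahrenheit back to Celsius.
print(fahrenheit, "ºF is", (fahrenheit - 32) * 5 / 9, "ºC")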
| [
"[email protected]"
] | |
335e23f9cf6ef4b4e6c4541c52d496119e4469ce | 673bf701a310f92f2de80b687600cfbe24612259 | /misoclib/com/liteeth/core/tty/__init__.py | 7ead3ef4cd4d95f47e91d7b8077d5db3d7bc5da7 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mogorman/misoc | d78340a9bf67feaede20e8cac473bcfddbd186a3 | 4ec49e2aadcff0c3ca34ebd0d35013d88f4d3e1f | refs/heads/master | 2021-01-18T05:38:39.670977 | 2015-03-10T05:37:52 | 2015-03-10T05:37:52 | 30,672,191 | 1 | 0 | null | 2015-02-11T22:05:05 | 2015-02-11T22:05:05 | null | UTF-8 | Python | false | false | 2,774 | py | from misoclib.com.liteeth.common import *
from misoclib.com.liteeth.generic import *
class LiteEthTTYTX(Module):
def __init__(self, ip_address, udp_port, fifo_depth=None):
self.sink = sink = Sink(eth_tty_description(8))
self.source = source = Source(eth_udp_user_description(8))
###
if fifo_depth is None:
self.comb += [
source.stb.eq(sink.stb),
source.sop.eq(1),
source.eop.eq(1),
source.length.eq(1),
source.data.eq(sink.data),
sink.ack.eq(source.ack)
]
else:
self.submodules.fifo = fifo = SyncFIFO([("data", 8)], fifo_depth)
self.comb += Record.connect(sink, fifo.sink)
self.submodules.level = level = FlipFlop(max=fifo_depth)
self.comb += level.d.eq(fifo.fifo.level)
self.submodules.counter = counter = Counter(max=fifo_depth)
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
fsm.act("IDLE",
If(fifo.source.stb,
level.ce.eq(1),
counter.reset.eq(1),
NextState("SEND")
)
)
fsm.act("SEND",
source.stb.eq(fifo.source.stb),
source.sop.eq(counter.value == 0),
If(level.q == 0,
source.eop.eq(1),
).Else(
source.eop.eq(counter.value == (level.q-1)),
),
source.src_port.eq(udp_port),
source.dst_port.eq(udp_port),
source.ip_address.eq(ip_address),
If(level.q == 0,
source.length.eq(1),
).Else(
source.length.eq(level.q),
),
source.data.eq(fifo.source.data),
fifo.source.ack.eq(source.ack),
If(source.stb & source.ack,
counter.ce.eq(1),
If(source.eop,
NextState("IDLE")
)
)
)
class LiteEthTTYRX(Module):
def __init__(self, ip_address, udp_port, fifo_depth=None):
self.sink = sink = Sink(eth_udp_user_description(8))
self.source = source = Source(eth_tty_description(8))
###
valid = Signal()
self.comb += valid.eq(
(sink.ip_address == ip_address) &
(sink.dst_port == udp_port)
)
if fifo_depth is None:
self.comb += [
source.stb.eq(sink.stb & valid),
source.data.eq(sink.data),
sink.ack.eq(source.ack)
]
else:
self.submodules.fifo = fifo = SyncFIFO([("data", 8)], fifo_depth)
self.comb += [
fifo.sink.stb.eq(sink.stb & valid),
fifo.sink.data.eq(sink.data),
sink.ack.eq(fifo.sink.ack),
Record.connect(fifo.source, source)
]
class LiteEthTTY(Module):
def __init__(self, udp, ip_address, udp_port,
rx_fifo_depth=64,
tx_fifo_depth=64):
self.submodules.tx = tx = LiteEthTTYTX(ip_address, udp_port, tx_fifo_depth)
self.submodules.rx = rx = LiteEthTTYRX(ip_address, udp_port, rx_fifo_depth)
udp_port = udp.crossbar.get_port(udp_port, dw=8)
self.comb += [
Record.connect(tx.source, udp_port.sink),
Record.connect(udp_port.source, rx.sink)
]
self.sink, self.source = self.tx.sink, self.rx.source
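# Usage sketch (assumptions noted inline): given an instantiated LiteEthUDP
# core `udp`, a TTY bridge can be attached roughly as
#   self.submodules.tty = LiteEthTTY(udp, ip_address, udp_port=10000)
# where `ip_address` is the peer's IP in the integer form expected by the UDP
# core (e.g. via a convert_ip helper from the liteeth common module, if
# available); tty.sink/tty.source then carry the 8-bit character stream.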
| [
"[email protected]"
] | |
3588536acd5fbd95b034ed81cc6e33948259afd1 | 584db1be8b6bdedaa56d186692ad72da5ee07164 | /patron/compute/monitors/__init__.py | 8b0ba4a7aee30af3c51cafc15049eb4abc5292bf | [
"Apache-2.0"
] | permissive | casbin/openstack-patron | 66006f57725cf1c3d735cd5529d3459fd77384c8 | b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25 | refs/heads/master | 2023-05-31T05:23:37.721768 | 2015-12-31T12:18:17 | 2015-12-31T12:18:17 | 382,054,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,280 | py | # Copyright 2013 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Resource monitor API specification.
ResourceMonitorBase provides the definition of minimum set of methods
that needs to be implemented by Resource Monitor.
"""
import functools
import types
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from patron.i18n import _LW
from patron import loadables
compute_monitors_opts = [
cfg.MultiStrOpt('compute_available_monitors',
default=['patron.compute.monitors.all_monitors'],
help='Monitor classes available to the compute which may '
'be specified more than once.'),
cfg.ListOpt('compute_monitors',
default=[],
help='A list of monitors that can be used for getting '
'compute metrics.'),
]
CONF = cfg.CONF
CONF.register_opts(compute_monitors_opts)
LOG = logging.getLogger(__name__)
class ResourceMonitorMeta(type):
def __init__(cls, names, bases, dict_):
"""Metaclass that allows us to create a function map and call it later
to get the metric names and their values.
"""
super(ResourceMonitorMeta, cls).__init__(names, bases, dict_)
prefix = '_get_'
prefix_len = len(prefix)
cls.metric_map = {}
for name, value in cls.__dict__.iteritems():
if (len(name) > prefix_len
and name[:prefix_len] == prefix
and isinstance(value, types.FunctionType)):
metric_name = name[prefix_len:].replace('_', '.')
cls.metric_map[metric_name] = value
@six.add_metaclass(ResourceMonitorMeta)
class ResourceMonitorBase(object):
"""Base class for resource monitors
"""
def __init__(self, parent):
self.compute_manager = parent
self.source = None
self._data = {}
@classmethod
def add_timestamp(cls, func):
"""Decorator to indicate that a method needs to add a timestamp.
When a function returning a value is decorated by the decorator,
which means a timestamp should be added into the returned value.
That is, a tuple (value, timestamp) is returned.
The timestamp is the time when we update the value in the _data.
If users hope to define how the timestamp is got by themselves,
they should not use this decorator in their own classes.
"""
@functools.wraps(func)
def wrapper(self, **kwargs):
return func(self, **kwargs), self._data.get("timestamp", None)
return wrapper
def _update_data(self):
"""Method to update the metrics data.
Each subclass can implement this method to update metrics
into _data. It will be called in get_metrics.
"""
pass
def get_metric_names(self):
"""Get available metric names.
Get available metric names, which are represented by a set of keys
that can be used to check conflicts and duplications
:returns: a set of keys representing metrics names
"""
return self.metric_map.keys()
def get_metrics(self, **kwargs):
"""Get metrics.
Get metrics, which are represented by a list of dictionaries
[{'name': metric name,
'value': metric value,
'timestamp': the time when the value is retrieved,
'source': what the value is got by}, ...]
:param kwargs: extra arguments that might be present
:returns: a list to tell the current metrics
"""
data = []
self._update_data()
for name, func in self.metric_map.iteritems():
ret = func(self, **kwargs)
data.append(self._populate(name, ret[0], ret[1]))
return data
def _populate(self, metric_name, metric_value, timestamp=None):
"""Populate the format what we want from metric name and metric value
"""
result = {}
result['name'] = metric_name
result['value'] = metric_value
result['timestamp'] = timestamp or timeutils.utcnow()
result['source'] = self.source
return result
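# Illustrative sketch, not part of the original module: a hypothetical monitor
# showing the conventions documented above. Via ResourceMonitorMeta, the
# method _get_cpu_percent is published as the metric 'cpu.percent', and
# add_timestamp turns its return value into the (value, timestamp) pair that
# get_metrics() unpacks. The sampled value is a placeholder assumption, not a
# real probe.
class ExampleCPUMonitor(ResourceMonitorBase):
    def __init__(self, parent):
        super(ExampleCPUMonitor, self).__init__(parent)
        self.source = 'example-cpu'
    def _update_data(self):
        # get_metrics() calls this before reading the metric methods.
        self._data = {}
        self._data["timestamp"] = timeutils.utcnow()
        self._data["cpu.percent"] = 42.0
    @ResourceMonitorBase.add_timestamp
    def _get_cpu_percent(self, **kwargs):
        return self._data.get("cpu.percent")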
class ResourceMonitorHandler(loadables.BaseLoader):
"""Base class to handle loading monitor classes.
"""
def __init__(self):
super(ResourceMonitorHandler, self).__init__(ResourceMonitorBase)
def choose_monitors(self, manager):
"""This function checks the monitor names and metrics names against a
predefined set of acceptable monitors.
"""
monitor_classes = self.get_matching_classes(
CONF.compute_available_monitors)
monitor_class_map = {cls.__name__: cls for cls in monitor_classes}
monitor_cls_names = CONF.compute_monitors
good_monitors = []
bad_monitors = []
metric_names = set()
for monitor_name in monitor_cls_names:
if monitor_name not in monitor_class_map:
bad_monitors.append(monitor_name)
continue
try:
# make sure different monitors do not have the same
# metric name
monitor = monitor_class_map[monitor_name](manager)
metric_names_tmp = set(monitor.get_metric_names())
overlap = metric_names & metric_names_tmp
if not overlap:
metric_names = metric_names | metric_names_tmp
good_monitors.append(monitor)
else:
msg = (_LW("Excluding monitor %(monitor_name)s due to "
"metric name overlap; overlapping "
"metrics: %(overlap)s") %
{'monitor_name': monitor_name,
'overlap': ', '.join(overlap)})
LOG.warn(msg)
bad_monitors.append(monitor_name)
except Exception as ex:
msg = (_LW("Monitor %(monitor_name)s cannot be used: %(ex)s") %
{'monitor_name': monitor_name, 'ex': ex})
LOG.warn(msg)
bad_monitors.append(monitor_name)
if bad_monitors:
LOG.warning(_LW("The following monitors have been disabled: %s"),
', '.join(bad_monitors))
return good_monitors
def all_monitors():
"""Return a list of monitor classes found in this directory.
This method is used as the default for available monitors
and should return a list of all monitor classes available.
"""
return ResourceMonitorHandler().get_all_classes()
| [
"[email protected]"
] | |
b9097aa2f33448f7f6f4090ed4a250cea3af2622 | c66955c6fc178955c2024e0318ec7a91a8386c2d | /programQuick/chapterFifteen/demo6.py | 6f2c5581195897bb831999363b61ad251def1e72 | [] | no_license | duheng18/python-study | a98642d6ee1b0043837c3e7c5b91bf1e28dfa588 | 13c0571ac5d1690bb9e615340482bdb2134ecf0e | refs/heads/master | 2022-11-30T17:36:57.060130 | 2019-11-18T07:31:40 | 2019-11-18T07:31:40 | 147,268,053 | 1 | 0 | null | 2022-11-22T03:36:51 | 2018-09-04T00:49:42 | Python | UTF-8 | Python | false | false | 1,691 | py | import datetime, time
'''
strftime 指令 含义
%Y 带世纪的年份,例如'2014'
%y 不带世纪的年份,'00'至'99'(1970 至 2069)
%m 数字表示的月份, '01'至'12'
%B 完整的月份,例如'November'
%b 简写的月份,例如'Nov'
%d 一月中的第几天,'01'至'31'
%j 一年中的第几天,'001'至'366'
%w 一周中的第几天,'0'(周日)至'6'(周六)
%A 完整的周几,例如'Monday'
%a 简写的周几,例如'Mon'
%H 小时(24 小时时钟),'00'至'23'
%I 小时(12 小时时钟),'01'至'12'
%M 分,'00'至'59'
%S 秒,'00'至'59'
%p 'AM'或'PM'
%% 就是'%'字符
'''
halloween2016 = datetime.datetime(2016, 10, 31, 0, 0, 0)
while datetime.datetime.now() < halloween2016:
time.sleep(1)
oct21st = datetime.datetime(2015, 10, 21, 16, 29, 0)
# 2015/10/21 16:29:00
# print(oct21st.strftime('%Y/%m/%d %H:%M:%S'))
# 04:29 PM
# print(oct21st.strftime('%I:%M %p'))
# October of '15
print(oct21st.strftime("%B of '%y"))
# 2015-10-21 00:00:00
# print(datetime.datetime.strptime('October 21,2015', '%B %d,%Y'))
# 2015-10-21 16:29:00
print(datetime.datetime.strptime('2015/10/21 16:29:00', '%Y/%m/%d %H:%M:%S'))
# 2015-10-01 00:00:00
# print(datetime.datetime.strptime("October of '15", "%B of '%y"))
# 2063-11-01 00:00:00
# print(datetime.datetime.strptime("November of '63", "%B of '%y"))
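# Round-trip sketch: strftime and strptime are inverses when given the same
# directive string.
stamp = oct21st.strftime('%Y/%m/%d %H:%M:%S')
print(datetime.datetime.strptime(stamp, '%Y/%m/%d %H:%M:%S') == oct21st)  # True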
| [
"[email protected]"
] | |
64a82974142c99441155b1b98d16bb62a2af6d43 | 114c1f7ceff04e00591f46eeb0a2eb387ac65710 | /g4g/DS/Linked_Lists/Singly_linked_lists/8_search_element_in_Linked_list.py | 7ead23a5a18b1cc959c0f2875e812d2a5015ab62 | [] | no_license | sauravgsh16/DataStructures_Algorithms | 0783a5e6dd00817ac0b6f2b856ad8d82339a767d | d3133f026f972f28bd038fcee9f65784f5d3ea8b | refs/heads/master | 2020-04-23T03:00:29.713877 | 2019-11-25T10:52:33 | 2019-11-25T10:52:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,215 | py | ''' Search an element in a Linked List (Iterative and Recursive) '''
class Node(object):
def __init__(self, val):
self.val = val
self.next = None
class LinkedList(object):
def __init__(self):
self.head = None
self.tail = None
self.size = 0
def push(self, val):
nN = Node(val)
if not self.head:
self.head = nN
self.tail = nN
else:
self.tail.next = nN
self.tail = nN
self.size += 1
def searchIterative(self, key):
if self.head.val == key:
return self.head.val
cur = self.head.next
while cur:
if cur.val == key:
return cur.val
cur = cur.next
return None
    def _searchRecursive(self, node, key):
        # Check the node first so a missing key returns None instead of
        # raising AttributeError when the end of the list is reached.
        if not node:
            return None
        if node.val == key:
            return node.val
        return self._searchRecursive(node.next, key)
def searchRecursive(self, key):
return self._searchRecursive(self.head, key)
ll = LinkedList()
ll.push(1)
ll.push(2)
ll.push(3)
ll.push(4)
ll.push(5)
print ll.searchIterative(10)
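# Missing keys return None in both variants (the recursive version relies on
# checking the node for None before dereferencing it):
print ll.searchIterative(99)
print ll.searchRecursive(99)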
print ll.searchRecursive(2) | [
"[email protected]"
] | |
59c6012139aa84f9d4db9417bcfe97c7e3d33d64 | 710e96fb56f48a91dbd5e34c3e7b07fc24b4d95a | /WebContent/WEB-INF/program/unit/unit_resource.py | b1c62755682edb820081d76745f39050c457dea3 | [] | no_license | yxxcrtd/jitar2.0 | bf6ade6aaf0bdb0ff9a94b011041e0faa13789f1 | 9215d51cf536518ab4c8fea069ef5ae1ff6466c8 | refs/heads/master | 2020-05-31T15:28:38.821345 | 2019-06-05T08:01:39 | 2019-06-05T08:01:39 | 190,351,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,589 | py | from unit_page import *
from base_action import SubjectMixiner
from resource_query import ResourceQuery
class unit_resource(UnitBasePage, SubjectMixiner):
def __init__(self):
UnitBasePage.__init__(self)
def execute(self):
self.unit = self.getUnit()
if self.unit == None:
self.addActionError(u"您所访问的机构不存在!")
return self.ERROR
self.get_resource_list()
#res_cate = __jitar__.categoryService.getCategoryTree("resource")
#request.setAttribute("res_cate", res_cate)
self.get_cate_tree_without_cache()
request.setAttribute("head_nav", "unit_resource")
request.setAttribute("unit", self.unit)
self.putGradeList()
self.putSubjectList()
self.putResouceCateList()
templateName = "template1"
if self.unit.templateName != None:
templateName = self.unit.templateName
return "/WEB-INF/unitspage/" + templateName + "/unit_resource.ftl"
def get_resource_list(self):
qry = ResourceQuery(""" r.resourceId, r.href, r.title, r.fsize, r.createDate, r.recommendState,
u.loginName, u.nickName, r.subjectId as subjectId, grad.gradeName, sc.name as scName """)
#qry.unitId = self.unit.unitId
type = self.params.getStringParam("type")
if type == None or type == "": type = "new"
list_type = ""
if type == "hot":
qry.orderType = ResourceQuery.ORDER_TYPE_VIEWCOUNT_DESC
qry.custormAndWhereClause = " r.approvedPathInfo Like '%/" + str(self.unit.unitId) + "/%'"
list_type = u"最高人气"
elif type == "rcmd":
#qry.recommendState = True
#qry.rcmdState = True
qry.custormAndWhereClause = " r.approvedPathInfo Like '%/" + str(self.unit.unitId) + "/%' And r.rcmdPathInfo Like '%/" + str(self.unit.unitId) + "/%'"
list_type = u"编辑推荐"
elif type == "cmt":
qry.orderType = ResourceQuery.ORDER_TYPE_COMMENTCOUNT_DESC
qry.custormAndWhereClause = " r.approvedPathInfo Like '%/" + str(self.unit.unitId) + "/%'"
list_type = u"评论最多"
else:
type = "new"
qry.custormAndWhereClause = " r.approvedPathInfo Like '%/" + str(self.unit.unitId) + "/%'"
list_type = u"最新资源"
request.setAttribute("type", type)
request.setAttribute("list_type", list_type)
qry.gradelevel = self.params.getIntParamZeroAsNull("level")
qry.subjectId = self.params.getIntParamZeroAsNull("subjectId")
qry.sysCateId = self.params.getIntParamZeroAsNull("categoryId")
qry.gradeId = self.params.getIntParamZeroAsNull("gradeId")
qry.k = self.params.getStringParam("k")
pager = self.createPager()
pager.totalRows = qry.count()
resource_list = qry.query_map(pager)
request.setAttribute("resource_list", resource_list)
request.setAttribute("pager", pager)
request.setAttribute("subjectId", qry.subjectId)
request.setAttribute("categoryId", qry.sysCateId)
def get_cate_tree_without_cache(self):
self.sbj_svc = __jitar__.subjectService
type = self.params.getStringParam("type")
if type == None or type == "": type = "new"
outHtml = ""
subject_list = self.sbj_svc.getMetaSubjectList()
for s in subject_list:
msid = s.getMsubjId()
outHtml = outHtml + "d.add(" + str(msid) + ",0,'" + s.getMsubjName() + "','unit_resource.py?type=" + type + "&subjectId=" + str(msid) + "&unitId=" + str(self.unit.unitId) + "');"
gradeIdList = self.sbj_svc.getMetaGradeListByMetaSubjectId(msid)
if gradeIdList != None:
for gid in gradeIdList:
outHtml = outHtml + "d.add(" + str(msid) + str(gid.getGradeId()) + "," + str(msid) + ",'" + gid.getGradeName() + "','unit_resource.py?type=" + type + "&subjectId=" + str(msid) + "&gradeId=" + str(gid.getGradeId()) + "&target=child&unitId=" + str(self.unit.unitId) + "');"
gradeLevelList = self.sbj_svc.getGradeLevelListByGradeId(gid.getGradeId())
for glevel in gradeLevelList:
outHtml = outHtml + "d.add(" + str(msid) + str(gid.getGradeId()) + str(glevel.getGradeId()) + "," + str(msid) + str(gid.getGradeId()) + ",'" + glevel.getGradeName() + "','unit_resource.py?type=" + type + "&subjectId=" + str(msid) + "&gradeId=" + str(glevel.getGradeId()) + "&level=1&unitId=" + str(self.unit.unitId) + "');"
request.setAttribute("outHtml", outHtml)
def get_cate_tree(self):
        # The cached version below has a bug: it does not filter by unit (organization).
cache = __jitar__.cacheProvider.getCache('category')
self.sbj_svc = __jitar__.subjectService
type = self.params.getStringParam("type")
if type == None or type == "": type = "new"
outHtml = cache.get(type + "_outHtml_resource")
if outHtml == None or outHtml == "":
cache_key = "_subject_list_resource"
subject_list = cache.get(cache_key)
if subject_list == None:
subject_list = self.sbj_svc.getMetaSubjectList()
cache.put(cache_key, subject_list)
outHtml = ""
for s in subject_list:
msid = s.getMsubjId()
outHtml = outHtml + "d.add(" + str(msid) + ",0,'" + s.getMsubjName() + "','unit_resource.py?type=" + type + "&subjectId=" + str(msid) + "&unitId=" + str(self.unit.unitId) + "');"
cache_key = "_gradeIdList_resource" + str(msid)
gradeIdList = cache.get(cache_key)
if gradeIdList == None:
gradeIdList = self.sbj_svc.getMetaGradeListByMetaSubjectId(msid)
cache.put(cache_key, gradeIdList)
if gradeIdList != None:
for gid in gradeIdList:
outHtml = outHtml + "d.add(" + str(msid) + str(gid.getGradeId()) + "," + str(msid) + ",'" + gid.getGradeName() + "','unit_resource.py?type=" + type + "&subjectId=" + str(msid) + "&gradeId=" + str(gid.getGradeId()) + "&target=child&unitId=" + str(self.unit.unitId) + "');"
cache_key = "_gradeLevelList_resource" + str(gid.getGradeId())
gradeLevelList = cache.get(cache_key)
if gradeLevelList == None:
gradeLevelList = self.sbj_svc.getGradeLevelListByGradeId(gid.getGradeId())
cache.put(cache_key, gradeLevelList)
for glevel in gradeLevelList:
outHtml = outHtml + "d.add(" + str(msid) + str(gid.getGradeId()) + str(glevel.getGradeId()) + "," + str(msid) + str(gid.getGradeId()) + ",'" + glevel.getGradeName() + "','unit_resource.py?type=" + type + "&subjectId=" + str(msid) + "&gradeId=" + str(glevel.getGradeId()) + "&level=1&unitId=" + str(self.unit.unitId) + "');"
cache.put(type + "_outHtml_resource", outHtml)
request.setAttribute("outHtml", outHtml)
def createPager(self):
pager = self.params.createPager()
pager.itemName = u"资源"
pager.itemUnit = u"个"
pager.pageSize = 20
return pager
| [
"[email protected]"
] | |
5baa906189990436b9e8671cccd9250487f5b8f8 | f138be1e8e382c404cfe1ff6a35e90fc77fa9bff | /ABC/python/113/A.py | ba8ef3118eabaf3859c5aa4bcbdfe03dec29ba4c | [] | no_license | jackfrostwillbeking/atcoder_sample | 8547d59ca2f66b34905f292191df6c474010fded | d5b2fe8f628fd56eaf23ee7e92938e8ac1b1fef9 | refs/heads/master | 2023-07-25T19:16:14.340414 | 2021-08-26T15:26:08 | 2021-08-26T15:26:08 | 273,857,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | import sys
import math
X,Y = map(int,input().split())
if not (1 <= X <= 100 and 1 <= Y <= 100): sys.exit()
if not (Y % 2 == 0): sys.exit()
print(X+math.floor(Y/2))
| [
"[email protected]"
] |