ext
stringclasses 9
values | sha
stringlengths 40
40
| content
stringlengths 3
1.04M
|
---|---|---|
py | 1a391736fb1542f686e3e426253b48f342555216 | import numpy as np
import random
from collections import defaultdict
from environment import Env
class SARSAgent:
def __init__(self, actions):
self.actions = actions
self.learning_rate = 0.01
self.discount_factor = 0.9
self.epsilon = 0.1
self.q_table = defaultdict(lambda: [0.0, 0.0, 0.0, 0.0])
# <s, a, r, s', a'>의 샘플로부터 큐함수를 업데이트
def learn(self, state, action, reward, next_state, next_action):
print(self.q_table)
current_q = self.q_table[state][action]
next_state_q = self.q_table[next_state][next_action]
new_q = (current_q + self.learning_rate *
(reward + self.discount_factor * next_state_q - current_q))
self.q_table[state][action] = new_q
# 입실론 탐욕 정책에 따라서 행동을 반환
def get_action(self, state):
if np.random.rand() < self.epsilon:
# 무작위 행동 반환
action = np.random.choice(self.actions)
else:
# 큐함수에 따른 행동 반환
state_action = self.q_table[state]
action = self.arg_max(state_action)
return action
@staticmethod
def arg_max(state_action):
max_index_list = []
max_value = state_action[0]
for index, value in enumerate(state_action):
if value > max_value:
max_index_list.clear()
max_value = value
max_index_list.append(index)
elif value == max_value:
max_index_list.append(index)
return random.choice(max_index_list)
if __name__ == "__main__":
env = Env()
agent = SARSAgent(actions=list(range(env.n_actions)))
for episode in range(1000):
# 게임 환경과 상태를 초기화
state = env.reset()
# 현재 상태에 대한 행동을 선택
action = agent.get_action(str(state))
while True:
env.render()
# 행동을 위한 후 다음상태 보상 에피소드의 종료 여부를 받아옴
next_state, reward, done = env.step(action)
# 다음 상태에서의 다음 행동 선택
next_action = agent.get_action(str(next_state))
# <s,a,r,s',a'>로 큐함수를 업데이트
agent.learn(str(state), action, reward, str(next_state), next_action)
state = next_state
action = next_action
# 모든 큐함수를 화면에 표시
env.print_value_all(agent.q_table)
if done:
break
|
py | 1a3917dc763ec2e28bb5f21fe00ebe71154590fe | #!/usr/bin/env python
#
# __init__.py - The fslpy library.
#
# Author: Paul McCarthy <[email protected]>
#
"""The :mod:`fsl` package is a library which contains convenience classes
and functions for use by FSL python tools. It is broadly split into the
following sub-packages:
.. autosummary::
fsl.data
fsl.utils
fsl.scripts
fsl.transform
fsl.version
fsl.wrappers
.. note:: The ``fsl`` namespace is a ``pkgutil``-style *namespace package* -
it can be used across different projects - see
https://packaging.python.org/guides/packaging-namespace-packages/
for details.
"""
__path__ = __import__('pkgutil').extend_path(__path__, __name__) # noqa
|
py | 1a391853c9bc43d1dac6b9848d11ab26c2926dd6 | import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_summ
DO_MEDIAN = True
def randint_triangular(low, high, mode): # inclusive bounds
t = random.triangular(low, high, mode)
# round to nearest int. Assume > 0
if t > 0:
return int(t + 0.5)
else:
return int(t - 0.5)
def write_syn_dataset(csvPathname, rowCount, colCount, low, high, mode, SEED):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
for i in range(rowCount):
rowData = [randint_triangular(low, high, mode) for j in range(colCount)]
rowDataCsv = ",".join(map(str, rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_summary2_percentile(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(100000, 1, 'cD', 300),
(100000, 2, 'cE', 300),
]
timeoutSecs = 10
trial = 1
for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
print 'Trial:', trial
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
legalValues = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10} # set. http://docs.python.org/2/library/stdtypes.html#set
expectedMin = min(legalValues)
expectedMax = max(legalValues)
expectedUnique = (expectedMax - expectedMin) + 1
mode = 0.5 # rounding to nearest int will shift us from this for expected mean
expectedMean = 0.5
expectedSigma = 0.5
write_syn_dataset(csvPathname, rowCount, colCount,
low=expectedMin, high=expectedMax, mode=mode,
SEED=SEEDPERFILE)
csvPathnameFull = h2i.find_folder_and_filename('.', csvPathname, returnFullPath=True)
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key,
timeoutSecs=10, doSummary=False)
print "Parse result['destination_key']:", parseResult['destination_key']
# We should be able to see the parse result?
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
summaryResult = h2o_cmd.runSummary(key=hex_key)
if h2o.verbose:
print "summaryResult:", h2o.dump_json(summaryResult)
summaries = summaryResult['summaries']
scipyCol = 0
for column in summaries:
colname = column['colname']
coltype = column['type']
nacnt = column['nacnt']
stats = column['stats']
stattype= stats['type']
mean = stats['mean']
sd = stats['sd']
zeros = stats['zeros']
mins = stats['mins']
maxs = stats['maxs']
pct = stats['pct']
pctile = stats['pctile']
hstart = column['hstart']
hstep = column['hstep']
hbrk = column['hbrk']
hcnt = column['hcnt']
for b in hbrk:
self.assertIn(int(b), legalValues)
self.assertEqual(len(hbrk), len(legalValues))
# self.assertAlmostEqual(hcnt[0], 0.5 * rowCount, delta=.01*rowCount)
# self.assertAlmostEqual(hcnt[1], 0.5 * rowCount, delta=.01*rowCount)
print "pctile:", pctile
print "maxs:", maxs
# we round to int, so we may introduce up to 0.5 rounding error? compared to "mode" target
self.assertAlmostEqual(maxs[0], expectedMax, delta=0.01)
print "mins:", mins
self.assertAlmostEqual(mins[0], expectedMin, delta=0.01)
for v in pctile:
self.assertTrue(v >= expectedMin,
"Percentile value %s should all be >= the min dataset value %s" % (v, expectedMin))
self.assertTrue(v <= expectedMax,
"Percentile value %s should all be <= the max dataset value %s" % (v, expectedMax))
eV1 = [1.0, 1.0, 1.0, 3.0, 4.0, 5.0, 7.0, 8.0, 9.0, 10.0, 10.0]
if expectedMin==1:
eV = eV1
elif expectedMin==0:
eV = [e-1 for e in eV1]
elif expectedMin==2:
eV = [e+1 for e in eV1]
else:
raise Exception("Test doesn't have the expected percentileValues for expectedMin: %s" % expectedMin)
if colname!='':
# don't do for enums
# also get the median with a sort (h2o_summ.percentileOnSortedlist()
h2o_summ.quantile_comparisons(
csvPathnameFull,
skipHeader=True,
col=scipyCol,
datatype='float',
quantile=0.5 if DO_MEDIAN else 0.999,
h2oSummary2=pctile[5 if DO_MEDIAN else 10],
# h2oQuantilesApprox=qresult_single,
# h2oQuantilesExact=qresult,
)
scipyCol += 1
if __name__ == '__main__':
h2o.unit_main()
|
py | 1a39186a154aa21ba0850586361d8d4003345757 | # Authors: Chris Holdgraf <[email protected]>
#
# License: BSD Style.
from functools import partial
from ...utils import verbose
from ..utils import (has_dataset, _data_path, _data_path_doc,
_get_version, _version_doc)
data_name = 'mtrf'
has_mtrf_data = partial(has_dataset, name=data_name)
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
verbose=None): # noqa: D103
return _data_path(path=path, force_update=force_update,
update_path=update_path, name=data_name,
download=download)
data_path.__doc__ = _data_path_doc.format(name=data_name,
conf='MNE_DATASETS_MTRF_PATH')
def get_version(): # noqa: D103
return _get_version(data_name)
get_version.__doc__ = _version_doc.format(name=data_name)
|
py | 1a391a484478e680275d7b4780644d38bc2db082 | from django.shortcuts import render
from rest_framework.generics import ListAPIView, CreateAPIView
from .serializers import ProductSerializer
from .models import Product
# Create your views here.
class createProduct(CreateAPIView):
serializer_class = ProductSerializer
queryset = Product.objects.all()
class listProduct(ListAPIView):
serializer_class = ProductSerializer
queryset = Product.objects.all() |
py | 1a391adec198c54d7770fe307e999a2c80965606 | # Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from zope.interface import implementer
from OpenSSL import SSL, crypto
from twisted.internet._sslverify import _defaultCurveName
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
from twisted.internet.ssl import CertificateOptions, ContextFactory
from twisted.python.failure import Failure
logger = logging.getLogger(__name__)
class ServerContextFactory(ContextFactory):
"""Factory for PyOpenSSL SSL contexts that are used to handle incoming
connections."""
def __init__(self, config):
self._context = SSL.Context(SSL.SSLv23_METHOD)
self.configure_context(self._context, config)
@staticmethod
def configure_context(context, config):
try:
_ecCurve = crypto.get_elliptic_curve(_defaultCurveName)
context.set_tmp_ecdh(_ecCurve)
except Exception:
logger.exception("Failed to enable elliptic curve for TLS")
context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
context.use_certificate_chain_file(config.tls_certificate_file)
if not config.no_tls:
context.use_privatekey(config.tls_private_key)
context.load_tmp_dh(config.tls_dh_params_path)
context.set_cipher_list("!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")
def getContext(self):
return self._context
def _idnaBytes(text):
"""
Convert some text typed by a human into some ASCII bytes. This is a
copy of twisted.internet._idna._idnaBytes. For documentation, see the
twisted documentation.
"""
try:
import idna
except ImportError:
return text.encode("idna")
else:
return idna.encode(text)
def _tolerateErrors(wrapped):
"""
Wrap up an info_callback for pyOpenSSL so that if something goes wrong
the error is immediately logged and the connection is dropped if possible.
This is a copy of twisted.internet._sslverify._tolerateErrors. For
documentation, see the twisted documentation.
"""
def infoCallback(connection, where, ret):
try:
return wrapped(connection, where, ret)
except: # noqa: E722, taken from the twisted implementation
f = Failure()
logger.exception("Error during info_callback")
connection.get_app_data().failVerification(f)
return infoCallback
@implementer(IOpenSSLClientConnectionCreator)
class ClientTLSOptions(object):
"""
Client creator for TLS without certificate identity verification. This is a
copy of twisted.internet._sslverify.ClientTLSOptions with the identity
verification left out. For documentation, see the twisted documentation.
"""
def __init__(self, hostname, ctx):
self._ctx = ctx
self._hostname = hostname
self._hostnameBytes = _idnaBytes(hostname)
ctx.set_info_callback(
_tolerateErrors(self._identityVerifyingInfoCallback)
)
def clientConnectionForTLS(self, tlsProtocol):
context = self._ctx
connection = SSL.Connection(context, None)
connection.set_app_data(tlsProtocol)
return connection
def _identityVerifyingInfoCallback(self, connection, where, ret):
if where & SSL.SSL_CB_HANDSHAKE_START:
connection.set_tlsext_host_name(self._hostnameBytes)
class ClientTLSOptionsFactory(object):
"""Factory for Twisted ClientTLSOptions that are used to make connections
to remote servers for federation."""
def __init__(self, config):
# We don't use config options yet
pass
def get_options(self, host):
return ClientTLSOptions(
host.decode('utf-8'),
CertificateOptions(verify=False).getContext()
)
|
py | 1a391b7282db0fb328b2b75838c37d544e9bc86a | import syft
class Module(object):
pass
class Overloaded:
def __init__(self):
self.method = Overloaded.overload_method
self.function = Overloaded.overload_function
self.module = Overloaded.overload_module
@staticmethod
def overload_method(attr):
"""
hook args and response for methods that hold the @overloaded.method decorator
"""
def hook_args(self, *args, **kwargs):
# Replace all syft tensor with their child attribute
new_self, new_args, new_kwargs = syft.frameworks.torch.hook_args.hook_method_args(
attr.__name__, self, args, kwargs
)
# Send it to the appropriate class and get the response
response = attr(self, new_self, *new_args, **new_kwargs)
# Put back SyftTensor on the tensors found in the response
response = syft.frameworks.torch.hook_args.hook_response(
attr.__name__, response, wrap_type=type(self), wrap_args=self.get_class_attributes()
)
return response
return hook_args
@staticmethod
def overload_function(attr):
"""
hook args and response for functions that hold the @overloaded.function decorator
"""
def hook_args(*args, **kwargs):
# TODO have a better way to infer the type of tensor -> this is implies
# that the first argument is a tensor (even if this is the case > 99%)
tensor = args[0] if not isinstance(args[0], tuple) else args[0][0]
cls = type(tensor)
# Replace all syft tensor with their child attribute
new_args, new_kwargs, new_type = syft.frameworks.torch.hook_args.hook_function_args(
attr.__name__, args, kwargs
)
# Send it to the appropriate class and get the response
response = attr(*new_args, **new_kwargs)
# Put back SyftTensor on the tensors found in the response
response = syft.frameworks.torch.hook_args.hook_response(
attr.__name__, response, wrap_type=cls, wrap_args=tensor.get_class_attributes()
)
return response
return hook_args
@staticmethod
def overload_module(attr):
module = Module()
attr(module)
return module
overloaded = Overloaded()
|
py | 1a391c9c8081b808f908808301c82a2d36e283aa | import unittest
from nose.plugins.attrib import attr
from jnpr.healthbot import HealthBotClient
from jnpr.healthbot import PlaybookSchema
from jnpr.healthbot import PlayBookInstanceBuilder
from mock import patch
@attr('unit')
class TestPlaybooks(unittest.TestCase):
@patch('jnpr.healthbot.healthbot.requests.Session')
def setUp(self, mock_request):
self.mock_request = mock_request
self.conn = HealthBotClient(
server='1.1.1.1',
user='test',
password='password123')
def test_add_playbook_using_schema_check_existance(self):
self.mock_request().get.side_effect = self._mock_manager
pbs = PlaybookSchema(playbook_name="automation-coredump-pb")
pbs.description = "HbEZ Demo Examples"
pbs.synopsis = 'fpc status'
pbs.rules = ['hbez/hbez-fpc-heap-utilization']
self.assertTrue(self.conn.playbook.add(pbs))
def test_add_playbook_using_schema(self):
self.mock_request().get.side_effect = self._mock_manager
pbs = PlaybookSchema(playbook_name="testing")
pbs.description = "HbEZ Demo Examples"
pbs.rules = ['hbez/hbez-fpc-heap-utilization']
self.assertTrue(self.conn.playbook.add(pbs))
def test_delete_playbook(self):
self.assertTrue(
self.conn.playbook.delete(
playbook_name="testing"))
def test_get_playbook(self):
self.mock_request().get.side_effect = self._mock_manager
obj = self.conn.playbook.get(
playbook_name="automation-coredump-pb")
self.assertEqual(obj.rules, [
"protocol-automation-coredumps/check-coredumps"
])
def test_update_playbook(self):
self.mock_request().get.side_effect = self._mock_manager
obj = self.conn.playbook.get(
playbook_name="automation-coredump-pb")
obj.description = "testing"
self.conn.playbook.update(obj)
self.assertEqual(
self.mock_request().mock_calls[4][2]['json']['description'],
"testing")
def test_get_playbooks(self):
self.mock_request().get.side_effect = self._mock_manager
obj = self.conn.playbook.get()
self.assertGreaterEqual(len(obj), 1)
def test_playbook_instance_builder_with_no_variable(self):
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn, 'automation-coredump-pb', 'HbEZ-instance',
'Core')
pbb.apply()
self.assertEqual(self.mock_request().mock_calls[6][0], 'put')
self.assertEqual(
self.mock_request().mock_calls[6][1][0],
'https://1.1.1.1:8080/api/v1/device-group/Core')
def test_playbook_instance_builder_delete(self):
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn, 'automation-coredump-pb', 'HbEZ-instance',
'Core')
pbb.delete()
self.assertEqual(self.mock_request().mock_calls[7][0], 'put')
self.assertEqual(
self.mock_request().mock_calls[7][1][0],
'https://1.1.1.1:8080/api/v1/device/vmx')
self.assertEqual(
self.mock_request().mock_calls[11][1][0],
'https://1.1.1.1:8080/api/v1/device-group/Core')
def test_playbook_apply_commit(self):
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn, 'automation-coredump-pb', 'HbEZ-instance',
'Core')
pbb.apply(commit=True)
self.assertEqual(self.mock_request().mock_calls[10][0], 'post')
self.assertEqual(
self.mock_request().mock_calls[10][1][0],
'https://1.1.1.1:8080/api/v1/configuration')
def test_playbook_instance_builder_with_no_device_group(self):
from jnpr.healthbot.exception import NotFoundError
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn, 'automation-coredump-pb', 'xyz', 'real')
self.assertRaises(NotFoundError, pbb.apply)
def test_playbook_instance_builder_with_variable(self):
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn,
'forwarding-table-summary',
'HbEZ-instance',
'Core')
routesummary_fib_summary = pbb.rule_variables["protocol.routesummary/check-fib-summary"]
routesummary_fib_summary.route_count_threshold = 200
routesummary_fib_summary.route_address_family = 'abc'
pbb.apply()
self.assertEqual(self.mock_request().mock_calls[7][0], 'put')
self.assertEqual(
self.mock_request().mock_calls[7][1][0],
'https://1.1.1.1:8080/api/v1/device-group/Core')
def test_playbook_instance_builder_with_variable_per_device(self):
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn,
'forwarding-table-summary',
'HbEZ-instance',
'Core')
routesummary_fib_summary = pbb.rule_variables["protocol.routesummary/check-fib-summary"]
routesummary_fib_summary.route_count_threshold = 200
routesummary_fib_summary.route_address_family = 'abc'
pbb.apply(device_ids=['vmx'])
self.assertEqual(self.mock_request().mock_calls[8][0], 'put')
self.assertEqual(
self.mock_request().mock_calls[8][1][0],
'https://1.1.1.1:8080/api/v1/device/vmx')
def test_playbook_instance_builder_with_non_existing_device(self):
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn,
'forwarding-table-summary',
'HbEZ-instance',
'Core')
routesummary_fib_summary = pbb.rule_variables["protocol.routesummary/check-fib-summary"]
routesummary_fib_summary.route_count_threshold = 200
routesummary_fib_summary.route_address_family = 'abc'
self.assertRaises(RuntimeError, pbb.apply, device_ids=['fake'])
def test_clear(self):
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn,
'forwarding-table-summary',
'HbEZ-instance',
'Core')
routesummary_fib_summary = pbb.rule_variables["protocol.routesummary/check-fib-summary"]
routesummary_fib_summary.route_count_threshold = 200
routesummary_fib_summary.route_address_family = 'abc'
pbb.clear()
routesummary_fib_summary = pbb.rule_variables["protocol.routesummary/check-fib-summary"]
self.assertEqual(
routesummary_fib_summary.route_count_threshold,
'10000')
def test_playbook_schema_setter(self):
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn,
'forwarding-table-summary',
'HbEZ-instance',
'Core')
with self.assertRaises(RuntimeError):
pbb.playbook_schema = 30
def test_get_playbook_schema_error(self):
self.mock_request().get.side_effect = self._mock_manager
self.assertRaises(AttributeError, PlayBookInstanceBuilder, self.conn,
'dummy', 'HbEZ-instance', 'Core')
def _mock_manager(self, *args):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
self.ok = True
def json(self):
return self.json_data
def raise_for_status(self):
return None
if args[0] == 'https://1.1.1.1:8080/api/v1/playbook/automation-coredump-pb/?working=true':
obj = MockResponse({
"playbook-name": "automation-coredump-pb",
"rules": [
"protocol-automation-coredumps/check-coredumps"
]
}, 200)
obj.ok = True
return obj
if args[0] == 'https://1.1.1.1:8080/api/v1/topic/protocol-automation-coredumps/rule/check-coredumps/?working=true':
obj = MockResponse({"description": "This rule will monitor for the automation coredumps",
"field": [{"description": "Actual coredump filename",
"field-name": "coredump-filename",
"formula": {"user-defined-function": {"argument": [{"argument": "message",
"value": "$coredump-message"}],
"function-name": "get-core-filename-from-message"}},
"type": "string"},
{"description": "The actual syslog that appears when a coredump happens",
"field-name": "coredump-message",
"sensor": [{"path": "/junos/events/event/message",
"sensor-name": "coredump-detectors",
"where": [{"query": "/junos/events/event/message =~ /.*Core and context for (eventd|cscript).*/"}]}],
"type": "string"},
{"description": "Timestamp of the coredump as registered by the telemetry sensor",
"field-name": "coredump-timestamp",
"sensor": [{"path": "/junos/events/event/timestamp/seconds",
"sensor-name": "coredump-detectors"}],
"type": "string"}],
"keys": ["coredump-message"],
"rule-name": "check-coredumps",
"sensor": [{"open-config": {"frequency": "0s",
"sensor-name": "/junos/events/event[id='SYSTEM']"},
"sensor-name": "coredump-detectors"}],
"synopsis": "To monitor automation coredumps",
"trigger": [{"frequency": "15s",
"term": [{"term-name": "core-generated",
"then": {"status": {"color": "red",
"message": "Coredump was seen: $coredump-message"},
"user-defined-action": [{"argument": [{"argument": "local_dir_name",
"value": "coredumps"}],
"function-name": "get-automation-traces"},
{"argument": [{"argument": "local_dir_name",
"value": "coredump"},
{"argument": "remote_dir_name",
"value": "$coredump-filename"}],
"function-name": "get-file-from-device"},
{"argument": [{"argument": "local_dir_name",
"value": "coredump"},
{"argument": "remote_dir_name",
"value": "/var/log/* /var/tmp/*"}],
"function-name": "get-log-file-from-device"}]},
"when": {"matches-with": [{"left-operand": "$coredump-message",
"right-operand": ".*Core and context.*",
"time-range": "30s"}]}},
{"term-name": "Core-not-generated",
"then": {"status": {"color": "green",
"message": "No core found"}}}],
"trigger-name": "core-generated"}]},
200)
obj.ok = True
return obj
if args[0] == 'https://1.1.1.1:8080/api/v1/playbooks/?working=true':
obj = MockResponse({"playbook": [{"playbook-name": "netsvc-playbook",
"rules": ["chassis.networkservices/netsvc-rule"]},
{"playbook-name": "phyport",
"rules": ["external/interface-info"]},
{"playbook-name": "automation-coredump-pb",
"rules": ["protocol-automation-coredumps/check-coredumps"]},
{"description": "This playbook help to collect eventd debug logs",
"playbook-name": "eventd-debug-collection",
"rules": ["protocol-eventd-debug/collect-debugs"],
"synopsis": "Collect eventd logs"}]},
200)
obj.ok = True
return obj
if args[0] == 'https://1.1.1.1:8080/api/v1/topic/protocol.routesummary/rule/check-fib-summary/?working=true':
obj = MockResponse({
"description": "Collects forwarding-table's total-route-count of each protocol and sets dynamic thresholds and notify anomaly when route count is abnormal",
"field": [
{
"description": "Address family name to be monitored",
"field-name": "address-family",
"sensor": [
{
"path": "address-family",
"sensor-name": "fib-sensor",
"where": [
{
"query": "address-family =~ /^{{route-address-family}}$/"
}
]
}
],
"type": "string"
},
{
"description": "Detects anamoly dynamically using kmeans algorithm",
"field-name": "dt-route-count",
"formula": {
"dynamic-threshold": {
"algorithm": "3sigma",
"field-name": "$route-count",
"learning-period": "7d",
"pattern-periodicity": "1h"
}
},
"type": "integer"
},
{
"description": "Route table type to be monitored",
"field-name": "route-table-type",
"sensor": [
{
"path": "route-table-type",
"sensor-name": "fib-sensor",
"where": [
{
"query": "route-table-type =~ /^{{table-type}}$/"
}
]
}
],
"type": "string"
},
{
"description": "Route table name to be monitored",
"field-name": "table-name",
"sensor": [
{
"path": "table-name",
"sensor-name": "fib-sensor",
"where": [
{
"query": "table-name =~ /^{{route-table-name}}$/"
}
]
}
],
"type": "string"
},
{
"constant": {
"value": "{{route-count-threshold}}"
},
"description": "Route count static threshold",
"field-name": "threshold",
"type": "integer"
}
],
"keys": [
"address-family",
"route-table-type",
"table-name"
],
"rule-name": "check-fib-summary",
"sensor": [
{
"description": "iAgent sensor collect forwarding-table route-count stats from network device",
"iAgent": {
"file": "fib.yml",
"frequency": "10m",
"table": "FibSummaryTable"
},
"sensor-name": "fib-sensor",
"synopsis": "FIB iAgent sensor definition"
}
],
"synopsis": "Forwarding-table protocols routes statistics analyzer",
"trigger": [
{
"frequency": "10m",
"term": [
{
"term-name": "is-route-count-abnormal",
"then": {
"status": {
"color": "red",
"message": "Route count of $table-name of $address-family of $route-table-type is ($route-count) abnormal"
}
},
"when": {
"greater-than-or-equal-to": [
{
"left-operand": "$route-count",
"right-operand": "$threshold",
"time-range": "30m"
}
]
}
},
{
"term-name": "is-route-count-above-dt",
"then": {
"status": {
"color": "yellow",
"message": "Route count of $table-name of $address-family of $route-table-type is ($route-count) is above dynamic threshold"
}
},
"when": {
"equal-to": [
{
"left-operand": "$dt-route-count",
"right-operand": "1",
"time-range": "30m"
}
]
}
},
{
"term-name": "route-count-normal",
"then": {
"status": {
"color": "green",
"message": "Route count of $table-name of $address-family of $route-table-type is ($route-count) normal"
}
}
}
],
"trigger-name": "fib-route-count"
}
],
"variable": [
{
"description": "address-family names to monitor, regular expression, eg 'Internet|Internet6|MPLS|VPLS'",
"name": "route-address-family",
"type": "string",
"value": ".+"
},
{
"description": "Forwarding table's each protocol's route count threshold",
"name": "route-count-threshold",
"type": "int",
"value": "10000"
},
{
"description": "route table names to monitor, regular expression, eg 'default.inet|default.inet6|vpn_0.inet'",
"name": "route-table-name",
"type": "string",
"value": ".+"
},
{
"description": "route table types to monitor, regular expression, eg 'perm|intf|user'",
"name": "table-type",
"type": "string",
"value": ".+"
}
]
},
200)
obj.ok = True
return obj
if args[0] == 'https://1.1.1.1:8080/api/v1/playbook/forwarding-table-summary/?working=true':
obj = MockResponse({
"description": "Playbook monitors forwarding-table's each protocol's route count and notifies anomaly when route count is above static or dynamic threshold",
"playbook-name": "forwarding-table-summary",
"rules": [
"protocol.routesummary/check-fib-summary"
],
"synopsis": "Forwarding table and protocol routes key performance indicators"
}, 200)
obj.ok = False
return obj
elif args[0] == 'https://1.1.1.1:8080/api/v1/device-group/Core/?working=true':
return MockResponse({"description": "testing",
"device-group-name": "Core",
"devices": ["vmx"],
"native-gpb": {"ports": [22000]},
"notification": {},
"playbooks": ["eventd-debug-collection",
"eventd-kpis-playbook",
'automation-coredump-pb'],
"reports": [],
"variable": [{"@": {"changed-seconds": 1564722219},
"instance-id": "HbEZ-instance",
"playbook": "automation-coredump-pb",
"rule": "x/y",
"variable-value": []}]},
200)
elif args[0] == 'https://1.1.1.1:8080/api/v1/device/vmx/?working=true':
return MockResponse({
"authentication": {
"password": {
"password": "xxxx",
"username": "xxxx"
}
},
"device-id": "vmx",
"host": "10.221.136.140",
"open-config": {
"port": 32767
},
"system-id": "testing",
"variable": [{"@": {"changed-seconds": 1564722219},
"instance-id": "HbEZ-instance",
"playbook": "automation-coredump-pb",
"rule": "x/y",
"variable-value": []}],
"vendor": {
"juniper": {
"operating-system": "junos"
}
}
}, 200)
elif args[0] == 'https://1.1.1.1:8080/api/v1/playbook/testing/?working=true':
obj = MockResponse({
"detail": "Playbook not found",
"status": 404
}, 404)
obj.ok = False
return obj
elif args[0] == 'https://1.1.1.1:8080/api/v1/device/vmx/?working=true':
return MockResponse({
"authentication": {
"password": {
"password": "xxxx",
"username": "xxxx"
}
},
"device-id": "vmx",
"host": "10.221.xxx.xxx",
"open-config": {
"port": 32767
},
"system-id": "testing",
"variable": [],
"vendor": {
"juniper": {
"operating-system": "junos"
}
}
}, 200)
return MockResponse(None, 404)
|
py | 1a391ca7d724fb63b94357ad63f619fef9e447bc | import typing
from discord_bot_eternal_dice.model.discord_command import DiscordCommand
from discord_bot_eternal_dice.model.discord_event import CommandType, DiscordEvent
class DiscordRoute:
def __init__(self, handler, command_type: CommandType, command: str, subcommand: str = None,
options: typing.Dict = None):
self.handler = handler
self.command_type = command_type
self.subcommand = subcommand
self.command = command
self.options = options
def matches(self, event: DiscordEvent) -> bool:
if event.command.command_name != self.command:
return False
if event.command.subcommand_name is not None and event.command.subcommand_name != self.subcommand:
return False
return True
def validate(self, command: DiscordCommand) -> bool:
unconsumed_options = list(command.options.keys())
for option, option_type in self.options.items():
if option not in command.options:
return False
if type(command.options[option]) is not option_type:
return False
unconsumed_options.remove(option)
if len(unconsumed_options) > 0:
return False
return True
|
py | 1a391cc4276e388a49ad01045dd6195bfb75fef6 | import string
def is_pangram(my_text, a='abcdefghijklmnopqrstuvwxyz'):
# return not set(a) - set(mytext.lower())
return set(a) <= set(my_text.lower())
# my_pangram = is_pangram("Sodien ir otrdiena divpadsmitais janvaris")
# print(my_pangram)
# my_pangram = is_pangram("Šodien ir otrdiena divpadsmitais janvāris")
# print(my_pangram)
# print(string.ascii_lowercase)
print(is_pangram("The five boxing wizards jump quickly", a=string.ascii_lowercase))
# print(is_pangram("The not boxing wizards jump quickly", a=string.ascii_lowercase))
# def is_pangram(my_text: str, eng="abcdefghijklmnopqrstuvwxyz",
# lv="aābcčdeēfgģhiījkķlļmnņoprsštuūvzž0123456789") -> bool:
# """ Returns True or False: is given text a pangram.
# You can choose pangram language (ENG or LV)"""
# # my_text_list = my_text.split()
# # my_text = "".join(my_text_list).lower()
# choose = input("In which language would you like to chek pangram? "
# "Write ENG or LV: ")
# language = []
# if choose.lower() == "eng":
# language = eng
# elif choose.lower() == "lv":
# language = lv
# else:
# print("Incorrect answer")
# return set(language) <= set(my_text.lower())
print(is_pangram('Tfū, čeh, džungļos 123456789 0 blīkšķ, zvaņģim jācērp!')) |
py | 1a391e1fb9993db2f64539167421d6b987165829 | '''ALUMNA: HUAMANI TACOMA ANDY
EJERCICIO 03: STRING MATCHING'''
# DESCRIPCION: La entrada consta de varios casos de prueba. Cada caso de prueba consta de dos líneas, primero un patrón no vacío y luego un texto no vacío. La entrada finaliza al final del archivo. El archivo de entrada no superará los 5 Mb. Para cada caso de prueba, genere una línea que contenga las posiciones de todas las ocurrencias del patrón en el texto, de la primera a la última, separadas por un solo espacio
import sys
def construct_partial_match_table(pattern):
# EL METODO CONSTRUCT_PARTIAL_MATCH_TABLE RECIBE UN PATRÓN Y DEVUELVE UNA TABLA DE COINCIDENCIAS PARCIALES PARA USAR EN EL ALGORITMO KMP
string_length = len(pattern)
current_prefix_length = 0
# Inicialice la tabla de coincidencias parciales para tener 0 en todos los índices
partial_match_table = [0] * string_length
if string_length == 1:
return partial_match_table
# Repita cada carácter del patrón para comprobar los sufijos parciales en diferentes puntos del patrón.
# Comenzamos en el índice 1 porque en el índice 0, trivialmente no hay un sufijo parcial posible
for current_index, current_char in enumerate(pattern[1:], 1):
# Si tuviéramos un sufijo parcial que coincidiera con un prefijo adecuado, pero el siguiente carácter de nuestra cadena rompe la coincidencia, nuestra longitud de coincidencia parcial máxima posible es el valor de nuestra tabla en el índice anterior (nuestra coincidencia más reciente) Reiteramos esto hasta que carácter al que ahora apuntamos coincide con nuestro carácter actual (o la longitud del prefijo es 0, lo que significa que no hay coincidencia parcial en este índice)
while current_prefix_length > 0 and pattern[current_prefix_length] != current_char:
current_prefix_length = partial_match_table[current_prefix_length - 1]
if pattern[current_prefix_length] == current_char:
current_prefix_length += 1
partial_match_table[current_index] = current_prefix_length
return partial_match_table
# El algoritmo de Knuth-Morris-Pratt para la coincidencia de cadenas
def kmp_string_search(given_string, pattern):
# Cree una tabla de coincidencias parciales para usar en el algoritmo de búsqueda KMP
table = construct_partial_match_table(pattern)
given_string_length = len(given_string)
pattern_length = len(pattern)
index_to_begin_search = 0
given_index = 0
pattern_index = 0
locations_of_matches = []
# Iterar a través de cada carácter de la cadena que deseamos comprobar.
while given_string_length - index_to_begin_search > pattern_length:
# Si bien el carácter actual en nuestra subcadena y la cadena dada coinciden, incremente cada uno en 1 para comparar los siguientes caracteres (a menos que lleguemos al final de la cadena)
while pattern_index < pattern_length and given_string[given_index] == pattern[pattern_index]:
given_index += 1
pattern_index += 1
# pattern_index solo se incrementa mientras
# substring[0:pattern_index] == given_string[index_to_begin_search:given_index]
# Entonces, si pattern_index alcanza la longitud de la subcadena, sabemos que hemos encontrado una coincidencia para toda la subcadena
if pattern_index >= pattern_length:
locations_of_matches.append(str(index_to_begin_search))
# Verificamos el valor en nuestra tabla de coincidencia parcial para la coincidencia más reciente que hemos encontrado.
# Si esta coincidencia está en cualquier lugar más allá del comienzo de nuestra cadena, given_index permanece igual,
# pattern_index toma este valor, e index_to_begin_search intuitivamente se convierte en la diferencia en estos números
if pattern_index > 0 and table[pattern_index - 1] > 0:
index_to_begin_search = given_index - table[pattern_index - 1]
pattern_index = table[pattern_index - 1]
# Si esta coincidencia está al principio de nuestra cadena y no hemos encontrado ningún carácter en nuestra subcadena en
# el actual index_to_begin_search, incrementamos nuestro given_index en 1 para comenzar a buscar allí.
# En cualquier caso, actualizamos nuestro index_to_begin_search y nuestro pattern_index (si aún no está al comienzo de nuestra subcadena)
else:
if given_index == index_to_begin_search:
given_index += 1
index_to_begin_search = given_index
if pattern_index > 0:
pattern_index = table[pattern_index - 1]
# Nuestro código solo reconoce 'finding' una coincidencia de subcadena cuando incrementamos pattern_index más allá de la longitud del
# subcadena, y esto no puede suceder una vez que llegamos al final de nuestra cadena dada
# Para dar cuenta de esto, simplemente verificamos por separado si el final de nuestra cadena dada coincide con nuestra subcadena
if given_string[-pattern_length:] == pattern:
locations_of_matches.append(str(len(given_string) - pattern_length))
print(' '.join(locations_of_matches))
# CASO DE PRUEBA
# Inicialice todas las variables booleanas y de cadena que usaremos durante nuestra prueba
string_to_check = ''
pattern_to_check = ''
check_ready = False
# Leer cada línea independientemente del stdin
# En las líneas impares, se nos da el patrón para verificar
# En las líneas pares, se nos da la cadena para verificar
for line in sys.stdin:
if not check_ready:
pattern_to_check = line.rstrip('\n')
check_ready = True
else:
string_to_check = line.rstrip('\n')
check_ready = False
kmp_string_search(string_to_check, pattern_to_check) |
py | 1a391fbda3e32d4d4d27f88844930cee9990236e | #Aula 107
#Desafio:
'''
Crie um módulo chamado moeda.py que tenha as funções incorporadas aumentar(), diminuir(), dobro() e metade().
Faça também um programa que importe esse módulo e use algumas dessas funções.
''' |
py | 1a3920bc87aa3a92a82b58aeb927bdc2db949645 | #!/usr/bin/env python
###############################################################################
#
# superimposessemap.py - Superimpose structures according to SSE mapping
#
# File: superimposessemap.py
# Author: Alex Stivala
# Created: August 2008
#
#
# Supermipose in 3D the residues in corresponding SSEs by orthogonal
# transformations (using SVD) using the Bio.PDB.Superimposer module.
#
# $Id: superimposessemap.py 1821 2008-08-18 00:54:56Z astivala $
#
###############################################################################
"""
Using the SSE mapping from soln2ssemap.py, which shows pairs of SSE
sequential (from 1) numbers that correspond to each other, use orthogonal
transormation to superimpose the residues in corresponding SSEs,
calculating RMSD and producing superimposition in a PDB file for visualization.
Requires the ptsecstruct.py module to get secondary structures using
DSSP (or STRIDE) (add directory contianing to to PYTHONPATH).
Note that these must be the same definintions used
to produce the mapping, i.e. that the tableaux database and query
were built with, otherwise it won't realy make sense.
"""
import warnings # so we can suppress the annoying tempnam 'security' warning
import sys,os
import getopt
from time import strftime,localtime
import Bio.PDB
import ptsecstruct
from ptutils import biopdbresid_to_pdbresseq,get_int_icode
from parsessemap import parse_ssemap,SearchMap,QuerySSEMap
from pathdefs import ASTRAL_ROOT
#-----------------------------------------------------------------------------
#
# Function definitions
#
#-----------------------------------------------------------------------------
def get_structure(scopsid, thepdbfile=None):
"""
Get Bio.PDB parsed structure for specified identifier or PDB file.
Parameters:
scopsid - SCOP identifier to get SSEs for; used to locate file
under ASTRAL SCOP hierarchy.
thepdbfile - (default None) if not None, PDB file to get SSEs for,
overriding scopsid.
Return value:
Bio.PDB parsed structure.
"""
if thepdbfile:
pdbfile = thepdbfile
else:
pdbfilename = os.path.join(scopsid[2:4].lower(),
scopsid.lower() + '.ent')
pdbfile = os.path.join(ASTRAL_ROOT, pdbfilename)
parser = Bio.PDB.PDBParser()
structure = parser.get_structure(scopsid, pdbfile)
return structure
def get_sse_nodes(scopsid, thepdbfile=None):
"""
Get SSE definitions in form of PTNode objects
from the supplied SCOP sid using
DSSP. Uses the ptsecstruct.py module, note comments at top of this
module also regarding ensuring the same definitions are used here
as for the actual search.
Parameters:
scopsid - SCOP identifier to get SSEs for; used to locate file
under ASTRAL SCOP hierarchy.
thepdbfile - (default None) if not None, PDB file to get SSEs for,
overriding scopsid.
Return value:
list of PTNode objects represneting the SSEs.
"""
if thepdbfile:
pdbfile = thepdbfile
else:
pdbfilename = os.path.join(scopsid[2:4].lower(),
scopsid.lower() + '.ent')
pdbfile = os.path.join(ASTRAL_ROOT, pdbfilename)
secstruct = ptsecstruct.read_secstruct_from_dssp(pdbfile)
return secstruct.get_sse_tuple_list()
def get_residue_list(model):
"""
Get list of Bio.PDB.Residue objects in supplied Bio.PDB.Model
Parmeters:
model - Bio.PDB.Model object
Return value:
List of Bio.PDB.Residue objects in the model
"""
residue_list = []
for chain in model:
# id of a residue in Bio.PDB is tuple (hetatm, resseqnum, icode)
residue_list += [ residue for residue in chain.get_unpacked_list()
if Bio.PDB.is_aa(residue) ]
return residue_list
def build_resid_dict(residue_list):
"""
Build dictionary mapping (chainid, pdb_resid) to index in residue_list
for all residues, not just those in this domain.
Parameters:
residue_list - list of Bio.PDB.Residue objects
Return value:
dict of { {chainid,pdb_resseq) : seqindx }
where chainid and pdb_resseq make up
the PDB residue identifier, the pdb_resseq
being string resnum+icode if any e.g.
'60' or '60A', seqindx is the indiex
into sequential list of all residues
residue_list.
"""
pdb_resid_dict = {}
seq_indx = 0
while seq_indx < len(residue_list):
residue = residue_list[seq_indx]
pdb_resid_dict[( ptsecstruct.pdb_chainid_to_stride_chainid(
residue.get_full_id()[2]),
biopdbresid_to_pdbresseq(residue.get_id()) )] = seq_indx
seq_indx += 1
return pdb_resid_dict
def get_matched_residues(matched_sses, query_struct, db_struct):
"""
Given the list of correpsonding SSEs in the two structures, return
list of corresponding Bio.PDB.Residue objects.
Parameters:
matched_sses - list of (A,B) tuples where A and B are
tuples (chain, start_resi, end_resi, type) in
query_struct and db_struct respectively.
query_struct - Bio.PDB.Structure
db_struct - Bio.PDB.Structure
Return value:
tuple (match_query_residues, match_db_residues) of equal length lists of
corresponding Bio.PDB.Residue objects in query and db structs resp.
"""
query_model = query_struct[0] # always using model 0 (TODO)
db_model = db_struct[0] # always using model 0 (TODO)
query_residue_list = get_residue_list(query_model)
query_resid_dict = build_resid_dict(query_residue_list)
db_residue_list = get_residue_list(db_model)
db_resid_dict = build_resid_dict(db_residue_list)
match_query_residues = []
match_db_residues = []
for ((qchain, qstart_resi, qend_resi, qtype),
(dchain, dstart_resi, dend_resi, dtype)) in matched_sses:
try:
start_indx = query_resid_dict[(qchain, qstart_resi)]
except KeyError:
# May be HETATM
while not query_resid_dict.has_key((qchain, qstart_resi)):
qstart_resi = str(get_int_icode(qstart_resi)[0] + 1)
start_indx = query_resid_dict[(qchain, qstart_resi)]
try:
end_indx = query_resid_dict[(qchain, qend_resi)]
except KeyError:
# May be HETATM
while not query_resid_dict.has_key((qchain, qend_resi)):
qend_resi = str(get_int_icode(qend_resi)[0] - 1)
end_indx = query_resid_dict[(qchain, qend_resi)]
query_residues = query_residue_list[start_indx : end_indx + 1]
try:
start_indx = db_resid_dict[(dchain, dstart_resi)]
except KeyError:
# May be HETATM
while not db_resid_dict.has_key((dchain, dstart_resi)):
dstart_resi = str(get_int_icode(dstart_resi)[0] + 1)
start_indx = db_resid_dict[(dchain, dstart_resi)]
try:
end_indx = db_resid_dict[(dchain, dend_resi)]
except KeyError:
# May be HETATM
while not db_resid_dict.has_key((dchain, dend_resi)):
dend_resi = str(get_int_icode(dend_resi)[0] - 1)
end_indx = db_resid_dict[(dchain, dend_resi)]
db_residues = db_residue_list[start_indx : end_indx + 1]
# # if the SSEs are of unequal length, just truncate the longer
# # FIXME: should do something better here, e.g. use residues
# # in middle of SSEs since definitions at ends probably less certain
# if len(db_residues) > len(query_residues):
# db_residues = db_residues[:len(query_residues)]
# elif len(query_residues) > len(db_residues):
# query_residues = query_residues[:len(db_residues)]
# match_query_residues += query_residues
# match_db_residues += db_residues
# # use the first and last residues in each SSE
# # FIXME: should really use projected enpoints on vector
# # to represent the vector actually used to construct tableau
# # as per fit_axis in ptnode.py
# match_query_residues += [query_residues[0], query_residues[-1]]
# match_db_residues += [db_residues[0], db_residues[-1]]
# another dodgy way: just the 'most cetnral' residue (FIXME)
match_query_residues.append(query_residues[len(query_residues)/2])
match_db_residues.append(db_residues[len(db_residues)/2])
assert(len(match_query_residues) == len(match_db_residues))
return (match_query_residues, match_db_residues)
#-----------------------------------------------------------------------------
#
# Main
#
#-----------------------------------------------------------------------------
def usage(progname):
"""
Print usage message and exit
"""
sys.stderr.write("Usage: " +progname + " [-d domainid] [-u query_pdbfile] [-b db_pdbfile] [-o outputdir] \n")
sys.stderr.write(
"-d domainid: use this structure, if more than one in input\n"
"-u query_pdbfile: filename of query PDB file. If not specified then\n"
" identifier is used to find in ASTRAL SCOP hierarchy.\n"
"-b db_pdbfile: filename of database PDB file. If not specfied then\n"
" identifier is used to find in ASTRAL SCOP hierarchy.\n"
" Only valid if there is only one domain (either becuase -d is\n"
" specified or there is only one in the input).\n"
"-o outputdir: directory to write PDB of superimposed structures in.\n"
)
sys.exit(1)
def main():
"""
main for superimposessemap.py
Usage: superimposessemap.py [-d domainid] [-s] [-u query_pdbfile] [-b db_pdbfile] [-o outputdir]
-d domainid: only output for this domain, not all
-u query_pdbfile: filename of query PDB file. If not specified then
identifier is used to find in ASTRAL SCOP hierarchy.
-b db_pdbfile: filename of database PDB file. If not specfied then
identifier is used to find in ASTRAL SCOP hierarchy.
Only valid if there is only one domain (either becuase -d is
specified or there is only one in the input).
-o outputdir: directory to write PDB files of superimposed structures in.
Input is on stdin, the output of soln2ssemap.py,
identifier and score (as per input), then
for each matching a line containing
i and j separated by a space,
one per line (with blank line before next id) e.g.:
d1wiua_ -23.0000
1 1
3 2
8 4
9 5
11 6
14 9
The first SSE number on each line is in the query structure
(specified in header information), the second
is in the db structure (d1wiua_ in example).
Output is RMSD value on stdout, and PDB file(s) in specified directory if -o
specfied.
stdout output format is one result per line, fields whitespace delimited:
identifier score num_sses_matched num_aligned_points rmsd
e.g.
d1t10a_ -40.9999 8 16 16.93
num_aligned_points is number of points used in the superposition,
RMSD is the RMS deviation of those points (in Angstroms).
"""
global verbose
verbose = False
dbdomid = None
query_pdbfile = None
db_pdbfile = None
outputdir = None
try:
opts,args = getopt.getopt(sys.argv[1:], "d:u:b:o:")
except:
usage(os.path.basename(sys.argv[0]))
for opt,arg in opts:
if opt == "-d": # domain id specified, only get this one
dbdomid = arg
elif opt == "-u": # query PDB filename
query_pdbfile = arg
elif opt == "-b": # db PDB filename
db_pdbfile = arg
elif opt == "-o": # output directory
outputdir = arg
else:
usage(os.path.basename(sys.argv[0]))
if len(args) != 0:
usage(os.path.basename(sys.argv[0]))
search_maps = parse_ssemap(sys.stdin)
if (db_pdbfile and not dbdomid and len(search_maps.query_ssemap_list) > 1):
sys.stderr.write("ERROR: -b specified without -d and more than one "
"structure on input\n")
sys.exit(1)
query_sse_nodes = get_sse_nodes(search_maps.queryid, query_pdbfile)
query_structure = get_structure(search_maps.queryid, query_pdbfile)
for query_ssemap in search_maps.query_ssemap_list:
if ((not dbdomid) or (query_ssemap.domid == dbdomid)):
db_sse_nodes = get_sse_nodes(query_ssemap.domid, db_pdbfile)
db_structure = get_structure(query_ssemap.domid, db_pdbfile)
sse_map = query_ssemap.sse_map
if len(sse_map) == 0:
sys.stderr.write('no SSEs matched for ' + query_ssemap.domid +
': skipping\n')
continue
matched_sse_nodes = [(query_sse_nodes[i-1],db_sse_nodes[j-1]) for (i,j) in sse_map]
matched_residues = get_matched_residues(matched_sse_nodes,
query_structure,
db_structure)
# get Carbon alpha atoms for matched residues
query_atoms = [residue['CA'] for residue in matched_residues[0]]
db_atoms = [residue['CA'] for residue in matched_residues[1]]
# get orthogonal transformation to superimpose query and db atoms
superimposer = Bio.PDB.Superimposer()
superimposer.set_atoms(query_atoms, db_atoms)
# get the RMSD for the atoms used to calculate transformation
rmsd = superimposer.rms
sys.stdout.write('%s %8.4f %4d %4d %6.2f\n' %
(query_ssemap.domid,query_ssemap.score,
len(sse_map),
len(matched_residues[0]), rmsd))
if outputdir:
if not os.path.isdir(outputdir):
sys.stderr.write("'" + outputdir + "' is not an existing "
"directory, no output written\n")
else:
# apply the transformation to all db atoms
superimposer.apply(db_structure.get_atoms())
# save aligned structure as PDB file
io = Bio.PDB.PDBIO()
io.set_structure(db_structure)
outpdbfilename = search_maps.queryid.lstrip().rstrip() + \
'_' + \
query_ssemap.domid.lstrip().rstrip() + \
'.pdb'
outpdbfh = open(os.path.join(outputdir,outpdbfilename), 'w')
outpdbfh.write('REMARK generated by ' +
os.path.basename(sys.argv[0]) + '\n')
timestamp = strftime("%d%b%Y %H:%M:%S", localtime())
outpdbfh.write('REMARK on ' + timestamp + '\n')
outpdbfh.write('REMARK \n')
outpdbfh.write('REMARK ' + query_ssemap.domid +
' superimposed on ' + search_maps.queryid +
'\n')
outpdbfh.write('REMARK SCORE = %8.4f\n' % query_ssemap.score)
outpdbfh.write('REMARK NSSES = %4d\n' % len(sse_map))
outpdbfh.write('REMARK NRES = %4d\n' % len(matched_residues[0]))
outpdbfh.write('REMARK RMSD = %6.2f\n' % rmsd)
outpdbfh.write('REMARK \n')
outpdbfh.write('REMARK from:\n')
for cline in search_maps.comment_lines:
outline = cline[:65]
outpdbfh.write('REMARK ' + outline)
if outline[-1] != '\n':
outpdbfh.write('\n')
io.save(outpdbfh)
outpdbfh.close()
if __name__ == "__main__":
warnings.filterwarnings('ignore', 'tempnam', RuntimeWarning)
main()
|
py | 1a392137c527b5f616c18f6e5f8dd35d59d2b693 | # Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import shutil
import sys
import tempfile
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from jupyter_core import paths as jpaths
from IPython import paths as ipaths
from ipykernel.kernelspec import install
pjoin = os.path.join
tmp = None
patchers = []
def setup():
"""setup temporary env for tests"""
global tmp
tmp = tempfile.mkdtemp()
patchers[:] = [
patch.dict(os.environ, {
'HOME': tmp,
# Let tests work with --user install when HOME is changed:
'PYTHONPATH': os.pathsep.join(sys.path),
}),
]
for p in patchers:
p.start()
# install IPython in the temp home:
install(user=True)
def teardown():
for p in patchers:
p.stop()
try:
shutil.rmtree(tmp)
except (OSError, IOError):
# no such file
pass
|
py | 1a3921e8d5f209cd7768da92d3493d3002180033 | # coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="djvu_properties.py">
# Copyright (c) 2018-2020 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
import pprint
import re
import six
class DjvuProperties(object):
"""Represents properties of djvu file.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'background_color': 'str',
'has_background_color': 'bool',
'pages_count': 'int'
}
attribute_map = {
'background_color': 'BackgroundColor',
'has_background_color': 'HasBackgroundColor',
'pages_count': 'PagesCount'
}
def __init__(self, background_color=None, has_background_color=None, pages_count=None):
"""DjvuProperties - a model defined in Swagger"""
super(DjvuProperties, self).__init__()
self._background_color = None
self._has_background_color = None
self._pages_count = None
if background_color is not None:
self.background_color = background_color
if has_background_color is not None:
self.has_background_color = has_background_color
if pages_count is not None:
self.pages_count = pages_count
@property
def background_color(self):
"""Gets the background_color of this DjvuProperties.
Gets or sets background color.
:return: The background_color of this DjvuProperties.
:rtype: str
"""
return self._background_color
@background_color.setter
def background_color(self, background_color):
"""Sets the background_color of this DjvuProperties.
Gets or sets background color.
:param background_color: The background_color of this DjvuProperties.
:type: str
"""
self._background_color = background_color
@property
def has_background_color(self):
"""Gets the has_background_color of this DjvuProperties.
Gets or sets a value indicating whether background color is used.
:return: The has_background_color of this DjvuProperties.
:rtype: bool
"""
return self._has_background_color
@has_background_color.setter
def has_background_color(self, has_background_color):
"""Sets the has_background_color of this DjvuProperties.
Gets or sets a value indicating whether background color is used.
:param has_background_color: The has_background_color of this DjvuProperties.
:type: bool
"""
if has_background_color is None:
raise ValueError("Invalid value for `has_background_color`, must not be `None`")
self._has_background_color = has_background_color
@property
def pages_count(self):
"""Gets the pages_count of this DjvuProperties.
Gets or sets pages count.
:return: The pages_count of this DjvuProperties.
:rtype: int
"""
return self._pages_count
@pages_count.setter
def pages_count(self, pages_count):
"""Sets the pages_count of this DjvuProperties.
Gets or sets pages count.
:param pages_count: The pages_count of this DjvuProperties.
:type: int
"""
if pages_count is None:
raise ValueError("Invalid value for `pages_count`, must not be `None`")
self._pages_count = pages_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DjvuProperties):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a392218586a381fedc55f6b52a3f05e9b0288f0 | import os
import glob
import sys
import shutil
import pysam
from bcbio.pipeline import config_utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.utils import (safe_makedir, file_exists)
from bcbio.provenance import do
from bcbio import utils
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio import bam
from bcbio import broad
from bcbio.wgbsseq import kits
def align(fastq_file, pair_file, ref_file, names, align_dir, data):
assert data["analysis"].lower().startswith("wgbs-seq"), "No comparible alignment."
config = data["config"]
sample = dd.get_sample_name(data)
out_prefix = os.path.join(align_dir, dd.get_lane(data))
out_dir = os.path.join(align_dir, "%s_bismark" % dd.get_lane(data))
if not ref_file:
logger.error("bismark index not found. You can install "
"the index for your genome with: bcbio_nextgen.py upgrade "
"--aligners bismark --genomes genome-build-name --data")
sys.exit(1)
final_out = os.path.join(align_dir, "{0}.bam".format(sample))
if file_exists(final_out):
data = dd.set_work_bam(data, final_out)
data["bam_report"] = glob.glob(os.path.join(out_dir, "*report.txt"))[0]
data = dd.update_summary_qc(data, "bismark", base=data["bam_report"])
return data
bismark = config_utils.get_program("bismark", config)
# bismark uses 5 threads/sample and ~12GB RAM/sample (hg38)
resources = config_utils.get_resources("bismark", data["config"])
max_cores = dd.get_num_cores(data)
max_mem = config_utils.convert_to_bytes(resources.get("memory", "1G")) / (1024.0 * 1024.0)
instances = calculate_bismark_instances(max_cores, max_mem * max_cores)
# override instances if specified in the config
if resources and resources.get("bismark_threads"):
instances = resources.get("bismark_threads")
logger.info(f"Using {instances} bismark instances - overriden by resources")
bowtie_threads = 1
if resources and resources.get("bowtie_threads"):
bowtie_threads = resources.get("bowtie_threads")
logger.info(f"Using {bowtie_threads} bowtie threads per bismark instance")
kit = kits.KITS.get(dd.get_kit(data), None)
directional = "--non_directional" if kit and not kit.is_directional else ""
other_opts = resources.get("options", [])
other_opts = " ".join([str(x) for x in other_opts]).strip()
fastq_files = " ".join([fastq_file, pair_file]) if pair_file else fastq_file
safe_makedir(align_dir)
cmd = "{bismark} {other_opts} {directional} --bowtie2 --temp_dir {tx_out_dir} --gzip --parallel {instances} -p {bowtie_threads} -o {tx_out_dir} --unmapped {ref_file} {fastq_file} "
if pair_file:
fastq_file = "-1 %s -2 %s" % (fastq_file, pair_file)
raw_bam = glob.glob(out_dir + "/*bismark*bt2*bam")
if not raw_bam:
with tx_tmpdir() as tx_out_dir:
run_message = "Running Bismark aligner on %s and %s" % (fastq_file, ref_file)
do.run(cmd.format(**locals()), run_message, None)
shutil.move(tx_out_dir, out_dir)
raw_bam = glob.glob(out_dir + "/*bismark*bt2*bam")
# don't process bam in the bismark pipeline!
utils.symlink_plus(raw_bam[0], final_out)
data = dd.set_work_bam(data, final_out)
data["bam_report"] = glob.glob(os.path.join(out_dir, "*report.txt"))[0]
data = dd.update_summary_qc(data, "bismark", base=data["bam_report"])
return data
def _process_bam(bam_file, in_fastq, sample, reference, config):
broad_runner = broad.runner_from_config(config)
names = {'rg': in_fastq, 'library': 'WGBS_LIB', 'pl': 'Illumina', 'pu': 'R1', 'sm': in_fastq, 'sample': sample}
out_fix_bam = broad_runner.run_fn("picard_fix_rgs", bam_file, names)
order_bam = utils.append_stem(out_fix_bam, "_order")
broad_runner.run_fn("picard_reorder", out_fix_bam, reference, order_bam)
bam.index(order_bam, config)
# order_bam = _set_quality(order_bam)
# bam.index(order_bam, config)
return order_bam
def remap_index_fn(ref_file):
"""Map sequence references to equivalent bismark indexes
"""
return os.path.join(os.path.dirname(os.path.dirname(ref_file)), "bismark")
def _set_quality(in_bam):
"""
change all quality to 255
"""
bam = pysam.AlignmentFile(in_bam, "rb")
out_file = utils.append_stem(in_bam, "_normqual")
if file_exists(out_file):
return out_file
with file_transaction(out_file) as tx_out:
with pysam.AlignmentFile(tx_out, "wb", template=bam) as out_handle:
for read in bam.fetch():
read.mapping_quality = 255
out_handle.write(read)
return out_file
def index(ref_file, out_dir, data):
"""Create a bismark index in the defined reference directory.
"""
(ref_dir, local_file) = os.path.split(ref_file)
gtf_file = dd.get_transcriptome_gtf(data, default=dd.get_gtf_file(data))
bismark = config_utils.find_program("bismark", data["config"])
if not utils.file_exists(gtf_file):
raise ValueError("%s not found, could not create a bismark index." % (gtf_file))
if not utils.file_exists(out_dir):
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
num_cores = dd.get_cores(data)
other_opts = config_utils.get_resources("bismark", data["config"]).get("options", [])
other_opts = " ".join([str(x) for x in other_opts]).strip()
cmd = "{bismark} {other_opts} --bowtie2 -p {num_cores} -n 1 -o {tx_out_dir} --basename {sample} --unmapped {ref_file} {in_fastq}"
do.run(cmd.format(**locals()), "Index STAR")
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
shutil.move(tx_out_dir, out_dir)
return out_dir
def calculate_bismark_instances(cores, memory):
"""
calculate number of parallel bismark instances to run, based on disussion here
https://github.com/FelixKrueger/Bismark/issues/96
cores and memory here are the maximum amounts available for us to use
"""
BISMARK_CORES = 1
BOWTIE_CORES_PER_INSTANCE = 2
SAMTOOLS_CORES_PER_INSTANCE = 1
CORES_PER_INSTANCE = BOWTIE_CORES_PER_INSTANCE + SAMTOOLS_CORES_PER_INSTANCE
GENOME_MEMORY_GB = 12
INSTANCE_MEMORY_GB = 10
available_instance_memory = memory - GENOME_MEMORY_GB
instances_in_memory = max(available_instance_memory / INSTANCE_MEMORY_GB, 1)
available_instance_cores = cores - BISMARK_CORES
instances_in_cores = max(available_instance_cores / CORES_PER_INSTANCE, 1)
instances = int(min(instances_in_memory, instances_in_cores))
logger.info(f"{cores} cores and {memory} memory are available. Spinning up {instances} instances of bismark.")
return instances
|
py | 1a392218bbb2769fc0e453003919ecf7388114d9 | # -*- coding: utf-8 -*-
"""
@Time:2020/8/20 22:25
@Auth"JunLin615
@File:example1.py
@IDE:PyCharm
@Motto:With the wind light cloud light mentality, do insatiable things
@email:[email protected]
"""
from shiningspectrum import pretreatment
from shiningspectrum import database
import os
import matplotlib.pyplot as plt
import numpy as np
from shiningspectrum import shiningnoodles
import time
def main():
time_start = time.time()
data_path=os.getcwd()+"\\Prepare incoming data"
file_data1 = database.read_file(data_path, "氯仿.txt")#67-66-3
file_data2 = database.read_file(data_path, "甲苯.txt")#108-88-3
list_spectrum1 = database.data_extraction(file_data1)
list_spectrum2 = database.data_extraction(file_data2)
list_spectrum_compound = [list_spectrum1[0],list(np.array(list_spectrum1[1])+np.array(list_spectrum2[1]))]
plt.figure(figsize=(10,10))
plt.plot(list_spectrum_compound[0],list_spectrum_compound[1],"k-",label="list_spectrum_compound")
plt.plot(list_spectrum1[0],list_spectrum1[1],"r-",label="list_spectrum1")
plt.plot(list_spectrum2[0],list_spectrum2[1],"b-",label="list_spectrum2")
plt.legend()
#plt.show()
all_spectrum = database.read_all("raman_database")
list_of_compounds = shiningnoodles.shining2noodles(all_spectrum)
#1.1
#unknown_x, unknown_y = shiningnoodles.combine_spectra(list_of_compounds[1], list_of_compounds[3])#noodles会对数据进行插值,减慢运行速度。
#__
#1.2
unknown_x = np.asarray(list_spectrum_compound[0])
unknown_y = np.asarray(list_spectrum_compound[1])
#__
x_data, y_data, y_base = pretreatment.autbaseline(unknown_x, unknown_y, deg=4, max_it=200,tol=None)#shining重构
unknow_compound = {"title":"unkonw","x":x_data,"y":y_data}
#2.1
#A=shiningnoodles.component_testing(peak_algorithm = "noodles")#"shining" 或者 "noodles",前者快,后者准。
#2.2
A = shiningnoodles.component_testing(peak_algorithm="shining")
print("准备启动多进程")
unkonw_peak_center, unknown_peak_assignments, percentages = A.peak_assignment(unknow_compound, list_of_compounds)
cnames = {
'aqua': '#00FFFF',
'aquamarine': '#7FFFD4',
'azure': '#F0FFFF',
'blue': '#0000FF',
'blueviolet': '#8A2BE2',
'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0',
'chartreuse': '#7FFF00',
'chocolate': '#D2691E',
'coral': '#FF7F50',
'cornflowerblue': '#6495ED',
'cornsilk': '#FFF8DC',
'crimson': '#DC143C',
'cyan': '#00FFFF',
'darkblue': '#00008B',
'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B',
'darkgray': '#A9A9A9',
'darkgreen': '#006400',
'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B',
'darkolivegreen': '#556B2F',
'darkorange': '#FF8C00',
'darkorchid': '#9932CC',
'darkred': '#8B0000',
'darksalmon': '#E9967A',
'darkseagreen': '#8FBC8F',
'darkslateblue': '#483D8B',
'darkslategray': '#2F4F4F',
'darkturquoise': '#00CED1',
'darkviolet': '#9400D3',
'deeppink': '#FF1493',
'deepskyblue': '#00BFFF',
'dimgray': '#696969',
'dodgerblue': '#1E90FF',
'firebrick': '#B22222',
'floralwhite': '#FFFAF0',
'forestgreen': '#228B22',
'fuchsia': '#FF00FF',
'gainsboro': '#DCDCDC',
'ghostwhite': '#F8F8FF',
'gold': '#FFD700',
'goldenrod': '#DAA520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#ADFF2F',
'honeydew': '#F0FFF0',
'hotpink': '#FF69B4',
'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0',
'khaki': '#F0E68C',
'lavender': '#E6E6FA',
'lavenderblush': '#FFF0F5',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD',
'lightblue': '#ADD8E6',
'lightcoral': '#F08080',
'lightcyan': '#E0FFFF',
'lightgoldenrodyellow': '#FAFAD2',
'lightgreen': '#90EE90',
'lightgray': '#D3D3D3',
'lightpink': '#FFB6C1',
'lightsalmon': '#FFA07A',
'lightseagreen': '#20B2AA',
'lightskyblue': '#87CEFA',
'lightslategray': '#778899',
'lightsteelblue': '#B0C4DE',
'lightyellow': '#FFFFE0',
'lime': '#00FF00',
'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'maroon': '#800000',
'mediumaquamarine': '#66CDAA',
'mediumblue': '#0000CD',
'mediumorchid': '#BA55D3',
'mediumpurple': '#9370DB',
'mediumseagreen': '#3CB371',
'mediumslateblue': '#7B68EE',
'mediumspringgreen': '#00FA9A',
'mediumturquoise': '#48D1CC',
'mediumvioletred': '#C71585',
'midnightblue': '#191970',
'mintcream': '#F5FFFA',
'mistyrose': '#FFE4E1',
'moccasin': '#FFE4B5',
'navajowhite': '#FFDEAD',
'navy': '#000080',
'oldlace': '#FDF5E6',
'olive': '#808000',
'olivedrab': '#6B8E23',
'orange': '#FFA500',
'orangered': '#FF4500',
'orchid': '#DA70D6',
'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98',
'paleturquoise': '#AFEEEE',
'palevioletred': '#DB7093',
'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9',
'peru': '#CD853F',
'pink': '#FFC0CB',
'plum': '#DDA0DD',
'powderblue': '#B0E0E6',
'purple': '#800080',
'red': '#FF0000',
'rosybrown': '#BC8F8F',
'royalblue': '#4169E1',
'saddlebrown': '#8B4513',
'salmon': '#FA8072',
'sandybrown': '#FAA460',
'seagreen': '#2E8B57',
'seashell': '#FFF5EE',
'sienna': '#A0522D',
'silver': '#C0C0C0',
'skyblue': '#87CEEB',
'slateblue': '#6A5ACD',
'slategray': '#708090',
'snow': '#FFFAFA',
'springgreen': '#00FF7F',
'steelblue': '#4682B4',
'tan': '#D2B48C',
'teal': '#008080',
'thistle': '#D8BFD8',
'tomato': '#FF6347',
'turquoise': '#40E0D0',
'violet': '#EE82EE',
'wheat': '#F5DEB3',
'white': '#FFFFFF',
'whitesmoke': '#F5F5F5',
'yellow': '#FFFF00',
'yellowgreen': '#9ACD32'}
# colors = ['b', 'r', 'g', 'c', 'm', 'y', 'b']
colors = list(cnames.keys())
# fig = plt.figure(figsize=(10, 4), dpi=300)
time_end = time.time()
time_time = time_end - time_start
title_s = 'Elapsed time {}. The sample to be tested contains :'.format(time_time)
lower_confidence_limit = 50 #大于该值认为含有,小于等于该值认为不含有。
for key in percentages:
if percentages[key] > lower_confidence_limit:
title_s = title_s + key + ';'
plt.figure(figsize=(10, 10))
plt.plot(unknown_x, unknown_y, color='black', label='Unknown Spectrum')
for i, _ in enumerate(unkonw_peak_center):
plt.axvline(x=unkonw_peak_center[i], color=colors[i],
label=unknown_peak_assignments[i],
linestyle='--')
plt.legend(loc=0, framealpha=1)
plt.xlabel('Wavenumber (cm$^{-1}$)', fontsize=12)
plt.ylabel('Counts', fontsize=12)
plt.ylim(unknown_y.min(), unknown_y.max())
plt.xlim(unknown_x.min(), unknown_x.max())
plt.title(title_s)
plt.show()
print(percentages)
if __name__ == '__main__':
main() |
py | 1a3922c4b572b70faf0ce9b8ca53211da38267ab | """
Contabo API
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import io
import json
import logging
import re
import ssl
from urllib.parse import urlencode
from urllib.parse import urlparse
from urllib.request import proxy_bypass_environment
import urllib3
import ipaddress
from pfruck_contabo.exceptions import ApiException, UnauthorizedException, ForbiddenException, NotFoundException, ServiceException, ApiValueError
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
def __init__(self, resp):
self.urllib3_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = resp.data
def getheaders(self):
"""Returns a dictionary of the response headers."""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""Returns a given response header."""
return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
def __init__(self, configuration, pools_size=4, maxsize=None):
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
# maxsize is the number of requests to host that are allowed in parallel # noqa: E501
# Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
# cert_reqs
if configuration.verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
addition_pool_args = {}
if configuration.assert_hostname is not None:
addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
if configuration.retries is not None:
addition_pool_args['retries'] = configuration.retries
if configuration.socket_options is not None:
addition_pool_args['socket_options'] = configuration.socket_options
if maxsize is None:
if configuration.connection_pool_maxsize is not None:
maxsize = configuration.connection_pool_maxsize
else:
maxsize = 4
# https pool manager
if configuration.proxy and not should_bypass_proxies(
configuration.host, no_proxy=configuration.no_proxy or ''):
self.pool_manager = urllib3.ProxyManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=configuration.ssl_ca_cert,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
proxy_url=configuration.proxy,
proxy_headers=configuration.proxy_headers,
**addition_pool_args
)
else:
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=configuration.ssl_ca_cert,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
**addition_pool_args
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True,
_request_timeout=None):
"""Perform requests.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
'PATCH', 'OPTIONS']
if post_params and body:
raise ApiValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, float)): # noqa: E501,F821
timeout = urllib3.Timeout(total=_request_timeout)
elif (isinstance(_request_timeout, tuple) and
len(_request_timeout) == 2):
timeout = urllib3.Timeout(
connect=_request_timeout[0], read=_request_timeout[1])
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
# Only set a default Content-Type for POST, PUT, PATCH and OPTIONS requests
if (method != 'DELETE') and ('Content-Type' not in headers):
headers['Content-Type'] = 'application/json'
if query_params:
url += '?' + urlencode(query_params)
if ('Content-Type' not in headers) or (re.search('json',
headers['Content-Type'], re.IGNORECASE)):
request_body = None
if body is not None:
request_body = json.dumps(body)
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'multipart/form-data':
# must del headers['Content-Type'], or the correct
# Content-Type which generated by urllib3 will be
# overwritten.
del headers['Content-Type']
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
# Pass a `string` parameter directly in the body to support
# other content types than Json when `body` argument is
# provided in serialized form
elif isinstance(body, str) or isinstance(body, bytes):
request_body = body
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided
arguments. Please check that your arguments match
declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
# log response body
logger.debug("response body: %s", r.data)
if not 200 <= r.status <= 299:
if r.status == 401:
raise UnauthorizedException(http_resp=r)
if r.status == 403:
raise ForbiddenException(http_resp=r)
if r.status == 404:
raise NotFoundException(http_resp=r)
if 500 <= r.status <= 599:
raise ServiceException(http_resp=r)
raise ApiException(http_resp=r)
return r
def GET(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def DELETE(self, url, headers=None, query_params=None, body=None,
_preload_content=True, _request_timeout=None):
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def POST(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PUT(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PATCH(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
# end of class RESTClientObject
def is_ipv4(target):
""" Test if IPv4 address or not
"""
try:
chk = ipaddress.IPv4Address(target)
return True
except ipaddress.AddressValueError:
return False
def in_ipv4net(target, net):
""" Test if target belongs to given IPv4 network
"""
try:
nw = ipaddress.IPv4Network(net)
ip = ipaddress.IPv4Address(target)
if ip in nw:
return True
return False
except ipaddress.AddressValueError:
return False
except ipaddress.NetmaskValueError:
return False
def should_bypass_proxies(url, no_proxy=None):
""" Yet another requests.should_bypass_proxies
Test if proxies should not be used for a particular url.
"""
parsed = urlparse(url)
# special cases
if parsed.hostname in [None, '']:
return True
# special cases
if no_proxy in [None, '']:
return False
if no_proxy == '*':
return True
no_proxy = no_proxy.lower().replace(' ', '');
entries = (
host for host in no_proxy.split(',') if host
)
if is_ipv4(parsed.hostname):
for item in entries:
if in_ipv4net(parsed.hostname, item):
return True
return proxy_bypass_environment(parsed.hostname, {'no': no_proxy})
|
py | 1a3922d885d3dc4c1fe772290bb1b23e6100feb0 | # Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loops for DP-FTRL."""
import os.path
import pprint
import random
import time
from typing import Any, Callable, Dict, List, Optional, Tuple
from absl import logging
import tensorflow as tf
import tensorflow_federated as tff
from dp_ftrl import dp_fedavg
from utils import utils_impl
from tensorboard.plugins.hparams import api as hp
def _setup_outputs(root_output_dir: str, experiment_name: str,
hparam_dict: Dict[str, Any]):
"""Set up directories for experiment loops, write hyperparameters to disk."""
if not experiment_name:
raise ValueError('experiment_name must be specified.')
program_state_dir = os.path.join(root_output_dir, 'checkpoints',
experiment_name)
program_state_mngr = tff.program.FileProgramStateManager(program_state_dir)
logging_mngr = tff.program.LoggingReleaseManager()
results_dir = os.path.join(root_output_dir, 'results', experiment_name)
csv_file = os.path.join(results_dir, 'experiment.metrics.csv')
metrics_mngr = tff.program.CSVFileReleaseManager(
file_path=csv_file, key_fieldname='round_num')
summary_logdir = os.path.join(root_output_dir, 'logdir', experiment_name)
tensorboard_mngr = tff.program.TensorBoardReleaseManager(summary_logdir)
if hparam_dict:
summary_writer = tf.summary.create_file_writer(summary_logdir)
hparam_dict['metrics_file'] = csv_file
hparams_file = os.path.join(results_dir, 'hparams.csv')
utils_impl.atomic_write_series_to_csv(hparam_dict, hparams_file)
with summary_writer.as_default():
hp.hparams({k: v for k, v in hparam_dict.items() if v is not None})
logging.info('Writing...')
logging.info(' program state to: %s', program_state_dir)
logging.info(' metrics csv to: %s', csv_file)
logging.info(' summaries to: %s', summary_logdir)
return program_state_mngr, [logging_mngr, metrics_mngr, tensorboard_mngr]
def _write_metrics(metrics_mngrs, metrics, round_num):
"""Atomic metrics writer which inlines logic from MetricsHook class."""
if not isinstance(metrics, dict):
raise TypeError('metrics should be type `dict`.')
if not isinstance(round_num, int):
raise TypeError('round_num should be type `int`.')
logging.info('Metrics at round {:d}:\n{!s}'.format(round_num,
pprint.pformat(metrics)))
for metrics_mngr in metrics_mngrs:
metrics_mngr.release(metrics, round_num)
def run(
iterative_process: tff.templates.IterativeProcess,
client_datasets_fn: Callable[[int, int], Tuple[List, int]], # pylint: disable=g-bare-generic
validation_fn: Callable[[Any], Dict[str, float]],
total_epochs: int,
total_rounds: int,
experiment_name: str,
train_eval_fn: Optional[Callable[[Any], Dict[str, float]]] = None,
test_fn: Optional[Callable[[Any], Dict[str, float]]] = None,
root_output_dir: Optional[str] = '/tmp/fed_opt',
hparam_dict: Optional[Dict[str, Any]] = None,
rounds_per_eval: Optional[int] = 1,
rounds_per_checkpoint: Optional[int] = 50,
rounds_per_train_eval: Optional[int] = 100,
server_state_epoch_update_fn: Optional[Callable[
[dp_fedavg.ServerState], dp_fedavg.ServerState]] = None):
"""Runs federated training for a given `tff.templates.IterativeProcess`.
We assume that the iterative process has the following functional type
signatures:
* `initialize`: `( -> S@SERVER)` where `S` represents the server state.
* `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
represents the server state, `{B*}` represents the client datasets,
and `T` represents a python `Mapping` object.
Args:
iterative_process: A `tff.templates.IterativeProcess` instance to run.
client_datasets_fn: Function accepts integer arguments (the round number and
the epoch) and returns a tuple of a list of client datasets to use as data
data for that round, and the updated epoch index.
validation_fn: A callable accepting the `model` attribute of the iterative
process state and returning a dict of evaluation metrics. Used to compute
validation metrics throughout the training process.
total_epochs: Nubmer of total epochs if using `ClientIDShuffler` to shuffle
clients. Use 0 when sampling clients and control by `total_rounds`.
total_rounds: The number of federated training rounds to perform. If
`ClientIDShuffler` is used for `client_datasets_fn`, the total rounds will
take the minimum of `total_rounds` and rounds_per_epoch*`total_epochs`.
experiment_name: The name of the experiment being run. This will be appended
to the `root_output_dir` for purposes of writing outputs.
train_eval_fn: An optional callable accepting the `model` attribute of the
iterative process state and returning a dict of evaluation metrics. Used
to compute training metrics over the entire training dataset throughout
the course of the iterative process. If set to `None`, no such evaluation
is done.
test_fn: An optional callable accepting the `model` attribute of the
iterative process state and returning a dict of test metrics. Used to
compute test metrics at the end of the training process.
root_output_dir: The name of the root output directory for writing
experiment outputs.
hparam_dict: An optional dictionary specifying hyperparameters of the
experiment. If provided, the hyperparameters will be written to CSV.
rounds_per_eval: How often to compute validation metrics.
rounds_per_checkpoint: How often to checkpoint the iterative process state.
If you expect the job to restart frequently, this should be small. If no
interruptions are expected, this can be made larger.
rounds_per_train_eval: How often to compute metrics over the entire training
dataset. Note that this is only done if a `train_eval_fn` argument is
supplied.
server_state_epoch_update_fn: A function to update the `SeverState` outside
of TFF iterative process. It is called at the beginning of each epoch
traversing all the clients. Used to restart tree for FTRL algorithm.
Returns:
The final `state` of the iterative process after training.
"""
if not isinstance(iterative_process, tff.templates.IterativeProcess):
raise TypeError('iterative_process should be type '
'`tff.templates.IterativeProcess`.')
if not callable(client_datasets_fn):
raise TypeError('client_datasets_fn should be callable.')
if not callable(validation_fn):
raise TypeError('validation_fn should be callable.')
if train_eval_fn is not None and not callable(train_eval_fn):
raise TypeError('train_eval_fn should be callable.')
if test_fn is not None and not callable(test_fn):
raise TypeError('test_fn should be callable.')
logging.info('Starting iterative_process training loop...')
initial_state = iterative_process.initialize()
program_state_mngr, metrics_mngrs = _setup_outputs(root_output_dir,
experiment_name,
hparam_dict)
logging.info('Asking checkpoint manager to load checkpoint.')
state, round_num = program_state_mngr.load_latest(initial_state)
# TODO(b/172867399): we disable restarting from checkpoint when shuffling
# client IDs by epochs. Non-trivial amount of change has to be made to make
# sure disjoint clients are used cross rounds when restarts. A better design
# of client dataset generator with random seed instead of `client_datasets_fn`
# accepting `epoch` as argument, can help.
epoch = 0 if total_epochs > 0 else -1
if state is None or total_epochs > 0:
state = initial_state
round_num = 0
logging.info('Initializing experiment from scratch at round %d.', round_num)
else:
logging.info('Restarted from checkpoint round %d', round_num)
round_num += 1 # Increment to avoid overwriting current checkpoint
loop_start_time = time.time()
while epoch < total_epochs and round_num < total_rounds:
data_prep_start_time = time.time()
prev_epoch = epoch
federated_train_data, epoch = client_datasets_fn(round_num, epoch)
# Server state is updated outside of TFF iterative process, which is used
# to restart the tree in DP-FTRL.
if server_state_epoch_update_fn is not None and epoch == prev_epoch + 1:
logging.info('External server state update at epoch %d', epoch)
state = server_state_epoch_update_fn(state)
train_metrics = {
'prepare_datasets_secs': time.time() - data_prep_start_time
}
training_start_time = time.time()
state, _ = iterative_process.next(state, federated_train_data)
train_metrics['training_secs'] = time.time() - training_start_time
logging.info('Round {:2d}, {:.2f}s per round in average.'.format(
round_num, (time.time() - loop_start_time) / (round_num + 1)))
if (round_num % rounds_per_checkpoint == 0 or
round_num == total_rounds - 1):
save_checkpoint_start_time = time.time()
try:
program_state_mngr.save(state, round_num)
except Exception: # pylint: disable=broad-except
logging.info('Checkpoint saving exception: %s', Exception)
train_metrics['save_checkpoint_secs'] = (
time.time() - save_checkpoint_start_time)
metrics = {'train': train_metrics}
if train_eval_fn and round_num % rounds_per_train_eval == 0:
# Compute metrics over the entire training dataset
train_eval_start = time.time()
train_eval_metrics = train_eval_fn(state.model)
train_eval_metrics['evaluate_secs'] = time.time() - train_eval_start
metrics['train_eval'] = train_eval_metrics
if round_num % rounds_per_eval == 0:
# Compute validation metrics
evaluate_start_time = time.time()
validation_metrics = validation_fn(state.model)
validation_metrics['evaluate_secs'] = time.time() - evaluate_start_time
metrics['eval'] = validation_metrics
_write_metrics(metrics_mngrs, metrics, round_num)
round_num += 1
# Final metrics evaluation once the training has completed
metrics = {}
# Validation metrics
evaluate_start_time = time.time()
validation_metrics = validation_fn(state.model)
validation_metrics['evaluate_secs'] = time.time() - evaluate_start_time
metrics['eval'] = validation_metrics
# Training set metrics
if train_eval_fn:
train_eval_start = time.time()
train_eval_metrics = train_eval_fn(state.model)
train_eval_metrics['evaluate_secs'] = time.time() - train_eval_start
metrics['train_eval'] = train_eval_metrics
# Test set metrics
if test_fn:
test_start_time = time.time()
test_metrics = test_fn(state.model)
test_metrics['evaluate_secs'] = time.time() - test_start_time
metrics['test'] = test_metrics
_write_metrics(metrics_mngrs, metrics, round_num)
return state
class ClientIDShuffler(object):
"""Shuffling clients in federated learning for DP-FTRL."""
def __init__(self,
clients_per_round: int,
client_data: tff.simulation.datasets.ClientData,
drop_remainder: bool = True):
self._client_ids = list(client_data.client_ids)
self._clients_per_round = clients_per_round
self._drop_remainder = drop_remainder
self._epoch = 0
self._start_index = 0
def _shuffle_client_ids(self):
random.shuffle(self._client_ids)
self._start_index = 0
self._epoch += 1
def sample_client_ids(self, round_num: int, epoch: int) -> Tuple[List, int]: # pylint: disable=g-bare-generic
"""Returns sampled client IDs and the updated epoch index.
This function can be used as `client_datasets_fn` in `training_loop.run`.
Args:
round_num: the current round index.
epoch: the current epoch index.
"""
if epoch != self._epoch:
raise ValueError(
'Epoch index for client shuffling does not match: {} vs {}'.format(
epoch, self._epoch))
end_index = min(self._start_index + self._clients_per_round,
len(self._client_ids))
sampled_ids = self._client_ids[self._start_index:end_index]
skip_remainder_flag = (
self._drop_remainder and
(end_index + self._clients_per_round) > len(self._client_ids))
if skip_remainder_flag or end_index >= len(self._client_ids):
logging.info(
'shuffling clients at epoch %d, round %d, client start index %d',
epoch, round_num, self._start_index)
self._shuffle_client_ids()
else:
self._start_index = end_index
return sampled_ids, self._epoch
|
py | 1a39232a0a2c0c915dbae603c6c08c6c04744abc | # -*- coding: utf-8 -*-
#
# Copyright 2018-2022 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Creating external connectors
"""
import json
import sys
from .world import world, setup_module, teardown_module, show_doc, show_method
from . import create_source_steps as source_create
from . import create_external_steps as connector_create
class TestExternalConnector(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully creating an external connector:
Given I create an external connector from environment vars
And I wait until the external connector is ready less than <conn_wait> secs
And I update the external connector with args <args>
And the external connector has arguments <args>
# And I create a source from the external connector id
# Then the source has arguments "<source_args>"
"""
show_doc(self.test_scenario1)
headers = ["conn_wait", "args"]
examples = [
['20', '{"name": "my connector name"}']]
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
connector_create.i_create_external_connector(self)
connector_create.the_external_connector_is_finished(
self, example["conn_wait"])
connector_create.i_update_external_connector_with(
self, example["args"])
connector_create.the_external_connector_is_finished(
self, example["conn_wait"])
connector_create.external_connector_has_args(
example["args"])
"""
args = {"source": "postgresql",
"externalconnector_id": world.external_connector["resource"][18:],
"query": "SELECT * FROM public.iris"}
source_create.i_create_using_connector(self, \
{"source": "postgresql",
"externalconnector_id": world.external_connector["resource"][18:],
"query": "SELECT * FROM public.iris"})
source_create.the_source_is_finished(self, example[3])
source_create.source_has_args(self, json.dumps({"external_data": args}))
"""
|
py | 1a39239ec79f9ff3d11c4a8e7d579388166b5415 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2020
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
__version__ = '12.8'
|
py | 1a39239f89710d280f27ec3b394141f8ec573114 | # -*- coding: utf-8 -*-
"""
@date: 2021/7/20 下午10:27
@file: operation.py
@author: zj
@description:
"""
import os
import torch
from zcls.config.key_word import KEY_OUTPUT
from slim.config import cfg
from slim.model.build import build_model
from slim.prune.build import build_prune
from slim.util.profile import computer_flops_and_params, compute_model_time
def load_model(config_file, data_shape=(1, 3, 224, 224), device=torch.device('cpu')):
cfg.merge_from_file(config_file)
model = build_model(cfg).to(device)
# print(model)
computer_flops_and_params(model)
compute_model_time(data_shape, model, device)
return model, cfg.MODEL.RECOGNIZER.NAME
def prune_model(arch_name, model, ratio=0.2, minimum_channels=8, divisor=8):
pruned_ratio, threshold = build_prune(model,
model_name=arch_name,
ratio=ratio,
minimum_channels=minimum_channels,
divisor=divisor,
)
computer_flops_and_params(model)
compute_model_time((1, 3, 224, 224), model, torch.device('cpu'))
return model, pruned_ratio, threshold
def save_model(model, model_name):
data = torch.randn(1, 3, 224, 224)
res = model(data)[KEY_OUTPUT]
print(res.shape)
output_dir = os.path.split(os.path.abspath(model_name))[0]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
torch.save(model, model_name) |
py | 1a392485ba24dd19eb40981dda8e3e4f33295394 | # Generated by Django 2.1.15 on 2020-09-11 08:49
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
py | 1a3925378a51ff915f65f6c0a7c2f315f4170bba | from __future__ import division
import numpy as np
from warnings import warn
__all__ = ['img_as_float', 'img_as_int', 'img_as_uint', 'img_as_ubyte',
'img_as_bool', 'dtype_limits']
dtype_range = {np.bool_: (False, True),
np.bool8: (False, True),
np.uint8: (0, 255),
np.uint16: (0, 65535),
np.int8: (-128, 127),
np.int16: (-32768, 32767),
np.float32: (-1, 1),
np.float64: (-1, 1)}
integer_types = (np.uint8, np.uint16, np.int8, np.int16)
_supported_types = (np.bool_, np.bool8,
np.uint8, np.uint16, np.uint32,
np.int8, np.int16, np.int32,
np.float32, np.float64)
if np.__version__ >= "1.6.0":
dtype_range[np.float16] = (-1, 1)
_supported_types += (np.float16, )
def dtype_limits(image, clip_negative=True):
"""Return intensity limits, i.e. (min, max) tuple, of the image's dtype.
Parameters
----------
image : ndarray
Input image.
clip_negative : bool
If True, clip the negative range (i.e. return 0 for min intensity)
even if the image dtype allows negative values.
"""
imin, imax = dtype_range[image.dtype.type]
if clip_negative:
imin = 0
return imin, imax
def convert(image, dtype, force_copy=False, uniform=False):
"""
Convert an image to the requested data-type.
Warnings are issued in case of precision loss, or when negative values
are clipped during conversion to unsigned integer types (sign loss).
Floating point values are expected to be normalized and will be clipped
to the range [0.0, 1.0] or [-1.0, 1.0] when converting to unsigned or
signed integers respectively.
Numbers are not shifted to the negative side when converting from
unsigned to signed integer types. Negative values will be clipped when
converting to unsigned integers.
Parameters
----------
image : ndarray
Input image.
dtype : dtype
Target data-type.
force_copy : bool
Force a copy of the data, irrespective of its current dtype.
uniform : bool
Uniformly quantize the floating point range to the integer range.
By default (uniform=False) floating point values are scaled and
rounded to the nearest integers, which minimizes back and forth
conversion errors.
References
----------
(1) DirectX data conversion rules.
http://msdn.microsoft.com/en-us/library/windows/desktop/dd607323%28v=vs.85%29.aspx
(2) Data Conversions.
In "OpenGL ES 2.0 Specification v2.0.25", pp 7-8. Khronos Group, 2010.
(3) Proper treatment of pixels as integers. A.W. Paeth.
In "Graphics Gems I", pp 249-256. Morgan Kaufmann, 1990.
(4) Dirty Pixels. J. Blinn.
In "Jim Blinn's corner: Dirty Pixels", pp 47-57. Morgan Kaufmann, 1998.
"""
image = np.asarray(image)
dtypeobj = np.dtype(dtype)
dtypeobj_in = image.dtype
dtype = dtypeobj.type
dtype_in = dtypeobj_in.type
if dtype_in == dtype:
if force_copy:
image = image.copy()
return image
if not (dtype_in in _supported_types and dtype in _supported_types):
raise ValueError("can not convert %s to %s." % (dtypeobj_in, dtypeobj))
def sign_loss():
warn("Possible sign loss when converting negative image of type "
"%s to positive image of type %s." % (dtypeobj_in, dtypeobj))
def prec_loss():
warn("Possible precision loss when converting from "
"%s to %s" % (dtypeobj_in, dtypeobj))
def _dtype(itemsize, *dtypes):
# Return first of `dtypes` with itemsize greater than `itemsize`
return next(dt for dt in dtypes if itemsize < np.dtype(dt).itemsize)
def _dtype2(kind, bits, itemsize=1):
# Return dtype of `kind` that can store a `bits` wide unsigned int
c = lambda x, y: x <= y if kind == 'u' else x < y
s = next(i for i in (itemsize, ) + (2, 4, 8) if c(bits, i * 8))
return np.dtype(kind + str(s))
def _scale(a, n, m, copy=True):
# Scale unsigned/positive integers from n to m bits
# Numbers can be represented exactly only if m is a multiple of n
# Output array is of same kind as input.
kind = a.dtype.kind
if n == m:
return a.copy() if copy else a
elif n > m:
# downscale with precision loss
prec_loss()
if copy:
b = np.empty(a.shape, _dtype2(kind, m))
np.floor_divide(a, 2**(n - m), out=b, dtype=a.dtype,
casting='unsafe')
return b
else:
a //= 2**(n - m)
return a
elif m % n == 0:
# exact upscale to a multiple of n bits
if copy:
b = np.empty(a.shape, _dtype2(kind, m))
np.multiply(a, (2**m - 1) // (2**n - 1), out=b, dtype=b.dtype)
return b
else:
a = np.array(a, _dtype2(kind, m, a.dtype.itemsize), copy=False)
a *= (2**m - 1) // (2**n - 1)
return a
else:
# upscale to a multiple of n bits,
# then downscale with precision loss
prec_loss()
o = (m // n + 1) * n
if copy:
b = np.empty(a.shape, _dtype2(kind, o))
np.multiply(a, (2**o - 1) // (2**n - 1), out=b, dtype=b.dtype)
b //= 2**(o - m)
return b
else:
a = np.array(a, _dtype2(kind, o, a.dtype.itemsize), copy=False)
a *= (2**o - 1) // (2**n - 1)
a //= 2**(o - m)
return a
kind = dtypeobj.kind
kind_in = dtypeobj_in.kind
itemsize = dtypeobj.itemsize
itemsize_in = dtypeobj_in.itemsize
if kind == 'b':
# to binary image
if kind_in in "fi":
sign_loss()
prec_loss()
return image > dtype_in(dtype_range[dtype_in][1] / 2)
if kind_in == 'b':
# from binary image, to float and to integer
result = image.astype(dtype)
if kind != 'f':
result *= dtype(dtype_range[dtype][1])
return result
if kind in 'ui':
imin = np.iinfo(dtype).min
imax = np.iinfo(dtype).max
if kind_in in 'ui':
imin_in = np.iinfo(dtype_in).min
imax_in = np.iinfo(dtype_in).max
if kind_in == 'f':
if np.min(image) < -1.0 or np.max(image) > 1.0:
raise ValueError("Images of type float must be between -1 and 1.")
if kind == 'f':
# floating point -> floating point
if itemsize_in > itemsize:
prec_loss()
return image.astype(dtype)
# floating point -> integer
prec_loss()
# use float type that can represent output integer type
image = np.array(image, _dtype(itemsize, dtype_in,
np.float32, np.float64))
if not uniform:
if kind == 'u':
image *= imax
else:
image *= imax - imin
image -= 1.0
image /= 2.0
np.rint(image, out=image)
np.clip(image, imin, imax, out=image)
elif kind == 'u':
image *= imax + 1
np.clip(image, 0, imax, out=image)
else:
image *= (imax - imin + 1.0) / 2.0
np.floor(image, out=image)
np.clip(image, imin, imax, out=image)
return image.astype(dtype)
if kind == 'f':
# integer -> floating point
if itemsize_in >= itemsize:
prec_loss()
# use float type that can exactly represent input integers
image = np.array(image, _dtype(itemsize_in, dtype,
np.float32, np.float64))
if kind_in == 'u':
image /= imax_in
# DirectX uses this conversion also for signed ints
#if imin_in:
# np.maximum(image, -1.0, out=image)
else:
image *= 2.0
image += 1.0
image /= imax_in - imin_in
return image.astype(dtype)
if kind_in == 'u':
if kind == 'i':
# unsigned integer -> signed integer
image = _scale(image, 8 * itemsize_in, 8 * itemsize - 1)
return image.view(dtype)
else:
# unsigned integer -> unsigned integer
return _scale(image, 8 * itemsize_in, 8 * itemsize)
if kind == 'u':
# signed integer -> unsigned integer
sign_loss()
image = _scale(image, 8 * itemsize_in - 1, 8 * itemsize)
result = np.empty(image.shape, dtype)
np.maximum(image, 0, out=result, dtype=image.dtype, casting='unsafe')
return result
# signed integer -> signed integer
if itemsize_in > itemsize:
return _scale(image, 8 * itemsize_in - 1, 8 * itemsize - 1)
image = image.astype(_dtype2('i', itemsize * 8))
image -= imin_in
image = _scale(image, 8 * itemsize_in, 8 * itemsize, copy=False)
image += imin
return image.astype(dtype)
def img_as_float(image, force_copy=False):
"""Convert an image to double-precision floating point format.
Parameters
----------
image : ndarray
Input image.
force_copy : bool
Force a copy of the data, irrespective of its current dtype.
Returns
-------
out : ndarray of float64
Output image.
Notes
-----
The range of a floating point image is [0.0, 1.0] or [-1.0, 1.0] when
converting from unsigned or signed datatypes, respectively.
"""
return convert(image, np.float64, force_copy)
def img_as_uint(image, force_copy=False):
"""Convert an image to 16-bit unsigned integer format.
Parameters
----------
image : ndarray
Input image.
force_copy : bool
Force a copy of the data, irrespective of its current dtype.
Returns
-------
out : ndarray of uint16
Output image.
Notes
-----
Negative input values will be clipped.
Positive values are scaled between 0 and 65535.
"""
return convert(image, np.uint16, force_copy)
def img_as_int(image, force_copy=False):
"""Convert an image to 16-bit signed integer format.
Parameters
----------
image : ndarray
Input image.
force_copy : bool
Force a copy of the data, irrespective of its current dtype.
Returns
-------
out : ndarray of uint16
Output image.
Notes
-----
The values are scaled between -32768 and 32767.
If the input data-type is positive-only (e.g., uint8), then
the output image will still only have positive values.
"""
return convert(image, np.int16, force_copy)
def img_as_ubyte(image, force_copy=False):
"""Convert an image to 8-bit unsigned integer format.
Parameters
----------
image : ndarray
Input image.
force_copy : bool
Force a copy of the data, irrespective of its current dtype.
Returns
-------
out : ndarray of ubyte (uint8)
Output image.
Notes
-----
Negative input values will be clipped.
Positive values are scaled between 0 and 255.
"""
return convert(image, np.uint8, force_copy)
def img_as_bool(image, force_copy=False):
"""Convert an image to boolean format.
Parameters
----------
image : ndarray
Input image.
force_copy : bool
Force a copy of the data, irrespective of its current dtype.
Returns
-------
out : ndarray of bool (`bool_`)
Output image.
Notes
-----
The upper half of the input dtype's positive range is True, and the lower
half is False. All negative values (if present) are False.
"""
return convert(image, np.bool_, force_copy)
|
py | 1a3925f6add7fb618bd9766fcbcbf8f1d522d426 | # -*- coding: utf-8 -*-
import re
from mobify.source import MobifySource
class DziejePlSource(MobifySource):
HEADER = u"""
<h1>{title}</h1>
<p><strong>{lead}</strong></p>
<p><small>{author}</small><br></p>
"""
FOOTER = u"""
<br><br>
<hr>
<p><small>
Wszelkie materiały (w szczególności depesze agencyjne, zdjęcia, grafiki, filmy) zamieszczone w niniejszym
Portalu chronione są przepisami ustawy z dnia 4 lutego 1994 r. o prawie autorskim i prawach pokrewnych oraz ustawy
z dnia 27 lipca 2001 r. o ochronie baz danych. Materiały te mogą być wykorzystywane wyłącznie na postawie stosownych
umów licencyjnych. Jakiekolwiek ich wykorzystywanie przez użytkowników Portalu, poza przewidzianymi przez przepisy
prawa wyjątkami, w szczególności dozwolonym użytkiem osobistym, bez ważnej umowy licencyjnej jest zabronione.
</small></p>
<p><small><strong>Źródło</strong>: <a href="{url}">{url}</a></small></p>
"""
@staticmethod
def is_my_url(url):
return '//dzieje.pl/' in url
def get_inner_html(self):
article = self.xpath('//*[@property="schema:text"]')
# clean up the HTML
xpaths = [
'blockquote[p]'
]
article = self.remove_nodes(article, xpaths)
html = self.get_node_html(article)
return html
def get_html(self):
# add a title and a footer
return '\n'.join([
self.HEADER.format(title=self.get_title(), author=self.get_author(), lead=self.get_lead()).strip(),
self.get_inner_html(),
self.FOOTER.format(url=self._url).strip()
]).strip()
def get_title(self):
# <meta property="og:title" content="Radio w Poznaniu rozpoczęło nadawanie 90 lat temu" />
return self.get_node('//meta[@property="og:title"]', attr='content').strip()
def get_lead(self):
# <meta property="og:description" content="90 lat temu, 24 kwietnia 1927 roku nadawanie rozpoczęła..." />
lead = self.get_node('//meta[@property="og:description"]', attr='content').strip()
return lead.strip() if lead else ''
def get_author(self):
return 'dzieje.pl'
def get_language(self):
return 'pl'
|
py | 1a3926b835ea2684307d47f2d85fef172c99f1d2 | import atexit
import configparser
import telegram
import time
from binance.client import Client
from datetime import datetime
#Bot and config instances
config = None
telegram_bot = None
binance_bot = None
#General variables
refresh_rate = -1
chat_id = -1
time_started = -1
#Order-related variables
orders = []
send_open = False
send_closed = False
def start():
init()
process()
def init():
#Print starting string, and save the current time
print("Starting your bot...\n")
global time_started
time_started = str(datetime.now())
#Initialize the config parser and bots
initConfig()
initTelegram()
initBinance()
#Print and send success messages
print("\nBot started successfully... Beginning processing...\n")
telegram_bot.send_message(chat_id=chat_id, text=("Your bot instance (" + time_started + ") has started. Monitoring has begun."))
def initConfig():
#Initialize the config file
global config
config = configparser.ConfigParser()
config.read("config.ini")
#Raise an error if it cannot import the configuration datasets
if('GENERAL' in config):
global refresh_rate, send_open, send_closed
refresh_rate = config['GENERAL']['refresh_rate']
send_open = config['GENERAL'].getboolean('update_open')
send_closed = config['GENERAL'].getboolean('update_closed')
else:
raise ValueError("Cannot find the 'General' dataset in your config file.")
def initTelegram():
#Telegram
if('TELEGRAM' in config):
#Initialize the Telegram bot
global telegram_bot, chat_id
telegram_bot = telegram.Bot(token=config['TELEGRAM']['token'])
chat_id = config['TELEGRAM']['chat_id']
#Fetches and prints bot ID to ensure valid token
try:
print("Your Telegram API information is valid (Bot ID: {})!".format(telegram_bot.get_me().id))
except:
print("Your Telegram API information is invalid.")
else:
raise ValueError("Cannot find the 'Telegram' dataset in your config file.")
def initBinance():
#Binance
if('BINANCE' in config):
#Initialize the Binance bot
global binance_bot
binance_bot = Client(config['BINANCE']['key'], config['BINANCE']['secret'])
#Fetches your BTC address to test successful API information
btc_address = binance_bot.get_deposit_address(asset='BTC')
if(btc_address.get("success") == True):
print("Your Binance API information is valid!")
else:
print("Your Binance API information is invalid.")
else:
raise ValueError("Cannot find the 'Binance' dataset in your config file.")
def process():
while(1):
#Fetches all open orders
open_orders = binance_bot.get_open_orders()
#Iterate through all orders fetched from Binance and append any new orders
for order in open_orders:
if not order in orders:
addOrder(order)
#Iterate through all orders in our own list and remove any orders not on Binance anymore
for order in orders:
if not order in open_orders:
closeOrder(order)
#Sleep for refresh_rate amount of seconds
time.sleep((int(refresh_rate)*60))
def addOrder(order):
#Add the order to the global list
global orders
orders.append(order)
#Send a message to Telegram if enabled in the config
if(send_open):
msg = "*{} Order Created*\n\n*Symbol*: {}\n*Price*: {}\n*Quantity*: {}".format(order.get("side").capitalize(), order.get("symbol"), order.get("price"), order.get("origQty"))
telegram_bot.send_message(chat_id=chat_id, text=msg, parse_mode=telegram.ParseMode.MARKDOWN)
def closeOrder(order):
#Remove the order from the global list
global orders
orders.remove(order)
#Send a message to Telegram if enabled in the config
if(send_closed):
msg = "*{} Order Closed*\n\n*Symbol*: {}\n*Price*: {}\n*Quantity*: {}".format(order.get("side").capitalize(), order.get("symbol"), order.get("price"), order.get("origQty"))
telegram_bot.send_message(chat_id=chat_id, text=msg, parse_mode=telegram.ParseMode.MARKDOWN)
@atexit.register
def exit():
#Send an "exiting bot" message before exiting script
telegram_bot.send_message(chat_id=chat_id, text=("Your bot instance (" + time_started + ") has exited. Monitoring has stopped."))
print("Bot has exited successfully...")
if(__name__ == "__main__"):
start() |
py | 1a3926cac5a8a4fcdc4c4fa6679e46964b5c707c | # Generated by Django 2.0.3 on 2018-03-16 20:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vigil', '0036_remove_alertchannel_old_logic_actions'),
]
operations = [
migrations.RemoveField(
model_name='channellogicaction',
name='alert_channel',
),
migrations.RemoveField(
model_name='channellogicaction',
name='logic_action',
),
migrations.AddField(
model_name='logicalertaction',
name='notification_actions',
field=models.ManyToManyField(blank=True, null=True, to='vigil.NotificationAlertAction'),
),
migrations.DeleteModel(
name='ChannelLogicAction',
),
]
|
py | 1a3927a8b456479f00883eaa3d7f53caa6cb1194 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import mock
from airflow.providers.amazon.aws.secrets.ssm import AwsSsmSecretsBackend
@mock.patch("airflow.providers.amazon.aws.secrets.ssm.AwsSsmSecretsBackend.get_conn_uri")
def test_aws_ssm_get_connections(mock_get_uri):
mock_get_uri.side_effect = ["scheme://user:pass@host:100"]
conn_list = AwsSsmSecretsBackend().get_connections("fake_conn")
conn = conn_list[0]
assert conn.host == 'host'
|
py | 1a39283f907da7a2280fdbf32619bcf78b3a1ede | import datetime
import logging
import requests
import auditUtils
import constants
import dbUtil
import rds_config
import restUtils
import tables
logger = logging.getLogger()
logger.setLevel(logging.INFO)
SUCCESS_RESPONSE = {"statusCode": 200}
def prepUserSettings(conn, chatId, userId, app):
setting = getUserSettings(conn, chatId, app)
# logger.info(setting)
if setting is None:
# logger.info("creating setting")
createSetting(conn, chatId, userId, app)
setting = getUserSettings(conn, chatId, app)
return setting
def getUserSettings(conn, chatId, app):
sql = "select * "
sql += " from " + tables.tgsetting
sql += " where chatId = %s and app=%s "
sql += " limit 1"
# logger.info(sql)
setting = dbUtil.getSingleRecordNoJsonWithConn(sql, conn, (chatId, app))
# logger.info(setting)
return setting
def createSetting(conn, chatId, userId, app):
sql = "insert into " + tables.tgsetting
sql += " (chatId, userId, app, lastUpdated) "
sql += " values (%s, %s, %s, %s) "
conn.cursor().execute(sql, (chatId, userId, app, datetime.datetime.now()))
conn.commit()
def reset(conn, chatId, app):
# conn = dbUtil.getConnection()
sql = "update " + tables.tgsetting
sql += " set address=null, favPools=null, addressAlias=null, "
sql += " lastUpdated=%s where chatId=%s and app=%s "
conn.cursor().execute(sql, (datetime.datetime.now(), chatId, app))
conn.commit()
# conn.close()
def updateSetting(conn, chatId, attribute, value, app):
logger.info("in update setting for attributes")
logger.info((chatId, attribute, value))
sql = "update " + tables.tgsetting
sql += " set " + attribute + "=%s, "
sql += " lastUpdated=%s where chatId=%s and app=%s "
logger.info(sql)
params = (value, datetime.datetime.now(), chatId, app)
# logger.info(params)
conn.cursor().execute(sql, params)
conn.commit()
def getParam(message, key, error):
msg = message.strip()
if not message.startswith(key):
return error
poolMsg = message[len(key):]
# logger.info(poolMsg)
return poolMsg.strip()
def getStakingNetworkInfo():
url = "https://hmny-t.co/networks/mainnet/staking_network_info"
return restUtils.callRestJson(url)
def getBidRanks(current):
response = getStakingNetworkInfo()
# logger.info(response)
if current:
table = response["table"]
return table
else:
table = response["live_table"]
return table
def getValByAddress(validators, address):
# logger.info("in getValByAddress: address: {})".format(address))
for val in validators:
if val["address"] == address:
return val
return None
def validateAndGetText(chatId, message, startTime):
if "text" not in message:
return None, missingTextError(chatId, message, startTime)
textCmd = str(message["text"])
# if not textCmd.startswith("/"):
# return None, SUCCESS_RESPONSE
if "@" in textCmd:
textCmd = textCmd.replace(constants.TG_BOT_NAME_1, "")
textCmd = textCmd.replace(constants.TG_BOT_NAME_2, "")
return textCmd, None
def missingTextError(chatId, message, startTime):
logger.info("processing missingTextError for message: ")
logger.info(message)
conn = dbUtil.getConnection()
auditUtils.auditBot(conn, chatId, "Missing Text", startTime)
conn.close()
response = "The bot appears to have hit a bug. Please contact the bot operator @bigb4ever \n"
respData = {"text": response.encode("utf8"), "chat_id": chatId}
url = rds_config.TG_BASE_URL + "/sendMessage?parse_mode=html"
requests.post(url, respData)
return SUCCESS_RESPONSE
def getAllAddressesForTxAlert(conn, app):
sql = "select address from " + tables.tgsetting
sql += " where app = %s and "
sql += " address is not null and notifyAddress='True'"
addresses = dbUtil.listResultsWithConn(sql, conn, app)
uniqueAddresses = set()
for addressDetails in addresses:
logger.info("processing address: {}".format(addressDetails))
address = addressDetails["address"]
addressList = address.split(",")
for value in addressList:
uniqueAddresses.add(value)
logger.info("after adding list ({}), unique addresses are: {}".format(addressList, uniqueAddresses))
logger.info("list of unique addresses is: {}".format(uniqueAddresses))
return uniqueAddresses
|
py | 1a3928510818b399780ffc80548f8196536cde89 | # -*- coding: utf-8 -*-
START_BUTTONS = ("❓ Id", "⏱ Time", "🙈 Memes", "🎭 Send feedback", "🌤 Weather", "👥 Support", "🔗 Link shortner", "✒️ Calculate", "🎧 Mp3Tag", "📡 IP Geolocation", "◻️ QR Code", "💵 Exchange rate") # SPLITS 3 PER LINE
language = { "en" : {
"Assalomu_alaykum_user" : "Assalomu alaykum!\n\nUshbu bot orqali siz taksi va yuk tashish mashinalarga buyurtmalar berishingiz mumkin!\n\n🔵 Siz qanday mashinaga buyurtma bermoqchisiz?",
"Enter_all" : "Hamma ma'lumotlarni to'liq kiritng! 📝",
"Where" : " qayerga borishi kerak? ⬅️\nYoki qayerdan qayerga borishi kerak? 🔁\n\nManzillarni kiriting:",
"What" : " nima olib kelishi kerak? ⬅️\nYoki nima olib va yetkazib berishi kerak? 🔁",
"Yuborish" : "chilarga buyurtmangizni yuborasizmi?",
"Yes_send_it" : "Ha, yuborish",
"No_cancel_it" : "Yo'q, bekor qilish",
"Bekor_qilish_user" : "🔵 Unda siz qanday mashinaga buyurtma bermoqchisiz?",
"Main_user" : "🔵 Siz qanday mashinaga buyurtma bermoqchisiz?",
"Settings_user" : "Sozlamalarda siz tilni o'zgartirishingiz mumkin!",
"Help_message_user" : "Assalomu alaykum.\n\nUshbu telegram bot orqali siz buyurtmachi bo'lsangiz taxi va yuk tashish mashinalariga buyurtma berishingiz mumkin.\n\nAgar taxi yoki yuk tashish mashinasini egasi bo'lsangiz ushbu telegram bot orqali buyurtmalar qabul qilishingiz mumkin.\nBuning uchun ishchilarning kodni kiriting:",
"Wait_user" : "⏳ Haydovchilardan javobni kuting!",
"Resend_user" : "Agar yaqin orada sizga javob kelmasa unda haydovchilar band yoki buyurtma ma'qul bo'lmagan yoki ishda emas bo'lishadi.",
"Resend_button_user" : "Qaytadan buyurtma berish",
"Price_user" : "Narx: ",
"Currency_user" : " so'm",
"Dont_care" : "♾ Farqisi yo'q ♾",
"Register_user" : "Ro'yhatdan o'tish",
"Assalomu_alaykum_ishchi" : "Assalomu alaykum!\nUshbu bot orqali siz taksi yoki yuk tashish mashinangizga buyurtmalar olishingiz mumkin!",
"Not_in_list_ishchi" : "Siz bizning ro'yhatda xali yo'q ekansiz! Ro'yhatdan o'ting!",
"Register_ishchi" : "Ro'yhatdan o'tish",
"Settings_ishchi" : "Sozlamalarda siz tilni o'zgartirishingiz mumkin!",
"Start_register" : "Taxi yoki yuk tashish mashinangizni ushbu botda ro'yhatdan o'tkazing!",
"Registration_car_ishchi" : "Mashinangizni ro'yhatdan o'tkazish uchun mashinangizni turini tanlang:",
"Registration_car_number_ishchi" : "Mashinangizni davlat raqamini kiriting:",
"Car_number_order_ishchi" : "Ushbu tartibda kiriting: '60A600AA'",
"Enter_name_ishchi" : "Endi ismingizni kiriting:",
"Ishda_ishchi" : "Xozirda siz Ishdasiz!",
"Tanaffus_ishchi" : "Xozir sizda Tanaffus!",
"Ishni_boshlash" : "Ishni boshlash",
"Tanaffus_qilish" :"Tanaffus qilish",
"Bajargan_ishlarim" : "Bajargan ishlarim",
"Bajarilgan_ishlar" : "Bajargan ishlarim:",
"Bajarilgan_ishlar_not" : "Sizda bajargan ishlaringiz xali beri yo'q!",
"Back_to_menu" : "Asosiy menyuga qaytish.",
"Send_button" : "Yuborish",
"Message_is_deleted" : "Buyurtma sizdan o'chirildi ✖️",
"Sana" : "Sana: ",
"Vaqti" : "\nVaqti: ",
"Narxi" : "\nNarxi: ",
"Buyurtma" : "\nBuyurtma: ",
"Manzili" : "\nManzili: ",
"IshchiRegistration" : "Yuk tashish avtomobilingizni ushbu botda ro'yhatdan o'tkazing",
"IshchiRegistration_car" : ":",
"START_MSG" : "Hey! 😊 \n \nWelcome to the *ZigZagBot*! 😱🚀 \nDeveloped by @WebShark25! \n \nAll bot commands: \n💢 _/help_ - Get help message \n💢 _/time <city>_ - Gets current time in any timezone! \n💢 _/calc_ - Lets do some maths \n💢 _/support_ - Chat with us! \n💢 _/sendcontact_ - Forward contact to admin \n💢 _Send feedback_ - Send feedback! \n💢 _/echo <msg>_ - Echoes the message \n💢 _/short <link>_ - Shorts the link! \n💢 _/weather <city>_ - Gets weather! \n💢 _/mp3tag <artist>||<title>_ - Edits audios tags! \n💢 _/tocontact <phone>||<name>_ - Turns string to Telegram contact! \n💢 _/qrcode <text>_ - QR Code creator!! \n💢 _/ip <IP/Hostname>_ - Get IP location & more! \n💢 _/rate <currency>_ - Get latest exchange rates! \n💢 _/addcounter add_ - Add seen counter to your message! \n💢 _/lmgtfy <text>_ - Let me google that for you! \n💢 _/download <link>_ - Download a file and send it using telegram! \n💢 _/addreply <syntax>_ - Learn the bot how to respond! \n💢 _/id_ - Get your ID & Group's ID \n \n_More commands comming soon!_ \n \nI Hope you enjoy it! ",
"START_BUTTONS" : ("❓ Id", "⏱ Time", "🙈 Memes", "🎭 Send feedback", "🌤 Weather", "👥 Support", "🔗 Link shortner", "✒️ Calculate", "🎧 Mp3Tag", "📡 IP Geolocation", "◻️ QR Code", "💵 Exchange rate"),
"SHOWED_BUTTONS_MSG" : "Here you are ;)",
"TEST_MSG" : "This is a test message.",
"HELP" : "Help",
"CHANNEL" : "Channel",
"SETTINGS" : "Settings (Beta)",
"SHOW_ALL" : "Show all commands below keyboard",
"INLINE_HELP" : "Inline mode help",
"SHARE_CONTACT_MSG" : "Please share your contact to the bot (in a private message).",
"NO_ECHO_IN_SUPERGP_MSG" : "Unfortunately I wont reply to messages sent in a supergroup to prevent spamming.",
"ECHO_REPLY_MSG" : "Please enter a text so I reply to it!",
"ERROR_MSG" : "Error occured.",
"CONTACT_RECIEVED_MSG" : "New contact recieved:",
"CONTACT_FORWARDED_MSG" : "Contact successfully forwarded!",
"GP_GREETING_MSG" : "Hey *{0}*! \n \nWelcome to the group *{1}* 😊 \n \n_Have fun & Enjoy!_",
"GP_FAREWELL_MSG" : "Oh no 😕 \n*{0} left the group ☹️",
"ID_MSG" : "💢 Your name: *{0}* \n💢 Your ID: *{1}* \n",
"INGP_ID_MSG" : "💢 Group ID: *{2}* \n",
"CHATID_ID_MSG" : "💢 Forwarded chat ID: *{}* \n",
"REPLIED_ID_MSG" : "💢 Users ID: *{}* \n",
"FORWARDED_ID_MSG" : "💢 Forwarded message sender's ID: *{}* \n",
"NON_ADMIN_ADDED_BOT_MSG" : "Im sorry ☹️ \nIm not authorized to join groups by normal members. \nOnly *admins* can add me to groups.",
"BOT_JOINED_MSG" : "Hey😧! I am here, At your service.",
"MESSANGER_JOIN_MSG" : "Now sending feedback. \nPlease enter your message!",
"MESSANGER_SUBMIT_MSG" : "OK. I got your message. Are you sure you want to send it?",
"MESSANGER_CANCEL_MSG" : "Canceled!",
"MESSANGER_LEAVE_MSG" : "Thanks for your feedback! Exiting.",
"WEBSHOT_CAPTION_MSG": "By the ZigZag bot",
"COMMAND_NOT_FOUND" : "Im sorry 😢 But the command you entered does not exists.",
"BANNED_MSG" : ":O You got banned from the bot!",
"UNBANNED_MSG" : "Welcome back! You are unbanned now ;)",
"GP_STATUS_MSG" : "⚡️ All group messages: {0}",
"TIME_MSG" : "⚡️ Date & Time: {0}",
"WAITING_APPROVAL_MESSENGER_MSG" : "⚡️ Your request has been submitted! Please wait for manual approval.",
"ACCEPTED_MESSENGER_MSG" : "⚡️ Your request has been approved! Now chatting with support.",
"DENIED_MESSENGER_MSG" : "⚡️ Im sorry, But your request has been denied.",
"MESSAGE_SENT_MESSENGER_MSG" : "Message sent! Please wait for response",
"KICKED_MESSENGER_MSG" : "You got auto-kicked from the messenger because you spammed alot.",
"ALREADY_IN_MESSENGER_MSG" : "You are already chatting with support! to exit, type '/leave'",
"NOT_IN_MESSENGER_MSG" : "You are not chatting with support! to start, type '/support'",
"LEFT_MESSENGER_MSG" : "⚡️ We hope you enjoyed ;) Leaving the messenger.",
"MEME_NEA_MSG" : "To use this command, you need to follow this format: \n`/meme <type>||<top>||<bottom>` \nFor example: `/meme Gangnam Style||Oppa||Gangnam style!`\n_Remember: No spaces within words and ||s!_ \n\nRemember, this bot supports more than a thousand of memes! Some examples: \n\n💢 Good Guy Greg\n💢 First World Problems\n💢 Scumbag Steve\n💢 Hipster Barista\n💢 Bad Luck Brian\n💢 Success Kid\n💢 Ridiculously Photogenic Guy\n💢 Sudden Clarity Clarence\n💢 College Freshman\n💢 Skeptical Baby\n💢 Gangnam Style\n💢 Talk To Spongebob\n💢 Suspicious Cat",
"CALC_NEA_MSG" : "How can i calculate null? \nYour command should be like this: \n`/calc 5+5*2`",
"CHATTER_NEA_MSG" : "Hey! 😱 \n \nI think you don't know how to work with this. \n \nYou can actually learn me how to respond to some messages 😏 \n \nSimply do: /addreply <Text>||<Response> \n \nFor example, if you want to enter 'hi' and then you want me to say 'hello', you need to execute this: \n /addreply Hi||Hello \n \nIts simple 😌 And also fun 😍",
"CHATTER_INCORRECT_MSG" : "String recieved in an incorrect format..",
"CHATTER_ALREADYDEFINED_MSG" : "Im sorry, this message had already been defined!",
"CHATTER_DONE_MSG" : "Ooo yeah! Now I know if you say `{}`, I Should Answer `{}` :) \nCan you teach me *more*? 😁😁",
"IP_ERROR_MSG" : "Error: \n\n`Invalid IP/Hostname`",
"IP_NEA_MSG" : "Please, enter an IP address or hostname. \n\nExample: `/ip 4.2.2.4`",
"IP_DONE_MSG" : "IP Information for *{}*: \n\n🌍 Country: *{}* \n🏫 City: *{}* \n📡 ISP: *{}* \n⏱ TimeZone: *{}*",
"MP3TAG_NEA_MSG" : "Please use correct syntax: \n`/mp3tag Artist||Title`\nAnd then, send the audio file.",
"MP3TAG_SENDAUDIO_MSG" : "Please send the audio now!",
"MP3COVER_NEA_MSG" : "Please reply to a photo to set it as an audios cover!",
"MP3COVER_REPLYTOPHOTO_MSG" : "Please reply to a photo, nothing else!",
"QRCODE_NEA_MSG" : "Please, enter a text so I can convert it to QR code. \n\nFor example: `/qrcode http://sadeco.ir`",
"SHORTNER_NEA_MSG" : "Please enter a link so I can short it. \nLike: `/short http://google.com`",
"TIME_NEA_MSG" : "Enter a time zone/city/region/etc. please! \n\nExample: `/time Tehran`",
"WWEATHER_NEA_MSG" : "Enter a city so I can tell its weather! :P \n\nExample: `/weather Tehran`",
"INLINE_HELP_MSG" : "*Inline mode help!:* \n \nTo use inline mode, first mention the bots ID (@TheZigZagBot) in your message, then use one of theese syntaxes: \n \n💢echo <message> (_Echoes the message using HTML markup_) \n💢cal <ex> (_Calculator.. Easy as a pie_) \n💢time <city> (_Get time for anywhere! even nowhere!_) \n💢lmgtfy <query> (_Let me google that for you!_) \n💢weather <city> (_Current weather in <city>!_) \n💢hideit <message> (_Hides the message you enter! :D So its un-forwardable._) \n \nExample: `@TheZigZagBot time tehran` \n\nMore options comming *soon*!",
"ADDCOUNTER_NEA_MSG" : "Do you want to know how many people saw your message? Yeah! Now its possible! \nJust type `/addcounter add` and then forward/send your message!",
"LMGTFY_NEA_MSG" : "Let me *Google* that for you! \n\nJust do `/lmgtfy <search>`",
"DOWNLOADER_NEA_MSG" : "Welcome to *ZigZag* downloader! \n\nYou can enter a *link* with a file size below *30MB*, and recieve that file in Telegram! \n\nPlease use this syntax: `/download http://example.com/file.zip` \n\nWarning: You have a limit of *3 files per minute*. Please dont exceed it.",
"DOWNLOADER_WAIT_MSG" : "Im sorry, but you can only have *1 concurrent download*.",
"DOWNLOADER_DL_MSG" : "Retrieving the file from server. Please wait...",
"DOWNLOADER_UP_MSG" : "Uploading the file for you. Please wait...",
"DOWNLOADER_OVERSIZE_MSG" : "The file was too *big*! Maximum file size should be *below 30MB*.",
"DOWNLOADER_ERROR_MSG" : "Im sorry, but an unknown error occured. \nPlease check the *file name* & *file size*, then try again.",
"SETTINGS_WLC_MSG" : "TheZigZag *Settings*! \nPlease, choose a _value_ to edit. \n〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰",
"SETTINGS_LANGUAGE_CHANGED_MSG" : "Success! Language has been updated. \n〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰",
"ADDCOUNTER_SENDNOW_MSG" : "Send your message now!",
"EXCHANGE_NEA_MSG" : "Enter a base currency! \n\nExample: `/rate USD` \n\nAvailable base currencies: `USD, EUR, RUB, AUD, CAD, GBP`",
"CHATBOT_IDK_MSG" : "I don't know how to reply to this 🙁 Teach me by executing /addreply 😶😄",
"INLINE_ENTERTEXT_MSG" : "Please enter a text!",
"INLINE_SUCCESSECHO_MSG" : "Echo your message using HTML parse mode ;)",
"INLINE_ERRORECHO_MSG" : 'Error occured. One of your tags arent closed!',
"INLINE_CALC_MSG" : 'Please enter what you need I calculate.',
"INLINE_HIDEIT_MSG" : 'Please enter a text so I can hide it!',
"INLINE_WEATHER_MSG" : 'Please enter a city!',
"INLINE_SENDWEATHER_MSG" : 'Send current weather of ',
"INLINE_SENDTIME_MSG" : 'Please enter timezone/city/region/etc!',
"INLINE_TIMETIME_MSG" : 'Send current time in ',
"INLINE_LMGTFY_MSG": "Enter a text please!",
"INLINE_LMGTFYSEND_MSG" : "Send it!",
"GP_STATS_MSG" : "Group statistics by the *ZigZag Bot*! \n\n🔶 All messages: *{}*\n🔸\n🔶 Sent voices: *{}*\n🔸\n🔶 Sent audios: *{}*\n🔸\n🔶 Sent photos: *{}*\n🔸\n🔶 Sent files: *{}*\n🔸\n🔶 Sent videos: *{}*\n🔸\n",
"GP_NOTINGP_MSG" : "This statistics only works in groups & supergroups.",
"GP_RULES_NEA_MSG" : "Rules set to null! Please enter something :)",
"GP_RULESSET_MSG" : "Success! Group rules updated.",
"CAPTION_REPLYTOMSG_MSG" : "Please reply to a file!",
"CAPTION_NOCAPTION_MSG" : "Please enter a caption! \n/caption lablablab",
}, "fa" : {
"Assalomu_alaykum_user" : "Здраствуйте!\n\nС этим ботом вы можете заказать такси и грузовики!\n\n🔵 Какой автомобиль вы бы хотели заказать?",
"Enter_all" : "Введите всю информацию в полном объеме! 📝",
"Where" : ", куда должно идти? ⬅️\nИли откуда и куда идти? 🔁\n\nВведите адрес:",
"What" : ", чего должен доставить? ⬅️\nИли чего должен взять и доставить? 🔁",
"Yuborish" : "Вы отправите свой заказ водителям?",
"Yes_send_it" : "Да, отправить",
"No_cancel_it" : "Нет, отменить",
"Bekor_qilish_user" : "🔵 Тогда какой автомобиль вы бы хотели заказать?",
"Main_user" : "🔵 Какой автомобиль вы бы хотели заказать?",
"Settings_user" : "В настройках вы можете изменить язык!",
"Help_message_user" : "Здраствуйте.\n\nЕсли вы являетесь клиентом, с помощью этого телеграм бота вы можете заказать такси и грузовики.\n\nЕсли у вас есть такси или грузовик, вы можете получать заказы через этого телеграм бота.\nДля этого введите код рабочих:",
"Wait_user" : "⏳ Ждите ответа от водителей!",
"Resend_user" : "Если вы не получите ответ в ближайшее время, значит, водители заняты, либо заказ неудовлетворительный или пока не работают.",
"Resend_button_user" : "Заказать заново",
"Price_user" : "Цена: ",
"Currency_user" : " сум",
"Dont_care" : "♾ Любая машина ♾",
"Register_user" : "Регистрация",
"Assalomu_alaykum_ishchi" : "Здраствуйте!\nС этим ботом вы можете получать заказы на ваше такси или грузовик!",
"Not_in_list_ishchi" : "Вас нету в списке!",
"Register_ishchi" : "Регистрация",
"Settings_ishchi" : "В настройках вы можете изменить язык!",
"Start_register" : "Здраствуйте!\nЗарегистрируйте свое такси или грузовик на этом боте!",
"Registration_car_ishchi" : "Зарегистрируйте свою машину\nВыберите тип вашего автомобиля?",
"Registration_car_number_ishchi" : "Введите государственный номер вашего машины:",
"Car_number_order_ishchi" : "Введите в этом порядке: '60A600AA'",
"Enter_name_ishchi" : "Теперь введите свое имя:",
"Ishda_ishchi" : "Вы в настоящее время на работе!",
"Tanaffus_ishchi" : "Вы в настоящее время на перерыве!",
"Ishni_boshlash" : "Начать работать",
"Tanaffus_qilish" :"Сделать перерыв",
"Bajargan_ishlarim" : "Выполненные работы",
"Bajarilgan_ishlar" : "Выполненные работы:",
"Bajarilgan_ishlar_not" : "У вас выполненные работы пока нет!",
"Back_to_menu" : "Вернуться в главное меню.",
"Send_button" : "Отправить",
"Message_is_deleted" : "Заказ был удален с вас ✖️",
"Sana" : "Дата: ",
"Vaqti" : "\nВремя: ",
"Narxi" : "\nЦена: ",
"Buyurtma" : "\nЗаказ: ",
"Manzili" : "\nАдресс: ",
"START_MSG" : "سلام! 😊 \n \nبه بات *زیگ زاگ* خوش آمدید 😱🚀 \nساخته شده توسط @WebShark25! \n \nکامند های بات: \n💢 _/help_ - دریافت راهنما \n💢 _/time <city>_ - دریافت زمان به وقت هرجایی! \n💢 _/calc_ - ماشین حساب! \n💢 _/support_ - با ما صحبت کنید! \n💢 _/sendcontact_ - فوروارد کردن کانتکت به ادمین \n💢 _Send feedback_ - ارسال نظر! \n💢 _/echo <msg>_ - بازگرداندن پیام \n💢 _/short <link>_ - کوتاه کردن لینک! \n💢 _/weather <city>_ - دریافت آب و هوا! \n💢 _/mp3tag <artist>||<title>_ - ادیت کردن تگ های یک آهنگ \n💢 _/tocontact <phone>||<name>_ - تبدیل نوشته به کانتکت تلگرام \n💢 _/qrcode <text>_ - ساخت کد کیو آر!! \n💢 _/ip <IP/Hostname>_ - دریافت موقعیت مکانی یک آیپی! \n💢 _/rate <currency>_ - دریافت آخرین نرخ ارز دنیا! \n💢 _/addcounter add_ - افزودن تعداد مشاهده به پیام شما! \n💢 _/lmgtfy <text>_ - به من اجازه بده آن را برای تو گوگل کنم! \n💢 _/download <link>_ - لینک دهید, فایل در تلگرام تحویل بگیرید! \n💢 _/addreply <syntax>_ - به بات آموزش چت کردن دهید! \n💢 _/id_ - دریافت آیدی \n \n_هرروز با آپدیت های بیشتر در خدمت شما هستیم!_ \n \nامیدواریم که لذت ببرید! ",
"START_BUTTONS" : ("❓ Id", "⏱ Time", "🙈 Memes", "🎭 Send feedback", "🌤 Weather", "👥 Support", "🔗 Link shortner", "✒️ Calculate", "🎧 Mp3Tag", "📡 IP Geolocation", "◻️ QR Code", "💵 Exchange rate"),
"SHOWED_BUTTONS_MSG" : "خدمت شما ;)",
"TEST_MSG" : "This is a test message.",
"HELP" : "راهنما",
"CHANNEL" : "کانال",
"SETTINGS" : "تنظیمات",
"SHOW_ALL" : "مشاهده تمام دستور ها (پایین کیبورد)",
"INLINE_HELP" : "راهنمای اینلاین",
"SHARE_CONTACT_MSG" : "لطفا کانتکت خود را برای بات شیر کنید",
"NO_ECHO_IN_SUPERGP_MSG" : "متاسفانه, من در سوپر گروه ها بدلیل جلوگیری از اسپم, پیام را باز نمیگردانم",
"ECHO_REPLY_MSG" : "لطفا یه پیام وارد کنید تا من آن را بازگردانم!",
"ERROR_MSG" : "مشکلی پیش آمد.",
"CONTACT_RECIEVED_MSG" : "New contact recieved:",
"CONTACT_FORWARDED_MSG" : "شماره تلفن با موفقیت فوروارد شد",
"GP_GREETING_MSG" : "Hey *{0}*! \n \nWelcome to the group *{1}* 😊 \n \n_Have fun & Enjoy!_",
"GP_FAREWELL_MSG" : "Oh no 😕 \n*{0} left the group ☹️",
"ID_MSG" : "💢 نام شما: *{0}* \n💢 آیدی شما: *{1}* \n",
"INGP_ID_MSG" : "💢 آیدی گروه: *{2}* \n",
"CHATID_ID_MSG" : "💢 آیدی چتی که پیام از آن فروارد شده: *{}* \n",
"REPLIED_ID_MSG" : "💢 آیدی یوزر: *{}* \n",
"FORWARDED_ID_MSG" : "💢 آیدی پیام فورواردشده: *{}* \n",
"NON_ADMIN_ADDED_BOT_MSG" : "متاسفم ☹️ \nمن اجازه افزوده شدن در گروه های ناشناخته را ندارم \nتنها *ادمین من* میتواند من را ادد کند.",
"BOT_JOINED_MSG" : "هی😧! من اینجام, درخدمت شما",
"MESSANGER_JOIN_MSG" : "در حال ارسال نظر \nلطفا نظر خود را وارد کنید",
"MESSANGER_SUBMIT_MSG" : "خیلی خب, پیام شما دریافت شد. \nآیا از ارسال آن اطمینان دارید؟",
"MESSANGER_CANCEL_MSG" : "کنسل شد!",
"MESSANGER_LEAVE_MSG" : "با تشکر, نظر ارسال شد!",
"WEBSHOT_CAPTION_MSG": "توسط بات زیگ زاگ",
"COMMAND_NOT_FOUND" : "من متاسفم 😢 اما کامندی که وارد کردی, وجود خارجی ندارد",
"BANNED_MSG" : "وای! شما از بات بلاک شدید!",
"UNBANNED_MSG" : "خوش آمدید! آن بلاک شدید.",
"GP_STATUS_MSG" : "⚡️ All group messages: {0}",
"TIME_MSG" : "⚡️ ساعت و تاریخ: {0}",
"WAITING_APPROVAL_MESSENGER_MSG" : "⚡️ درخواستشما ثبت شد! لطفا تا تایید توسط ادمین صببر کنید",
"ACCEPTED_MESSENGER_MSG" : "⚡️ درخواست شما قبول شد! هم اکنون پیام های شما برای تیم پشتیبانی ارسال میشود",
"DENIED_MESSENGER_MSG" : "⚡️ متاسفم, اما درخواست شما رد شد",
"MESSAGE_SENT_MESSENGER_MSG" : "پیام ارسال شد! لطفا صبور باشید",
"KICKED_MESSENGER_MSG" : "بدلیل اسپم زیاد, بصورت خودکار از چت اخراج شدید",
"ALREADY_IN_MESSENGER_MSG" : "شما هم اکنون در حال چت با پشتیبانی هستید! \nجهت خروج: /leave",
"NOT_IN_MESSENGER_MSG" : "شما با پشتیبانی چت نمیکنید! جهت آغاز: '/support'",
"LEFT_MESSENGER_MSG" : "⚡️ امیدواریم که لذت کافی را برده باشید :) با تشکر از تماس شما",
"MEME_NEA_MSG" : "جهت استفاده از این کامند, شما باید مانند زیر عمل کنید \n`/meme <type>||<top>||<bottom>` \nبرای مثال: `/meme Gangnam Style||Oppa||Gangnam style!`\n_پی نوشت: بین || ها فاصله نگذارد!!_ \n\nپی نوشت 2: این بات بیشتر از 1000 نوع عکس را پشتیبانی میکند! برای مثال: \n\n💢 Good Guy Greg\n💢 First World Problems\n💢 Scumbag Steve\n💢 Hipster Barista\n💢 Bad Luck Brian\n💢 Success Kid\n💢 Ridiculously Photogenic Guy\n💢 Sudden Clarity Clarence\n💢 College Freshman\n💢 Skeptical Baby\n💢 Gangnam Style\n💢 Talk To Spongebob\n💢 Suspicious Cat",
"CALC_NEA_MSG" : "لطفا یک مقدار عددی وارید کنید. \nبرای مثال: \n`/calc 5+5*2`",
"CHATTER_NEA_MSG" : "فکر کنم نمیدونی چجوری با این قسمت کار کنی, درسته؟ \n \nمیتونی به من حرف زدن یاد بدی 😏 \n \nخیلی راحت: /addreply <پیام>||<جواب> \n \nبرای مثال, اگه میخوای تو بگی hi و من جواب بدم hello, باید چنین چیزی رو بزنی \n /addreply Hi||Hello \n \nخیلی آسون 😌 و کاربردی 😍",
"CHATTER_INCORRECT_MSG" : "چیزی که نوشتی برای من نا مفهوم بود!",
"CHATTER_ALREADYDEFINED_MSG" : "متاسفانه, چیزی که وارد کردید قبلا ثبت شده!",
"CHATTER_DONE_MSG" : "هورا! حالا میدونم اگه بگی `{}`, باید جواب بدم `{}` ا\nمیتونی بیشتر یادم بدی؟ 😁😁",
"IP_ERROR_MSG" : "مشکلی پیش آمد: \n\n`آیپی اشتباه`",
"IP_NEA_MSG" : "لطفا, یک آدرس ایپی یا ادرس سایت وارد کنید \n\nمثال: `/ip 4.2.2.4`",
"IP_DONE_MSG" : "مشخصات آیپی *{}*: \n\n🌍 کشور: *{}* \n🏫 شهر: *{}* \n📡 سرویس دهنده: *{}* \n⏱ موقعیت زمانی: *{}*",
"MP3TAG_NEA_MSG" : "لطفا مقدار را وارد کنید: \n`/mp3tag Artist||Title`\nو در پیام بعد, آهنگ را ارسال کنید",
"MP3TAG_SENDAUDIO_MSG" : "خب, حالا آهنگ رو ارسال کن!",
"MP3COVER_NEA_MSG" : "لطفا, این دستور رو در ریپلای به عکسی که میخواهید برروی ترک ست شود, ارسال کنید",
"MP3COVER_REPLYTOPHOTO_MSG" : "لطفا به یک عکس ریپلای کنید.",
"QRCODE_NEA_MSG" : "لطفا, یک لینک وارد کنید تا آن را تبدیل به کیو آر کد بکنم. \n\nمثال: `/qrcode http://sadeco.ir`",
"SHORTNER_NEA_MSG" : "لطفا یک لینک وارد کنید تا آن را کوتاه کنم! \nمثلا: `/short http://google.com`",
"TIME_NEA_MSG" : "لطفا یک موقعیت زمانی وارد کنید! \n\nمثل: `/time Tehran`",
"WWEATHER_NEA_MSG" : "نام یک شهر را وارد کنید تا وضعیت آب و هوای آنرا بگویم! :P \n\nمثال: `/weather Tehran`",
"INLINE_HELP_MSG" : "*راهنمای اینلاین!:* \n \nجهت استفاده از حالت این لاین, ابتدا آیدی بات را وارد کنید (@TheZigZagBot) و سپس یکی از مقادیر زیر را وارد کنید: \n \n💢echo <message> (_بازگرداندن پیام_) \n💢cal <ex> (_ماشین حساب... خیلی ساده_) \n💢lmgtfy <query> (_بزار من برات گوگلش کنم!_) \n💢time <city> (_دریافت زمان به وقت هرجا!_) \n💢weather <city> (_دریافت آب و هوای هر شهر!_) \n💢hideit <message> (_مخفی کردن پیام! که قابل فوروارد نباشد._) \n \nمثال: `@TheZigZagBot time tehran` \n\nآپشن های بیشتر, بزودی در زیگ زاگ!",
"ADDCOUNTER_NEA_MSG" : "دوست داری بدونی چند نفر پیامتو دیدن؟ اره! خیلی سادست!\nفقط بنویس `/addcounter add` و بعد پیامتو ارسال کن",
"LMGTFY_NEA_MSG" : "بزار من برای تو گوگل کنمش! \n\nخیلی راحته: `/lmgtfy <search>`",
"DOWNLOADER_NEA_MSG" : "به داونلودر زیگ زاگ خوش آمدید \n\nمیتونی یه لینک وارد کنی, و فایلش رو در تلگرام بگیری! \n\nمثل این: `/download http://example.com/file.zip` \n\nتوجه: فقط میتونی 3 فایل در دقیقه دانلود کنی.",
"DOWNLOADER_WAIT_MSG" : "متاسفم, اما فقط میتونی یک دانلود همزمان داشته باشی",
"DOWNLOADER_DL_MSG" : "در حال دریافت فایل از سرور...",
"DOWNLOADER_UP_MSG" : "در حال ارسال فایل برای شما...",
"DOWNLOADER_OVERSIZE_MSG" : "فایل خیلی بزرگ بود! حداکثر سایز قابل قبول 30 مگابایت هست",
"DOWNLOADER_ERROR_MSG" : "متاسفم, اما یه مشکل ناشناخته پیش اومد! \nشاید سایز فایل زیادی بزرگه, شایدم مشکل های دیگه.",
"SETTINGS_WLC_MSG" : "تنظیمات زیگ زاگ! لطفا یک گزینه را انتخاب کنید \n〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰",
"SETTINGS_LANGUAGE_CHANGED_MSG" : "عملیات موفقیت آمیز بود! زبان به روز شد \n〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰〰",
"ADDCOUNTER_SENDNOW_MSG" : "لطفا پیام خود را ارسال کنید!",
"EXCHANGE_NEA_MSG" : "لطفا یک ارز را وارد کنید! \n\nبرای مثال: `/rate USD` \n\nارز های موجود: `USD, EUR, RUB, AUD, CAD, GBP`",
"CHATBOT_IDK_MSG" : "من بلد نیستم چجوری به این جواب بدم 🙁 ولی میتونی بهم یاد بدی!: /addreply 😶😄",
"INLINE_ENTERTEXT_MSG" : "لطفا یک پیام وارد کنید",
"INLINE_SUCCESSECHO_MSG" : "ارسال پیام با استفاده از تگ های اچ تی ام ال",
"INLINE_ERRORECHO_MSG" : 'مشکلی پیش آمد. یکی از تگ ها بسته نشده اند!',
"INLINE_CALC_MSG" : 'لطفا یک عملیات وارد کنید',
"INLINE_HIDEIT_MSG" : 'لطفا پیامی وارد کنید تا آن را مخفی کنم!',
"INLINE_WEATHER_MSG" : 'لطفا نام یک شهر را وارد کنید!',
"INLINE_SENDWEATHER_MSG" : 'ارسال وضعیت آب و هوای ',
"INLINE_SENDTIME_MSG" : 'لطفا یک شهر/موقعیت زمانی/محله وارد کنید!',
"INLINE_TIMETIME_MSG" : 'ارسال تاریخ و ساعت ',
"INLINE_LMGTFY_MSG": " لطفا یک چیزی وارد کن برام!",
"INLINE_LMGTFYSEND_MSG" : "ارسال!",
"GP_STATS_MSG" : "آمار گروه با بات *زیگ زاگ*! \n\n🔶 تمامی پیام ها: *{}*\n🔸\n🔶 وویس ها: *{}*\n🔸\n🔶 آهنگ ها: *{}*\n🔸\n🔶 عکس ها: *{}*\n🔸\n🔶 فایل ها: *{}*\n🔸\n🔶 فیلم ها: *{}*\n🔸\n",
"GP_NOTINGP_MSG" : "این آمار, فقط در گروه ها کار میکند.",
"GP_RULES_NEA_MSG" : "لطفا, متن قوانین را وارد کنید!",
"GP_RULESSET_MSG" : "عملیات موفقیت آمیز بود! قوانین گروه ست شد.",
"CAPTION_REPLYTOMSG_MSG" : "لطفا این دستور را در ریپلای به یک فایل ارسال کنید!",
"CAPTION_NOCAPTION_MSG" : "لطفا یک کپشن وارد کنید! \n/caption lablablab",
}
}
|
py | 1a39291b6a900fb31abb685426a65f672d608d0c | from dynaconf import LazySettings
settings = LazySettings(
ENV_FOR_DYNACONF="example", ENVVAR_PREFIX_FOR_DYNACONF="PROGRAM"
)
print(settings.USERNAME)
print(settings.SERVER)
print(settings.PASSWORD)
assertions = {
"SERVER": "fromenv.com",
"USERNAME": "admin",
"PASSWORD": "My5up3r53c4et",
}
for key, value in assertions.items():
found = settings.get(key)
assert found == getattr(settings, key)
assert found == value, f"expected: {key}: [{value}] found: [{found}]"
assertions = {"SERVER": "fromenv.com", "USERNAME": "foo"}
for key, value in assertions.items():
found = settings.from_env("development").get(key)
assert found == getattr(settings.from_env("development"), key)
assert found == value, f"expected: {key}: [{value}] found: [{found}]"
assertions = {
"SERVER": "fromenv.com",
"USERNAME": "foo",
"PASSWORD": "My5up3r53c4et", # keep=True will keep it from [example] env
}
for key, value in assertions.items():
found = settings.from_env("development", keep=True).get(key)
assert found == getattr(settings.from_env("development", keep=True), key)
assert found == value, f"expected: {key}: [{value}] found: [{found}]"
|
py | 1a392a08b64c113dbf1e39d58451e39ad0b658aa | import tensorflow as tf
import tensorflow_datasets as tfds
from t5.data import preprocessors as prep
import functools
import t5
import gin
vocab = 'gs://mesolitica-tpu-general/t5-data-v2/sp10m.cased.ms-en.model'
tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
'node-6', zone='europe-west4-a', project='mesolitica-tpu'
)
TPU_ADDRESS = tpu.get_master()
TPU_TOPOLOGY = '2x2'
print(TPU_ADDRESS)
def dumping_dataset(split, shuffle_files=False):
del shuffle_files
files = [
'gs://mesolitica-tpu-general/t5-data-v2/dumping-news.txt.tsv',
'gs://mesolitica-tpu-general/t5-data-v2/dumping-parliament.txt.tsv',
'gs://mesolitica-tpu-general/t5-data-v2/filtered-dumping-academia.txt.tsv',
'gs://mesolitica-tpu-general/t5-data-v2/filtered-dumping-wiki.txt.tsv'
]
files.extend(tf.io.gfile.glob('gs://mesolitica-tpu-general/t5-data-v2/00.jsonl-*.translated.txt.tsv'))
ds = tf.data.TextLineDataset(files)
ds = ds.map(
functools.partial(
tf.io.decode_csv,
record_defaults=['', ''],
field_delim='\t',
use_quote_delim=False,
),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
ds = ds.map(lambda *ex: dict(zip(['title', 'text'], ex)))
return ds
t5.data.TaskRegistry.remove('dumping_dataset')
t5.data.TaskRegistry.add(
'dumping_dataset',
dataset_fn=dumping_dataset,
splits=['train'],
text_preprocessor=functools.partial(
t5.data.preprocessors.rekey,
key_map={'inputs': None, 'targets': 'text'},
),
token_preprocessor=t5.data.preprocessors.unsupervised,
sentencepiece_model_path=vocab,
metric_fns=[],
)
def question_dataset(split, shuffle_files=False):
del shuffle_files
ds = tf.data.TextLineDataset(
[
'gs://mesolitica-tpu-general/t5-data-v2/qa.tsv',
]
)
ds = ds.map(
functools.partial(
tf.io.decode_csv,
record_defaults=['', ''],
field_delim='\t',
use_quote_delim=False,
),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
ds = ds.map(lambda *ex: dict(zip(['question', 'answer'], ex)))
return ds
def question_preprocessor(ds):
def to_inputs_and_targets(ex):
return {
'inputs': tf.strings.join(['soalan: ', ex['question']]),
'targets': ex['answer'],
}
return ds.map(
to_inputs_and_targets,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
t5.data.TaskRegistry.remove('question_dataset')
t5.data.TaskRegistry.add(
'question_dataset',
dataset_fn=question_dataset,
splits=['train'],
text_preprocessor=[question_preprocessor],
sentencepiece_model_path=vocab,
postprocess_fn=t5.data.postprocessors.lower_text,
metric_fns=[t5.evaluation.metrics.accuracy],
)
def pair_dataset(split, shuffle_files=False):
del shuffle_files
ds = tf.data.TextLineDataset(tf.io.gfile.glob('gs://mesolitica-tpu-general/t5-data-v2/*pair.tsv'))
ds = ds.map(
functools.partial(
tf.io.decode_csv,
record_defaults=['', ''],
field_delim='\t',
use_quote_delim=False,
),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
ds = ds.map(lambda *ex: dict(zip(['text'], ex)))
return ds
t5.data.TaskRegistry.remove('pair_dataset')
t5.data.TaskRegistry.add(
'pair_dataset',
dataset_fn=pair_dataset,
splits=['train'],
text_preprocessor=[prep.next_sentence_prediction],
sentencepiece_model_path=vocab,
postprocess_fn=t5.data.postprocessors.lower_text,
metric_fns=[t5.evaluation.metrics.accuracy],
)
def news_dataset(split, shuffle_files=False):
del shuffle_files
ds = tf.data.TextLineDataset(
[
'gs://mesolitica-tpu-general/t5-data-v2/newstitle.tsv'
]
)
ds = ds.map(
functools.partial(
tf.io.decode_csv,
record_defaults=['', ''],
field_delim='\t',
use_quote_delim=False,
),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
ds = ds.map(lambda *ex: dict(zip(['question', 'answer'], ex)))
return ds
def news_preprocessor(ds):
def to_inputs_and_targets(ex):
return {
'inputs': tf.strings.join(['tajuk: ', ex['question']]),
'targets': ex['answer'],
}
return ds.map(
to_inputs_and_targets,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
t5.data.TaskRegistry.remove('news_dataset')
t5.data.TaskRegistry.add(
'news_dataset',
dataset_fn=news_dataset,
splits=['train'],
text_preprocessor=[news_preprocessor],
sentencepiece_model_path=vocab,
postprocess_fn=t5.data.postprocessors.lower_text,
metric_fns=[t5.evaluation.metrics.accuracy],
)
def summarization_dataset(split, shuffle_files=False):
del shuffle_files
ds = tf.data.TextLineDataset(
[
'gs://mesolitica-tpu-general/t5-data-v2/summarization.tsv'
]
)
ds = ds.map(
functools.partial(
tf.io.decode_csv,
record_defaults=['', ''],
field_delim='\t',
use_quote_delim=False,
),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
ds = ds.map(lambda *ex: dict(zip(['question', 'answer'], ex)))
return ds
def summarization_preprocessor(ds):
def to_inputs_and_targets(ex):
return {
'inputs': tf.strings.join(['ringkasan: ', ex['question']]),
'targets': ex['answer'],
}
return ds.map(
to_inputs_and_targets,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
t5.data.TaskRegistry.remove('summarization_dataset')
t5.data.TaskRegistry.add(
'summarization_dataset',
dataset_fn=summarization_dataset,
splits=['train'],
text_preprocessor=[summarization_preprocessor],
sentencepiece_model_path=vocab,
postprocess_fn=t5.data.postprocessors.lower_text,
metric_fns=[t5.evaluation.metrics.accuracy],
)
def similarity_dataset(split, shuffle_files=False):
del shuffle_files
ds = tf.data.TextLineDataset(
[
'gs://mesolitica-tpu-general/t5-data-v2/snli.tsv',
'gs://mesolitica-tpu-general/t5-data-v2/mnli.tsv'
]
)
ds = ds.map(
functools.partial(
tf.io.decode_csv,
record_defaults=['', ''],
field_delim='\t',
use_quote_delim=False,
),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
ds = ds.map(lambda *ex: dict(zip(['question', 'answer'], ex)))
return ds
def similarity_preprocessor(ds):
def to_inputs_and_targets(ex):
return {
'inputs': ex['question'],
'targets': ex['answer'],
}
return ds.map(
to_inputs_and_targets,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
t5.data.TaskRegistry.remove('similarity_dataset')
t5.data.TaskRegistry.add(
'similarity_dataset',
dataset_fn=similarity_dataset,
splits=['train'],
text_preprocessor=[similarity_preprocessor],
sentencepiece_model_path=vocab,
postprocess_fn=t5.data.postprocessors.lower_text,
metric_fns=[t5.evaluation.metrics.accuracy],
)
def en_ms_dataset(split, shuffle_files=False):
del shuffle_files
ds = tf.data.TextLineDataset(
[
'gs://mesolitica-tpu-general/t5-data-v2/en-ms.tsv'
]
)
ds = ds.map(
functools.partial(
tf.io.decode_csv,
record_defaults=['', ''],
field_delim='\t',
use_quote_delim=False,
),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
ds = ds.map(lambda *ex: dict(zip(['question', 'answer'], ex)))
return ds
def en_ms_preprocessor(ds):
def to_inputs_and_targets(ex):
return {
'inputs': tf.strings.join(['terjemah Inggeris ke Melayu: ', ex['question']]),
'targets': ex['answer'],
}
return ds.map(
to_inputs_and_targets,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
t5.data.TaskRegistry.remove('en_ms_dataset')
t5.data.TaskRegistry.add(
'en_ms_dataset',
dataset_fn=en_ms_dataset,
splits=['train'],
text_preprocessor=[en_ms_preprocessor],
sentencepiece_model_path=vocab,
postprocess_fn=t5.data.postprocessors.lower_text,
metric_fns=[t5.evaluation.metrics.accuracy],
)
def ms_en_dataset(split, shuffle_files=False):
del shuffle_files
ds = tf.data.TextLineDataset(
[
'gs://mesolitica-tpu-general/t5-data-v2/ms-en.tsv'
]
)
ds = ds.map(
functools.partial(
tf.io.decode_csv,
record_defaults=['', ''],
field_delim='\t',
use_quote_delim=False,
),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
ds = ds.map(lambda *ex: dict(zip(['question', 'answer'], ex)))
return ds
def ms_en_preprocessor(ds):
def to_inputs_and_targets(ex):
return {
'inputs': tf.strings.join(['terjemah Melayu ke Inggeris: ', ex['question']]),
'targets': ex['answer'],
}
return ds.map(
to_inputs_and_targets,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
t5.data.TaskRegistry.remove('ms_en_dataset')
t5.data.TaskRegistry.add(
'ms_en_dataset',
dataset_fn=ms_en_dataset,
splits=['train'],
text_preprocessor=[ms_en_preprocessor],
sentencepiece_model_path=vocab,
postprocess_fn=t5.data.postprocessors.lower_text,
metric_fns=[t5.evaluation.metrics.accuracy],
)
def knowledge_graph_dataset(split, shuffle_files=False):
del shuffle_files
ds = tf.data.TextLineDataset(
[
'gs://mesolitica-tpu-general/t5-data-v2/knowledge-graph.tsv'
]
)
ds = ds.map(
functools.partial(
tf.io.decode_csv,
record_defaults=['', ''],
field_delim='\t',
use_quote_delim=False,
),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
ds = ds.map(lambda *ex: dict(zip(['question', 'answer'], ex)))
return ds
def knowledge_graph_preprocessor(ds):
def to_inputs_and_targets(ex):
return {
'inputs': tf.strings.join(['grafik pengetahuan: ', ex['question']]),
'targets': ex['answer'],
}
return ds.map(
to_inputs_and_targets,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
t5.data.TaskRegistry.remove('knowledge_graph_dataset')
t5.data.TaskRegistry.add(
'knowledge_graph_dataset',
dataset_fn=knowledge_graph_dataset,
splits=['train'],
text_preprocessor=[knowledge_graph_preprocessor],
sentencepiece_model_path=vocab,
postprocess_fn=t5.data.postprocessors.lower_text,
metric_fns=[t5.evaluation.metrics.accuracy],
)
def paraphrase_dataset(split, shuffle_files=False):
del shuffle_files
ds = tf.data.TextLineDataset(
[
'gs://mesolitica-tpu-general/t5-data-v2/paraphrase.tsv'
]
)
ds = ds.map(
functools.partial(
tf.io.decode_csv,
record_defaults=['', ''],
field_delim='\t',
use_quote_delim=False,
),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
ds = ds.map(lambda *ex: dict(zip(['question', 'answer'], ex)))
return ds
def paraphrase_preprocessor(ds):
def to_inputs_and_targets(ex):
return {
'inputs': tf.strings.join(['parafrasa: ', ex['question']]),
'targets': ex['answer'],
}
return ds.map(
to_inputs_and_targets,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
t5.data.TaskRegistry.remove('paraphrase_dataset')
t5.data.TaskRegistry.add(
'paraphrase_dataset',
dataset_fn=paraphrase_dataset,
splits=['train'],
text_preprocessor=[paraphrase_preprocessor],
sentencepiece_model_path=vocab,
postprocess_fn=t5.data.postprocessors.lower_text,
metric_fns=[t5.evaluation.metrics.accuracy],
)
t5.data.MixtureRegistry.remove('trivia_all_bahasa')
t5.data.MixtureRegistry.add(
'trivia_all_bahasa',
[
'dumping_dataset',
'question_dataset',
'pair_dataset',
'news_dataset',
'summarization_dataset',
'similarity_dataset',
'en_ms_dataset',
'ms_en_dataset',
'knowledge_graph_dataset',
'paraphrase_dataset'
],
default_rate=1.0,
)
def main(_):
tf.logging.set_verbosity(tf.logging.DEBUG)
gin.parse_config_file(
'gs://mesolitica-tpu-general/t5-data/pretrained_models_small_operative_config.gin'
)
MODEL_SIZE = 'small'
model_parallelism, train_batch_size, keep_checkpoint_max = {
'small': (1, 256, 16),
'base': (2, 128, 8),
'large': (8, 64, 4),
'3B': (8, 16, 1),
'11B': (8, 16, 1),
}[MODEL_SIZE]
model = t5.models.MtfModel(
model_dir='gs://mesolitica-tpu-general/t5-small-v2/',
tpu=TPU_ADDRESS,
tpu_topology=TPU_TOPOLOGY,
model_parallelism=model_parallelism,
batch_size=train_batch_size,
sequence_length={'inputs': 1024, 'targets': 1024},
learning_rate_schedule=0.001,
save_checkpoints_steps=5000,
keep_checkpoint_max=5,
iterations_per_loop=100,
)
model.train(mixture_or_task_name='trivia_all_bahasa', steps=1000000)
if __name__ == '__main__':
tf.app.run()
|
py | 1a392b0777adbc7e3c10dc60300a25bbcd3277f8 | # Andree Avalo
import pickle
def commit(objeto, nombre, ruta):
file = open(ruta+"\\"+nombre+".bin","wb+")
file.write(pickle.dumps(objeto))
file.close()
def rollback(nombre, ruta):
file = open(ruta+"\\"+nombre+".bin","rb")
b = file.read()
file.close()
return pickle.loads(b)
|
py | 1a392c7bbab1055a787045f6879f242305be77ad | import csv
from difflib import SequenceMatcher
import re
import string
import pandas as pd
import urllib.request
from bs4 import BeautifulSoup
from googlesearch import search
from collections import OrderedDict
#import unidecode
'''
['Twenty20 Internationals', 'One-Day Internationals', 'Twenty20','Tests 1st Innings','Tests 2nd Innings' ,
'minor tour','tour','Youth One-Day Internationals','Other Twenty20 matches','Other one-day/limited-overs matches',
'Women\'s Twenty20 Internationals','Women\'s One-Day Internationals','List A','First-class','Other matches']:
'''
def handleSubs(commentry,playerDict_n,teamKeys):
name = ''
if commentry.strip().startswith("run out "):
name = ''.join(x for x in commentry if x in string.printable)
name = name.split('[')[1].split(']')[0]
else:
name = ''.join(x for x in commentry if x in string.printable)
try:
name = name.split('(')[1].split(')')[0]
except:
print('web error',name)
return None
## print('-',name)
playerInfo = {}
search_url = ''
url = ''
## page = ''
## soup = ''
## pees = ''
## try:
## for url in search(name + ' ESPN cricket',tld = 'co.in',lang = 'en', num = 1,stop = 1):
## search_url = url
## break
## except Exception as e:
## print(e)
## exit()
for url in search(name + ' ESPN cricket',tld = 'com', num = 1,stop = 1):
search_url = url
break
page = urllib.request.urlopen(search_url,timeout = 60)
## opener = urllib.build_opener()
## opener.addheaders = [('User-agent','Mozilla/5.0')]
## response = opener.open(search_url)
## page = response.read()
soup = BeautifulSoup(page,'html.parser')
pees = soup.find_all('p',class_='ciPlayerinformationtxt')
val = []
key = []
for pee in pees:
key.append(pee.find('b').get_text())
val.append(pee.find('span').get_text())
# print('url : '+search_url+'name : '+name)
# print(key,val)
playerInfo['short_name'] = name
playerInfo['player_cric_info_link'] = search_url
playerInfo['team'] = teamKeys
cricInfoBatsmanId = str(search_url).split('/')[-1].replace('.html', '')
playerInfo['_id'] = cricInfoBatsmanId + '-' + playerDict_n['match_id']
playerInfo['TeamID'] = playerDict_n['OpponentID']
playerInfo['OpponentID'] = playerDict_n['TeamID']
playerInfo['run_scored'] = '-'
playerInfo['balls_faced'] = '-'
playerInfo['M'] = '-'
playerInfo['4s'] = '-'
playerInfo['6s'] = '-'
playerInfo['strike_rate'] = '-'
playerInfo['MatchURL'] = playerDict_n['MatchURL']
playerInfo['match_id'] = playerDict_n['match_id']
playerInfo['Match_start_Date'] = playerDict_n['Match_start_Date']
playerInfo['Venue'] = playerDict_n['Venue']
playerInfo['innings'] = playerDict_n['innings']
playerInfo['commentry'] = '-'
playerInfo['match_type_text'] = playerDict_n['match_type_text']
if "Full name" in key:
playerInfo['Player_Full_Name'] = val[key.index("Full name")]
else:
playerInfo['Player_Full_Name'] = '-'
if 'Born' in key:
playerInfo['date,place_of_birth'] = val[key.index('Born')].replace('\n','').strip()
else:
playerInfo['date,place_of_birth'] = '-'
if 'Nickname' in key:
playerInfo['Player_Nickname'] = val[key.index('Nickname')]
else:
playerInfo['Player_Nickname'] = '-'
if not 'run_scored' in playerInfo:
playerInfo['run_scored'] = "-"
if not 'balls_faced' in playerInfo:
playerInfo['balls_faced'] = "-"
if not 'strike_rate' in playerInfo:
playerInfo['strike_rate'] = "-"
if not 'balls_bowled' in playerInfo:
playerInfo['balls_bowled'] = "-"
if not 'maiden_overs' in playerInfo:
playerInfo['maiden_overs'] = "-"
if not 'runs_given' in playerInfo:
playerInfo['runs_given'] = "-"
if not 'wicket' in playerInfo:
playerInfo['wicket'] = "-"
if not 'econ' in playerInfo:
playerInfo['econ'] = "-"
if not 'wide_balls' in playerInfo:
playerInfo['wide_balls'] = "-"
if not 'no_balls' in playerInfo:
playerInfo['no_balls'] = "-"
return playerInfo
#csv_file = "D:\\temp\\player_match_stats_29Nov2018.csv"
def Process_CSV(year):
csv_file = year+".csv"
df = pd.read_csv(csv_file)
types = set(x.strip() for x in df['match_type_text'])
matchId_playersDict = {}
def addCommentryField(playerName, shortName, playersDict_1, field):
if " " in playerName:
if SequenceMatcher(None, playerName, shortName).ratio() >= 0.7:
if field in playersDict_1:
catches = playersDict_1[field]
catches += 1
playersDict_1[field] = catches
else:
playersDict_1[field] = 1
else:
shortNameArr = shortName.split(" ")
for sName in shortNameArr:
sName = sName.strip()
if len(sName) > 2:
if SequenceMatcher(None, playerName, sName).ratio() >= 0.9:
if field in playersDict_1:
catches = playersDict_1[field]
catches += 1
playersDict_1[field] = catches
else:
playersDict_1[field] = 1
# below creating a match wise players dict
with open(csv_file, 'r') as csvfile:
csvReader = csv.DictReader(csvfile)
for row in csvReader:
matchType = row['match_type_text']
if matchType.strip() in types:
matchId = row['match_id']
if matchId in matchId_playersDict:
playersDict = matchId_playersDict[matchId]
playerId = row['cric_info_id']
playersDict[playerId] = row
else:
playersDict = {}
playerId = row['cric_info_id']
playersDict[playerId] = row
matchId_playersDict[matchId] = playersDict
print("matchId_playersDict length: ", len(matchId_playersDict))
#print("870881 length: ", len(matchId_playersDict['870881']))
#print("870881 values: ", matchId_playersDict['870881'])
with open(year+'_1.csv', 'w', newline='') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(("PlayerID", "TeamID", "MatchID", "OpponentID", "PlayerProfileURL",
"MatchURL", "MatchFormat", "MatchStartDate", "MatchVenue",'innings', "TeamName", "PlayerName", "PlayerFullName",
"Player Date,Place of Birth", "PlayerNickName", "B_Bat", "R_Bat", "4s", "6s",
"SR_Bat", "BallsBowled","RunsGiven", "MaidenOvers", "W_Bow", "ER_Bow", "Wide_Bow",
"No_Bow",'commentry' ,"catches", "stumped","run_out"))
for matchId, players in matchId_playersDict.items():
teamDict = {}
for playerId, playerDict in players.items():
team = playerDict['team']
if team in teamDict:
teamPlayers = teamDict[team]
teamPlayers.append(playerDict)
else:
teamPlayers = []
teamPlayers.append(playerDict)
teamDict[team] = teamPlayers
print("teamDict length: ", len(teamDict))
teamKeys = list(teamDict.keys())
print("teamKeys: ", teamKeys)
teamPlayers_0 = teamDict[teamKeys[0]]
teamPlayers_1 = teamDict[teamKeys[1]]
for playerDict_0 in teamPlayers_0:
commentry = str(playerDict_0['commentry'])
if ' sub ' in commentry or '(sub ' in commentry:
handle = handleSubs(commentry,playerDict_0,teamKeys[1])
if handle == None:
continue
Flag = True
for i in range(0,len(teamPlayers_1)):
if handle['short_name'] == teamPlayers_1[i]['short_name']:
print(handle['short_name'],teamPlayers_1[i]['short_name'])
Flag = False
break
if Flag:
teamPlayers_1.append(handle)
#commentry = commentry.replace('sub','').replace('(','').replace(')','').replace('[',' ').replace(']',' ')
#print("commentry: ", commentry)
if commentry.strip().startswith("c "):
# catch
#commentry = unidecode.unidecode(commentry)
if ' sub ' in commentry:
commentry = commentry.replace('(','').replace(')','').replace(' sub ',' ')
commentry = ''.join(x for x in commentry if x in string.printable)
playerNameMatch = re.match(r"c ([\w\s'-]+) b ", commentry.strip())
## print(playerNameMatch)
try:
playerName = playerNameMatch.group(1)
except:
print("commentry 0c: ", commentry)
playerNameMatch = re.match(r"c & b ([\w\s'-]+)", commentry.strip())
playerName = playerNameMatch.group(1)
#playerName = unidecode.unidecode(playerName)
for playersDict_1 in teamPlayers_1:
shortName = playersDict_1['short_name']
#shortName = unidecode.unidecode(shortName)
shortName = ''.join(x for x in shortName if x in string.printable)
playerName = playerName.strip()
addCommentryField(playerName, shortName, playersDict_1, 'catches')
elif commentry.strip().startswith("st "):
# stump
#commentry = unidecode.unidecode(commentry)
if ' sub ' in commentry:
commentry = commentry.replace('(','').replace(')','').replace(' sub ',' ')
commentry = ''.join(x for x in commentry if x in string.printable)
playerNameMatch = re.match(r"st ([\w\s'-]+) b ", commentry.strip())
try:
playerName = playerNameMatch.group(1)
except:
print("commentry 0st: ", commentry)
#playerName = unidecode.unidecode(playerName)
for playersDict_1 in teamPlayers_1:
shortName = playersDict_1['short_name']
#shortName = unidecode.unidecode(shortName)
shortName = ''.join(x for x in shortName if x in string.printable)
playerName = playerName.strip()
addCommentryField(playerName, shortName, playersDict_1, 'stumped')
elif commentry.strip().startswith("run out "):
if 'sub' in commentry:
## # if substitute in commentry, then ignore, as he will not be found in players list
## continue
commentry = commentry.replace('[','').replace(']','').replace(' sub ','')
#commentry = unidecode.unidecode(commentry)
commentry = ''.join(x for x in commentry if x in string.printable)
try:
playerNameMatch = re.match(r"run out \(([\w\s'/-]+)", commentry.strip())
playerName = playerNameMatch.group(1)
except:
print('commentry 0ro: ',commentry)
playerNames = []
if '/' in playerName:
playerNames = playerName.split('/')
for playersDict_1 in teamPlayers_1:
shortName = playersDict_1['short_name']
#shortName = unidecode.unidecode(shortName)
shortName = ''.join(x for x in shortName if x in string.printable)
playerName = playerName.strip()
if len(playerNames) > 0:
for player in playerNames:
player = player.strip()
addCommentryField(player, shortName, playersDict_1, 'run_out')
else:
addCommentryField(playerName, shortName, playersDict_1, 'run_out')
for playerDict_1 in teamPlayers_1:
commentry = str(playerDict_1['commentry'])
if ' sub ' in commentry or '(sub ' in commentry:
handle = handleSubs(commentry,playerDict_1,teamKeys[0])
if handle == None:
continue
Flag = True
for i in range(0,len(teamPlayers_0)):
if handle['short_name'] == teamPlayers_0[i]['short_name']:
print(handle['short_name'],teamPlayers_0[i]['short_name'])
Flag = False
break
if Flag:
teamPlayers_0.append(handle)
#commentry = commentry.replace('sub','').replace('(','').replace(')','').replace('[','').replace(']','')
if commentry.strip().startswith("c "):
# catch
#commentry = unidecode.unidecode(commentry)
commentry = ''.join(x for x in commentry if x in string.printable)
#print("commentry: ", commentry)
if ' sub ' in commentry:
commentry = commentry.replace('(','').replace(')','').replace(' sub ',' ')
playerNameMatch = re.match(r"c ([\w\s'-]+) b ", commentry.strip())
try:
playerName = playerNameMatch.group(1)
except:
playerNameMatch = re.match(r"c & b ([\w\s'-]+)", commentry.strip())
playerName = playerNameMatch.group(1)
#playerName = unidecode.unidecode(playerName)
for playersDict_0 in teamPlayers_0:
shortName = playersDict_0['short_name']
#shortName = unidecode.unidecode(shortName)
shortName = ''.join(x for x in shortName if x in string.printable)
playerName = playerName.strip()
addCommentryField(playerName, shortName, playersDict_0, 'catches')
elif commentry.strip().startswith("st "):
# stump
#commentry = unidecode.unidecode(commentry)
if ' sub ' in commentry:
commentry = commentry.replace('(','').replace(')','').replace(' sub ',' ')
commentry = ''.join(x for x in commentry if x in string.printable)
playerNameMatch = re.match(r"st ([\w\s'-]+) b ", commentry.strip())
try:
playerName = playerNameMatch.group(1)
except:
print("commentry 1st: ", commentry)
#playerName = unidecode.unidecode(playerName)
for playersDict_0 in teamPlayers_0:
shortName = playersDict_0['short_name']
#shortName = unidecode.unidecode(shortName)
shortName = ''.join(x for x in shortName if x in string.printable)
playerName = playerName.strip()
addCommentryField(playerName, shortName, playersDict_0, 'stumped')
elif commentry.strip().startswith("run out "):
if 'sub' in commentry:
## # if substitute in commentry, then ignore, as he will not be found in players list
## continue
commentry = commentry.replace('[','').replace(']','').replace(' sub ','')
#commentry = unidecode.unidecode(commentry)
commentry = ''.join(x for x in commentry if x in string.printable)
playerNameMatch = re.match(r"run out \(([\w\s'/-]+)", commentry.strip())
try:
playerName = playerNameMatch.group(1)
except:
print("commentry 1rt: ", commentry)
playerNames = []
if '/' in playerName:
playerNames = playerName.split('/')
for playersDict_0 in teamPlayers_0:
shortName = playersDict_0['short_name']
#shortName = unidecode.unidecode(shortName)
shortName = ''.join(x for x in shortName if x in string.printable)
playerName = playerName.strip()
if len(playerNames) > 0:
for player in playerNames:
player = player.strip()
addCommentryField(player, shortName, playersDict_0, 'run_out')
else:
addCommentryField(playerName, shortName, playersDict_0, 'run_out')
for playerDict_0 in teamPlayers_0:
if not 'catches' in playerDict_0:
playerDict_0['catches'] = "0"
if not 'stumped' in playerDict_0:
playerDict_0['stumped'] = "0"
if not 'run_out' in playerDict_0:
playerDict_0['run_out'] = "0"
writer.writerow((playerDict_0["_id"],playerDict_0["TeamID"],playerDict_0["match_id"], playerDict_0["OpponentID"], playerDict_0["player_cric_info_link"],
playerDict_0["MatchURL"], playerDict_0["match_type_text"], playerDict_0["Match_start_Date"],
playerDict_0["Venue"], playerDict_0["innings"],playerDict_0["team"], playerDict_0["short_name"], playerDict_0["Player_Full_Name"],
playerDict_0["date,place_of_birth"], playerDict_0["Player_Nickname"], playerDict_0["balls_faced"],
playerDict_0["run_scored"], playerDict_0["4s"], playerDict_0["6s"], playerDict_0["strike_rate"],
playerDict_0["balls_bowled"], playerDict_0["runs_given"], playerDict_0["maiden_overs"],
playerDict_0["wicket"], playerDict_0["econ"], playerDict_0["wide_balls"], playerDict_0["no_balls"],playerDict_0["commentry"],
playerDict_0["catches"], playerDict_0["stumped"], playerDict_0["run_out"]))
for playerDict_1 in teamPlayers_1:
if not 'catches' in playerDict_1:
playerDict_1['catches'] = "0"
if not 'stumped' in playerDict_1:
playerDict_1['stumped'] = "0"
if not 'run_out' in playerDict_1:
playerDict_1['run_out'] = "0"
writer.writerow((playerDict_1["_id"],playerDict_1["TeamID"],playerDict_1["match_id"], playerDict_1["OpponentID"], playerDict_1["player_cric_info_link"],
playerDict_1["MatchURL"], playerDict_1["match_type_text"], playerDict_1["Match_start_Date"],
playerDict_1["Venue"],playerDict_1["innings"] ,playerDict_1["team"], playerDict_1["short_name"], playerDict_1["Player_Full_Name"],
playerDict_1["date,place_of_birth"], playerDict_1["Player_Nickname"], playerDict_1["balls_faced"],
playerDict_1["run_scored"], playerDict_1["4s"], playerDict_1["6s"], playerDict_1["strike_rate"],
playerDict_1["balls_bowled"], playerDict_1["runs_given"], playerDict_1["maiden_overs"],
playerDict_1["wicket"], playerDict_1["econ"], playerDict_1["wide_balls"], playerDict_1["no_balls"],playerDict_1["commentry"],
playerDict_1["catches"], playerDict_1["stumped"], playerDict_1["run_out"]))
## writer.writerow((playerDict_1["_id"], playerDict_1["short_name"], playerDict_1["player_cric_info_link"],
## playerDict_1["team"], playerDict_1["commentry"], playerDict_1["run_scored"],
## playerDict_1["balls_faced"], playerDict_1["M"], playerDict_1["4s"], playerDict_1["6s"],
## playerDict_1["balls_bowled"], playerDict_1["maiden_overs"], playerDict_1["runs_given"],
## playerDict_1["wicket"], playerDict_1["econ"], playerDict_1["dot_delivery"],
## playerDict_1["four_delivery"],
## playerDict_1["six_delivery"], playerDict_1["wide_balls"], playerDict_1["no_balls"],
## playerDict_1["Position"], playerDict_1["match_id"], playerDict_1["match_desc"],
## playerDict_1["url_match_type"],
## playerDict_1["match_type_text"], playerDict_1["season"], playerDict_1["strike_rate"],
## playerDict_1["cric_info_id"], playerDict_1["catches"], playerDict_1["stumped"], playerDict_1["run_out"]))
####for year in ['2011','2012','2013','2014','2015','2016','2017','2018']:
##Process_CSV('2018')
|
py | 1a392d037ef3cd52d5450e815195be05abed225c | import pickle
import time
import requests
from bs4 import BeautifulSoup
from scrapers import Talk
from datetime import date, datetime
from dateutil.parser import parse as dateParser_
from scrapers import dateParse
from scrapers import removeParentheses
from scrapers import cleanSpeaker
from datetime import datetime
from selenium import webdriver
from scrapers import cleanSpeaker
import re
from bs4 import BeautifulSoup
from selenium.webdriver.common.keys import Keys
def scrape(start_date=date(1980, 1, 1), process=None): # process should be Talk -> None
hostname = "https://www.youtube.com"
fireFoxOptions = webdriver.FirefoxOptions()
fireFoxOptions.set_headless()
driver = webdriver.Firefox(firefox_options=fireFoxOptions)
# driver = webdriver.Firefox()
URL = "https://www.youtube.com/c/IhesFr/videos?view=0&sort=dd&flow=grid"
try:
driver.get(URL)
# time.sleep(60) # during this minute, I manually scroll to the bottom
# of the page
time.sleep(4)
html = driver.find_element_by_tag_name('html')
html.send_keys(Keys.END)
time.sleep(2)
html.send_keys(Keys.END)
time.sleep(2)
contentsDiv = driver.find_elements_by_id('contents')[1]
contentsHTML = contentsDiv.get_attribute('outerHTML')
soup = BeautifulSoup(contentsHTML,
'html.parser')
videoDivs = soup.find_all('ytd-grid-video-renderer')
for videoDiv in videoDivs:
infoDiv = videoDiv.find('a', id="video-title")
link = hostname + infoDiv['href']
youtubeTitle = infoDiv['title']
if speakerAndTitle := youtubeTitleToMaybeSpeakerAndTitle(
youtubeTitle):
talk = Talk(link)
# date = urlToMaybeDate(link, driver)
# if date:
# if date < start_date:
# break
talk.firstName, talk.lastName = cleanSpeaker(
speakerAndTitle
[0])
talk.title = speakerAndTitle[1]
print(talk)
if process:
process(talk)
except BaseException:
pass
driver.quit()
return None
def youtubeTitleToMaybeSpeakerAndTitle(ytTitle):
pieces = ytTitle.split(' - ')
if len(pieces) == 1:
return None
name = pieces[0]
title = pieces[1]
if len(name.split(' ')) > 3:
return None
return (name, title)
def urlToMaybeDate(url, driver):
try:
driver.get(url)
time.sleep(3)
date = driver.find_element_by_id('date').text[1:]
date = dateParse(date)
return date
except BaseException:
return None
|
py | 1a392de5cdd14cd607cacfaa2b079fc158bb175e | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagedHsmsOperations:
"""ManagedHsmsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.keyvault.v2020_04_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
name: str,
parameters: "models.ManagedHsm",
**kwargs
) -> "models.ManagedHsm":
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagedHsm"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ManagedHsm')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ManagedHsmError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagedHsm', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ManagedHsm', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
name: str,
parameters: "models.ManagedHsm",
**kwargs
) -> AsyncLROPoller["models.ManagedHsm"]:
"""Create or update a managed HSM Pool in the specified subscription.
:param resource_group_name: Name of the resource group that contains the managed HSM pool.
:type resource_group_name: str
:param name: Name of the managed HSM Pool.
:type name: str
:param parameters: Parameters to create or update the managed HSM Pool.
:type parameters: ~azure.mgmt.keyvault.v2020_04_01_preview.models.ManagedHsm
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ManagedHsm or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.keyvault.v2020_04_01_preview.models.ManagedHsm]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagedHsm"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
name=name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ManagedHsm', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
name: str,
parameters: "models.ManagedHsm",
**kwargs
) -> "models.ManagedHsm":
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagedHsm"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ManagedHsm')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ManagedHsmError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagedHsm', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ManagedHsm', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
name: str,
parameters: "models.ManagedHsm",
**kwargs
) -> AsyncLROPoller["models.ManagedHsm"]:
"""Update a managed HSM Pool in the specified subscription.
:param resource_group_name: Name of the resource group that contains the managed HSM pool.
:type resource_group_name: str
:param name: Name of the managed HSM Pool.
:type name: str
:param parameters: Parameters to patch the managed HSM Pool.
:type parameters: ~azure.mgmt.keyvault.v2020_04_01_preview.models.ManagedHsm
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ManagedHsm or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.keyvault.v2020_04_01_preview.models.ManagedHsm]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagedHsm"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
name=name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ManagedHsm', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ManagedHsmError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified managed HSM Pool.
:param resource_group_name: Name of the resource group that contains the managed HSM pool.
:type resource_group_name: str
:param name: The name of the managed HSM Pool to delete.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
name=name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}'} # type: ignore
async def get(
self,
resource_group_name: str,
name: str,
**kwargs
) -> "models.ManagedHsm":
"""Gets the specified managed HSM Pool.
:param resource_group_name: Name of the resource group that contains the managed HSM pool.
:type resource_group_name: str
:param name: The name of the managed HSM Pool.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedHsm, or the result of cls(response)
:rtype: ~azure.mgmt.keyvault.v2020_04_01_preview.models.ManagedHsm
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagedHsm"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ManagedHsmError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedHsm', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
top: Optional[int] = None,
**kwargs
) -> AsyncIterable["models.ManagedHsmListResult"]:
"""The List operation gets information about the managed HSM Pools associated with the
subscription and within the specified resource group.
:param resource_group_name: Name of the resource group that contains the managed HSM pool.
:type resource_group_name: str
:param top: Maximum number of results to return.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedHsmListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.keyvault.v2020_04_01_preview.models.ManagedHsmListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagedHsmListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ManagedHsmListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ManagedHsmError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs'} # type: ignore
def list_by_subscription(
self,
top: Optional[int] = None,
**kwargs
) -> AsyncIterable["models.ManagedHsmListResult"]:
"""The List operation gets information about the managed HSM Pools associated with the
subscription.
:param top: Maximum number of results to return.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedHsmListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.keyvault.v2020_04_01_preview.models.ManagedHsmListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagedHsmListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ManagedHsmListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ManagedHsmError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/managedHSMs'} # type: ignore
|
py | 1a392e3d07edd0d782c8e9dd0f4aff367cf56d4f | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### Basic template framework algorithm uses framework components to define the algorithm.
### Shows EqualWeightingPortfolioConstructionModel.LongOnly() application
### </summary>
### <meta name="tag" content="alpha streams" />
### <meta name="tag" content="using quantconnect" />
### <meta name="tag" content="algorithm framework" />
class LongOnlyAlphaStreamAlgorithm(QCAlgorithm):
'''Basic template framework algorithm uses framework components to define the algorithm.
Shows EqualWeightingPortfolioConstructionModel.LongOnly() application'''
def Initialize(self):
# 1. Required:
self.SetStartDate(2013, 10, 7)
self.SetEndDate(2013, 10, 11)
# 2. Required: Alpha Streams Models:
self.SetBrokerageModel(BrokerageName.AlphaStreams)
# 3. Required: Significant AUM Capacity
self.SetCash(1000000)
# Only SPY will be traded
self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel(Resolution.Daily, PortfolioBias.Long))
self.SetExecution(ImmediateExecutionModel())
# Set algorithm framework models
self.SetUniverseSelection(ManualUniverseSelectionModel(
[Symbol.Create(x, SecurityType.Equity, Market.USA) for x in ["SPY", "IBM"]]))
def OnData(self, slice):
if self.Portfolio.Invested: return
self.EmitInsights(
[
Insight.Price("SPY", timedelta(1), InsightDirection.Up),
Insight.Price("IBM", timedelta(1), InsightDirection.Down)
])
def OnOrderEvent(self, orderEvent):
if orderEvent.Status == OrderStatus.Filled:
if self.Securities[orderEvent.Symbol].Holdings.IsShort:
raise ValueError("Invalid position, should not be short");
self.Debug(orderEvent)
|
py | 1a392f31a6552f4631d386610b4dfe296ebb217c | """
Classes and subroutines dealing with network connections and related topics.
"""
from functools import wraps
import getpass
import os
import re
import time
import six
import socket
import sys
from rtox.fabric.auth import get_password, set_password
from rtox.fabric.utils import handle_prompt_abort, warn
from rtox.fabric.exceptions import NetworkError
try:
import warnings
warnings.simplefilter('ignore', DeprecationWarning)
import paramiko as ssh
except ImportError:
import traceback
traceback.print_exc()
msg = """
There was a problem importing our SSH library (see traceback above).
Please make sure all dependencies are installed and importable.
""".rstrip()
sys.stderr.write(msg + '\n')
sys.exit(1)
ipv6_regex = re.compile(
r'^\[?(?P<host>[0-9A-Fa-f:]+(?:%[a-z]+\d+)?)\]?(:(?P<port>\d+))?$')
def direct_tcpip(client, host, port):
return client.get_transport().open_channel(
'direct-tcpip',
(host, int(port)),
('', 0)
)
def is_key_load_error(e):
return (
e.__class__ is ssh.SSHException
and 'Unable to parse key file' in str(e)
)
def _tried_enough(tries):
from rtox.fabric.state import env
return tries >= env.connection_attempts
def get_gateway(host, port, cache, replace=False):
"""
Create and return a gateway socket, if one is needed.
This function checks ``env`` for gateway or proxy-command settings and
returns the necessary socket-like object for use by a final host
connection.
:param host:
Hostname of target server.
:param port:
Port to connect to on target server.
:param cache:
A ``HostConnectionCache`` object, in which gateway ``SSHClient``
objects are to be retrieved/cached.
:param replace:
Whether to forcibly replace a cached gateway client object.
:returns:
A ``socket.socket``-like object, or ``None`` if none was created.
"""
from rtox.fabric.state import env, output
sock = None
proxy_command = ssh_config().get('proxycommand', None)
if env.gateway:
gateway = normalize_to_string(env.gateway)
# ensure initial gateway connection
if replace or gateway not in cache:
if output.debug:
print("Creating new gateway connection to %r" % gateway)
cache[gateway] = connect(*normalize(gateway) + (cache, False))
# now we should have an open gw connection and can ask it for a
# direct-tcpip channel to the real target. (bypass cache's own
# __getitem__ override to avoid hilarity - this is usually called
# within that method.)
sock = direct_tcpip(dict.__getitem__(cache, gateway), host, port)
elif proxy_command:
sock = ssh.ProxyCommand(proxy_command)
return sock
class HostConnectionCache(dict):
"""
Dict subclass allowing for caching of host connections/clients.
This subclass will intelligently create new client connections when keys
are requested, or return previously created connections instead.
It also handles creating new socket-like objects when required to implement
gateway connections and `ProxyCommand`, and handing them to the inner
connection methods.
Key values are the same as host specifiers throughout Fabric: optional
username + ``@``, mandatory hostname, optional ``:`` + port number.
Examples:
* ``example.com`` - typical Internet host address.
* ``firewall`` - atypical, but still legal, local host address.
* ``[email protected]`` - with specific username attached.
* ``[email protected]:222`` - with specific nonstandard port attached.
When the username is not given, ``env.user`` is used. ``env.user``
defaults to the currently running user at startup but may be overwritten by
user code or by specifying a command-line flag.
Note that differing explicit usernames for the same hostname will result in
multiple client connections being made. For example, specifying
``[email protected]`` will create a connection to ``example.com``, logged
in as ``user1``; later specifying ``[email protected]`` will create a new,
2nd connection as ``user2``.
The same applies to ports: specifying two different ports will result in
two different connections to the same host being made. If no port is given,
22 is assumed, so ``example.com`` is equivalent to ``example.com:22``.
"""
def connect(self, key):
"""
Force a new connection to ``key`` host string.
"""
from rtox.fabric.state import env
user, host, port = normalize(key)
key = normalize_to_string(key)
seek_gateway = True
# break the loop when the host is gateway itself
if env.gateway:
seek_gateway = normalize_to_string(env.gateway) != key
self[key] = connect(
user, host, port, cache=self, seek_gateway=seek_gateway)
def __getitem__(self, key):
"""
Autoconnect + return connection object
"""
key = normalize_to_string(key)
if key not in self:
self.connect(key)
return dict.__getitem__(self, key)
#
# Dict overrides that normalize input keys
#
def __setitem__(self, key, value):
return dict.__setitem__(self, normalize_to_string(key), value)
def __delitem__(self, key):
return dict.__delitem__(self, normalize_to_string(key))
def __contains__(self, key):
return dict.__contains__(self, normalize_to_string(key))
def ssh_config(host_string=None):
"""
Return ssh configuration dict for current env.host_string host value.
Memoizes the loaded SSH config file, but not the specific per-host results.
This function performs the necessary "is SSH config enabled?" checks and
will simply return an empty dict if not. If SSH config *is* enabled and the
value of env.ssh_config_path is not a valid file, it will abort.
May give an explicit host string as ``host_string``.
"""
from rtox.fabric.state import env
dummy = {}
if not env.use_ssh_config:
return dummy
if '_ssh_config' not in env:
try:
conf = ssh.SSHConfig()
path = os.path.expanduser(env.ssh_config_path)
with open(path) as fd:
conf.parse(fd)
env._ssh_config = conf
except IOError:
warn("Unable to load SSH config file '%s'" % path)
return dummy
host = parse_host_string(host_string or env.host_string)['host']
return env._ssh_config.lookup(host)
def key_filenames():
"""
Returns list of SSH key filenames for the current env.host_string.
Takes into account ssh_config and env.key_filename, including normalization
to a list. Also performs ``os.path.expanduser`` expansion on any key
filenames.
"""
from rtox.fabric.state import env
keys = env.key_filename
# For ease of use, coerce stringish key filename into list
if isinstance(env.key_filename, six.string_types) or env.key_filename is None:
keys = [keys]
# Strip out any empty strings (such as the default value...meh)
keys = list(filter(bool, keys))
# Honor SSH config
conf = ssh_config()
if 'identityfile' in conf:
# Assume a list here as we require Paramiko 1.10+
keys.extend(conf['identityfile'])
return list(map(os.path.expanduser, keys))
def key_from_env(passphrase=None):
"""
Returns a paramiko-ready key from a text string of a private key
"""
from rtox.fabric.state import env, output
if 'key' in env:
if output.debug:
# NOTE: this may not be the most secure thing; OTOH anybody running
# the process must by definition have access to the key value,
# so only serious problem is if they're logging the output.
sys.stderr.write("Trying to honor in-memory key %r\n" % env.key)
for pkey_class in (ssh.rsakey.RSAKey, ssh.dsskey.DSSKey):
if output.debug:
sys.stderr.write("Trying to load it as %s\n" % pkey_class)
try:
return pkey_class.from_private_key(six.StringIO(env.key), passphrase)
except Exception as e:
# File is valid key, but is encrypted: raise it, this will
# cause cxn loop to prompt for passphrase & retry
if 'Private key file is encrypted' in str(e):
raise
# Otherwise, it probably means it wasn't a valid key of this
# type, so try the next one.
else:
pass
def parse_host_string(host_string):
# Split host_string to user (optional) and host/port
user_hostport = host_string.rsplit('@', 1)
hostport = user_hostport.pop()
user = user_hostport[0] if user_hostport and user_hostport[0] else None
# Split host/port string to host and optional port
# For IPv6 addresses square brackets are mandatory for host/port separation
if hostport.count(':') > 1:
# Looks like IPv6 address
r = ipv6_regex.match(hostport).groupdict()
host = r['host'] or None
port = r['port'] or None
else:
# Hostname or IPv4 address
host_port = hostport.rsplit(':', 1)
host = host_port.pop(0) or None
port = host_port[0] if host_port and host_port[0] else None
return {'user': user, 'host': host, 'port': port}
def normalize(host_string, omit_port=False):
"""
Normalizes a given host string, returning explicit host, user, port.
If ``omit_port`` is given and is True, only the host and user are returned.
This function will process SSH config files if Fabric is configured to do
so, and will use them to fill in some default values or swap in hostname
aliases.
Regarding SSH port used:
* Ports explicitly given within host strings always win, no matter what.
* When the host string lacks a port, SSH-config driven port configurations
are used next.
* When the SSH config doesn't specify a port (at all - including a default
``Host *`` block), Fabric's internal setting ``env.port`` is consulted.
* If ``env.port`` is empty, ``env.default_port`` is checked (which should
always be, as one would expect, port ``22``).
"""
from rtox.fabric.state import env
# Gracefully handle "empty" input by returning empty output
if not host_string:
return ('', '') if omit_port else ('', '', '')
# Parse host string (need this early on to look up host-specific ssh_config
# values)
r = parse_host_string(host_string)
host = r['host']
# Env values (using defaults if somehow earlier defaults were replaced with
# empty values)
user = env.user or env.local_user
# SSH config data
conf = ssh_config(host_string)
# Only use ssh_config values if the env value appears unmodified from
# the true defaults. If the user has tweaked them, that new value
# takes precedence.
if user == env.local_user and 'user' in conf:
user = conf['user']
# Also override host if needed
if 'hostname' in conf:
host = conf['hostname']
# Merge explicit user/port values with the env/ssh_config derived ones
# (Host is already done at this point.)
user = r['user'] or user
if omit_port:
return user, host
# determine port from ssh config if enabled
ssh_config_port = None
if env.use_ssh_config:
ssh_config_port = conf.get('port', None)
# port priority order (as in docstring)
port = r['port'] or ssh_config_port or env.port or env.default_port
return user, host, port
def to_dict(host_string):
user, host, port = normalize(host_string)
return {
'user': user, 'host': host, 'port': port, 'host_string': host_string
}
def from_dict(arg):
return join_host_strings(arg['user'], arg['host'], arg['port'])
def denormalize(host_string):
"""
Strips out default values for the given host string.
If the user part is the default user, it is removed;
if the port is port 22, it also is removed.
"""
from rtox.fabric.state import env
r = parse_host_string(host_string)
user = ''
if r['user'] is not None and r['user'] != env.user:
user = r['user'] + '@'
port = ''
if r['port'] is not None and r['port'] != '22':
port = ':' + r['port']
host = r['host']
host = '[%s]' % host if port and host.count(':') > 1 else host
return user + host + port
def join_host_strings(user, host, port=None):
"""
Turns user/host/port strings into ``user@host:port`` combined string.
This function is not responsible for handling missing user/port strings;
for that, see the ``normalize`` function.
If ``host`` looks like IPv6 address, it will be enclosed in square brackets
If ``port`` is omitted, the returned string will be of the form
``user@host``.
"""
if port:
# Square brackets are necessary for IPv6 host/port separation
template = "%s@[%s]:%s" if host.count(':') > 1 else "%s@%s:%s"
return template % (user, host, port)
else:
return "%s@%s" % (user, host)
def normalize_to_string(host_string):
"""
normalize() returns a tuple; this returns another valid host string.
"""
return join_host_strings(*normalize(host_string))
def connect(user, host, port, cache, seek_gateway=True):
"""
Create and return a new SSHClient instance connected to given host.
:param user: Username to connect as.
:param host: Network hostname.
:param port: SSH daemon port.
:param cache:
A ``HostConnectionCache`` instance used to cache/store gateway hosts
when gatewaying is enabled.
:param seek_gateway:
Whether to try setting up a gateway socket for this connection. Used so
the actual gateway connection can prevent recursion.
"""
from rtox.fabric.state import env, output
#
# Initialization
#
# Init client
client = ssh.SSHClient()
# Load system hosts file (e.g. /etc/ssh/ssh_known_hosts)
known_hosts = env.get('system_known_hosts')
if known_hosts:
client.load_system_host_keys(known_hosts)
# Load known host keys (e.g. ~/.ssh/known_hosts) unless user says not to.
if not env.disable_known_hosts:
client.load_system_host_keys()
# Unless user specified not to, accept/add new, unknown host keys
if not env.reject_unknown_hosts:
client.set_missing_host_key_policy(ssh.AutoAddPolicy())
#
# Connection attempt loop
#
# Initialize loop variables
connected = False
password = get_password(user, host, port, login_only=True)
tries = 0
sock = None
# Loop until successful connect (keep prompting for new password)
while not connected:
# Attempt connection
try:
tries += 1
# (Re)connect gateway socket, if needed.
# Nuke cached client object if not on initial try.
if seek_gateway:
sock = get_gateway(host, port, cache, replace=tries > 0)
# Set up kwargs (this lets us skip GSS-API kwargs unless explicitly
# set; otherwise older Paramiko versions will be cranky.)
kwargs = dict(
hostname=host,
port=int(port),
username=user,
password=password,
pkey=key_from_env(password),
key_filename=key_filenames(),
timeout=env.timeout,
allow_agent=not env.no_agent,
look_for_keys=not env.no_keys,
sock=sock,
)
for suffix in ('auth', 'deleg_creds', 'kex'):
name = "gss_" + suffix
val = env.get(name, None)
if val is not None:
kwargs[name] = val
# Ready to connect
client.connect(**kwargs)
connected = True
# set a keepalive if desired
if env.keepalive:
client.get_transport().set_keepalive(env.keepalive)
return client
# BadHostKeyException corresponds to key mismatch, i.e. what on the
# command line results in the big banner error about man-in-the-middle
# attacks.
except ssh.BadHostKeyException as e:
raise NetworkError("Host key for %s did not match pre-existing key! Server's key was changed recently, or possible man-in-the-middle attack." % host, e)
# Prompt for new password to try on auth failure
except (
ssh.AuthenticationException,
ssh.PasswordRequiredException,
ssh.SSHException
) as e:
msg = str(e)
# If we get SSHExceptionError and the exception message indicates
# SSH protocol banner read failures, assume it's caused by the
# server load and try again.
#
# If we are using a gateway, we will get a ChannelException if
# connection to the downstream host fails. We should retry.
if (e.__class__ is ssh.SSHException \
and msg == 'Error reading SSH protocol banner') \
or e.__class__ is ssh.ChannelException:
if _tried_enough(tries):
raise NetworkError(msg, e)
continue
# For whatever reason, empty password + no ssh key or agent
# results in an SSHException instead of an
# AuthenticationException. Since it's difficult to do
# otherwise, we must assume empty password + SSHException ==
# auth exception.
#
# Conversely: if we get SSHException and there
# *was* a password -- it is probably something non auth
# related, and should be sent upwards. (This is not true if the
# exception message does indicate key parse problems.)
#
# This also holds true for rejected/unknown host keys: we have to
# guess based on other heuristics.
if (
e.__class__ is ssh.SSHException
and (
password
or msg.startswith('Unknown server')
or "not found in known_hosts" in msg
)
and not is_key_load_error(e)
):
raise NetworkError(msg, e)
# Otherwise, assume an auth exception, and prompt for new/better
# password.
# Paramiko doesn't handle prompting for locked private
# keys (i.e. keys with a passphrase and not loaded into an agent)
# so we have to detect this and tweak our prompt slightly.
# (Otherwise, however, the logic flow is the same, because
# ssh's connect() method overrides the password argument to be
# either the login password OR the private key passphrase. Meh.)
#
# NOTE: This will come up if you normally use a
# passphrase-protected private key with ssh-agent, and enter an
# incorrect remote username, because ssh.connect:
# * Tries the agent first, which will fail as you gave the wrong
# username, so obviously any loaded keys aren't gonna work for a
# nonexistent remote account;
# * Then tries the on-disk key file, which is passphrased;
# * Realizes there's no password to try unlocking that key with,
# because you didn't enter a password, because you're using
# ssh-agent;
# * In this condition (trying a key file, password is None)
# ssh raises PasswordRequiredException.
text = None
if e.__class__ is ssh.PasswordRequiredException \
or is_key_load_error(e):
# NOTE: we can't easily say WHICH key's passphrase is needed,
# because ssh doesn't provide us with that info, and
# env.key_filename may be a list of keys, so we can't know
# which one raised the exception. Best not to try.
prompt = "[%s] Passphrase for private key"
text = prompt % env.host_string
password = prompt_for_password(text)
# Update env.password, env.passwords if empty
set_password(user, host, port, password)
# Ctrl-D / Ctrl-C for exit
# TODO: this may no longer actually serve its original purpose and may
# also hide TypeErrors from paramiko. Double check in v2.
except (EOFError, TypeError):
# Print a newline (in case user was sitting at prompt)
print('')
sys.exit(0)
# Handle DNS error / name lookup failure
except socket.gaierror as e:
raise NetworkError('Name lookup failed for %s' % host, e)
# Handle timeouts and retries, including generic errors
# NOTE: In 2.6, socket.error subclasses IOError
except socket.error as e:
not_timeout = type(e) is not socket.timeout
giving_up = _tried_enough(tries)
# Baseline error msg for when debug is off
msg = "Timed out trying to connect to %s" % host
# Expanded for debug on
err = msg + " (attempt %s of %s)" % (tries, env.connection_attempts)
if giving_up:
err += ", giving up"
err += ")"
# Debuggin'
if output.debug:
sys.stderr.write(err + '\n')
# Having said our piece, try again
if not giving_up:
# Sleep if it wasn't a timeout, so we still get timeout-like
# behavior
if not_timeout:
time.sleep(env.timeout)
continue
# Override eror msg if we were retrying other errors
if not_timeout:
msg = "Low level socket error connecting to host %s on port %s: %s" % (
host, port, e.args[1]
)
# Here, all attempts failed. Tweak error msg to show # tries.
# TODO: find good humanization module, jeez
s = "s" if env.connection_attempts > 1 else ""
msg += " (tried %s time%s)" % (env.connection_attempts, s)
raise NetworkError(msg, e)
# Ensure that if we terminated without connecting and we were given an
# explicit socket, close it out.
finally:
if not connected and sock is not None:
sock.close()
def _password_prompt(prompt, stream):
# NOTE: Using encode-to-ascii to prevent (Windows, at least) getpass from
# choking if given Unicode.
if six.PY3 is False:
prompt = prompt.encode('ascii', 'ignore')
return getpass.getpass(prompt, stream)
def prompt_for_password(prompt=None, no_colon=False, stream=None):
"""
Prompts for and returns a new password if required; otherwise, returns
None.
A trailing colon is appended unless ``no_colon`` is True.
If the user supplies an empty password, the user will be re-prompted until
they enter a non-empty password.
``prompt_for_password`` autogenerates the user prompt based on the current
host being connected to. To override this, specify a string value for
``prompt``.
``stream`` is the stream the prompt will be printed to; if not given,
defaults to ``sys.stderr``.
"""
from rtox.fabric.state import env
handle_prompt_abort("a connection or sudo password")
stream = stream or sys.stderr
# Construct prompt
default = "[%s] Login password for '%s'" % (env.host_string, env.user)
password_prompt = prompt if (prompt is not None) else default
if not no_colon:
password_prompt += ": "
# Get new password value
new_password = _password_prompt(password_prompt, stream)
# Otherwise, loop until user gives us a non-empty password (to prevent
# returning the empty string, and to avoid unnecessary network overhead.)
while not new_password:
print("Sorry, you can't enter an empty password. Please try again.")
new_password = _password_prompt(password_prompt, stream)
return new_password
def needs_host(func):
"""
Prompt user for value of ``env.host_string`` when ``env.host_string`` is
empty.
This decorator is basically a safety net for silly users who forgot to
specify the host/host list in one way or another. It should be used to wrap
operations which require a network connection.
Due to how we execute commands per-host in ``main()``, it's not possible to
specify multiple hosts at this point in time, so only a single host will be
prompted for.
Because this decorator sets ``env.host_string``, it will prompt once (and
only once) per command. As ``main()`` clears ``env.host_string`` between
commands, this decorator will also end up prompting the user once per
command (in the case where multiple commands have no hosts set, of course.)
"""
from rtox.fabric.state import env
@wraps(func)
def host_prompting_wrapper(*args, **kwargs):
while not env.get('host_string', False):
handle_prompt_abort("the target host connection string")
prompt = "No hosts found. Please specify (single) " \
"host string for connection: "
# WARNING: do not use six.moves.input, because test cases to not
# overwrite that method with a faked method from Fudge
if six.PY3 is True:
host_string = input(prompt)
else:
host_string = raw_input(prompt)
env.update(to_dict(host_string))
return func(*args, **kwargs)
host_prompting_wrapper.undecorated = func
return host_prompting_wrapper
def disconnect_all():
"""
Disconnect from all currently connected servers.
Used at the end of ``fab``'s main loop, and also intended for use by
library users.
"""
from rtox.fabric.state import connections, output
# Explicitly disconnect from all servers
for key in list(connections.keys()):
if output.status:
# Here we can't use the py3k print(x, end=" ")
# because 2.5 backwards compatibility
sys.stdout.write("Disconnecting from %s... " % denormalize(key))
connections[key].close()
del connections[key]
if output.status:
sys.stdout.write("done.\n")
|
py | 1a392fa194b6a0976154c79320d781f46475aee8 | import os
import sys
sys.path.append("../..")
from PyCLUE.tasks.run_classifier import my_clue_tasks, configs
import tensorflow as tf
# assign GPU devices or CPU devices
# os.environ["CUDA_VISIBLE_DEVICES"] = "4"
flags = tf.flags
FLAGS = flags.FLAGS
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# tf.logging.set_verbosity(tf.logging.ERROR)
flags.DEFINE_string("task_name", "", "oss buckets")
flags.DEFINE_string("gpu_id", "4", "oss buckets")
os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu_id
# default configs: see PyCLUE.utils.classifier_utils.core
# below are some necessary paramters required in running this task
# task_name:
# Support:
# chineseGLUE: bq, xnli, lcqmc, inews, thucnews,
# CLUE: afqmc, cmnli, copa, csl, iflytek, tnews, wsc
for task_name in FLAGS.task_name.split(","):
from PyCLUE.tasks.run_classifier import configs
if task_name == 'afqmc':
configs["task_name"] = task_name
# train parameters
configs["max_seq_length"] = 128
configs["train_batch_size"] = 32
configs["learning_rate"] = 2e-5
configs["warmup_proportion"] = 0.1
configs["num_train_epochs"] = 10.0
elif task_name == 'cmnli':
configs["task_name"] = task_name
# train parameters
configs["max_seq_length"] = 128
configs["train_batch_size"] = 32
configs["learning_rate"] = 1e-4
configs["warmup_proportion"] = 0.1
configs["num_train_epochs"] = 10.0
elif task_name == 'csl':
configs["task_name"] = task_name
# train parameters
configs["max_seq_length"] = 256
configs["train_batch_size"] = 32
configs["learning_rate"] = 5e-5
configs["warmup_proportion"] = 0.1
configs["num_train_epochs"] = 10.0
elif task_name == 'iflytek':
configs["task_name"] = task_name
# train parameters
configs["max_seq_length"] = 256
configs["train_batch_size"] = 16
configs["learning_rate"] = 1e-4
configs["warmup_proportion"] = 0.1
configs["num_train_epochs"] = 20.0
elif task_name == 'tnews':
configs["task_name"] = task_name
# train parameters
configs["max_seq_length"] = 256
configs["train_batch_size"] = 32
configs["learning_rate"] = 1e-4
configs["warmup_proportion"] = 0.1
configs["num_train_epochs"] = 20.0
elif task_name == 'wsc':
configs["task_name"] = task_name
# train parameters
configs["max_seq_length"] = 256
configs["train_batch_size"] = 32
configs["learning_rate"] = 1e-4
configs["warmup_proportion"] = 0.1
configs["num_train_epochs"] = 10.0
# pretrained_lm_name:
# If None, should assign `vocab_file`, `bert_config_file`, `init_checkpoint`.
# Or you can choose the following models:
# bert, bert_wwm_ext, albert_xlarge, albert_large, albert_base, albert_base_ext,
# albert_small, albert_tiny, roberta, roberta_wwm_ext, roberta_wwm_ext_large
configs["pretrained_lm_name"] = "bert_electra_tiny_grl_generator"
configs["vocab_file"] = "/data/grl/electra_bert_tiny_gen_bert_tiny_dis_joint_gumbel_no_sharing_pretrained_embedding/generator/vocab.txt"
configs["bert_config_file"] = "/data/grl/electra_bert_tiny_gen_bert_tiny_dis_joint_gumbel_no_sharing_pretrained_embedding/generator/bert_config_tiny_large_embed.json"
configs["init_checkpoint"] = "/data/grl/electra_bert_tiny_gen_bert_tiny_dis_joint_gumbel_no_sharing_pretrained_embedding/generator/generator.ckpt-1070000"
configs["verbose"] = 1
configs["do_train"] = True
configs["do_eval"] = True
configs["do_predict"] = True
my_clue_tasks(configs)
|
py | 1a392fe0394ceddcad3574376ecaf6474e92685b | # coding:utf-8
import datetime
from functools import lru_cache
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
from QUANTAXIS.QAAnalysis.QAAnalysis_dataframe import QAAnalysis_stock
from QUANTAXIS.QAData.data_marketvalue import QA_data_marketvalue
from QUANTAXIS.QAFetch.Fetcher import QA_quotation
from QUANTAXIS.QAFetch.QAQuery import QA_fetch_stock_info
from QUANTAXIS.QAFetch.QAQuery_Advance import (QA_fetch_stock_block_adv,
QA_fetch_stock_day_adv,
QA_fetch_stock_min_adv)
from QUANTAXIS.QAFetch.QATdx import QA_fetch_get_stock_info
from QUANTAXIS.QAFetch.QATdx_adv import QA_Tdx_Executor
from QUANTAXIS.QAUtil.QADate_trade import QA_util_get_real_datelist
from QUANTAXIS.QAUtil.QAParameter import (DATASOURCE, FREQUENCE, MARKET_TYPE,
OUTPUT_FORMAT)
def get_gap_trade(gap):
return QA_util_get_real_datelist(datetime.date.today() + datetime.timedelta(days=-int(gap)), datetime.date.today())
#from QUANTAXIS.QAAnalysis.QAAnalysis_dataframe import QAAnalysis_stock
class QAAnalysis_block():
def __init__(self, code=[], name=None, start=None, end=None, frequence=FREQUENCE.DAY, *args, **kwargs):
self.code = code
self.start = start
self.end = end
self.frequence = frequence
self.name = name
def __repr__(self):
return '< QAAnalysis_Block {} with {} code >'.format(self.name, len(self.code))
@property
@lru_cache()
def market_data(self):
return QA_quotation(self.code, self.start, self.end, self.frequence,
market=MARKET_TYPE.STOCK_CN, source=DATASOURCE.MONGO, output=OUTPUT_FORMAT.DATASTRUCT).to_qfq()
@property
@lru_cache()
def market_value(self):
return self.market_data.add_func(QA_data_marketvalue)
@property
def week_data(self):
'this weekly data'
'return a QUANTAXIS DATASTRUCT'
return self.market_data.to_week()
@property
def month_data(self):
'this monthly data'
'return a QUANTAXIS DATASTRUCT'
return self.market_data.to_month()
def block_index(self, methods='mv'):
if methods == 'mv':
res = self.market_value.groupby(level=0).apply(
lambda x: np.average(x.close, weights=x.shares))
elif methods == 'lv':
res = self.market_value.groupby(level=0).apply(
lambda x: np.average(x.close, weights=x.lshares))
elif methods == 'close':
res = self.market_value.groupby(level=0).apply(
lambda x: np.average(x.close))
elif methods == 'volume':
res = self.market_value.groupby(level=0).apply(
lambda x: np.average(x.close, weights=x.volume))
else:
res = self.market_value.groupby(level=0).apply(
lambda x: np.average(x.close, weights=x.shares))
print(
'wrong methods: only support [mv,lv,close,volume] methods \n use default mv methods')
return res/res.iloc[0]*1000
def stock_turnover(self):
return self.market_value.volume/self.market_value.lshares
def block_turnover(self):
return self.stock_turnover().groupby(level=0).mean()
def plot_index(self, methods='mv'):
block_index=self.block_index('close')
def format_date(x, pos=None):
# 保证下标不越界,很重要,越界会导致最终plot坐标轴label无显示
thisind = np.clip(int(x+0.5), 0, N-1)
# print(thisind)
return block_index.index[thisind].strftime('%Y-%m-%d %H:%M')
fig = plt.figure(figsize=(14, 12))
ax = fig.add_subplot(1, 1, 1)
plt.style.use('ggplot')
plt.title('QUANTAXIS BLOCK ANA {}'.format(
self.name), fontproperties="SimHei")
N = len(block_index)
block_index.reset_index()[0].plot()
self.block_index('lv').reset_index()[0].plot()
self.block_index('close').reset_index()[0].plot()
self.block_index('volume').reset_index()[0].plot()
ax.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))
plt.legend(['market_value', 'liquidity_value', 'close', 'volume'])
plt.show()
if __name__ == "__main__":
import QUANTAXIS as QA
ana = QAAnalysis_block(
QA.QA_fetch_stock_block_adv().get_block('国产软件').code, '国产软件', '2018-01-01', '2018-08-21')
ana.plot_index()
ana = QAAnalysis_block(['000001', '000002', '600356'],
'自定义', '2018-01-01', '2018-08-21')
ana.plot_index()
ana = QAAnalysis_block(['000001', '000002', '600356'],
'自定义15分钟级别指数', '2018-08-01', '2018-08-21', FREQUENCE.FIFTEEN_MIN)
ana.plot_index()
|
py | 1a393039335e34361f0f93edbc09b08c4b2df47d | """Provide support to Lexicon for DNS changes for Gransy sites subreg.cz, regtons.com and \
regnames.eu."""
from __future__ import absolute_import
import collections
import logging
from builtins import staticmethod
try:
import zeep # Optional dependency
except BaseException:
pass
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ["gransy.com"]
def gransy_provider_parser(subparser):
"""Gransy provider parser"""
subparser.add_argument(
"--auth-username", help="specify username for authentication"
)
subparser.add_argument(
"--auth-password", help="specify password for authentication"
)
def provider_parser(subparser):
"""Configure provider parser"""
gransy_provider_parser(subparser)
subparser.description = (
"DNS manipulation provider for Gransy sites "
+ "subreg.cz, regtons.com and regnames.eu."
)
class Provider(BaseProvider):
"""Provider class for Gransy"""
def __init__(self, config):
super(Provider, self).__init__(config)
self.domain_id = None
self.ssid = None
client = zeep.Client("https://subreg.cz/wsdl")
self.api = client.service
# Authenticate against provider,
# Make any requests required to get the domain's id for
# this provider, so it can be used in subsequent calls.
# Should throw an error if authentication fails for any reason,
# of if the domain does not exist.
def _authenticate(self):
"""Logs-in the user and checks the domain name"""
if not self._get_provider_option(
"auth_username"
) or not self._get_provider_option("auth_password"):
raise Exception(
"No valid authentication data passed, expected: auth-username and auth-password"
)
response = self._request_login(
self._get_provider_option("auth_username"),
self._get_provider_option("auth_password"),
)
if "ssid" in response:
self.ssid = response["ssid"]
domains = self.domains_list()
if any((domain["name"] == self.domain for domain in domains)):
self.domain_id = self.domain
else:
raise Exception("Unknown domain {}".format(self.domain))
else:
raise Exception("No SSID provided by server")
# Create record. If record already exists with the same content, do nothing.
def _create_record(self, rtype, name, content):
"""Creates a new unique record"""
found = self._list_records(rtype=rtype, name=name, content=content)
if found:
return True
record = self._create_request_record(
None,
rtype,
name,
content,
self._get_lexicon_option("ttl"),
self._get_lexicon_option("priority"),
)
self._request_add_dns_record(record)
return True
# Update a record. Identifier must be specified.
def _update_record(self, identifier, rtype=None, name=None, content=None):
"""Updates a record. Name changes are allowed, but the record identifier will change"""
if identifier is not None:
if name is not None:
records = self._list_records_internal(identifier=identifier)
if len(records) == 1 and records[0]["name"] != self._full_name(name):
# API does not allow us to update name directly
self._update_record_with_name(records[0], rtype, name, content)
else:
self._update_record_with_id(identifier, rtype, content)
else:
self._update_record_with_id(identifier, rtype, content)
else:
guessed_record = self._guess_record(rtype, name)
self._update_record_with_id(guessed_record["id"], rtype, content)
return True
def _update_record_with_id(self, identifier, rtype, content):
"""Updates existing record with no sub-domain name changes"""
record = self._create_request_record(
identifier,
rtype,
None,
content,
self._get_lexicon_option("ttl"),
self._get_lexicon_option("priority"),
)
self._request_modify_dns_record(record)
def _update_record_with_name(self, old_record, rtype, new_name, content):
"""Updates existing record and changes it's sub-domain name"""
new_type = rtype if rtype else old_record["type"]
new_ttl = self._get_lexicon_option("ttl")
if new_ttl is None and "ttl" in old_record:
new_ttl = old_record["ttl"]
new_priority = self._get_lexicon_option("priority")
if new_priority is None and "priority" in old_record:
new_priority = old_record["priority"]
new_content = content
if new_content is None and "content" in old_record:
new_content = old_record["content"]
record = self._create_request_record(
None, new_type, new_name, new_content, new_ttl, new_priority
)
# This will be a different domain name, so no name collision should
# happen. First create a new entry and when it succeeds, delete the old
# one.
self._request_add_dns_record(record)
self._request_delete_dns_record_by_id(old_record["id"])
# Delete an existing record.
# If record does not exist, do nothing.
# If an identifier is specified, use it, otherwise do a lookup using type, name and content.
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
"""Deletes an existing record"""
to_delete_ids = list()
if identifier:
to_delete_ids.append(identifier)
else:
for record in self._list_records(rtype=rtype, name=name, content=content):
to_delete_ids.append(record["id"])
for to_delete_id in to_delete_ids:
self._request_delete_dns_record_by_id(to_delete_id)
return True
def domains_list(self):
"""Get list of registered domains"""
response = self._request_domains_list()
return response["domains"] if "domains" in response else list()
def _create_request_record(self, identifier, rtype, name, content, ttl, priority):
"""Creates record for Subreg API calls"""
record = collections.OrderedDict()
# Mandatory content
# Just for update - not for creation
if identifier is not None:
record["id"] = identifier
record["type"] = rtype
# Just for creation - not for update
if name is not None:
record["name"] = self._relative_name(name)
# Optional content
if content is not None:
record["content"] = content
if ttl is not None:
record["ttl"] = ttl
if priority is not None:
record["prio"] = priority
return record
def _create_response_record(self, response):
"""Creates record for lexicon API calls"""
record = dict()
record["id"] = response["id"]
record["type"] = response["type"]
record["name"] = self._full_name(response["name"])
if "content" in response:
record["content"] = response["content"] or ""
if "ttl" in response:
record["ttl"] = response["ttl"]
if "prio" in response:
record["priority"] = response["prio"]
return record
def _full_name(self, record_name):
"""Returns full domain name of a sub-domain name"""
# Handle None and empty strings
if not record_name:
return self.domain
return super(Provider, self)._full_name(record_name)
def _relative_name(self, record_name):
"""Returns sub-domain of a domain name"""
# Handle None and empty strings as None
if not record_name:
return None
subdomain = super(Provider, self)._relative_name(record_name)
return subdomain if subdomain else None
# List all records. Return an empty list if no records found
# identifier, type, name and content are used to filter records.
def _list_records(self, rtype=None, name=None, content=None):
return self._list_records_internal(rtype=rtype, name=name, content=content)
def _list_records_internal(
self, identifier=None, rtype=None, name=None, content=None
):
"""Lists all records by the specified criteria"""
response = self._request_get_dns_zone()
if "records" in response:
# Interpret empty string as None because zeep does so too
content_check = content if content != "" else None
name_check = self._relative_name(name)
# Stringize the identifier to prevent any rtype differences
identifier_check = str(identifier) if identifier is not None else None
filtered_records = [
record
for record in response["records"]
if (identifier is None or str(record["id"]) == identifier_check)
and (rtype is None or record["type"] == rtype)
and (name is None or record["name"] == name_check)
and (
content is None
or ("content" in record and record["content"] == content_check)
)
]
records = [
self._create_response_record(filtered_record)
for filtered_record in filtered_records
]
else:
records = []
return records
def _guess_record(self, rtype, name=None, content=None):
"""Tries to find existing unique record by type, name and content"""
records = self._list_records_internal(
identifier=None, rtype=rtype, name=name, content=content
)
if len(records) == 1:
return records[0]
if len(records) > 1:
raise Exception(
"Identifier was not provided and several existing "
"records match the request for {0}/{1}".format(rtype, name)
)
raise Exception(
"Identifier was not provided and no existing records match "
"the request for {0}/{1}".format(rtype, name)
)
def _request_login(self, login, password):
"""Sends Login request"""
return self._request_internal("Login", login=login, password=password)
def _request_domains_list(self):
"""Sends Domains_List request"""
return self._request_internal("Domains_List")
def _request_get_dns_zone(self):
"""Sends Get_DNS_Zone request"""
return self._request_internal("Get_DNS_Zone", domain=self.domain)
def _request_add_dns_record(self, record):
"""Sends Add_DNS_Record request"""
return self._request_internal(
"Add_DNS_Record", domain=self.domain, record=record
)
def _request_modify_dns_record(self, record):
"""Sends Modify_DNS_Record request"""
return self._request_internal(
"Modify_DNS_Record", domain=self.domain, record=record
)
def _request_delete_dns_record_by_id(self, identifier):
"""Sends Delete_DNS_Record request"""
return self._request_internal(
"Delete_DNS_Record", domain=self.domain, record={"id": identifier}
)
def _request_internal(self, command, **kwargs):
"""Make request parse response"""
args = dict(kwargs)
if self.ssid:
args["ssid"] = self.ssid
method = getattr(self.api, command)
response = method(**args)
if response and "status" in response:
if response["status"] == "error":
self._raise_error(
message=response["error"]["errormsg"],
major=response["error"]["errorcode"]["major"],
minor=response["error"]["errorcode"]["minor"],
)
if response["status"] == "ok":
return response["data"] if "data" in response else dict()
raise Exception("Invalid status found in SOAP response")
raise Exception("Invalid response")
def _request(self, action="GET", url="/", data=None, query_params=None):
# Default helper _request is not used in Subreg provider
pass
@staticmethod
def _raise_error(major, minor, message):
raise GransyError(major, minor, message)
class GransyError(Exception):
"""Specific error for Gransy provider"""
def __init__(self, major, minor, message):
self.major = int(major)
self.minor = int(minor)
self.message = message
super(GransyError, self).__init__()
def __str__(self):
return "Major: {} Minor: {} Message: {}".format(
self.major, self.minor, self.message
)
|
py | 1a393085d09155acbb08e6e020d2e9656b822a7e | from django.db import models
from localflavor.us.us_states import CONTIGUOUS_STATES
# This may be abstracted later into "core"
class Address(models.Model):
street1 = models.CharField(max_length=250)
street2 = models.CharField(max_length=250, required=False)
state = models.CharField(choices=CONTIGUOUS_STATES)
zip_code = models.CharField(max_length=20, blank=True)
def __unicode__(self):
return self.street1
class Event(models.Model):
title = models.CharField(max_length=250)
start = models.DateTimeField()
description = models.TextField()
address = models.ForeignKey(Address, blank=True, null=True)
image = models.ImageField(upload_to='events/event')
def __unicode__(self):
return self.title
class RSVP(models.Model):
event = models.ForeignKey(Event, blank=True, null=True)
|
py | 1a3930aafbb94c87644c03e8b1224dda05131215 | from .package import Package
from .package import Version
def test_version_comparison():
assert Version(0, "1.2.4") < Version(10, "1.2.4")
assert Version(0, "1.2.5") < Version(10, "1.2.4")
assert Version(0, "1.2.3") == Version(0, "1.2.4")
assert Version(0, "1.2.3") == Version(0, "1.2.3")
assert Version(10, "1.2.4") > Version(0, "1.2.4")
assert Version(10, "1.2.4") > Version(0, "1.2.5")
assert Version(0, "1.2.4") == Version(0, "1.2.3")
def test_package_from_json():
package_json = {"name": "package", "version": "1.2.3", "releaseVersion": 10}
p = Package.from_json(package_json)
assert p.get_name() == package_json["name"]
assert p.get_version() == Version(10, "1.2.3")
def test_package_starts_with_beta_is_beta():
p = Package("beta-package", None)
assert p.is_beta()
def test_normal_package_is_not_beta():
p = Package("package", None)
assert not p.is_beta()
def test_non_beta_backage_beta_name_is_name():
p = Package("package", None)
assert p.get_name() == p.get_non_beta_name()
def test_beta_package_beta_name():
p = Package("beta-package", None)
assert p.get_non_beta_name() == "package"
def test_elastic_ordering():
p7 = Package.from_json(
{"name": "beta-elastic", "version": "1.0.16-5.5.1-beta", "releaseVersion": 7}
)
p0 = Package.from_json(
{"name": "beta-elastic", "version": "1.0.9-5.3.0-beta", "releaseVersion": 0}
)
p1 = Package.from_json(
{"name": "beta-elastic", "version": "1.0.10-5.3.0-beta", "releaseVersion": 1}
)
assert p0 < p1
assert p7 > p0
|
py | 1a3931152328186386ff26f0f0c0acccef6d754e | # Generated by Django 3.1.1 on 2020-11-28 14:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('formulario', '0011_auto_20201128_0105'),
]
operations = [
migrations.CreateModel(
name='MapeamentoValidadoCapitai',
fields=[
],
options={
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('formulario.mapeamento',),
),
migrations.CreateModel(
name='MapeamentoValidadoCemMilHabitante',
fields=[
],
options={
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('formulario.mapeamento',),
),
]
|
py | 1a39316344fbac80aae9d16b106c20cc159255d0 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectionMonitorQueryResult(Model):
"""List of connection states snapshots.
:param source_status: Status of connection monitor source. Possible values
include: 'Uknown', 'Active', 'Inactive'
:type source_status: str or
~azure.mgmt.network.v2018_08_01.models.ConnectionMonitorSourceStatus
:param states: Information about connection states.
:type states:
list[~azure.mgmt.network.v2018_08_01.models.ConnectionStateSnapshot]
"""
_attribute_map = {
'source_status': {'key': 'sourceStatus', 'type': 'str'},
'states': {'key': 'states', 'type': '[ConnectionStateSnapshot]'},
}
def __init__(self, **kwargs):
super(ConnectionMonitorQueryResult, self).__init__(**kwargs)
self.source_status = kwargs.get('source_status', None)
self.states = kwargs.get('states', None)
|
py | 1a393257e176d11076cbb8c75e346f70a1faa5ba | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras LSTM Encoding Network.
Implements a network that will generate the following layers:
[optional]: preprocessing_layers # preprocessing_layers
[optional]: (Add | Concat(axis=-1) | ...) # preprocessing_combiner
[optional]: Conv2D # input_conv_layer_params
Flatten
[optional]: Dense # input_fc_layer_params
[optional]: LSTM cell
[optional]: Dense # output_fc_layer_params
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import tensorflow as tf
from tf_agents.networks import dynamic_unroll_layer
from tf_agents.networks import encoding_network
from tf_agents.networks import network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step
from tf_agents.utils import nest_utils
KERAS_LSTM_FUSED_IMPLEMENTATION = 2
@gin.configurable
class LSTMEncodingNetwork(network.Network):
"""Recurrent network."""
def __init__(
self,
input_tensor_spec,
preprocessing_layers=None,
preprocessing_combiner=None,
conv_layer_params=None,
input_fc_layer_params=(75, 40),
lstm_size=(40,),
output_fc_layer_params=(75, 40),
activation_fn=tf.keras.activations.relu,
dtype=tf.float32,
name='LSTMEncodingNetwork',
):
"""Creates an instance of `LSTMEncodingNetwork`.
Input preprocessing is possible via `preprocessing_layers` and
`preprocessing_combiner` Layers. If the `preprocessing_layers` nest is
shallower than `input_tensor_spec`, then the layers will get the subnests.
For example, if:
```python
input_tensor_spec = ([TensorSpec(3)] * 2, [TensorSpec(3)] * 5)
preprocessing_layers = (Layer1(), Layer2())
```
then preprocessing will call:
```python
preprocessed = [preprocessing_layers[0](observations[0]),
preprocessing_layers[1](obsrevations[1])]
```
However if
```python
preprocessing_layers = ([Layer1() for _ in range(2)],
[Layer2() for _ in range(5)])
```
then preprocessing will call:
```python
preprocessed = [
layer(obs) for layer, obs in zip(flatten(preprocessing_layers),
flatten(observations))
]
```
Args:
input_tensor_spec: A nest of `tensor_spec.TensorSpec` representing the
observations.
preprocessing_layers: (Optional.) A nest of `tf.keras.layers.Layer`
representing preprocessing for the different observations.
All of these layers must not be already built.
preprocessing_combiner: (Optional.) A keras layer that takes a flat list
of tensors and combines them. Good options include
`tf.keras.layers.Add` and `tf.keras.layers.Concatenate(axis=-1)`.
This layer must not be already built.
conv_layer_params: Optional list of convolution layers parameters, where
each item is a length-three tuple indicating (filters, kernel_size,
stride).
input_fc_layer_params: Optional list of fully connected parameters, where
each item is the number of units in the layer. These feed into the
recurrent layer.
lstm_size: An iterable of ints specifying the LSTM cell sizes to use.
output_fc_layer_params: Optional list of fully connected parameters, where
each item is the number of units in the layer. These are applied on top
of the recurrent layer.
activation_fn: Activation function, e.g. tf.keras.activations.relu,.
dtype: The dtype to use by the convolution, LSTM, and fully connected
layers.
name: A string representing name of the network.
Raises:
ValueError: If any of `preprocessing_layers` is already built.
ValueError: If `preprocessing_combiner` is already built.
"""
kernel_initializer = tf.compat.v1.variance_scaling_initializer(
scale=2.0, mode='fan_in', distribution='truncated_normal')
input_encoder = encoding_network.EncodingNetwork(
input_tensor_spec,
preprocessing_layers=preprocessing_layers,
preprocessing_combiner=preprocessing_combiner,
conv_layer_params=conv_layer_params,
fc_layer_params=input_fc_layer_params,
activation_fn=activation_fn,
kernel_initializer=kernel_initializer,
dtype=dtype)
# Create RNN cell
if len(lstm_size) == 1:
cell = tf.keras.layers.LSTMCell(
lstm_size[0],
dtype=dtype,
implementation=KERAS_LSTM_FUSED_IMPLEMENTATION)
else:
cell = tf.keras.layers.StackedRNNCells([
tf.keras.layers.LSTMCell( # pylint: disable=g-complex-comprehension
size,
dtype=dtype,
implementation=KERAS_LSTM_FUSED_IMPLEMENTATION)
for size in lstm_size
])
output_encoder = ([
tf.keras.layers.Dense(
num_units,
activation=activation_fn,
kernel_initializer=kernel_initializer,
dtype=dtype,
name='/'.join([name, 'dense']))
for num_units in output_fc_layer_params
])
counter = [-1]
def create_spec(size):
counter[0] += 1
return tensor_spec.TensorSpec(
size, dtype=dtype, name='network_state_%d' % counter[0])
state_spec = tf.nest.map_structure(create_spec, cell.state_size)
super(LSTMEncodingNetwork, self).__init__(
input_tensor_spec=input_tensor_spec,
state_spec=state_spec,
name=name)
self._conv_layer_params = conv_layer_params
self._input_encoder = input_encoder
self._dynamic_unroll = dynamic_unroll_layer.DynamicUnroll(cell)
self._output_encoder = output_encoder
def call(self, observation, step_type, network_state=None):
"""Apply the network.
Args:
observation: A tuple of tensors matching `input_tensor_spec`.
step_type: A tensor of `StepType.
network_state: (optional.) The network state.
Returns:
`(outputs, network_state)` - the network output and next network state.
Raises:
ValueError: If observation tensors lack outer `(batch,)` or
`(batch, time)` axes.
"""
num_outer_dims = nest_utils.get_outer_rank(observation,
self.input_tensor_spec)
if num_outer_dims not in (1, 2):
raise ValueError(
'Input observation must have a batch or batch x time outer shape.')
has_time_dim = num_outer_dims == 2
if not has_time_dim:
# Add a time dimension to the inputs.
observation = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1),
observation)
step_type = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1),
step_type)
state, network_state = self._input_encoder(
observation, step_type, network_state)
with tf.name_scope('reset_mask'):
reset_mask = tf.equal(step_type, time_step.StepType.FIRST)
# Unroll over the time sequence.
state, network_state = self._dynamic_unroll(
state,
reset_mask,
initial_state=network_state)
for layer in self._output_encoder:
state = layer(state)
if not has_time_dim:
# Remove time dimension from the state.
state = tf.squeeze(state, [1])
return state, network_state
|
py | 1a3932589cf78e9b33296140e811a0d39f3fa138 | """
Class that exposes the leveldb through REST API, with automatic serialization
to bytes and deserialization from bytes provided by the serialization module
"""
import http
import os
import pickle
import sys
from multiprocessing import Process
from pathlib import Path
from typing import Iterable, Tuple, Union
import plyvel
from flask import Flask, Response, g, request
from openleveldb.backend import serializer
from openleveldb.backend.connectorcommon import get_prefixed_db
from openleveldb.backend.serializer import DecodeType
app = Flask(__name__)
def get_db(dbpath: Union[str, Path]) -> plyvel.DB:
dbpath: Path = Path(dbpath)
if not hasattr(g, "dbs"):
g.dbs = {}
if dbpath not in g.dbs:
g.dbs[dbpath] = plyvel.DB(
dbpath.expanduser().absolute().as_posix(), create_if_missing=True
)
return g.dbs[dbpath]
def _parse_and_get_prefixed_db() -> plyvel.DB:
dbpath = request.args.get("dbpath")
prefixes = request.args.getlist("prefixes")
prefixes = (
serializer.normalize_strings(DecodeType.STR.pure_encode_fun, prefixes)
if prefixes is not None
else ()
)
return get_prefixed_db(get_db(dbpath), prefixes)
@app.teardown_appcontext
def close_db(error) -> None:
"""Closes the database again at the end of the request."""
if hasattr(g, "dbs"):
for x, y in g.dbs.items():
if hasattr(y, "close"):
y.close()
@app.route("/iterator", methods=["GET"])
def iterator() -> Iterable[Union[bytes, Tuple[bytes, bytes]]]:
db = _parse_and_get_prefixed_db()
starting_by = request.args.get("starting_by")
starting_by = b"".join(
serializer.normalize_strings(DecodeType.STR.pure_encode_fun, starting_by)
if starting_by is not None
else ()
)
include_key = request.args.get("include_key") == "True"
include_value = request.args.get("include_value") == "True"
out = pickle.dumps(
list(
db.iterator(
prefix=starting_by, include_key=include_key, include_value=include_value
)
)
)
return Response(out, content_type="application/octet-stream")
@app.route("/dblen", methods=["GET"])
def dblen() -> str:
db = _parse_and_get_prefixed_db()
starting_by = request.args.get("starting_by")
starting_by = b"".join(
serializer.normalize_strings(DecodeType.STR.pure_encode_fun, starting_by)
if starting_by is not None
else ()
)
out = serializer.encode(
sum(
1
for _ in db.iterator(
include_key=True, include_value=False, prefix=starting_by
)
)
)
return Response(out, content_type="application/octet-stream")
@app.route("/setitem", methods=["POST"])
def setitem() -> Response:
db = _parse_and_get_prefixed_db()
key = request.args.get("key")
value = request.get_data()
keybytes = DecodeType.STR.pure_encode_fun(key)
db.put(keybytes, value)
return Response(key, content_type="text")
@app.route("/getitem", methods=["GET"])
def getitem() -> Response:
db = _parse_and_get_prefixed_db()
key = request.args.get("key")
keybytes = DecodeType.STR.pure_encode_fun(key)
out = db.get(keybytes, default=b"")
return Response(out, content_type="application/octet-stream")
@app.route("/delitem", methods=["DELETE"])
def delitem() -> (str, http.HTTPStatus):
db = _parse_and_get_prefixed_db()
key = request.args.get("key")
keybytes = DecodeType.STR.pure_encode_fun(key)
db.delete(keybytes)
return Response(key, content_type="text")
@app.route("/repr", methods=["GET"])
def repr() -> str:
db = _parse_and_get_prefixed_db()
dbpath = request.args.get("dbpath")
classname = request.args.get("classname")
innerdb = f"{db}"
dbrepr = f"{classname}(path='{dbpath}', db={innerdb})"
return Response(dbrepr, content_type="text")
def dummy_server(port: Union[int, str]) -> Process:
port = int(port)
def runflask() -> None:
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
app.run(port=port)
dummy_server = Process(target=runflask)
dummy_server.start()
return dummy_server
if __name__ == "__main__":
pass
|
py | 1a39327ed8be0d95415fb20085d3073a4413dcf8 | # This configuration copies files from the release directory into the
# development repository. Turns out to be useful when, for example,
# when running experiments in a release setting and making changes on
# the fly.
import os
import sys
JAMPKG = os.environ['JAMPKG']
sys.path.append(os.path.join(os.environ['JAMPKG'], 'util'))
from config import *
srcdir = os.path.join(JAMSCRIPT_DIR, "..", "jamscript-release")
OUTDIR = JAMSCRIPT_DIR
# The filter specifies a pattern that files must match. If the last
# parameter is True, the match must be at the end of the filename.
# ("/absolute/source/dir", "relative/destination/dir" [,"filter" [,True|False]])
FILES_TO_COPY = [
(os.path.join(srcdir, 'doc', 'INSTALL.bash'), os.path.join('doc', 'INSTALL.bash')),
(os.path.join(srcdir, 'doc', 'INSTALL-FREEBSD'), os.path.join('doc', 'INSTALL-FREEBSD')),
(os.path.join(srcdir, 'doc', 'TRANSACTIONS'), os.path.join('doc', 'TRANSACTIONS')),
(os.path.join(srcdir, 'util'), 'util', '.py', True),
(os.path.join(srcdir, 'tests', 'js'), os.path.join('tests', 'js')),
(os.path.join(srcdir, 'txjs', 'libTx.js'), os.path.join('txjs', 'libTx.js')),
(os.path.join(srcdir, 'patch'), 'patch'),
(os.path.join(srcdir, 'README'), 'README'),
(os.path.join(srcdir, 'LICENSE'), 'LICENSE'),
(os.path.join(srcdir, 'mwwidgets'), 'mwwidgets/'),
(os.path.join(srcdir, 'jsqrcode'), 'jsqrcode/jam'),
(os.path.join(srcdir, 'snote'), 'snote/'),
(os.path.join(srcdir, 'snote', 'SNote/'), os.path.join("snote", "SNote/")),
]
|
py | 1a3932eec987929b4fe8e5e4d6d70279df972056 | """Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 10
validate_train_set = True
save_every = 10
restart_from_save = False
# Training (schedule) parameters
# - batch sizes
batch_size = 32
sunny_batch_size = 4
batches_per_chunk = 16
AV_SLICE_PER_PAT = 11
num_epochs_train = 80 * AV_SLICE_PER_PAT
# - learning rate and method
base_lr = .0001
learning_rate_schedule = {
0: base_lr,
num_epochs_train*9/10: base_lr/10,
}
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotate": (-180, 180),
"shear": (0, 0),
"zoom_x": (-0.5, 1.5),
"zoom_y": (-0.5, 1.5),
"skew_x": (-10, 10),
"skew_y": (-10, 10),
"translate": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0)
}
use_hough_roi = True # use roi to center patches
preprocess_train = functools.partial( # normscale_resize_and_augment has a bug
preprocess.preprocess_normscale,
normscale_resize_and_augment_function=functools.partial(
image_transform.normscale_resize_and_augment_2,
normalised_patch_size=(128,128)))
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
# Input sizes
image_size = 64
data_sizes = {
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
# l2 regu on certain layers
l2_penalty = nn.regularization.regularize_layer_params_weighted(
interface_layers["regularizable"], nn.regularization.l2)
# build objective
return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 20 * AV_SLICE_PER_PAT # More augmentations since a we only use single slices
tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
# Architecture
def build_model(input_layer = None):
#################
# Regular model #
#################
input_size = data_sizes["sliced:data:singleslice"]
if input_layer:
l0 = input_layer
else:
l0 = nn.layers.InputLayer(input_size)
l1a = nn.layers.dnn.Conv2DDNNLayer(l0, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l1b = nn.layers.dnn.Conv2DDNNLayer(l1a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2,2), stride=(2,2))
l2a = nn.layers.dnn.Conv2DDNNLayer(l1, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l2b = nn.layers.dnn.Conv2DDNNLayer(l2a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2,2), stride=(2,2))
l3a = nn.layers.dnn.Conv2DDNNLayer(l2, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3b = nn.layers.dnn.Conv2DDNNLayer(l3a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3c = nn.layers.dnn.Conv2DDNNLayer(l3b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2,2), stride=(2,2))
l4a = nn.layers.dnn.Conv2DDNNLayer(l3, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4b = nn.layers.dnn.Conv2DDNNLayer(l4a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4c = nn.layers.dnn.Conv2DDNNLayer(l4b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2,2), stride=(2,2))
l5a = nn.layers.dnn.Conv2DDNNLayer(l4, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5b = nn.layers.dnn.Conv2DDNNLayer(l5a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5c = nn.layers.dnn.Conv2DDNNLayer(l5b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2,2), stride=(2,2))
# Systole Dense layers
ldsys1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
ldsys2 = nn.layers.DenseLayer(ldsys1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
ldsys2drop = nn.layers.dropout(ldsys2, p=0.5)
ldsys3 = nn.layers.DenseLayer(ldsys2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.softmax)
ldsys3drop = nn.layers.dropout(ldsys3, p=0.5) # dropout at the output might encourage adjacent neurons to correllate
ldsys3dropnorm = layers.NormalisationLayer(ldsys3drop)
l_systole = layers.CumSumLayer(ldsys3dropnorm)
# Diastole Dense layers
lddia1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
lddia1drop = nn.layers.dropout(lddia1, p=0.5)
lddia2 = nn.layers.DenseLayer(lddia1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
lddia2drop = nn.layers.dropout(lddia2, p=0.5)
lddia3 = nn.layers.DenseLayer(lddia2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.softmax)
lddia3drop = nn.layers.dropout(lddia3, p=0.5) # dropout at the output might encourage adjacent neurons to correllate
lddia3dropnorm = layers.NormalisationLayer(lddia3drop)
l_diastole = layers.CumSumLayer(lddia3dropnorm)
return {
"inputs":{
"sliced:data:singleslice": l0
},
"outputs": {
"systole": l_systole,
"diastole": l_diastole,
},
"regularizable": {
ldsys1: l2_weight,
ldsys2: l2_weight,
ldsys3: l2_weight_out,
lddia1: l2_weight,
lddia2: l2_weight,
lddia3: l2_weight_out,
},
"meta_outputs": {
"systole": ldsys2,
"diastole": lddia2,
}
}
|
py | 1a3934b5cf18d5278d92bcc5ba457dc7d18a1ce4 | import re, requests, Tools
class Utils:
@staticmethod
def current_interface_version():
return 50400
@staticmethod
def remove_colors(string):
if string is None:
return None
string = re.sub(r"\|\c........", "", string)
return string.replace("|r", "")
@staticmethod
def find_in_toc(what, toc):
return Tools.run_regex_and_return_string(bytes(what + ": (.*)\n", 'utf-8'), toc)
@staticmethod
def are_we_online():
try:
r = requests.get('http://google.com')
if r.status_code == 200:
return True
else:
return False
finally:
return False
|
py | 1a39353acdc1c4ef2a955b3df3f78746e80a1076 | # coding=utf-8
# Non-parametric Density Peak clustering:
# Automatic topography of high-dimensional data sets
#
# Author: Maria d'Errico <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin, DensityMixin, ClassifierMixin, TransformerMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import euclidean_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import kneighbors_graph
from math import log, sqrt, exp, lgamma, pi, pow
from Pipeline import _DPA
from Pipeline.twoNN import twoNearestNeighbors
from Pipeline.PAk import PointAdaptive_kNN
VALID_METRIC = ['precomputed', 'euclidean','cosine']
VALID_DIM = ['auto', 'twoNN']
VALID_DENSITY = ['PAk', 'kNN']
def _DensityPeakAdvanced(densities, err_densities, k_hat, distances, indices, Z):
"""Main function implementing the Density Peak Advanced clustering algorithm:
* Automatic detection of cluster centers
* Point assignament to clusters in order of decreasing `g`
* Topography reconstruction: search of saddle points and cluster merging
Parameters
----------
densities : array [n_samples]
The logarithm of the density at each point.
err_densities : array [n_samples]
The uncertainty in the density estimation, obtained by computing
the inverse of the Fisher information matrix.
k_hat : array [n_samples]
The optimal number of neighbors for which the condition of constant density holds.
distances: array [n_samples, k_max+1]
Distances to the k_max neighbors of each points. The point itself is included in the array.
indices : array [n_samples, k_max+1]
Indices of the k_max neighbors of each points. The point itself is included in the array.
Z : float, default = 1
The number of standard deviations, which fixes the level of statistical confidence at which
one decides to consider a cluster meaningful.
Attributes
----------
labels : array [Nclus]
The clustering labels assigned to each point in the data set.
halos : array [Nclus]
The clustering labels assigned to each point in the data set. Points identified as halos have
clustering lable equal to ``-1``.
topography : array [Nclus, Nclus]
Let be Nclus the number of clusters, the topography consists in a Nclus × Nclus symmetric matrix,
in which the diagonal entries are the heights of the peaks and the off-diagonal entries are the
heights of the saddle points.
centers : array [Nclus]
The list of points identified as the centers of the Nclus statistically significant clusters.
"""
# We define as cluster centers the local maxima of g, where g is defined as density-err_density.
g = [densities[i]-err_densities[i] for i in range(0,len(densities))]
# Automatic detection of cluster centers
#---------------------------------------
N = len(densities)
centers = _DPA.get_centers(N, indices, k_hat, g)
Nclus = len(centers)
# Assign points to clusters
#--------------------------
# Assign all the points that are not centers to the same cluster as the nearest point with higher g.
# This assignation is performed in order of decreasing g
clu_labels = _DPA.initial_assignment(g, N, indices, centers)
# Topography reconstruction
#--------------------------
# Finding saddle points between pair of clusters c and c'.
# Halo points are also dentified as the points whose density is lower than
# the density of the lowest saddle point, manely the set of points
# whose assignation is not reliable. The clustering labels for halo point is set to -1.
Rho_bord, Rho_bord_err, clu_labels, clu_halos, Nclus, centers_m = _DPA.get_borders(N, k_hat, indices,
clu_labels, Nclus,
g, densities, err_densities,
Z, centers)
topography = []
for i in range(0, Nclus-1):
for j in range(i+1, Nclus):
topography.append([i,j, Rho_bord[i][j], Rho_bord_err[i][j]])
labels = clu_labels
halos = clu_halos
return labels, halos, topography, g, centers_m
class DensityPeakAdvanced(ClusterMixin, BaseEstimator):
"""Class definition for the non-parametric Density Peak clustering.
The default pipeline makes use of the `PAk` density estimator and of the `TWO-NN` intristic dimension estimator.
The densities and the corresponding errors can also be provided as precomputed arrays.
Parameters
----------
Z : float, default = 1
The number of standard deviations, which fixes the level of statistical confidence at which
one decides to consider a cluster meaningful.
metric : string, or callable
The distance metric to use.
If metric is a string, it must be one of the options allowed by
scipy.spatial.distance.pdist for its metric parameter, or a metric listed in
:obj:`VALID_METRIC = [precomputed, euclidean,cosine]`. If metric is ``precomputed``, X is assumed to
be a distance matrix. Alternatively, if metric is a callable function, it is
called on each pair of instances (rows) and the resulting value recorded. The
callable should take two arrays from X as input and return a value indicating
the distance between them. Default is ``euclidean``.
densities : array [n_samples], default = None
The logarithm of the density at each point. If provided, the following parameters are ignored:
``density_algo``, ``k_max``, ``D_thr``.
err_densities : array [n_samples], default = None
The uncertainty in the density estimation, obtained by computing
the inverse of the Fisher information matrix.
k_hat : array [n_samples], default = None
The optimal number of neighbors for which the condition of constant density holds.
nn_distances : array [n_samples, k_max+1]
Distances to the k_max neighbors of each points.
nn_indices : array [n_samples, k_max+1]
Indices of the k_max neighbors of each points.
affinity : string or callable, default 'precomputed'
How to construct the affinity matrix.
- ``nearest_neighbors`` : construct the affinity matrix by computing a
graph of nearest neighbors.
- ``rbf`` : construct the affinity matrix using a radial basis function
(RBF) kernel.
- ``precomputed`` : interpret ``X`` as a precomputed affinity matrix.
- ``precomputed_nearest_neighbors`` : interpret ``X`` as a sparse graph
of precomputed nearest neighbors, and constructs the affinity matrix
by selecting the ``n_neighbors`` nearest neighbors.
- one of the kernels supported by
:func:`~sklearn.metrics.pairwise_kernels`.
density_algo : string, default = "PAk"
Define the algorithm to use as density estimator. It mast be one of the options allowed by
:obj:`VALID_DENSITY = [PAk, kNN]`.
k_max : int, default=1000
This parameter is considered if density_algo is ``PAk`` or ``kNN``, it is ignored otherwise.
k_max set the maximum number of nearest-neighbors considered by the density estimator.
If ``density_algo=PAk``, k_max is used by the algorithm in the search for the
largest number of neighbors ``k_hat`` for which the condition of constant density
holds, within a given level of confidence.
If ``density_algo=kNN``, k_max set the number of neighbors to be used by the standard
k-Nearest Neighbor algorithm.
If the number of points in the sample N is
less than the default value, k_max will be set automatically to the value ``N/2``.
D_thr : float, default=23.92812698
This parameter is considered if density_algo is ``PAk``, it is ignored otherwise.
Set the level of confidence in the PAk density estimator. The default value corresponds to a p-value of
:math:`10^{-6}` for a :math:`\chiˆ2` distribution with one degree of freedom.
dim : int, default = None
Intrinsic dimensionality of the sample. If dim is provided, the following parameters are ignored:
``dim_algo``, ``blockAn``, ``block_ratio``, ``frac``.
dim_algo : string, or callable, default="twoNN"
Method for intrinsic dimensionality calculation. If dim_algo is ``auto``, dim is assumed to be
equal to n_samples. If dim_algo is a string, it must be one of the options allowed by :obj:`VALID_DIM = [auto, twoNN]`.
blockAn : bool, default=True
This parameter is considered if dim_algo is ``twoNN``, it is ignored otherwise.
If blockAn is True the algorithm perform a block analysis that allows discriminating the relevant dimensions
as a function of the block size. This allows to study the stability of the estimation with respect to
changes in the neighborhood size, which is crucial for ID estimations when the data lie on a
manifold perturbed by a high-dimensional noise.
block_ratio : int, default=5
This parameter is considered if dim_algo is ``twoNN``, it is ignored otherwise.
Set the minimum size of the blocks as `n_samples/block_ratio`. If ``blockAn=False``, ``block_ratio`` is ignored.
frac : float, default=1
This parameter is considered if dim_algo is ``twoNN``, it is ignored otherwise.
Define the fraction of points in the data set used for ID calculation. By default the full data set is used.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
labels_ : array [Nclus]
The clustering labels assigned to each point in the data set.
halos_ : array [Nclus]
The clustering labels assigned to each point in the data set. Points identified as halos have
label equal to zero.
topography_ : array [Nclus, Nclus]
Let be Nclus the number of clusters, the topography consists in a Nclus × Nclus symmetric matrix,
in which the diagonal entries are the heights of the peaks and the off-diagonal entries are the
heights of the saddle points.
distances_ : array [n_samples, k_max+1]
Distances to the k_max neighbors of each points. The point itself is included in the array.
indices_ : array [n_samples, k_max+1]
Indices of the k_max neighbors of each points. The point itself is included in the array.
k_hat_ : array [n_samples]
The optimal number of neighbors for which the condition of constant density holds.
centers_ :array [Nclus]
The clustering labels assigned to each point in the data set.
dim_ : int,
Intrinsic dimensionality of the sample. If ``dim`` is not provided, ``dim_`` is set
to the number of features in the input file.
k_max_ : int
The maximum number of nearest-neighbors considered by the procedure that returns the
largest number of neighbors ``k_hat`` for which the condition of constant density
holds, within a given level of confidence. If the number of points in the sample `N` is
less than the default value, k_max_ will be set automatically to the value ``N/2``.
densities_ : array [n_samples]
If not provided by the parameter ``densities``, it is computed by using the `PAk` density estimator.
err_densities_ : array [n_samples]
The uncertainty in the density estimation. If not provided by the parameter ``densities``, it is
computed by using the `PAk` density estimator.
Example
-------
References
----------
M. d’Errico, E. Facco, A. Laio and A. Rodriguez, Automatic topography of high-dimensional data sets by non-parametric Density Peak clustering (2018) https://arxiv.org/abs/1802.10549
"""
def __init__(self, Z=1, metric="euclidean", densities=None, err_densities=None, k_hat=None,
nn_distances=None, nn_indices=None, affinity='precomputed',
density_algo="PAk", k_max=1000, D_thr=23.92812698, dim=None, dim_algo="twoNN",
blockAn=True, block_ratio=5, frac=1, n_jobs=None):
self.Z = Z
self.metric = metric
self.densities = densities
self.err_densities = err_densities
self.k_hat = k_hat
self.nn_distances = nn_distances
self.nn_indices = nn_indices
self.affinity = affinity
self.density_algo = density_algo
self.k_max = k_max
self.D_thr = D_thr
self.dim = dim
self.dim_algo = dim_algo
self.blockAn = blockAn
self.block_ratio = block_ratio
self.frac = frac
self.n_jobs = n_jobs
if metric not in VALID_METRIC:
raise ValueError("invalid metric: '{0}'".format(metric))
if dim_algo not in VALID_DIM:
raise ValueError("invalid dim_algo: '{0}'".format(dim_algo))
if density_algo not in VALID_DENSITY:
raise ValueError("invalid dim_algo: '{0}'".format(density_algo))
#if not (self.densities and self.err_densities and self.k_hat):
# # TODO: decide whether to raise a worning instead and automatically run PAk.
# raise ValueError("DPA requires the error estimation and optimal neighborhood along \
# with the densities. If not available, use the default PAk estimator")
if self.dim_algo == "twoNN" and self.frac > 1:
raise ValueError("frac should be between 0 and 1.")
if self.nn_distances is not None and self.nn_indices is not None:
if self.nn_distances.shape[1] != self.nn_indices.shape[1]:
raise ValueError("check nn_distances and nn_indices. Mismatch in array dimension.")
def fit(self, X, y=None):
"""Fit the DPA clustering on the data.
Parameters
----------
X : array [n_samples, n_samples] if metric == “precomputed”, or,
[n_samples, n_features] otherwise
The input samples. Similarities / affinities between
instances if ``affinity='precomputed'``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Returns self.
"""
# Input validation
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64, ensure_min_samples=2)
allow_squared = self.affinity in ["precomputed",
"precomputed_nearest_neighbors"]
if X.shape[0] == X.shape[1] and not allow_squared:
warnings.warn("The DPA clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
self.k_max_ = self.k_max
self.dim_ = self.dim
if not self.dim:
if self.dim_algo == "auto":
self.dim_ = X.shape[1]
elif self.dim_algo == "twoNN":
if self.block_ratio >= X.shape[0]:
raise ValueError("block_ratio is larger than the sample size, the minimum size for \
block analysis would be zero. Please set a value lower than "+str(X.shape[0]))
self.dim_ = twoNearestNeighbors(blockAn=self.blockAn, block_ratio=self.block_ratio, metric=self.metric,
frac=self.frac, n_jobs=self.n_jobs).fit(X).dim_
else:
pass
# If densities, uncertainties and k_hat are provided as input, compute only the
# matrix of nearest neighbor:
self.densities_ = self.densities
self.err_densities_ = self.err_densities
self.k_hat_ = self.k_hat
if self.densities_ is not None and self.err_densities_ is not None and self.k_hat_ is not None:
# If the nearest neighbors matrix is precomputed:
if self.nn_distances is not None and self.nn_indices is not None:
self.k_max_ = max(self.k_hat_)
self.distances_ = self.nn_distances
self.indices_ = self.nn_indices
else:
self.k_max_ = max(self.k_hat_)
if self.metric == "precomputed":
nbrs = NearestNeighbors(n_neighbors=self.k_max_+1, # The point i is counted in its neighborhood
algorithm="brute",
metric=self.metric,
n_jobs=self.n_jobs).fit(X)
else:
nbrs = NearestNeighbors(n_neighbors=self.k_max_+1, # The point i is counted in its neighborhood
algorithm="auto",
metric=self.metric,
n_jobs=self.n_jobs).fit(X)
self.distances_, self.indices_ = nbrs.kneighbors(X)
elif self.density_algo == "PAk":
# If the nearest neighbors matrix is precomputed:
if self.nn_distances is not None and self.nn_indices is not None:
self.k_max_ = self.nn_distances.shape[1]-1
PAk = PointAdaptive_kNN(k_max=self.k_max_, D_thr=self.D_thr, metric=self.metric,
nn_distances=self.nn_distances, nn_indices=self.nn_indices,
dim_algo=self.dim_algo, blockAn=self.blockAn,
block_ratio=self.block_ratio,
frac=self.frac, dim=self.dim_, n_jobs=self.n_jobs).fit(X)
else:
PAk = PointAdaptive_kNN(k_max=self.k_max_, D_thr=self.D_thr, metric=self.metric,
dim_algo=self.dim_algo, blockAn=self.blockAn,
block_ratio=self.block_ratio,
frac=self.frac, dim=self.dim_, n_jobs=self.n_jobs).fit(X)
self.distances_ = PAk.distances_
self.indices_ = PAk.indices_
self.densities_ = PAk.densities_
self.err_densities_ = PAk.err_densities_
self.k_hat_ = PAk.k_hat_
self.k_max_ = max(self.k_hat_)
else:
# TODO: implement option for kNN
pass
self.labels_, self.halos_, self.topography_, self.g_, self.centers_ = _DensityPeakAdvanced(self.densities_,
self.err_densities_, self.k_hat_,
self.distances_, self.indices_, self.Z)
self.is_fitted_ = True
return self
def fit_predict(self, X, y=None):
"""Perform DPA clustering from features or distance matrix,
and return cluster labels.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features), or \
(n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``. If a sparse matrix is provided, it will
be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
labels : ndarray, shape (n_samples,)
Cluster labels. Noisy samples are given the label -1.
"""
self.fit(X)
return self.labels_
"""
def get_params(self, deep=True):
return {"Z": self.Z, "metric": self.metric, "densities": self.densities,
"err_densities": self.err_densities, "k_hat": self.k_hat, "nn_distances": self.nn_distances,
"nn_indices": self.nn_indices, "affinity": self.affinity, "density_algo": self.density_algo,
"k_max":self.k_max, "D_thr": self.D_thr,
"dim": self.dim, "dim_algo": self.dim_algo, "blockAn": self.blockAn, "block_ratio": self.block_ratio,
"frac": self.frac, "n_jobs": self.n_jobs}
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
"""
|
py | 1a3935c198047a4374e0ba4f7f27fff62abb3907 | """/web/app/syzygy/wishlist/controller.py
Author: Adam Green ([email protected])
This file acts as the main router for the API. The main GET/POST/PUT/DELETE
requests are written here. This also draws the swagger UI on the API for
rudimentary testing on the browser.
Classes:
WishlistResource:
Extends Resource from flask-restx. Adding a function with name
"get"/"post"/"delete"/"put" will add the respective route to the API.
WishlistIdResource:
Extends Resource from flask-restx. Follows same functionality from
aforementioned class. Must be routed to with {baseurl}/{id}.
"""
import logging
from typing import List
from flask import request
from flask_accepts import accepts, responds
from flask_restx import Namespace, Resource
from .model import Wishlist
from .schema import WishlistSchema
from .service import WishlistService
api = Namespace("Wishlist")
log = logging.getLogger(__name__)
@api.route("/")
class WishlistResource(Resource):
"""[summary]
Args:
Resource ([type]): [description]
Returns:
[type]: [description]
"""
@responds(schema=WishlistSchema(many=True))
def get(self):
"""Get all Wishlists"""
return WishlistService.get_all()
@accepts(schema=WishlistSchema, api=api)
@responds(schema=WishlistSchema)
def post(self):
"""Create a Single Wishlist"""
return WishlistService.create(request.parsed_obj)
@api.route("/<int:id>")
@api.param("id", "Wishlist database ID")
class WishlistIdResource(Resource):
@responds(schema=WishlistSchema)
def get(self, id: int):
"""Get Single Wishlist"""
return WishlistService.get_by_id(id)
def delete(self, id: int):
"""Delete Single Wishlist"""
from flask import jsonify
id = WishlistService.delete_by_id(id)
return jsonify(dict(status="Success", id=id))
@accepts(schema=WishlistSchema, api=api)
@responds(schema=WishlistSchema)
def put(self, id: int):
"""Update Single Wishlist"""
updates = request.parsed_obj
order = WishlistService.get_by_id(id)
return WishlistService.update(order, updates) |
py | 1a3936f98719b2220644179559c1b5f20a361f0d | # qubit number=5
# total number=51
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[0]) # number=38
prog.cz(input_qubit[1],input_qubit[0]) # number=39
prog.h(input_qubit[0]) # number=40
prog.cx(input_qubit[1],input_qubit[0]) # number=45
prog.z(input_qubit[1]) # number=46
prog.cx(input_qubit[1],input_qubit[0]) # number=47
prog.h(input_qubit[0]) # number=32
prog.cz(input_qubit[1],input_qubit[0]) # number=33
prog.h(input_qubit[0]) # number=34
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[3],input_qubit[0]) # number=41
prog.cx(input_qubit[3],input_qubit[0]) # number=48
prog.z(input_qubit[3]) # number=49
prog.cx(input_qubit[3],input_qubit[0]) # number=50
prog.cx(input_qubit[3],input_qubit[0]) # number=43
prog.cx(input_qubit[1],input_qubit[3]) # number=44
prog.x(input_qubit[0]) # number=9
prog.x(input_qubit[1]) # number=10
prog.x(input_qubit[2]) # number=11
prog.cx(input_qubit[0],input_qubit[3]) # number=35
prog.x(input_qubit[3]) # number=36
prog.cx(input_qubit[0],input_qubit[3]) # number=37
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0]) # number=24
prog.x(input_qubit[0]) # number=25
prog.cx(input_qubit[1],input_qubit[0]) # number=26
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
prog.x(input_qubit[1]) # number=22
prog.x(input_qubit[1]) # number=23
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC1045.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
py | 1a393730de7065ebaa0be810e4575a1e9b110ebf | from math import log, sin, cos
import numpy as np
alt_20_ft = 6.096
alt_1000_ft = 304.8
m_to_ft = 3.28084
# Reference: MIL-F-8785C, MILITARY SPECIFICATION: FLYING QUALITIES OF PILOTED AIRPLANES (05 NOV 1980)
def wind_log(alt, speed, heading, z0=2.0, degree=True):
if degree:
heading = heading * 0.017453292519943295
if alt <= 0:
v = 0
elif (alt > 0) and (alt < alt_20_ft):
v = speed / alt_20_ft * alt
elif (alt >= alt_20_ft) and (alt <= alt_1000_ft):
v = speed * log(alt * m_to_ft / z0) / log(20 / z0)
else:
v = speed * log(1000 / z0) / log(20 / z0)
_VN = v * cos(heading)
_VE = v * sin(heading)
return _VN, _VE
def wind_log_table(speed, heading, z0=2.0):
alts = (-100, 0, *np.logspace(np.log10(alt_20_ft), np.log10(alt_1000_ft), 20), 10e9)
vs = [None] * len(alts)
for i, alt in enumerate(alts):
if alt <= 0:
vs[i] = 0
elif (alt > 0) and (alt < alt_20_ft):
vs[i] = speed / alt_20_ft * alt
elif (alt >= alt_20_ft) and (alt <= alt_1000_ft):
vs[i] = speed * log(alt * m_to_ft / z0) / log(20 / z0)
else:
vs[i] = speed * log(1000 / z0) / log(20 / z0)
return np.array([alts, vs, [heading] * len(alts)]).T
if __name__ == "__main__":
import matplotlib.pyplot as plt
t = wind_log_table(5, 180)
plt.plot(t[:-1, 0], t[:-1, 1])
plt.show()
|
py | 1a3937a9ae808c8eb64530b89c5b9f1be6c20f1f | # -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import unittest
from parameterized import parameterized
from qiskit_aqua.components.oracles import BernsteinVaziraniOracle
from qiskit_aqua.algorithms import BernsteinVazirani
from qiskit_aqua import get_aer_backend
from test.common import QiskitAquaTestCase
class TestBernsteinVazirani(QiskitAquaTestCase):
@parameterized.expand([
[{'000': '0', '001': '0', '010': '1', '011': '1',
'100': '1', '101': '1', '110': '0', '111': '0'}],
[{'000': '0', '001': '1', '010': '0', '011': '1',
'100': '1', '101': '0', '110': '1', '111': '0'}]
])
def test_bernsteinvazirani(self, bv_input):
backend = get_aer_backend('qasm_simulator')
oracle = BernsteinVaziraniOracle(bv_input)
algorithm = BernsteinVazirani(oracle)
result = algorithm.run(backend)
self.assertTrue(result['oracle_evaluation'])
if __name__ == '__main__':
unittest.main()
|
py | 1a393ab156fbda1b577e8340872a19d19f4d88a3 | """Facilities for running arbitrary commands in child processes."""
import os
import queue
import sys
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import six
from dagster import check
from dagster.seven import multiprocessing
from dagster.utils import delay_interrupts
from dagster.utils.error import serializable_error_info_from_exc_info
class ChildProcessEvent(object):
pass
class ChildProcessStartEvent(namedtuple("ChildProcessStartEvent", "pid"), ChildProcessEvent):
pass
class ChildProcessDoneEvent(namedtuple("ChildProcessDoneEvent", "pid"), ChildProcessEvent):
pass
class ChildProcessSystemErrorEvent(
namedtuple("ChildProcessSystemErrorEvent", "pid error_info"), ChildProcessEvent
):
pass
class ChildProcessCommand(six.with_metaclass(ABCMeta)): # pylint: disable=no-init
"""Inherit from this class in order to use this library.
The object must be picklable; instantiate it and pass it to _execute_command_in_child_process."""
@abstractmethod
def execute(self):
""" This method is invoked in the child process.
Yields a sequence of events to be handled by _execute_command_in_child_process."""
class ChildProcessCrashException(Exception):
"""Thrown when the child process crashes."""
def __init__(self, exit_code=None):
self.exit_code = exit_code
def _execute_command_in_child_process(event_queue, command):
"""Wraps the execution of a ChildProcessCommand.
Handles errors and communicates across a queue with the parent process."""
check.inst_param(command, "command", ChildProcessCommand)
with delay_interrupts():
pid = os.getpid()
event_queue.put(ChildProcessStartEvent(pid=pid))
try:
for step_event in command.execute():
event_queue.put(step_event)
event_queue.put(ChildProcessDoneEvent(pid=pid))
except (Exception, KeyboardInterrupt): # pylint: disable=broad-except
event_queue.put(
ChildProcessSystemErrorEvent(
pid=pid, error_info=serializable_error_info_from_exc_info(sys.exc_info())
)
)
finally:
event_queue.close()
TICK = 20.0 * 1.0 / 1000.0
"""The minimum interval at which to check for child process liveness -- default 20ms."""
PROCESS_DEAD_AND_QUEUE_EMPTY = "PROCESS_DEAD_AND_QUEUE_EMPTY"
"""Sentinel value."""
def _poll_for_event(process, event_queue):
try:
return event_queue.get(block=True, timeout=TICK)
except queue.Empty:
if not process.is_alive():
# There is a possibility that after the last queue.get the
# process created another event and then died. In that case
# we want to continue draining the queue.
try:
return event_queue.get(block=False)
except queue.Empty:
# If the queue empty we know that there are no more events
# and that the process has died.
return PROCESS_DEAD_AND_QUEUE_EMPTY
return None
def execute_child_process_command(command):
"""Execute a ChildProcessCommand in a new process.
This function starts a new process whose execution target is a ChildProcessCommand wrapped by
_execute_command_in_child_process; polls the queue for events yielded by the child process
until the process dies and the queue is empty.
This function yields a complex set of objects to enable having multiple child process
executions in flight:
* None - nothing has happened, yielded to enable cooperative multitasking other iterators
* ChildProcessEvent - Family of objects that communicates state changes in the child process
* KeyboardInterrupt - Yielded in the case that an interrupt was recieved while
polling the child process. Yielded instead of raised to allow forwarding of the
interrupt to the child and completion of the iterator for this child and
any others that may be executing
* The actual values yielded by the child process command
Args:
command (ChildProcessCommand): The command to execute in the child process.
Warning: if the child process is in an infinite loop, this will
also infinitely loop.
"""
check.inst_param(command, "command", ChildProcessCommand)
event_queue = multiprocessing.Queue()
process = multiprocessing.Process(
target=_execute_command_in_child_process, args=(event_queue, command)
)
process.start()
completed_properly = False
while not completed_properly:
event = _poll_for_event(process, event_queue)
if event == PROCESS_DEAD_AND_QUEUE_EMPTY:
break
yield event
if isinstance(event, (ChildProcessDoneEvent, ChildProcessSystemErrorEvent)):
completed_properly = True
if not completed_properly:
# TODO Figure out what to do about stderr/stdout
raise ChildProcessCrashException(exit_code=process.exitcode)
process.join()
|
py | 1a393b6d491f60b2fc005c89843eba3f3fcb852f | # MIT License
#
# Copyright (c) 2021 God Empress Verin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from setuptools import setup, find_packages
setup(
name="pwpy",
version="0.5.0",
packages=find_packages(),
description="Various tools and scrapers for Politics and War and the v3 API.",
author="God Empress Verin",
url="https://github.com/GodEmpressVerin/pwpy"
)
|
py | 1a393b71d8c9f84d04dac06aa600b3feaff9c4f1 | from allennlp.training.checkpointer import Checkpointer
from allennlp.training.tensorboard_writer import TensorboardWriter
from allennlp.training.no_op_trainer import NoOpTrainer
from allennlp.training.trainer import (
Trainer,
GradientDescentTrainer,
BatchCallback,
EpochCallback,
TrainerCallback,
TrackEpochCallback,
)
|
py | 1a393b7b31da21e5a1531f0c40471158cfb28170 | import unittest
from os import path
from ga4stpg.binary.coder import Coder
from ga4stpg.graph import ReaderORLibrary
from ga4stpg.graph.algorithms import prim
from ga4stpg.graph.graph import UndirectedGraph as UGraph
from ga4stpg.graph.graph import UndirectedWeightedGraph as UWGraph
from ga4stpg.binary import random_binary
from ga4stpg.graph.steiner import (prunning_mst, shortest_path,
shortest_path_origin_prim,
shortest_path_with_origin)
class TestBinaryCoder(unittest.TestCase):
def setUp(self):
filename = path.join('datasets', 'ORLibrary', 'steinb15.txt')
self.stpg = ReaderORLibrary().parser(filename)
def test_EncoderMST(self):
stpg = self.stpg
graph = self.stpg.graph
mst, cost = prim(graph, 1)
tree = UGraph()
for u, v in mst.items():
tree.add_edge(v,u)
coder = Coder(STPG=stpg)
chromosome = coder.treegraph2binary(tree)
self.assertIsInstance(chromosome, str)
expected_lenght = stpg.nro_nodes - stpg.nro_terminals
self.assertEqual(len(chromosome), expected_lenght)
self.assertEqual(chromosome, '1' * expected_lenght)
def test_DecoderRandomChromosome(self):
stpg = self.stpg
graph = self.stpg.graph
stpg_vertices = set(graph.vertices)
expected_lenght = stpg.nro_nodes - stpg.nro_terminals
random_chromosome = random_binary(expected_lenght)
self.assertEqual(len(random_chromosome), expected_lenght)
coder = Coder(STPG=stpg)
subgraph = coder.binary2treegraph(random_chromosome)
self.assertIsInstance(subgraph, UGraph)
terminals = self.stpg.terminals
sub_vertices = coder.vertices_from_chromosome(random_chromosome)
self.assertTrue(terminals.issubset(sub_vertices))
self.assertTrue(sub_vertices.issubset(stpg_vertices))
@unittest.skip
def test_DecoderHeuristic(self):
stpg = self.stpg
graph = self.stpg.graph
steiner_tree, cost = shortest_path_origin_prim(graph,1,stpg.terminals)
coder = Coder(STPG=stpg)
chromosome = coder.treegraph2binary(steiner_tree)
self.assertIsInstance(chromosome, str)
vertices_st = set(steiner_tree.vertices)
vertices_cr = coder.vertices_from_chromosome(chromosome)
self.assertEqual(vertices_cr, vertices_st)
subtree = coder.binary2treegraph(chromosome)
st_edges = set((min(edge), max(edge)) for edge in steiner_tree.gen_undirect_edges())
sb_edges = set((min(edge), max(edge)) for edge in subtree.gen_undirect_edges())
self.assertEqual(st_edges, sb_edges)
if __name__ == "__main__" :
unittest.main() |
py | 1a393c10502d2346a405bcca0fa9c8a0b57df7b0 | """
This file contains high level structure for any game to be implemented,
through the use of an abstract game class. Any game object can be expected
to have fully developed abstract classes, as well as its full implementation
only having to utilize such structure (i.e. no other function calls are made externally)
"""
from abc import ABC, abstractmethod
class Game(ABC):
def __init__(self, game_id, screen_dimension):
"""
Abstract initialization function for game class
:param game_id: (str) -> string reference to game
:param screen_dimension: tuple(int, int) -> (heigh, width) integer tuple,
referencing the game screen dimensionality for graphics purposes
"""
self.grid_size = 5
self.game_id = game_id # string reference to game
self.screen_dimension = screen_dimension # representation-wise dimensionality of game
@abstractmethod
def update(self, action):
"""
Update game based on user-input action [AI are self-contained]
:param action: (ndarray) -> numpy array referencing player action
:return: (bool) -> termination information
"""
pass
@abstractmethod
def render(self, screen):
"""
Render game information on to screen
:param screen: (pygame.screen) -> pygame screen to render on to
:return: None
"""
pass
@abstractmethod
def game_reset(self):
"""
Reset the current game, but not entire system (e.g. if new pac-man level reset board)
:return: None
"""
pass
@abstractmethod
def machine_reset(self):
"""
Reset the game conditions to original state
:return: None
"""
pass
|
py | 1a393c57e685232d357ca872bced66568a75412e | """
Performs a two-sided Kolmogorov-Smirnov test that the provided
sample comes from the given probability distribution function.
"""
from __future__ import print_function
import numpy
import pyferret
import pyferret.stats
import scipy.stats
def ferret_init(id):
"""
Initialization for the stats_kstest1 PyEF
"""
axes_values = [ pyferret.AXIS_DOES_NOT_EXIST ] * pyferret.MAX_FERRET_NDIM
axes_values[0] = pyferret.AXIS_CUSTOM
false_influences = [ False ] * pyferret.MAX_FERRET_NDIM
retdict = { "numargs": 3,
"descript": "Returns two-sided Kolmogorov-Smirnov test stat. and prob. " \
"that sample comes from a pop. with given prob. distrib.",
"axes": axes_values,
"argnames": ( "SAMPLE", "PDNAME", "PDPARAMS", ),
"argdescripts": ( "Sample data array",
"Name of a continuous probability distribution",
"Parameters for this continuous probability distribution"),
"argtypes": ( pyferret.FLOAT_ARRAY, pyferret.STRING_ONEVAL, pyferret.FLOAT_ARRAY, ),
"influences": ( false_influences, false_influences, false_influences, ),
}
return retdict
def ferret_custom_axes(id):
"""
Define custom axis of the stats_kstest1 Ferret PyEF
"""
axis_defs = [ None ] * pyferret.MAX_FERRET_NDIM
axis_defs[0] = ( 1, 2, 1, "KS,P", False )
return axis_defs
def ferret_compute(id, result, resbdf, inputs, inpbdfs):
"""
Performs a two-sided Kolmogorov-Smirnov test that the provided sample
comes from a population with the given probability distribution function.
The sample is given in inputs[0], the probability distribution function
name is given in inputs[1] (a string), and the "standard" parameters for
this probability distribution function are given in inputs[2]. The test
statistic value and two-tailed probability are returned in result.
Undefined data given in inputs[0] are removed before performing the test.
"""
# get the scipy.stats distribution name from the given distribution name
if inputs[1] is None:
raise ValueError("The name of a probability distribution function not given")
distscipyname = pyferret.stats.getdistname(inputs[1])
if distscipyname is None:
raise ValueError("Unknown or unsupported probability distribution function %s" % inputs[1])
# get the scipy.stats distribution parameters from the given "standard" parameters
if inputs[2] is None:
raise ValueError("Paramaters for the probability distribution function not given")
distscipyparams = pyferret.stats.getdistparams(distscipyname, inputs[2].reshape(-1))
if distscipyparams is None:
raise ValueError("Unknown or unsupported (for params) probability distribution function %s" % inputs[1])
# get the valid sample values
badmask = ( numpy.fabs(inputs[0] - inpbdfs[0]) < 1.0E-5 )
badmask = numpy.logical_or(badmask, numpy.isnan(inputs[0]))
goodmask = numpy.logical_not(badmask)
values = inputs[0][goodmask]
# perform the test and assign the results
fitparams = scipy.stats.kstest(values, distscipyname, distscipyparams)
result[:] = resbdf
# Kolmogorov-Smirnov test statistic
result[0] = fitparams[0]
# probability
result[1] = fitparams[1]
#
# The rest of this is just for testing this module at the command line
#
if __name__ == "__main__":
# make sure ferret_init and ferret_custom_axes do not have problems
info = ferret_init(0)
info = ferret_custom_axes(0)
# Set the seed to reproduce a problematic distribution
# import numpy.random
# numpy.random.seed(3333333)
# Get a random sample from the compared distribution and from another distribution
ydim = 200
zdim = 150
mu = 5.0
sigma = 0.5
rvsc = scipy.stats.norm(mu, sigma).rvs(ydim * zdim)
rvsu = scipy.stats.uniform(loc=(mu + 3.0 * sigma), scale=(3.0 * sigma)).rvs(ydim * zdim)
# setup for the call to ferret_compute
distname = "norm"
distparams = numpy.array([mu, sigma], dtype=numpy.float64)
inpbdfs = numpy.array([-9999.0, -1.0, -2.0], dtype=numpy.float64)
resbdf = numpy.array([-8888.0], dtype=numpy.float64)
sampc = numpy.empty((1, ydim, zdim, 1, 1, 1), dtype=numpy.float64, order='F')
sampu = numpy.empty((1, ydim, zdim, 1, 1, 1), dtype=numpy.float64, order='F')
index = 0
for j in range(ydim):
for k in range(zdim):
if (index % 71) == 3:
sampc[0, j, k, 0, 0, 0] = inpbdfs[0]
sampu[0, j, k, 0, 0, 0] = inpbdfs[0]
else:
sampc[0, j, k, 0, 0, 0] = rvsc[index]
sampu[0, j, k, 0, 0, 0] = rvsu[index]
index += 1
resultc = -7777.0 * numpy.ones((2, 1, 1, 1, 1, 1), dtype=numpy.float64, order='F')
resultu = -7777.0 * numpy.ones((2, 1, 1, 1, 1, 1), dtype=numpy.float64, order='F')
# call ferret_compute with data from the distribution and check the results
ferret_compute(0, resultc, resbdf, (sampc, distname, distparams), inpbdfs)
resultc = resultc.reshape(-1)
print("from same dist result: %s" % str(resultc))
if (resultc[0] < 0.00) or (resultc[0] > 0.01) or \
(resultc[1] < 0.10) or (resultc[1] > 1.00):
raise ValueError("Unexpected result")
# call ferret_compute with data from a different distribution and check the results
ferret_compute(0, resultu, resbdf, (sampu, distname, distparams), inpbdfs)
resultu = resultu.reshape(-1)
print("from diff dist result: %s" % str(resultu))
if (resultu[0] < 0.99) or (resultu[0] > 1.00) or \
(resultu[1] < 0.00) or (resultu[1] > 0.01):
raise ValueError("Unexpected result")
# All successful
print("Success")
|
py | 1a393d9e9ba5e053b4e9ba40803b309bc3fe4d1f | import unittest
import os, shutil
import subprocess
import sys
import rpy2.robjects as robjects
import glob
import pyreadr
import torch
import pickle
import re
import matplotlib.pyplot as plt
import random
import numpy as np
import warnings
import torch.optim as optim
prepath=os.getcwd()
test_input=prepath+"/test_data/"
test_output=prepath+"/output/"
sourcodedir=prepath.replace('tests','')+"src/"
test_temprun=test_output+"temprun/"##folder for the run of neuralnet
rundatadir=test_temprun+"data/"
runcodedir=test_temprun+"code/"
projdir=test_output+"project/"##the folder supposed to be on local computer and do plotting/analyzing related script
sys.path.insert(0,prepath+'/output/project/')##use absolute path
# print(os.getcwd())
# print(sys.path)
projresdir=projdir+"result/"
projresdir_1=projresdir+"1/"
projdatadir=projdir+"data/"
codefilelist=['nnt_struc.py','plot_model_small.py','plot.mse.epoch.small.r','train_mlp_full_modified.py']
runinputlist='sparselinearode_new.small.stepwiseadd.mat'
runoutputlist=['pickle_traindata.dat','pickle_testdata.dat','pickle_inputwrap.dat','pickle_dimdata.dat','model_best.resnetode.tar','model_best_train.resnetode.tar','checkpoint.resnetode.tar','testmodel.1.out']
runcodelist=['train_mlp_full_modified.py','nnt_struc.py']
runcodetest='test.sh'
# plotdata_py='plotsave.dat'
plotdata_r='Rplot_store.RData'
tempdata_py='datatemp.dat'
plotsourctab='submitlist.tab'
rnncheckfold=test_output+'rnn_test/'
rnncheckfold_data=rnncheckfold+'data/'
rnncheckfold_run=rnncheckfold+'run/'
rnn_comp_data=test_input+'rnn_res/'
smalval=0.001##for comparing values in such as mse
class NNTODETest(unittest.TestCase):
def test_pre(self):
'''
directory preparation for the test
'''
try:
os.makedirs(test_temprun,exist_ok=True)
os.makedirs(rundatadir,exist_ok=True)
os.makedirs(runcodedir,exist_ok=True)
os.makedirs(projdir,exist_ok=True)
os.makedirs(projresdir,exist_ok=True)
os.makedirs(projresdir_1,exist_ok=True)
os.makedirs(projdatadir,exist_ok=True)
shutil.copyfile(test_input+runinputlist,rundatadir+runinputlist)
for codefile in runcodelist:
shutil.copyfile(sourcodedir+codefile,runcodedir+codefile)
shutil.copyfile(runcodetest,runcodedir+runcodetest)
for codefile in codefilelist:
shutil.copyfile(sourcodedir+codefile,projdir+codefile)
shutil.copyfile(test_input+plotsourctab,projdir+plotsourctab)
shutil.copyfile(test_input+runinputlist,projdatadir+runinputlist)
##for add rnn structure test folder
os.makedirs(rnncheckfold,exist_ok=True)
os.makedirs(rnncheckfold_data,exist_ok=True)
os.makedirs(rnncheckfold_run,exist_ok=True)
shutil.copyfile(test_input+runinputlist,rnncheckfold_data+runinputlist)
for codefile in runcodelist:
shutil.copyfile(sourcodedir+codefile,rnncheckfold_run+codefile)
except:
self.assertTrue(False)
self.assertTrue(True)
def test_run_train(self):
'''
test run of the training process
'''
try:
os.chdir(runcodedir)
with open (runcodetest,"r") as myfile:
command=myfile.readlines()
os.system(command[0])
os.chdir(prepath)
for outputfile in runoutputlist:
shutil.copyfile(runcodedir+outputfile,projresdir_1+outputfile)
except:
self.assertTrue(False)
self.assertTrue(True)
def test_run_plotting(self):
'''
run the plotting related python and R script
'''
try:
os.chdir(projdir)
import plot_model_small
subprocess.call("Rscript --vanilla plot.mse.epoch.small.r", shell=True)
os.chdir(prepath)
except:
self.assertTrue(False)
self.assertTrue(True)
def test_file_exist(self):
'''
test all output file are in the folder
'''
try:
#
currlist=set([f for f in os.listdir(projdir) if re.search(r'.*\.(pdf|tar|dat|out)$',f)])
currlist=currlist|set([f for f in os.listdir(projresdir) if re.search(r'.*\.(pdf|tar|dat|out)$',f)])
currlist=currlist|set([f for f in os.listdir(projresdir_1) if re.search(r'.*\.(pdf|tar|dat|out)$',f)])
storelist=set([f for f in os.listdir(test_input) if re.search(r'.*\.(pdf|tar|dat|out)$',f)])
# os.chdir(prepath)
if currlist==storelist:
self.assertTrue(True)
else:
self.assertTrue(False)
except:
print('*****'+os.getcwd())
self.assertTrue(False)
def test_plot_model_small(self):
'''
test dimension&value of output files in all script
'''
try:
# ##figure the same
# with open(projdir+plotdata_py,"rb") as f1:
# newfig=pickle.load(f1)
# with open(test_input+plotdata_py,"rb") as f1:
# oldfig=pickle.load(f1)
# figequal=newfig.__eq__(oldfig)
figequal=True
##data size the same {temporary data at one time point stored}
with open(projdir+tempdata_py,"rb") as f1:
newdata=pickle.load(f1)
with open(test_input+tempdata_py,"rb") as f1:
olddata=pickle.load(f1)
dataequal=newdata['data'].shape==olddata['data'].shape
outputequal=newdata['output'].shape==olddata['output'].shape
targetequal=newdata['target'].shape==olddata['target'].shape
ninnersize_val_equal=newdata['ninnersize']==olddata['ninnersize']
# target_val_equal=torch.all(torch.eq(newdata['target'],olddata['target']))
target_val_equal=True
print("data_size %s output_size %s target_size %s ninnersize %s\n" % (newdata['data'].shape,newdata['output'].shape,newdata['target'].shape,newdata['ninnersize'],))
if figequal and dataequal and outputequal and targetequal and ninnersize_val_equal and target_val_equal:
self.assertTrue(True)
else:
self.assertTrue(False)
except:
self.assertTrue(False)
def test_plot_mse_epoch_small(self):
'''
test plot&dimension of data
'''
try:
newres=pyreadr.read_r(projresdir+plotdata_r)
oldres=pyreadr.read_r(test_input+plotdata_r)
# figequal=newres['p']==oldres['p']
figequal=True
tabdimequal=(newres['summtab'].shape[0]==oldres['summtab'].shape[0] and newres['msetablong'].shape==oldres['msetablong'].shape)
print("summtab_size %s msetablong_size %s\n" % (newres['summtab'].shape,newres['msetablong'].shape,))
if figequal and tabdimequal:
self.assertTrue(True)
else:
self.assertTrue(False)
except:
self.assertTrue(False)
def test_train_mlp_full_modified(self):
'''
test value and dimension of the training script on a small run
'''
try:
#dimension of output and input
with open(projresdir_1+runoutputlist[2],"rb") as f1:
currstore=pickle.load(f1)
with open(test_input+runoutputlist[2],"rb") as f1:
prestore=pickle.load(f1)
print("Xvarnorm_size %s ResponseVar_size %s\n" % (currstore['Xvarnorm'].shape,currstore['ResponseVar'].shape,))
if currstore['Xvarnorm'].shape==prestore['Xvarnorm'].shape and currstore['ResponseVar'].shape==prestore['ResponseVar'].shape:
dimequal=True
else:
dimequal=False
#value of stored data
inputwrap_true=True
for key in currstore.keys():
boolarray=currstore[key]==prestore[key]
if type(boolarray)!=bool:
boolarray=boolarray.all()
inputwrap_true=inputwrap_true and boolarray
with open(projresdir_1+runoutputlist[3],"rb") as f1:
currstore=pickle.load(f1)
with open(test_input+runoutputlist[3],"rb") as f1:
prestore=pickle.load(f1)
dimdict_true=currstore==prestore
device=torch.device('cpu')
currstore=torch.load(projresdir_1+runoutputlist[6],map_location=device)
prestore=torch.load(test_input+runoutputlist[6],map_location=device)
## as new keys will be added to args in future version
currarg=currstore['args_input'].__dict__
prearg=prestore['args_input'].__dict__
currkeys=set(currarg.keys())
prekeys=set(prearg.keys())
overkeys=currkeys.intersection(prekeys)
arg_equal=True
for key in overkeys:
arg_equal=arg_equal and (currarg[key]==prearg[key])
arch_equal=currstore['arch']==prestore['arch']
epoch_equal=currstore['epoch']==prestore['epoch']
print('val: '+str(inputwrap_true)+' '+str(dimdict_true)+' '+str(arch_equal)+' '+str(epoch_equal)+'\n')
if inputwrap_true and dimdict_true and arg_equal and arch_equal and epoch_equal:
valequal=True
else:
valequal=False
print('perf: '+str(currstore['best_acc1']-prestore['best_acc1'])+' '+str(currstore['best_acctr']-prestore['best_acctr'])+'\n')
if (currstore['best_acc1']-prestore['best_acc1'])<smalval and (currstore['best_acctr']-prestore['best_acctr'])<smalval:
perf_equal=True
else:
perf_equal=False
curr_state_dict=currstore['state_dict']
pre_state_dict=prestore['state_dict']
layer_size_equal=True
for layer in curr_state_dict.keys():
layer_size_equal=layer_size_equal and (curr_state_dict[layer].shape==pre_state_dict[layer].shape)
print('final: '+str(dimequal)+' '+str(layer_size_equal)+' '+str(valequal)+' '+str(perf_equal)+'\n')
if dimequal and layer_size_equal and valequal: #perf_equal
self.assertTrue(True)
else:
self.assertTrue(False)
except:
self.assertTrue(False)
def test_sampler_function(self):
try:
from train_mlp_full_modified import batch_sampler_block
torch.manual_seed(1)
datasource=np.array([0,1,2,3,4,5,6,7,8,9])
blocks=np.array([0,0,1,1,2,2,3,3,4,4])
nblocks=2
exp_res=[[0,1,8,9],[4,5,6,7],[2,3]]
test_res=list(batch_sampler_block(datasource,blocks,nblock=nblocks))
if test_res==exp_res:
self.assertTrue(True)
else:
self.assertTrue(False)
except:
self.assertTrue(False)
def test_resnet2x(self):
try:
import nnt_struc as models
##resnet 18
model_resnet18=models.__dict__['resnet18_mlp'](ninput=10,num_response=10,p=0,ncellscale=1)
model_resnet18_x=models.__dict__['resnet2x_mlp'](ninput=10,num_response=10,p=0,ncellscale=1,x=9)
model_resnet18_dic=model_resnet18.state_dict()
model_resnet18_x_dic=model_resnet18_x.state_dict()
layer_size_equal_18=True
for layer in model_resnet18_dic.keys():
layer_size_equal_18=layer_size_equal_18 and (model_resnet18_dic[layer].shape==model_resnet18_x_dic[layer].shape)
##resnet 34
model_resnet34=models.__dict__['resnet34_mlp'](ninput=10,num_response=10,p=0,ncellscale=1)
model_resnet34_x=models.__dict__['resnet2x_mlp'](ninput=10,num_response=10,p=0,ncellscale=1,x=17)
model_resnet34_dic=model_resnet34.state_dict()
model_resnet34_x_dic=model_resnet34_x.state_dict()
layer_size_equal_34=True
for layer in model_resnet34_dic.keys():
layer_size_equal_34=layer_size_equal_34 and (model_resnet34_dic[layer].shape==model_resnet34_x_dic[layer].shape)
if layer_size_equal_18 and layer_size_equal_34:
self.assertTrue(True)
else:
self.assertTrue(False)
except:
self.assertTrue(False)
def test_get_lr(self):
try:
import nnt_struc as models
from train_mlp_full_modified import get_lr
model_resnet18=models.__dict__['resnet18_mlp'](ninput=10,num_response=10,p=0,ncellscale=1)
optimizer=optim.SGD(model_resnet18.parameters(),lr=0.1,momentum=0.1)
if get_lr(optimizer)==0.1:
self.assertTrue(True)
else:
self.assertTrue(False)
except:
self.assertTrue(False)
def test_rnn_run(self):#just test the shape of the nnt layers
try:
os.chdir(rnncheckfold_run)
structlist=['gru_mlp_rnn','gru_rnn','diffaddcell_rnn']
datalist=['checkpoint.gru_mlp.tar','checkpoint.gru.tar','checkpoint.diffaddcell.tar']
commands=['time python3 train_mlp_full_modified.py --batch-size 42 --test-batch-size 42 --epochs 5 --learning-rate 0.04 --seed 2 --net-struct ',' --layersize-ratio 0.5 --optimizer adam --num-layer 1 --inputfile sparselinearode_new.small.stepwiseadd.mat --p 0 --scheduler step --gpu-use 0 --rnn-struct 1 --timetrainlen 21 &> output']
shapeequal=True
for struc_i in range(0,len(structlist)):
struc=structlist[struc_i]
predata=datalist[struc_i]
os.system(commands[0]+struc+commands[1])
device=torch.device('cpu')
currstore=torch.load(rnncheckfold_run+runoutputlist[6],map_location=device)
prestore=torch.load(rnn_comp_data+predata,map_location=device)
curr_state_dict=currstore['state_dict']
pre_state_dict=prestore['state_dict']
for layer in curr_state_dict.keys():
shapeequal=shapeequal and (curr_state_dict[layer].shape==pre_state_dict[layer].shape)
if shapeequal:
self.assertTrue(True)
else:
self.assertTrue(False)
except:
self.assertTrue(False)
def test_clean(self):
try:
for filename in os.listdir(test_output):
file_path=os.path.join(test_output,filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path,e))
self.assertTrue(False)
self.assertTrue(True)
except:
self.assertTrue(False)
def cmp(a,b):
return (a>b)-(a<b)
if __name__ == '__main__':
ln=lambda f: getattr(NNTODETest,f).__code__.co_firstlineno
lncmp=lambda _, a, b: cmp(ln(a),ln(b))
unittest.TestLoader.sortTestMethodsUsing=lncmp
suite=unittest.TestLoader().loadTestsFromTestCase(NNTODETest)
unittest.TextTestRunner().run(suite)
os.chdir(prepath)
|
py | 1a393e7a6852ca2a20beede432b1f65cd7a8895b | import json
class MorseCode:
def __init__(self):
# load morse dictionary into module
with open("./morse_code.json", mode='r', encoding="utf-8") as f:
self.morse_code = json.load(f)
# convert input sentence to morse code sentence
def encrypt(self, sentence: str) -> str:
converted_sentence = ""
if sentence.strip() == '':
raise ValueError("Invalid sentence detected!")
for letter in sentence.strip():
if letter == " ":
converted_sentence = converted_sentence + "/"
else:
detection_sentence = converted_sentence
for morse in self.morse_code:
# add in morse code to object sentence
if letter.lower() == morse.lower():
converted_sentence = converted_sentence + self.morse_code[morse] + "_"
# pickup fallout and alert
if len(converted_sentence) == len(detection_sentence):
raise ValueError(f"Missing morse code: {letter}")
return converted_sentence
# revert morse code sentence
def decrypt(self, converted_sentence: str) -> str:
reverted_sentence = ""
if converted_sentence.strip() == '':
raise ValueError("Invalid sentence detected!")
morse_words = converted_sentence.split("/") # break words into list of words
for morse_word in morse_words:
morse_letters = morse_word.strip("_") # clean letters with trailing _
morse_letters = morse_letters.split("_") # break word into letters
for morse_letter in morse_letters:
for morse in self.morse_code:
if morse_letter == self.morse_code[morse]:
reverted_sentence = reverted_sentence + morse
reverted_sentence = reverted_sentence + " "
return reverted_sentence.strip()
# view morse code source
def view_source(self):
for morse in self.morse_code:
print(f"{morse}: {self.morse_code[morse]}")
return None
if __name__ == '__main__': # launch as terminal
my_morse = MorseCode()
kernel_activate = True
ascii_activation = """
`. ___
__,' __`. _..----....____
__...--.'``;. ,. ;``--..__ .' ,-._ _.-'
_..-''-------' `' `' `' O ``-''._ (,;') _,'
,'________________ \`-._`-','
`._ ```````````------...___ '-.._'-:
```--.._ ,. ````--...__\-.
`.--. `-` ____ | |`
`. `. ,'`````. ; ;`
`._`. __________ `. \\'__/`
`-:._____/______/___/____`. \ `
| `._ `. \\
`._________`-. `. `.___
`------'`
_ __ _ ___ _ _ _ _
| | / / | | / _ \ | | (_) | | | |
| |/ / ___ _ __ _ __ ___| | / /_\ \ ___| |_ ___ ____ _| |_ ___ __| |
| \ / _ \ '__| '_ \ / _ \ | | _ |/ __| __| \ \ / / _` | __/ _ \/ _` |
| |\ \ __/ | | | | | __/ | | | | | (__| |_| |\ V / (_| | || __/ (_| |
\_| \_/\___|_| |_| |_|\___|_| \_| |_/\___|\__|_| \_/ \__,_|\__\___|\__,_|
"""
count = 0
while kernel_activate:
if count == 0:
print(ascii_activation)
print("Welcome to Morse Code Center!")
print("Input 'help' to view available commands")
command = input("Please input your command ~\n$").strip().lower()
if command == "exit":
print("See you next time!")
kernel_activate = False
elif command == "encrypt":
my_sentence = input("Encrypting below to morse code ~\n$")
print(my_morse.encrypt(my_sentence))
elif command == "decrypt":
my_morse_code = input("Decrypting below to text message ~\n$")
print(my_morse.decrypt(my_morse_code))
elif command == "view":
my_morse.view_source()
elif command == "help":
print("""
exit: Exit terminal
encrypt: Convert input text into morse code
decrypt: Convert input morse code into text
view: View the corresponding text: morse code
""")
else:
print("You have entered invalid command, please try again.")
count = 1
|
py | 1a393ee7ec798d9f4695fb12106ca6cdee9c9764 | # -*- coding: utf-8 -*-
import json
import random
import time
import uuid
from datetime import datetime
import boto3
aws_profile = "eq_sanhe"
stream_name = "kinesis-practice-web-event"
n_records_per_second = 10
n_records_per_send = 10
boto_ses = boto3.session.Session(profile_name=aws_profile)
kn_client = boto_ses.client("kinesis")
event_name_pool = ["sign_in", ] * 9 + ["sign_up", ] * 1
while True:
record_data_list = list()
for _ in range(n_records_per_send):
sleep_time_base = 1.0 / n_records_per_second
sleep_time = sleep_time_base * (random.randint(90, 110) / 100.0)
time.sleep(sleep_time)
record_data = {
"event_id": str(uuid.uuid4()),
"event_name": random.choice(event_name_pool),
"event_time": str(datetime.utcnow())
}
record_data_list.append(record_data)
records = [
{
"Data": (json.dumps(record_data) + "\n").encode("utf-8"),
"PartitionKey": record_data["event_id"]
}
for record_data in record_data_list
]
res = kn_client.put_records(
Records=records,
StreamName=stream_name,
)
print(records)
# break
|
py | 1a393ef6fd921a1708f6c8cba65e7c66d0c1c09b | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX Pusher component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
import absl
from tfx import types
from tfx.components.base import base_component
from tfx.components.base import executor_spec
from tfx.components.pusher import executor
from tfx.proto import pusher_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import PusherSpec
from tfx.utils import json_utils
# TODO(b/133845381): Investigate other ways to keep push destination converged.
class Pusher(base_component.BaseComponent):
"""A TFX component to push validated TensorFlow models to a model serving platform.
The `Pusher` component can be used to push an validated SavedModel from output
of the [Trainer component](https://www.tensorflow.org/tfx/guide/trainer) to
[TensorFlow Serving](https://www.tensorflow.org/tfx/serving). The Pusher
will check the validation results from the [Evaluator
component](https://www.tensorflow.org/tfx/guide/evaluator) and [InfraValidator
component](https://www.tensorflow.org/tfx/guide/infra_validator)
before deploying the model. If the model has not been blessed, then the model
will not be pushed.
*Note:* The executor for this component can be overriden to enable the model
to be pushed to other serving platforms than tf.serving. The [Cloud AI
Platform custom
executor](https://github.com/tensorflow/tfx/tree/master/tfx/extensions/google_cloud_ai_platform/pusher)
provides an example how to implement this.
## Example
```
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
```
"""
SPEC_CLASS = PusherSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
model: types.Channel = None,
model_blessing: Optional[types.Channel] = None,
infra_blessing: Optional[types.Channel] = None,
push_destination: Optional[Union[pusher_pb2.PushDestination,
Dict[Text, Any]]] = None,
custom_config: Optional[Dict[Text, Any]] = None,
custom_executor_spec: Optional[executor_spec.ExecutorSpec] = None,
output: Optional[types.Channel] = None,
model_export: Optional[types.Channel] = None,
instance_name: Optional[Text] = None):
"""Construct a Pusher component.
Args:
model: A Channel of type `standard_artifacts.Model`, usually produced by
a Trainer component.
model_blessing: An optional Channel of type
`standard_artifacts.ModelBlessing`, usually produced from an Evaluator
component.
infra_blessing: An optional Channel of type
`standard_artifacts.InfraBlessing`, usually produced from an
InfraValidator component.
push_destination: A pusher_pb2.PushDestination instance, providing info
for tensorflow serving to load models. Optional if executor_class
doesn't require push_destination. If any field is provided as a
RuntimeParameter, push_destination should be constructed as a dict with
the same field names as PushDestination proto message.
custom_config: A dict which contains the deployment job parameters to be
passed to cloud-based training platforms. The [Kubeflow example](
https://github.com/tensorflow/tfx/blob/6ff57e36a7b65818d4598d41e584a42584d361e6/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_kubeflow_gcp.py#L278-L285)
contains an example how this can be used by custom executors.
custom_executor_spec: Optional custom executor spec.
output: Optional output `standard_artifacts.PushedModel` channel with
result of push.
model_export: Backwards compatibility alias for the 'model' argument.
instance_name: Optional unique instance name. Necessary if multiple Pusher
components are declared in the same pipeline.
"""
if model_export:
absl.logging.warning(
'The "model_export" argument to the Pusher component has '
'been renamed to "model" and is deprecated. Please update your '
'usage as support for this argument will be removed soon.')
model = model_export
output = output or types.Channel(type=standard_artifacts.PushedModel)
if push_destination is None and not custom_executor_spec:
raise ValueError('push_destination is required unless a '
'custom_executor_spec is supplied that does not require '
'it.')
spec = PusherSpec(
model=model,
model_blessing=model_blessing,
infra_blessing=infra_blessing,
push_destination=push_destination,
custom_config=json_utils.dumps(custom_config),
pushed_model=output)
super(Pusher, self).__init__(
spec=spec,
custom_executor_spec=custom_executor_spec,
instance_name=instance_name)
|
py | 1a393f5273633425f1e2574356d171c4a3f8ca06 | import io
import os
import re
from setuptools import find_packages
from setuptools import setup
def read(filename):
filename = os.path.join(os.path.dirname(__file__), filename)
text_type = type(u"")
with io.open(filename, mode="r", encoding='utf-8') as fd:
return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read())
setup(
name="Speck",
version="0.1.0",
url="https://github.com/misternate/speck",
license='MIT',
author="Nate Edwards",
author_email="[email protected]",
description="Speck is a little Spotify macOS menubar app.",
long_description=read("README.md"),
packages=find_packages(exclude=('tests',)),
install_requires=[],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
|
py | 1a393f610114f057b63b716bf47d5744e3e0b94c | #!/usr/bin/env python
"""
Copyright 2009 Richard Quirk
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""
import contextlib
import os
import sys
import unittest
import mock
import cmakelint.__version__
import cmakelint.main
# stderr suppression from https://stackoverflow.com/a/1810086
@contextlib.contextmanager
def nostderr():
savestderr = sys.stderr
class Devnull(object):
def write(self, _): pass
def flush(self): pass
sys.stderr = Devnull()
try:
yield
finally:
sys.stderr = savestderr
class ErrorCollector(object):
def __init__(self):
self._errors = []
def __call__(self, unused_filename, unused_line, category, message):
if cmakelint.main.ShouldPrintError(category):
self._errors.append(message)
def Results(self):
if len(self._errors) < 2:
return ''.join(self._errors)
return self._errors
class CMakeLintTestBase(unittest.TestCase):
def doTestLint(self, code, expected_message):
errors = ErrorCollector()
clean_lines = cmakelint.main.CleansedLines([code])
cmakelint.main.ProcessLine('foo.cmake', 0, clean_lines, errors)
self.assertEqual(expected_message, errors.Results())
def doTestMultiLineLint(self, code, expected_message):
errors = ErrorCollector()
clean_lines = cmakelint.main.CleansedLines(code.split('\n'))
for i in clean_lines.LineNumbers():
cmakelint.main.ProcessLine('foo.cmake', i, clean_lines, errors)
self.assertEqual(expected_message, errors.Results())
def doTestCheckRepeatLogic(self, code, expected_message):
errors = ErrorCollector()
clean_lines = cmakelint.main.CleansedLines(code.split('\n'))
for i in clean_lines.LineNumbers():
cmakelint.main.CheckRepeatLogic(
'foo.cmake', i, clean_lines, errors)
self.assertEqual(expected_message, errors.Results())
def doTestCheckFileName(self, filename, expected_message):
errors = ErrorCollector()
cmakelint.main.CheckFileName(filename, errors)
self.assertEqual(expected_message, errors.Results())
def doTestCheckFindPackage(self, filename, code, expected_message):
errors = ErrorCollector()
clean_lines = cmakelint.main.CleansedLines(code.split('\n'))
for i in clean_lines.LineNumbers():
cmakelint.main.CheckFindPackage(filename, i, clean_lines, errors)
cmakelint.main._package_state.Done(filename, errors)
self.assertEqual(expected_message, errors.Results())
def doTestGetArgument(self, expected_arg, code):
clean_lines = cmakelint.main.CleansedLines(code.split('\n'))
self.assertEqual(
expected_arg, cmakelint.main.GetCommandArgument(0, clean_lines))
class CMakeLintTest(CMakeLintTestBase):
def setUp(self):
cmakelint.main._lint_state.filters = []
def testLineLength(self):
self.doTestLint(
'# '+('o'*80),
'Lines should be <= 80 characters long')
def testUpperAndLowerCase(self):
self.doTestMultiLineLint(
'''project()\nCMAKE_MINIMUM_REQUIRED()\n''',
'Do not mix upper and lower case commands')
def testContainsCommand(self):
self.assertTrue(cmakelint.main.ContainsCommand('project()'))
self.assertTrue(cmakelint.main.ContainsCommand('project('))
self.assertTrue(cmakelint.main.ContainsCommand('project ( '))
self.assertFalse(cmakelint.main.ContainsCommand('VERSION'))
def testGetCommand(self):
self.assertEqual('project', cmakelint.main.GetCommand('project()'))
self.assertEqual('project', cmakelint.main.GetCommand('project('))
self.assertEqual('project', cmakelint.main.GetCommand('project ( '))
self.assertEqual('', cmakelint.main.GetCommand('VERSION'))
def testIsCommandUpperCase(self):
self.assertTrue(cmakelint.main.IsCommandUpperCase('PROJECT'))
self.assertTrue(cmakelint.main.IsCommandUpperCase(
'CMAKE_MINIMUM_REQUIRED'))
self.assertFalse(cmakelint.main.IsCommandUpperCase(
'cmake_minimum_required'))
self.assertFalse(cmakelint.main.IsCommandUpperCase('project'))
self.assertFalse(cmakelint.main.IsCommandUpperCase('PrOjEct'))
def testIsCommandMixedCase(self):
self.assertTrue(cmakelint.main.IsCommandMixedCase('PrOjEct'))
self.assertFalse(cmakelint.main.IsCommandMixedCase('project'))
self.assertFalse(cmakelint.main.IsCommandMixedCase(
'CMAKE_MINIMUM_REQUIRED'))
self.assertTrue(cmakelint.main.IsCommandMixedCase(
'CMAKE_MINIMUM_required'))
def testCleanComment(self):
self.assertEqual(
('', False), cmakelint.main.CleanComments('# Comment to zap'))
self.assertEqual(
('project()', False),
cmakelint.main.CleanComments('project() # Comment to zap'))
def testCleanCommentQuotes(self):
self.assertEqual(
('CHECK_C_SOURCE_COMPILES("', True),
cmakelint.main.CleanComments('CHECK_C_SOURCE_COMPILES("'))
self.assertEqual(
('', True),
cmakelint.main.CleanComments(' some line in a comment ', True))
self.assertEqual(
('")', False),
cmakelint.main.CleanComments(' end of comment") ', True))
def testCommandSpaces(self):
self.doTestMultiLineLint(
"""project ()""",
"Extra spaces between 'project' and its ()")
def testTabs(self):
self.doTestLint('\tfoo()', 'Tab found; please use spaces')
def testTrailingSpaces(self):
self.doTestLint('# test ', 'Line ends in whitespace')
self.doTestMultiLineLint(
' foo() \n foo()\n', 'Line ends in whitespace')
self.doTestLint(' set(var value)', '')
def testCommandSpaceBalance(self):
self.doTestMultiLineLint(
"""project( Foo)""",
'Mismatching spaces inside () after command')
self.doTestMultiLineLint(
"""project(Foo )""",
'Mismatching spaces inside () after command')
def testCommandNotEnded(self):
self.doTestMultiLineLint(
"""project(
Foo
#
#""",
'Unable to find the end of this command')
def testRepeatLogicExpression(self):
self.doTestCheckRepeatLogic('else(foo)',
'Expression repeated inside else; '
'better to use only else()')
self.doTestCheckRepeatLogic('ELSEIF(NOT ${VAR})', '')
self.doTestCheckRepeatLogic('ENDMACRO( my_macro foo bar baz)',
'Expression repeated inside endmacro; '
'better to use only ENDMACRO()')
def testFindTool(self):
self.doTestCheckFileName('path/to/FindFooBar.cmake',
'Find modules should use uppercase names; '
'consider using FindFOOBAR.cmake')
self.doTestCheckFileName('CMakeLists.txt', '')
self.doTestCheckFileName('cmakeLists.txt',
'File should be called CMakeLists.txt')
def testIsFindPackage(self):
self.assertTrue(cmakelint.main.IsFindPackage('path/to/FindFOO.cmake'))
self.assertFalse(cmakelint.main.IsFindPackage(
'path/to/FeatureFOO.cmake'))
def testCheckFindPackage(self):
self.doTestCheckFindPackage(
'FindFoo.cmake',
'',
['Package should include FindPackageHandleStandardArgs',
'Package should use FIND_PACKAGE_HANDLE_STANDARD_ARGS'])
self.doTestCheckFindPackage(
'FindFoo.cmake',
'''INCLUDE(FindPackageHandleStandardArgs)''',
'Package should use FIND_PACKAGE_HANDLE_STANDARD_ARGS')
self.doTestCheckFindPackage(
'FindFoo.cmake',
'''FIND_PACKAGE_HANDLE_STANDARD_ARGS(FOO DEFAULT_MSG)''',
'Package should include FindPackageHandleStandardArgs')
self.doTestCheckFindPackage(
'FindFoo.cmake',
'''INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(KK DEFAULT_MSG)''',
'Weird variable passed to std args, should be FOO not KK')
self.doTestCheckFindPackage(
'FindFoo.cmake',
'''INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(FOO DEFAULT_MSG)''',
'')
def testGetCommandArgument(self):
self.doTestGetArgument('KK',
'''SET(
KK)''')
self.doTestGetArgument('KK', 'Set( KK)')
self.doTestGetArgument(
'KK', 'FIND_PACKAGE_HANDLE_STANDARD_ARGS(KK BLEUGH)')
def testIsValidFile(self):
self.assertTrue(cmakelint.main.IsValidFile('CMakeLists.txt'))
self.assertTrue(cmakelint.main.IsValidFile('cmakelists.txt'))
self.assertTrue(cmakelint.main.IsValidFile(
'/foo/bar/baz/CMakeLists.txt'))
self.assertTrue(cmakelint.main.IsValidFile('Findkk.cmake'))
self.assertFalse(cmakelint.main.IsValidFile('foobar.h.in'))
def testFilterControl(self):
self.doTestMultiLineLint(('# lint_cmake: -whitespace/eol\n'
' foo() \n'
' foo()\n'), '')
def testBadPragma(self):
self.doTestMultiLineLint(('# lint_cmake: I am badly formed\n'
'if(TRUE)\n'
'endif()\n'),
'Filter should start with - or +')
def testBadPragma2(self):
self.doTestMultiLineLint(('# lint_cmake: -unknown thing\n'
'if(TRUE)\n'
'endif()\n'),
'Filter not allowed: -unknown thing')
def testWhitespaceIssue16(self):
self.doTestMultiLineLint(('if(${CONDITION})\n'
' set(VAR\n'
' foo\n'
' bar\n'
' )\n'
'endif()\n'),
'')
def testWhitespaceIssue16NonRegression(self):
self.doTestMultiLineLint(('if(${CONDITION})\n'
' set(VAR\n'
' foo\n'
' bar)\n'
'endif()\n'),
'')
def testWhitespaceIssue16FalseNegative(self):
self.doTestMultiLineLint(('if(${CONDITION})\n'
' set(VAR\n'
' foo\n'
' bar )\n'
'endif()\n'),
'Mismatching spaces inside () after command')
def testNoEnd(self):
self.doTestMultiLineLint('file(APPEND ${OUT} "#endif${nl}")\n', '')
def testBackslashComment(self):
self.doTestMultiLineLint(r'file(APPEND ${OUT} " \"") # comment\n', '')
def testFalsePositiveSourceCompiles(self):
self.doTestMultiLineLint((
'CHECK_C_SOURCE_COMPILES("\n'
'#include\n'
'void foo(void) {}\n'
'int main()\n'
'{\n'
'pthread_once_t once_control = PTHREAD_ONCE_INIT;\n'
'pthread_once(&once_control, foo);\n'
'return 0;\n'
'}"\n'
'HAVE_PTHREAD_ONCE_INIT\n'
')\n'), '')
def testIndent(self):
try:
cmakelint.main._lint_state.spaces = 2
self.doTestLint('no_indent(test)', '')
self.doTestLint(' two_indent(test)', '')
self.doTestLint(' four_indent(test)', '')
self.doTestLint(' one_indent(test)',
'Weird indentation; use 2 spaces')
self.doTestLint(' three_indent(test)',
'Weird indentation; use 2 spaces')
cmakelint.main._lint_state.spaces = 3
self.doTestLint('no_indent(test)', '')
self.doTestLint(' two_indent(test)',
'Weird indentation; use 3 spaces')
self.doTestLint(' four_indent(test)',
'Weird indentation; use 3 spaces')
self.doTestLint(' one_indent(test)',
'Weird indentation; use 3 spaces')
self.doTestLint(' three_indent(test)', '')
finally:
cmakelint.main._lint_state.spaces = 2
def testParseArgs(self):
old_usage = cmakelint.main._USAGE
old_version = cmakelint.__version__.VERSION
old_cats = cmakelint.main._ERROR_CATEGORIES
old_spaces = cmakelint.main._lint_state.spaces
try:
cmakelint.main._USAGE = ""
cmakelint.main._ERROR_CATEGORIES = ""
cmakelint.main._VERSION = ""
with nostderr():
self.assertRaises(SystemExit, cmakelint.main.ParseArgs, [])
self.assertRaises(
SystemExit, cmakelint.main.ParseArgs, ['--help'])
self.assertRaises(SystemExit, cmakelint.main.ParseArgs, [
'--bogus-option'])
self.assertRaises(
SystemExit, cmakelint.main.ParseArgs, ['--filter='])
self.assertRaises(
SystemExit, cmakelint.main.ParseArgs, ['--filter=foo'])
self.assertRaises(SystemExit, cmakelint.main.ParseArgs, [
'--filter=+x,b,-c', 'foo.cmake'])
self.assertRaises(SystemExit, cmakelint.main.ParseArgs, [
'--spaces=c', 'foo.cmake'])
self.assertRaises(
SystemExit, cmakelint.main.ParseArgs, ['--version'])
cmakelint.main._lint_state.filters = []
self.assertEqual(['foo.cmake'], cmakelint.main.ParseArgs(
['--filter=-whitespace', 'foo.cmake']))
cmakelint.main._lint_state.filters = []
self.assertEqual(
['foo.cmake'], cmakelint.main.ParseArgs(['foo.cmake']))
filt = '-,+whitespace'
cmakelint.main._lint_state.filters = []
self.assertEqual(['foo.cmake'], cmakelint.main.ParseArgs(
['--config=None', '--spaces=3', '--filter='+filt, 'foo.cmake']))
self.assertEqual(['-', '+whitespace'],
cmakelint.main._lint_state.filters)
self.assertEqual(3, cmakelint.main._lint_state.spaces)
cmakelint.main._lint_state.filters = []
filt = '-,+whitespace/eol, +whitespace/tabs'
self.assertEqual(['foo.cmake'], cmakelint.main.ParseArgs(
['--config=None', '--spaces=3', '--filter='+filt, 'foo.cmake']))
self.assertEqual(
['-', '+whitespace/eol', '+whitespace/tabs'], cmakelint.main._lint_state.filters)
cmakelint.main._lint_state.filters = []
cmakelint.main.ParseArgs(['--config=./foo/bar', 'foo.cmake'])
self.assertEqual('./foo/bar', cmakelint.main._lint_state.config)
cmakelint.main.ParseArgs(['--config=None', 'foo.cmake'])
self.assertEqual(None, cmakelint.main._lint_state.config)
cmakelint.main.ParseArgs(['foo.cmake'])
self.assertEqual(os.path.expanduser('~') + os.path.sep +
'.cmakelintrc', cmakelint.main._lint_state.config)
config = {'return_value': True}
patcher = mock.patch('os.path.isfile', **config)
patcher.start()
self.assertEqual(['CMakeLists.txt'], cmakelint.main.ParseArgs([]))
self.assertEqual(os.path.expanduser('~')+os.path.sep +
'.cmakelintrc', cmakelint.main._lint_state.config)
finally:
cmakelint.main._USAGE = old_usage
cmakelint.main._ERROR_CATEGORIES = old_cats
cmakelint.main._VERSION = old_version
cmakelint.main._lint_state.filters = []
cmakelint.main._lint_state.spaces = old_spaces
def testParseOptionsFile(self):
old_usage = cmakelint.main._USAGE
old_cats = cmakelint.main._ERROR_CATEGORIES
old_spaces = cmakelint.main._lint_state.spaces
try:
cmakelint.main._USAGE = ""
cmakelint.main._ERROR_CATEGORIES = ""
cmakelint.main.ParseOptionFile("""
# skip comment
filter=-,+whitespace
spaces= 3
""".split('\n'), ignore_space=False)
self.assertEqual(['-', '+whitespace'],
cmakelint.main._lint_state.filters)
cmakelint.main.ParseArgs(['--filter=+syntax', 'foo.cmake'])
self.assertEqual(['-', '+whitespace', '+syntax'],
cmakelint.main._lint_state.filters)
self.assertEqual(3, cmakelint.main._lint_state.spaces)
cmakelint.main._lint_state.spaces = 2
cmakelint.main.ParseOptionFile("""
# skip comment
spaces= 4
""".split('\n'), ignore_space=True)
self.assertEqual(2, cmakelint.main._lint_state.spaces)
cmakelint.main.ParseOptionFile("""
# skip comment
linelength= 90
""".split('\n'), ignore_space=True)
self.assertEqual(90, cmakelint.main._lint_state.linelength)
cmakelint.main.ParseOptionFile("""
# skip comment
""".split('\n'), ignore_space=False)
self.assertEqual(2, cmakelint.main._lint_state.spaces)
cmakelint.main.ParseOptionFile("""
quiet
""".split('\n'), ignore_space=False)
self.assertTrue(cmakelint.main._lint_state.quiet)
cmakelint.main._lint_state.quiet = True
cmakelint.main.ParseOptionFile("""
# quiet
""".split('\n'), ignore_space=False)
self.assertTrue(cmakelint.main._lint_state.quiet)
finally:
cmakelint.main._USAGE = old_usage
cmakelint.main._ERROR_CATEGORIES = old_cats
cmakelint.main._lint_state.spaces = old_spaces
if __name__ == '__main__':
unittest.main()
|
py | 1a39402424ceb7bc3355eee497ff07d0fa60d294 | # -*- coding:utf-8 -*-
from src.binary_search_tree import BST, PARENT, LEFT, RIGHT, KEY, SIZE
class SplayTree(BST):
""" Just like a simple Binary Search Tree but with the added operation of
splaying.
Complexity for most operations: O(log n), Omega(n)
TODO test and make sure this works!
See: http://en.wikipedia.org/wiki/Splay_tree
"""
def splay(self, node):
""" The splay operation hoists the indicated node to the root.
There are three cases where rotations are employed to hoist a node
(let x be the input node, p be x's parent and g be x's grand parent).
ZIG (p is root, g is None)
(p) (x)
/ => \
(x) (p)
ZIG-ZIG (x is p's LEFT child, p is g's LEFT child and otherwise)
(g) (x)
/ \
(p) => (p)
/ \
(x) (g)
ZIG-ZAG (x is p's LEFT child, p is g's RIGHT child and otherwise)
(g)
/ (x)
(p) => / \
\ (p) (g)
(x)
Args:
node: list, a representation of a node, format [PARENT, KEY, LEFT, RIGHT, SIZE]
Returns:
node: the splayed node with updated references which is no the root.
"""
while node[PARENT] != None:
# ZIG.
if node[PARENT][PARENT] == None:
if node[PARENT][LEFT] == node:
self.rotate(node[PARENT], LEFT)
else:
self.rotate(node[PARENT], RIGHT)
# ZIG-ZIG.
elif node[PARENT][LEFT] == node and node[PARENT][PARENT][LEFT] == node[PARENT]:
self.rotate(node[PARENT][PARENT], LEFT)
self.rotate(node[PARENT], LEFT)
# ZIG-ZIG.
elif node[PARENT][RIGHT] == node and node[PARENT][PARENT][RIGHT] == node[PARENT]:
self.rotate(node[PARENT][PARENT], RIGHT)
self.rotate(node[PARENT], RIGHT)
# ZIG-ZAG.
elif node[PARENT][LEFT] == node and node[PARENT][PARENT][RIGHT] == node[PARENT]:
self.rotate(node[PARENT], LEFT)
self.rotate(node[PARENT], RIGHT)
# ZIG-ZAG.
else:
self.rotate(node[PARENT], RIGHT)
self.rotate(node[PARENT], LEFT)
return node
def insert(self, key):
""" After regular BST insert, the new node is hoisted to the root. """
node = BST.insert(self, key)
return self.splay(node)
def delete_and_splay(self, key):
""" After regular BST delete, the former node's parent is hoisted to
the root.
"""
removed = BST.delete(self, key)
self.splay(removed[PARENT])
return removed
def search_and_splay(self, key):
""" After a successful search operation the returned node is hoisted
to the root before being returned.
"""
node = BST.search(self, key)
return self.splay(node)
def join(self, other_tree):
""" Join other_tree with the current tree. The conditions is that any
element in other_tree is larger than any element in current tree.
Args:
other_tree: object, instance of src.ballanced_search_tree.BST
"""
current_max = self.get_max()
if current_max > other_tree.get_min():
raise Exception('The tree to join must have strictly larger items '
'than current trees items')
root = self.splay(current_max)
root[LEFT] = other_tree.root
def split(self, key):
""" Splits the current tree into two subtrees, the left one containing
all elements smaller than key, the right one containing all elements
larger than key.
Args:
key: int
Returns
list, format [left, right]
left: instance of SplayTree
right: instance of SplayTree
"""
root = self.search_and_splay(key)
left = SplayTree()
left.root = [None, root[LEFT][KEY], root[LEFT][LEFT], root[LEFT][RIGHT], root[LEFT][SIZE]]
right = SplayTree()
right.root = [None, root[RIGHT][KEY], root[RIGHT][LEFT], root[RIGHT][RIGHT], root[RIGHT][SIZE]]
return [left, right]
|
py | 1a394029c20b7e9c7c65826ca8365a413971574d | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""pooling"""
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore._checkparam import Rel, Validator as validator
from mindspore.ops.primitive import constexpr
import mindspore.context as context
from ..cell import Cell
__all__ = ['AvgPool2d', 'MaxPool2d', 'AvgPool1d', 'MaxPool1d']
class _PoolNd(Cell):
"""N-D AvgPool"""
def __init__(self, kernel_size, stride, pad_mode, data_format="NCHW"):
super(_PoolNd, self).__init__()
self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.cls_name)
self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.cls_name)
if context.get_context("device_target") != "GPU" and self.format == "NHWC":
raise ValueError("NHWC format only support in GPU target.")
def _check_int_or_tuple(arg_name, arg_value):
validator.check_value_type(arg_name, arg_value, [int, tuple], self.cls_name)
error_msg = f'For \'{self.cls_name}\' the {arg_name} should be an positive int number or ' \
f'a tuple of two positive int numbers, but got {arg_value}'
if isinstance(arg_value, int):
if arg_value <= 0:
raise ValueError(error_msg)
elif len(arg_value) == 2:
for item in arg_value:
if isinstance(item, int) and item > 0:
continue
raise ValueError(error_msg)
else:
raise ValueError(error_msg)
return arg_value
self.kernel_size = _check_int_or_tuple('kernel_size', kernel_size)
self.stride = _check_int_or_tuple('stride', stride)
def construct(self, *inputs):
pass
def extend_repr(self):
return 'kernel_size={kernel_size}, stride={stride}, pad_mode={pad_mode}'.format(**self.__dict__)
@constexpr
def _shape_check(in_shape):
if len(in_shape) != 3:
raise ValueError("The input must has 3 dim")
class MaxPool2d(_PoolNd):
r"""
2D max pooling operation for temporal data.
Applies a 2D max pooling over an input Tensor which can be regarded as a composition of 2D planes.
Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool2d outputs
regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
:math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`, the operation is as follows.
.. math::
\text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
Note:
pad_mode for training only supports "same" and "valid".
Args:
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the max value,
is an int number that represents height and width are both kernel_size,
or a tuple of two int numbers that represent height and width respectively.
Default: 1.
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
pad_mode (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The height and width of the output will be the same as
the input. The total number of padding will be calculated in horizontal and vertical
directions and evenly distributed to top and bottom, left and right if possible.
Otherwise, the last extra padding will be done from the bottom and the right side.
- valid: Adopts the way of discarding. The possible largest height and width of output
will be returned without padding. Extra pixels will be discarded.
data_format (str): The optional value for data format, is 'NHWC' or 'NCHW'.
Default: 'NCHW'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Raises:
TypeError: If `kernel_size` or `strides` is neither int nor tuple.
ValueError: If `pad_mode` is neither 'valid' nor 'same' with not case sensitive.
ValueError: If `data_format` is neither 'NCHW' nor 'NHWC'.
ValueError: If `kernel_size` or `strides` is less than 1.
ValueError: If length of shape of `input` is not equal to 4.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> pool = nn.MaxPool2d(kernel_size=3, stride=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
>>> output = pool(x)
>>> print(output.shape)
(1, 2, 2, 2)
"""
def __init__(self, kernel_size=1, stride=1, pad_mode="valid", data_format="NCHW"):
super(MaxPool2d, self).__init__(kernel_size, stride, pad_mode, data_format)
self.max_pool = P.MaxPool(kernel_size=self.kernel_size,
strides=self.stride,
pad_mode=self.pad_mode,
data_format=self.format)
def construct(self, x):
out = self.max_pool(x)
return out
class MaxPool1d(_PoolNd):
r"""
1D max pooling operation for temporal data.
Applies a 1D max pooling over an input Tensor which can be regarded as a composition of 1D planes.
Typically the input is of shape :math:`(N_{in}, C_{in}, L_{in})`, MaxPool1d outputs
regional maximum in the :math:`(L_{in})`-dimension. Given kernel size
:math:`ks = (l_{ker})` and stride :math:`s = (s_0)`, the operation is as follows.
.. math::
\text{output}(N_i, C_j, l) = \max_{n=0, \ldots, l_{ker}-1}
\text{input}(N_i, C_j, s_0 \times l + n)
Note:
pad_mode for training only supports "same" and "valid".
Args:
kernel_size (int): The size of kernel used to take the max value, Default: 1.
stride (int): The distance of kernel moving, an int number that represents
the width of movement is stride, Default: 1.
pad_mode (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The total number of padding will be calculated in horizontal
and vertical directions and evenly distributed to top and bottom, left and right if possible.
Otherwise, the last extra padding will be done from the bottom and the right side.
- valid: Adopts the way of discarding. The possible largest height and width of output
will be returned without padding. Extra pixels will be discarded.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C, L_{in})`.
Outputs:
Tensor of shape :math:`(N, C, L_{out}))`.
Raises:
TypeError: If `kernel_size` or `strides` is not an int.
ValueError: If `pad_mode` is neither 'valid' nor 'same' with not case sensitive.
ValueError: If `data_format` is neither 'NCHW' nor 'NHWC'.
ValueError: If `kernel_size` or `strides` is less than 1.
ValueError: If length of shape of `input` is not equal to 4.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> max_pool = nn.MaxPool1d(kernel_size=3, stride=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 2, 4]), mindspore.float32)
>>> output = max_pool(x)
>>> result = output.shape
>>> print(result)
(1, 2, 2)
"""
def __init__(self, kernel_size=1, stride=1, pad_mode="valid"):
super(MaxPool1d, self).__init__(kernel_size, stride, pad_mode)
validator.check_value_type('kernel_size', kernel_size, [int], self.cls_name)
validator.check_value_type('stride', stride, [int], self.cls_name)
self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.cls_name)
validator.check_int(kernel_size, 1, Rel.GE, "kernel_size", self.cls_name)
validator.check_int(stride, 1, Rel.GE, "stride", self.cls_name)
self.kernel_size = (1, kernel_size)
self.stride = (1, stride)
self.max_pool = P.MaxPool(kernel_size=self.kernel_size,
strides=self.stride,
pad_mode=self.pad_mode)
self.shape = F.shape
self.reduce_mean = P.ReduceMean(keep_dims=True)
self.expand = P.ExpandDims()
self.squeeze = P.Squeeze(2)
def construct(self, x):
_shape_check(self.shape(x))
x = self.expand(x, 2)
output = self.max_pool(x)
output = self.squeeze(output)
return output
class AvgPool2d(_PoolNd):
r"""
2D average pooling for temporal data.
Applies a 2D average pooling over an input Tensor which can be regarded as a composition of 2D input planes.
Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, AvgPool2d outputs
regional average in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
:math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`, the operation is as follows.
.. math::
\text{output}(N_i, C_j, h, w) = \frac{1}{h_{ker} * w_{ker}} \sum_{m=0}^{h_{ker}-1} \sum_{n=0}^{w_{ker}-1}
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
Note:
pad_mode for training only supports "same" and "valid".
Args:
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the average value.
The data type of kernel_size must be int and the value represents the height and width,
or a tuple of two int numbers that represent height and width respectively.
Default: 1.
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
pad_mode (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The height and width of the output will be the same as
the input. The total number of padding will be calculated in horizontal and vertical
directions and evenly distributed to top and bottom, left and right if possible.
Otherwise, the last extra padding will be done from the bottom and the right side.
- valid: Adopts the way of discarding. The possible largest height and width of output
will be returned without padding. Extra pixels will be discarded.
data_format (str): The optional value for data format, is 'NHWC' or 'NCHW'.
Default: 'NCHW'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Raises:
TypeError: If `kernel_size` or `strides` is neither int nor tuple.
ValueError: If `pad_mode` is neither 'valid' nor 'same' with not case sensitive.
ValueError: If `data_format` is neither 'NCHW' nor 'NHWC'.
ValueError: If `kernel_size` or `strides` is less than 1.
ValueError: If length of shape of `input` is not equal to 4.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> pool = nn.AvgPool2d(kernel_size=3, stride=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
>>> output = pool(x)
>>> print(output.shape)
(1, 2, 2, 2)
"""
def __init__(self,
kernel_size=1,
stride=1,
pad_mode="valid",
data_format="NCHW"):
super(AvgPool2d, self).__init__(kernel_size, stride, pad_mode, data_format)
self.avg_pool = P.AvgPool(kernel_size=self.kernel_size,
strides=self.stride,
pad_mode=self.pad_mode,
data_format=self.format)
def construct(self, x):
return self.avg_pool(x)
class AvgPool1d(_PoolNd):
r"""
1D average pooling for temporal data.
Applies a 1D average pooling over an input Tensor which can be regarded as a composition of 1D input planes.
Typically the input is of shape :math:`(N_{in}, C_{in}, L_{in})`, AvgPool1d outputs
regional average in the :math:`(L_{in})`-dimension. Given kernel size
:math:`ks = l_{ker}` and stride :math:`s = s_0`, the operation is as follows.
.. math::
\text{output}(N_i, C_j, l) = \frac{1}{l_{ker}} \sum_{n=0}^{l_{ker}-1}
\text{input}(N_i, C_j, s_0 \times l + n)
Note:
pad_mode for training only supports "same" and "valid".
Args:
kernel_size (int): The size of kernel window used to take the average value, Default: 1.
stride (int): The distance of kernel moving, an int number that represents
the width of movement is strides, Default: 1.
pad_mode (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The height and width of the output will be the same as
the input. The total number of padding will be calculated in horizontal and vertical
directions and evenly distributed to top and bottom, left and right if possible.
Otherwise, the last extra padding will be done from the bottom and the right side.
- valid: Adopts the way of discarding. The possible largest height and width of output
will be returned without padding. Extra pixels will be discarded.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, L_{out})`.
Raises:
TypeError: If `kernel_size` or `stride` is not an int.
ValueError: If `pad_mode` is neither 'same' nor 'valid' with not case sensitive.
ValueError: If `kernel_size` or `strides` is less than 1.
ValueError: If length of shape of `input` is not equal to 3.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> pool = nn.AvgPool1d(kernel_size=6, stride=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 3, 6]), mindspore.float32)
>>> output = pool(x)
>>> result = output.shape
>>> print(result)
(1, 3, 1)
"""
def __init__(self,
kernel_size=1,
stride=1,
pad_mode="valid"):
validator.check_value_type('kernel_size', kernel_size, [int], self.cls_name)
validator.check_value_type('stride', stride, [int], self.cls_name)
self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.cls_name)
validator.check_int(kernel_size, 1, Rel.GE, "kernel_size", self.cls_name)
validator.check_int(stride, 1, Rel.GE, "stride", self.cls_name)
super(AvgPool1d, self).__init__(kernel_size, stride, pad_mode)
self.kernel_size = (1, kernel_size)
self.stride = (1, stride)
self.avg_pool = P.AvgPool(kernel_size=self.kernel_size,
strides=self.stride,
pad_mode=self.pad_mode)
self.shape = F.shape
self.reduce_mean = P.ReduceMean(keep_dims=True)
self.slice = P.Slice()
self.expand = P.ExpandDims()
self.squeeze = P.Squeeze(2)
def construct(self, x):
x = F.depend(x, _shape_check(self.shape(x)))
batch, channel, width = self.shape(x)
if width == self.kernel_size[1]:
x = self.reduce_mean(x, 2)
elif width - self.kernel_size[1] < self.stride[1]:
x = self.slice(x, (0, 0, 0), (batch, channel, self.kernel_size[1]))
x = self.reduce_mean(x, 2)
else:
x = self.expand(x, 2)
x = self.avg_pool(x)
x = self.squeeze(x)
return x
|
py | 1a394063c17ade16e2b4a12358619f780d7b26ac | import logging
import os
import torch as t
logger = logging.getLogger()
def save_checkpoint(epoch, arch, model, extras=None, is_best=None, name=None, output_dir='.', serialized=False):
"""Save a pyTorch training checkpoint
Args:
epoch: current epoch number
arch: name of the network architecture/topology
model: a pyTorch model
extras: optional dict with additional user-defined data to be saved in the checkpoint.
Will be saved under the key 'extras'
is_best: If true, will save a copy of the checkpoint with the suffix 'best'
name: the name of the checkpoint file
output_dir: directory in which to save the checkpoint
"""
if not os.path.isdir(output_dir):
raise IOError('Checkpoint directory does not exist at', os.path.abspath(dir))
if extras is None:
extras = {}
if not isinstance(extras, dict):
raise TypeError('extras must be either a dict or None')
filename = 'checkpoint.pth.tar' if name is None else name + '_checkpoint.pth.tar'
filepath = os.path.join(output_dir, filename)
filename_best = 'best.pth.tar' if name is None else name + '_best.pth.tar'
filepath_best = os.path.join(output_dir, filename_best)
# dataparallelized model cannot change parameter after
if serialized is True:
checkpoint = {
'epoch': epoch,
'state_dict': model.state_dict(),
'arch': arch,
'extras': extras,
}
else:
checkpoint = {
'epoch': epoch,
'state_dict': model.module.state_dict(),
'arch': arch,
'extras': extras,
}
msg = 'Saving checkpoint to:\n'
msg += ' Current: %s\n' % filepath
t.save(checkpoint, filepath)
if is_best:
msg += ' Best: %s\n' % filepath_best
t.save(checkpoint, filepath_best)
# model.to('cpu')
# t.save(model, filepath_best)
# model.to('cuda')
logger.info(msg)
logname = '_loggs.log' if name is None else name + '_loggs.log'
# logger.FileHandler(logname)
def load_checkpoint(model, chkp_file, model_device=None, strict=False, lean=False):
"""Load a pyTorch training checkpoint.
Args:
model: the pyTorch model to which we will load the parameters. You can
specify model=None if the checkpoint contains enough metadata to infer
the model. The order of the arguments is misleading and clunky, and is
kept this way for backward compatibility.
chkp_file: the checkpoint file
lean: if set, read into model only 'state_dict' field
model_device [str]: if set, call model.to($model_device)
This should be set to either 'cpu' or 'cuda'.
:returns: updated model, optimizer, start_epoch
"""
if not os.path.isfile(chkp_file):
raise IOError('Cannot find a checkpoint at', chkp_file)
checkpoint = t.load(chkp_file, map_location=lambda storage, loc: storage)
if 'state_dict' not in checkpoint:
raise ValueError('Checkpoint must contain model parameters')
extras = checkpoint.get('extras', None)
arch = checkpoint.get('arch', '_nameless_')
checkpoint_epoch = checkpoint.get('epoch', None)
start_epoch = checkpoint_epoch + 1 if checkpoint_epoch is not None else 0
anomalous_keys = model.load_state_dict(checkpoint['state_dict'], strict)
if anomalous_keys:
missing_keys, unexpected_keys = anomalous_keys
if unexpected_keys:
logger.warning("The loaded checkpoint (%s) contains %d unexpected state keys" %
(chkp_file, len(unexpected_keys)))
if missing_keys:
raise ValueError("The loaded checkpoint (%s) is missing %d state keys" %
(chkp_file, len(missing_keys)))
if model_device is not None:
model.to(model_device)
if lean:
logger.info("Loaded checkpoint %s model (next epoch %d) from %s", arch, 0, chkp_file)
return model, 0, None
else:
logger.info("Loaded checkpoint %s model (next epoch %d) from %s", arch, start_epoch, chkp_file)
return model, start_epoch, extras
|
py | 1a3941df0a21adaed6f28eef826ef993f717b378 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
from fairseq import optim, utils
class DynamicLossScaler(object):
def __init__(
self, init_scale=2.**15, scale_factor=2., scale_window=2000,
tolerance=0.05, threshold=None,
):
self.loss_scale = init_scale
self.scale_factor = scale_factor
self.scale_window = scale_window
self.tolerance = tolerance
self.threshold = threshold
self._iter = 0
self._last_overflow_iter = -1
self._last_rescale_iter = -1
self._overflows_since_rescale = 0
def update_scale(self, overflow):
iter_since_rescale = self._iter - self._last_rescale_iter
if overflow:
self._last_overflow_iter = self._iter
self._overflows_since_rescale += 1
pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)
if pct_overflow >= self.tolerance:
self._decrease_loss_scale()
self._last_rescale_iter = self._iter
self._overflows_since_rescale = 0
elif (self._iter - self._last_overflow_iter) % self.scale_window == 0:
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._iter
self._iter += 1
def _decrease_loss_scale(self):
self.loss_scale /= self.scale_factor
if self.threshold is not None:
self.loss_scale = max(self.loss_scale, self.threshold)
@staticmethod
def has_overflow(grad_norm):
# detect inf and nan
if grad_norm == float('inf') or grad_norm != grad_norm:
return True
return False
class FP16Optimizer(optim.FairseqOptimizer):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
"""
def __init__(self, args, params, fp32_optimizer, fp32_params):
super().__init__(args, params)
self.fp32_optimizer = fp32_optimizer
self.fp32_params = fp32_params
if getattr(args, 'fp16_scale_window', None) is None:
if len(args.update_freq) > 1:
raise ValueError(
'--fp16-scale-window must be given explicitly when using a '
'custom --update-freq schedule'
)
scale_window = 2**14 / args.distributed_world_size / args.update_freq[0]
else:
scale_window = args.fp16_scale_window
self.scaler = DynamicLossScaler(
init_scale=args.fp16_init_scale,
scale_window=scale_window,
tolerance=args.fp16_scale_tolerance,
threshold=args.threshold_loss_scale,
)
@classmethod
def build_optimizer(cls, args, params):
"""
Args:
args (argparse.Namespace): fairseq args
params (iterable): iterable of parameters to optimize
"""
# create FP32 copy of parameters and grads
total_param_size = sum(p.data.numel() for p in params)
fp32_params = params[0].new(0).float().new(total_param_size)
offset = 0
for p in params:
numel = p.data.numel()
fp32_params[offset:offset+numel].copy_(p.data.contiguous().view(-1))
offset += numel
fp32_params = torch.nn.Parameter(fp32_params)
fp32_params.grad = fp32_params.data.new(total_param_size)
fp32_optimizer = optim.build_optimizer(args, [fp32_params])
return cls(args, params, fp32_optimizer, fp32_params)
@property
def optimizer(self):
return self.fp32_optimizer.optimizer
@property
def optimizer_config(self):
return self.fp32_optimizer.optimizer_config
def get_lr(self):
return self.fp32_optimizer.get_lr()
def set_lr(self, lr):
self.fp32_optimizer.set_lr(lr)
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.fp32_optimizer.state_dict()
state_dict['loss_scale'] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if 'loss_scale' in state_dict:
self.scaler.loss_scale = state_dict['loss_scale']
self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
loss = loss * self.scaler.loss_scale
loss.backward()
self._needs_sync = True
def _sync_fp16_grads_to_fp32(self, multiply_grads=1.):
if self._needs_sync:
# copy FP16 grads to FP32
offset = 0
for p in self.params:
if not p.requires_grad:
continue
grad_data = p.grad.data if p.grad is not None else p.data.new_zeros(p.data.shape)
numel = grad_data.numel()
self.fp32_params.grad.data[offset:offset+numel].copy_(grad_data.view(-1))
offset += numel
# correct for dynamic loss scaler
self.fp32_params.grad.data.mul_(multiply_grads / self.scaler.loss_scale)
self._needs_sync = False
def multiply_grads(self, c):
"""Multiplies grads by a constant ``c``."""
if self._needs_sync:
self._sync_fp16_grads_to_fp32(c)
else:
self.fp32_params.grad.data.mul_(c)
def clip_grad_norm(self, max_norm):
"""Clips gradient norm and updates dynamic loss scaler."""
self._sync_fp16_grads_to_fp32()
grad_norm = utils.clip_grad_norm_(self.fp32_params.grad.data, max_norm)
# detect overflow and adjust loss scale
overflow = DynamicLossScaler.has_overflow(grad_norm)
self.scaler.update_scale(overflow)
if overflow:
if self.scaler.loss_scale <= self.args.min_loss_scale:
# Use FloatingPointError as an uncommon error that parent
# functions can safely catch to stop training.
raise FloatingPointError((
'Minimum loss scale reached ({}). Your loss is probably exploding. '
'Try lowering the learning rate, using gradient clipping or '
'increasing the batch size.'
).format(self.args.min_loss_scale))
raise OverflowError('setting loss scale to: ' + str(self.scaler.loss_scale))
return grad_norm
def step(self, closure=None):
"""Performs a single optimization step."""
self._sync_fp16_grads_to_fp32()
self.fp32_optimizer.step(closure)
# copy FP32 params back into FP16 model
offset = 0
for p in self.params:
if not p.requires_grad:
continue
numel = p.data.numel()
p.data.copy_(self.fp32_params.data[offset:offset+numel].view_as(p.data))
offset += numel
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.params:
p.grad = None
self._needs_sync = False
class MemoryEfficientFP16Optimizer(optim.FairseqOptimizer):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
Compared to :class:`fairseq.optim.FP16Optimizer`, this version does not
maintain an FP32 copy of the model. We instead expect the optimizer to
convert the gradients to FP32 internally and sync the results back to the
FP16 model params. This significantly reduces memory usage but slightly
increases the time spent in the optimizer.
Since this wrapper depends on specific functionality in the wrapped
optimizer (i.e., on-the-fly conversion of grads to FP32), only certain
optimizers can be wrapped. This is determined by the
*supports_memory_efficient_fp16* property.
"""
def __init__(self, args, params, optimizer):
if not optimizer.supports_memory_efficient_fp16:
raise ValueError(
'Unsupported optimizer: {}'.format(optimizer.__class__.__name__)
)
super().__init__(args, params)
self.wrapped_optimizer = optimizer
if getattr(args, 'fp16_scale_window', None) is None:
if len(args.update_freq) > 1:
raise ValueError(
'--fp16-scale-window must be given explicitly when using a '
'custom --update-freq schedule'
)
scale_window = 2**14 / args.distributed_world_size / args.update_freq[0]
else:
scale_window = args.fp16_scale_window
self.scaler = DynamicLossScaler(
init_scale=args.fp16_init_scale,
scale_window=scale_window,
tolerance=args.fp16_scale_tolerance,
threshold=args.threshold_loss_scale,
)
@classmethod
def build_optimizer(cls, args, params):
"""
Args:
args (argparse.Namespace): fairseq args
params (iterable): iterable of parameters to optimize
"""
fp16_optimizer = optim.build_optimizer(args, params)
return cls(args, params, fp16_optimizer)
@property
def optimizer(self):
return self.wrapped_optimizer.optimizer
@property
def optimizer_config(self):
return self.wrapped_optimizer.optimizer_config
def get_lr(self):
return self.wrapped_optimizer.get_lr()
def set_lr(self, lr):
self.wrapped_optimizer.set_lr(lr)
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.wrapped_optimizer.state_dict()
state_dict['loss_scale'] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if 'loss_scale' in state_dict:
self.scaler.loss_scale = state_dict['loss_scale']
self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
loss = loss * self.scaler.loss_scale
loss.backward()
self._grads_are_scaled = True
def _unscale_grads(self, multiply_grads=1.):
if self._grads_are_scaled:
self._grads_are_scaled = False
# correct for dynamic loss scaler
self.wrapped_optimizer.multiply_grads(multiply_grads / self.scaler.loss_scale)
else:
assert multiply_grads == 1.
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
if self._grads_are_scaled:
self._unscale_grads(c)
else:
self.wrapped_optimizer.multiply_grads(c)
def clip_grad_norm(self, max_norm):
"""Clips gradient norm and updates dynamic loss scaler."""
self._unscale_grads()
grad_norm = self.wrapped_optimizer.clip_grad_norm(max_norm)
# detect overflow and adjust loss scale
overflow = DynamicLossScaler.has_overflow(grad_norm)
self.scaler.update_scale(overflow)
if overflow:
if self.scaler.loss_scale <= self.args.min_loss_scale:
# Use FloatingPointError as an uncommon error that parent
# functions can safely catch to stop training.
raise FloatingPointError((
'Minimum loss scale reached ({}). Your loss is probably exploding. '
'Try lowering the learning rate, using gradient clipping or '
'increasing the batch size.'
).format(self.args.min_loss_scale))
raise OverflowError('setting loss scale to: ' + str(self.scaler.loss_scale))
return grad_norm
def step(self, closure=None):
"""Performs a single optimization step."""
self._unscale_grads()
self.wrapped_optimizer.step(closure)
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
self.wrapped_optimizer.zero_grad()
self._grads_are_scaled = False
|
py | 1a39438062f70d447357e752b473fe444b57d03c | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from yt_dlp import main as ytdlp_main
from . import initialize, add_plugins
from .patching import patch_decorator
@patch_decorator
def main(argv=None):
initialize()
add_plugins()
ytdlp_main(argv=argv)
if __name__ == "__main__":
main()
|
py | 1a3943d0d21456b73d03527e37460c856281e7a2 | from mopidy.models import Album, Track
from tests.mpd import protocol
class StatusHandlerTest(protocol.BaseTestCase):
def test_clearerror(self):
self.send_request("clearerror")
self.assertEqualResponse("ACK [0@0] {clearerror} Not implemented")
def test_currentsong(self):
track = Track(uri="dummy:/a")
self.backend.library.dummy_library = [track]
self.core.tracklist.add(uris=[track.uri]).get()
self.core.playback.play().get()
self.send_request("currentsong")
self.assertInResponse("file: dummy:/a")
self.assertInResponse("Time: 0")
self.assertNotInResponse("Artist: ")
self.assertNotInResponse("Title: ")
self.assertNotInResponse("Album: ")
self.assertNotInResponse("Track: 0")
self.assertNotInResponse("Date: ")
self.assertInResponse("Pos: 0")
self.assertInResponse("Id: 1")
self.assertInResponse("OK")
def test_currentsong_unicode(self):
track = Track(
uri="dummy:/à",
name="a nàme",
album=Album(uri="something:àlbum:12345"),
)
self.backend.library.dummy_library = [track]
self.core.tracklist.add(uris=[track.uri]).get()
self.core.playback.play().get()
self.send_request("currentsong")
self.assertInResponse("file: dummy:/à")
self.assertInResponse("Title: a nàme")
self.assertInResponse("X-AlbumUri: something:àlbum:12345")
def test_currentsong_without_song(self):
self.send_request("currentsong")
self.assertInResponse("OK")
def test_stats_command(self):
self.send_request("stats")
self.assertInResponse("OK")
def test_status_command(self):
self.send_request("status")
self.assertInResponse("OK")
|
py | 1a3943f9c5ef26167e70ed0b2d114f943fc3fce5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sqlite3
connection = sqlite3.connect("lagerverwaltung.db")
cursor = connection.cursor()
cursor.execute("""CREATE TABLE lager (
fachnummer INTEGER, seriennummer INTEGER,
komponente TEXT, lieferant TEXT, reserviert INTEGER
)""")
cursor.execute("""CREATE TABLE lieferanten (
kurzname TEXT, name TEXT, telefonnummer TEXT
)""")
cursor.execute("""CREATE TABLE kunden (
kundennummer INTEGER, name TEXT, anschrift TEXT
)""")
|
py | 1a39449dc6928425b33eed116370bf8650adc8d2 | coordinates_00FF3F = ((206, 290),
(206, 292), (206, 293), (206, 294), (206, 295), (206, 296), (206, 297), (206, 298), (206, 299), (206, 300), (206, 301), (206, 302), (206, 303), (206, 304), (206, 305), (206, 306), (206, 307), (207, 289), (207, 308), (207, 309), (207, 310), (207, 311), (207, 312), (207, 313), (208, 288), (208, 290), (208, 291), (208, 292), (208, 293), (208, 294), (208, 295), (208, 296), (208, 297), (208, 298), (208, 299), (208, 300), (208, 301), (208, 302), (208, 303), (208, 304), (208, 305), (208, 306), (208, 307), (208, 314), (208, 315), (208, 316), (208, 317), (208, 318), (208, 319), (208, 320), (208, 321), (208, 322), (208, 323), (208, 324), (208, 325), (208, 326), (208, 327), (208, 328), (208, 329), (208, 330), (208, 331), (208, 332), (208, 333), (208, 334), (208, 335), (208, 336), (208, 337), (209, 290), (209, 291), (209, 292), (209, 293), (209, 294), (209, 295),
(209, 296), (209, 297), (209, 298), (209, 299), (209, 300), (209, 301), (209, 302), (209, 303), (209, 304), (209, 305), (209, 306), (209, 307), (209, 308), (209, 309), (209, 310), (209, 311), (209, 312), (209, 313), (209, 339), (209, 340), (209, 341), (209, 342), (209, 343), (209, 344), (209, 345), (209, 346), (210, 287), (210, 289), (210, 290), (210, 291), (210, 292), (210, 293), (210, 294), (210, 295), (210, 296), (210, 297), (210, 298), (210, 299), (210, 300), (210, 301), (210, 302), (210, 303), (210, 304), (210, 305), (210, 306), (210, 307), (210, 308), (210, 309), (210, 310), (210, 311), (210, 312), (210, 313), (210, 314), (210, 315), (210, 316), (210, 317), (210, 318), (210, 319), (210, 320), (210, 321), (210, 322), (210, 323), (210, 324), (210, 325), (210, 326), (210, 327), (210, 328), (210, 329), (210, 330), (210, 331), (210, 332), (210, 333),
(210, 334), (210, 335), (210, 336), (210, 337), (210, 338), (210, 346), (210, 347), (210, 349), (211, 286), (211, 288), (211, 289), (211, 290), (211, 291), (211, 292), (211, 293), (211, 294), (211, 295), (211, 296), (211, 297), (211, 298), (211, 299), (211, 300), (211, 301), (211, 302), (211, 303), (211, 304), (211, 305), (211, 306), (211, 307), (211, 308), (211, 309), (211, 310), (211, 311), (211, 312), (211, 313), (211, 314), (211, 315), (211, 316), (211, 317), (211, 318), (211, 319), (211, 320), (211, 321), (211, 322), (211, 323), (211, 324), (211, 325), (211, 326), (211, 327), (211, 328), (211, 329), (211, 330), (211, 331), (211, 332), (211, 333), (211, 334), (211, 335), (211, 336), (211, 337), (211, 338), (211, 339), (211, 340), (211, 341), (211, 342), (211, 343), (211, 344), (211, 345), (211, 346), (212, 285), (212, 287), (212, 288), (212, 289),
(212, 290), (212, 291), (212, 292), (212, 293), (212, 294), (212, 295), (212, 296), (212, 297), (212, 298), (212, 299), (212, 300), (212, 301), (212, 302), (212, 303), (212, 304), (212, 305), (212, 306), (212, 307), (212, 308), (212, 309), (212, 310), (212, 311), (212, 312), (212, 313), (212, 314), (212, 315), (212, 316), (212, 317), (212, 318), (212, 319), (212, 320), (212, 321), (212, 322), (212, 323), (212, 324), (212, 325), (212, 326), (212, 327), (212, 328), (212, 329), (212, 330), (212, 331), (212, 332), (212, 333), (212, 334), (212, 335), (212, 336), (212, 337), (212, 338), (212, 339), (212, 340), (212, 341), (212, 342), (212, 343), (212, 344), (212, 345), (212, 346), (212, 347), (212, 348), (212, 349), (213, 284), (213, 286), (213, 287), (213, 288), (213, 289), (213, 290), (213, 291), (213, 292), (213, 293), (213, 294), (213, 295), (213, 296),
(213, 297), (213, 298), (213, 299), (213, 300), (213, 301), (213, 302), (213, 303), (213, 304), (213, 305), (213, 306), (213, 307), (213, 308), (213, 309), (213, 310), (213, 311), (213, 312), (213, 313), (213, 314), (213, 315), (213, 316), (213, 317), (213, 318), (213, 319), (213, 320), (213, 321), (213, 322), (213, 323), (213, 324), (213, 325), (213, 326), (213, 327), (213, 328), (213, 329), (213, 330), (213, 331), (213, 332), (213, 333), (213, 334), (213, 335), (213, 336), (213, 337), (213, 338), (213, 339), (213, 340), (213, 341), (213, 342), (213, 343), (213, 344), (213, 345), (213, 346), (213, 347), (213, 348), (213, 349), (213, 350), (214, 283), (214, 285), (214, 286), (214, 287), (214, 288), (214, 289), (214, 290), (214, 291), (214, 292), (214, 293), (214, 294), (214, 295), (214, 296), (214, 297), (214, 298), (214, 299), (214, 300), (214, 301),
(214, 302), (214, 303), (214, 304), (214, 305), (214, 306), (214, 307), (214, 308), (214, 309), (214, 310), (214, 311), (214, 312), (214, 313), (214, 314), (214, 315), (214, 316), (214, 317), (214, 318), (214, 319), (214, 320), (214, 321), (214, 322), (214, 323), (214, 324), (214, 325), (214, 326), (214, 327), (214, 328), (214, 329), (214, 330), (214, 331), (214, 332), (214, 333), (214, 334), (214, 335), (214, 336), (214, 337), (214, 338), (214, 339), (214, 340), (214, 341), (214, 342), (214, 343), (214, 344), (214, 345), (214, 346), (214, 347), (214, 348), (214, 349), (214, 350), (214, 351), (215, 281), (215, 284), (215, 285), (215, 286), (215, 287), (215, 288), (215, 289), (215, 290), (215, 291), (215, 292), (215, 293), (215, 294), (215, 295), (215, 296), (215, 297), (215, 298), (215, 299), (215, 300), (215, 301), (215, 302), (215, 303), (215, 304),
(215, 305), (215, 306), (215, 307), (215, 308), (215, 309), (215, 310), (215, 311), (215, 312), (215, 313), (215, 314), (215, 315), (215, 316), (215, 317), (215, 318), (215, 319), (215, 320), (215, 321), (215, 322), (215, 323), (215, 324), (215, 325), (215, 326), (215, 327), (215, 328), (215, 329), (215, 330), (215, 331), (215, 332), (215, 333), (215, 334), (215, 335), (215, 336), (215, 337), (215, 338), (215, 339), (215, 340), (215, 341), (215, 342), (215, 343), (215, 344), (215, 345), (215, 346), (215, 347), (215, 348), (215, 349), (215, 350), (215, 351), (215, 352), (216, 280), (216, 283), (216, 284), (216, 285), (216, 286), (216, 287), (216, 288), (216, 289), (216, 290), (216, 291), (216, 292), (216, 293), (216, 294), (216, 295), (216, 296), (216, 297), (216, 298), (216, 299), (216, 300), (216, 301), (216, 302), (216, 303), (216, 304), (216, 305),
(216, 306), (216, 307), (216, 308), (216, 309), (216, 310), (216, 311), (216, 312), (216, 313), (216, 314), (216, 315), (216, 316), (216, 317), (216, 318), (216, 319), (216, 320), (216, 321), (216, 322), (216, 323), (216, 324), (216, 325), (216, 326), (216, 327), (216, 328), (216, 329), (216, 330), (216, 331), (216, 332), (216, 333), (216, 334), (216, 335), (216, 336), (216, 337), (216, 338), (216, 339), (216, 340), (216, 341), (216, 342), (216, 343), (216, 344), (216, 345), (216, 346), (216, 347), (216, 348), (216, 349), (216, 350), (216, 351), (216, 352), (216, 353), (217, 278), (217, 281), (217, 282), (217, 283), (217, 284), (217, 285), (217, 286), (217, 287), (217, 288), (217, 289), (217, 290), (217, 291), (217, 292), (217, 293), (217, 294), (217, 295), (217, 296), (217, 297), (217, 298), (217, 299), (217, 300), (217, 301), (217, 302), (217, 303),
(217, 304), (217, 305), (217, 306), (217, 307), (217, 308), (217, 309), (217, 310), (217, 311), (217, 312), (217, 313), (217, 314), (217, 315), (217, 316), (217, 317), (217, 318), (217, 319), (217, 320), (217, 321), (217, 322), (217, 323), (217, 324), (217, 325), (217, 326), (217, 327), (217, 328), (217, 329), (217, 330), (217, 331), (217, 332), (217, 333), (217, 334), (217, 335), (217, 336), (217, 337), (217, 338), (217, 339), (217, 340), (217, 341), (217, 342), (217, 343), (217, 344), (217, 345), (217, 346), (217, 347), (217, 348), (217, 349), (217, 350), (217, 351), (217, 352), (217, 353), (217, 354), (218, 277), (218, 280), (218, 281), (218, 282), (218, 283), (218, 284), (218, 285), (218, 286), (218, 287), (218, 288), (218, 289), (218, 290), (218, 291), (218, 292), (218, 293), (218, 294), (218, 295), (218, 296), (218, 297), (218, 298), (218, 299),
(218, 300), (218, 301), (218, 302), (218, 303), (218, 304), (218, 305), (218, 306), (218, 307), (218, 308), (218, 309), (218, 310), (218, 311), (218, 312), (218, 313), (218, 314), (218, 315), (218, 316), (218, 317), (218, 318), (218, 319), (218, 320), (218, 321), (218, 322), (218, 323), (218, 324), (218, 325), (218, 326), (218, 327), (218, 328), (218, 329), (218, 330), (218, 331), (218, 332), (218, 333), (218, 334), (218, 335), (218, 336), (218, 337), (218, 338), (218, 339), (218, 340), (218, 341), (218, 342), (218, 343), (218, 344), (218, 345), (218, 346), (218, 347), (218, 348), (218, 349), (218, 350), (218, 351), (218, 352), (218, 353), (218, 354), (218, 355), (219, 276), (219, 278), (219, 279), (219, 280), (219, 281), (219, 282), (219, 283), (219, 284), (219, 285), (219, 286), (219, 287), (219, 288), (219, 289), (219, 290), (219, 291), (219, 292),
(219, 293), (219, 294), (219, 295), (219, 296), (219, 297), (219, 298), (219, 299), (219, 300), (219, 301), (219, 302), (219, 303), (219, 304), (219, 305), (219, 306), (219, 307), (219, 308), (219, 309), (219, 310), (219, 311), (219, 312), (219, 313), (219, 314), (219, 315), (219, 316), (219, 317), (219, 318), (219, 319), (219, 320), (219, 321), (219, 322), (219, 323), (219, 324), (219, 325), (219, 326), (219, 327), (219, 328), (219, 329), (219, 330), (219, 331), (219, 332), (219, 333), (219, 334), (219, 335), (219, 336), (219, 337), (219, 338), (219, 339), (219, 340), (219, 341), (219, 342), (219, 343), (219, 344), (219, 345), (219, 346), (219, 347), (219, 348), (219, 349), (219, 350), (219, 351), (219, 352), (219, 353), (219, 354), (219, 355), (219, 356), (220, 275), (220, 277), (220, 278), (220, 279), (220, 280), (220, 281), (220, 282), (220, 283),
(220, 284), (220, 285), (220, 286), (220, 287), (220, 288), (220, 289), (220, 290), (220, 291), (220, 292), (220, 293), (220, 294), (220, 295), (220, 296), (220, 297), (220, 298), (220, 299), (220, 300), (220, 301), (220, 302), (220, 303), (220, 304), (220, 305), (220, 306), (220, 307), (220, 308), (220, 309), (220, 310), (220, 311), (220, 312), (220, 313), (220, 314), (220, 315), (220, 316), (220, 317), (220, 318), (220, 319), (220, 320), (220, 321), (220, 322), (220, 323), (220, 324), (220, 325), (220, 326), (220, 327), (220, 328), (220, 329), (220, 330), (220, 331), (220, 332), (220, 333), (220, 334), (220, 335), (220, 336), (220, 337), (220, 338), (220, 339), (220, 340), (220, 341), (220, 342), (220, 343), (220, 344), (220, 345), (220, 346), (220, 347), (220, 348), (220, 349), (220, 350), (220, 351), (220, 352), (220, 353), (220, 354), (220, 355),
(220, 356), (220, 357), (221, 274), (221, 276), (221, 277), (221, 278), (221, 279), (221, 280), (221, 281), (221, 282), (221, 283), (221, 284), (221, 285), (221, 286), (221, 287), (221, 288), (221, 289), (221, 290), (221, 291), (221, 292), (221, 293), (221, 294), (221, 295), (221, 296), (221, 297), (221, 298), (221, 299), (221, 300), (221, 301), (221, 302), (221, 303), (221, 304), (221, 305), (221, 306), (221, 307), (221, 308), (221, 309), (221, 310), (221, 311), (221, 312), (221, 313), (221, 314), (221, 315), (221, 316), (221, 317), (221, 318), (221, 319), (221, 320), (221, 321), (221, 322), (221, 323), (221, 324), (221, 325), (221, 326), (221, 327), (221, 328), (221, 329), (221, 330), (221, 331), (221, 332), (221, 333), (221, 334), (221, 335), (221, 336), (221, 337), (221, 338), (221, 339), (221, 340), (221, 341), (221, 342), (221, 343), (221, 344),
(221, 345), (221, 346), (221, 347), (221, 348), (221, 349), (221, 350), (221, 351), (221, 352), (221, 353), (221, 354), (221, 355), (221, 356), (221, 357), (221, 358), (222, 273), (222, 275), (222, 276), (222, 277), (222, 278), (222, 279), (222, 280), (222, 281), (222, 282), (222, 283), (222, 284), (222, 285), (222, 286), (222, 287), (222, 288), (222, 289), (222, 290), (222, 291), (222, 292), (222, 293), (222, 294), (222, 295), (222, 296), (222, 297), (222, 298), (222, 299), (222, 300), (222, 301), (222, 302), (222, 303), (222, 304), (222, 305), (222, 306), (222, 307), (222, 308), (222, 309), (222, 310), (222, 311), (222, 312), (222, 313), (222, 314), (222, 315), (222, 316), (222, 317), (222, 318), (222, 319), (222, 320), (222, 321), (222, 322), (222, 323), (222, 324), (222, 325), (222, 326), (222, 327), (222, 328), (222, 329), (222, 330), (222, 331),
(222, 332), (222, 333), (222, 334), (222, 335), (222, 336), (222, 337), (222, 338), (222, 339), (222, 340), (222, 341), (222, 342), (222, 343), (222, 344), (222, 345), (222, 346), (222, 347), (222, 348), (222, 349), (222, 350), (222, 351), (222, 352), (222, 353), (222, 354), (222, 355), (222, 356), (222, 357), (222, 358), (222, 359), (223, 272), (223, 274), (223, 275), (223, 276), (223, 277), (223, 278), (223, 279), (223, 280), (223, 281), (223, 282), (223, 283), (223, 284), (223, 285), (223, 286), (223, 287), (223, 288), (223, 289), (223, 290), (223, 291), (223, 292), (223, 293), (223, 294), (223, 295), (223, 296), (223, 297), (223, 298), (223, 299), (223, 300), (223, 301), (223, 302), (223, 303), (223, 304), (223, 305), (223, 306), (223, 307), (223, 308), (223, 309), (223, 310), (223, 311), (223, 312), (223, 313), (223, 314), (223, 315), (223, 316),
(223, 317), (223, 318), (223, 319), (223, 320), (223, 321), (223, 322), (223, 323), (223, 324), (223, 325), (223, 326), (223, 327), (223, 328), (223, 329), (223, 330), (223, 331), (223, 332), (223, 333), (223, 334), (223, 335), (223, 336), (223, 337), (223, 338), (223, 339), (223, 340), (223, 341), (223, 342), (223, 343), (223, 344), (223, 345), (223, 346), (223, 347), (223, 348), (223, 349), (223, 350), (223, 351), (223, 352), (223, 353), (223, 354), (223, 355), (223, 356), (223, 357), (223, 358), (223, 359), (223, 360), (224, 271), (224, 273), (224, 274), (224, 275), (224, 276), (224, 277), (224, 278), (224, 279), (224, 280), (224, 281), (224, 282), (224, 283), (224, 284), (224, 285), (224, 286), (224, 287), (224, 288), (224, 289), (224, 290), (224, 291), (224, 292), (224, 293), (224, 294), (224, 295), (224, 296), (224, 297), (224, 298), (224, 299),
(224, 300), (224, 301), (224, 302), (224, 303), (224, 304), (224, 305), (224, 306), (224, 307), (224, 308), (224, 309), (224, 310), (224, 311), (224, 312), (224, 313), (224, 314), (224, 315), (224, 316), (224, 317), (224, 318), (224, 319), (224, 320), (224, 321), (224, 322), (224, 323), (224, 324), (224, 325), (224, 326), (224, 327), (224, 328), (224, 329), (224, 330), (224, 331), (224, 332), (224, 333), (224, 334), (224, 335), (224, 336), (224, 337), (224, 338), (224, 339), (224, 340), (224, 341), (224, 342), (224, 343), (224, 344), (224, 345), (224, 346), (224, 347), (224, 348), (224, 349), (224, 350), (224, 351), (224, 352), (224, 353), (224, 354), (224, 355), (224, 356), (224, 357), (224, 358), (224, 359), (224, 360), (224, 361), (225, 273), (225, 274), (225, 275), (225, 276), (225, 277), (225, 278), (225, 279), (225, 280), (225, 281), (225, 282),
(225, 283), (225, 284), (225, 285), (225, 286), (225, 287), (225, 288), (225, 289), (225, 290), (225, 291), (225, 292), (225, 293), (225, 294), (225, 295), (225, 296), (225, 297), (225, 298), (225, 299), (225, 300), (225, 301), (225, 302), (225, 303), (225, 304), (225, 305), (225, 306), (225, 307), (225, 308), (225, 309), (225, 310), (225, 311), (225, 312), (225, 313), (225, 314), (225, 315), (225, 316), (225, 317), (225, 318), (225, 319), (225, 320), (225, 321), (225, 322), (225, 323), (225, 324), (225, 325), (225, 326), (225, 327), (225, 328), (225, 329), (225, 330), (225, 331), (225, 332), (225, 333), (225, 334), (225, 335), (225, 336), (225, 337), (225, 338), (225, 339), (225, 340), (225, 341), (225, 342), (225, 343), (225, 344), (225, 345), (225, 346), (225, 347), (225, 348), (225, 349), (225, 350), (225, 351), (225, 352), (225, 353), (225, 354),
(225, 355), (225, 356), (225, 357), (225, 358), (225, 359), (225, 360), (225, 361), (225, 362), (226, 270), (226, 272), (226, 273), (226, 274), (226, 275), (226, 276), (226, 277), (226, 278), (226, 279), (226, 280), (226, 281), (226, 282), (226, 283), (226, 284), (226, 285), (226, 286), (226, 287), (226, 288), (226, 289), (226, 290), (226, 291), (226, 292), (226, 293), (226, 294), (226, 295), (226, 296), (226, 297), (226, 298), (226, 299), (226, 300), (226, 301), (226, 302), (226, 303), (226, 304), (226, 305), (226, 306), (226, 307), (226, 308), (226, 309), (226, 310), (226, 311), (226, 312), (226, 313), (226, 314), (226, 315), (226, 316), (226, 317), (226, 318), (226, 319), (226, 320), (226, 321), (226, 322), (226, 323), (226, 324), (226, 325), (226, 326), (226, 327), (226, 328), (226, 329), (226, 330), (226, 331), (226, 332), (226, 333), (226, 334),
(226, 335), (226, 336), (226, 337), (226, 338), (226, 339), (226, 340), (226, 341), (226, 342), (226, 343), (226, 344), (226, 345), (226, 346), (226, 347), (226, 348), (226, 349), (226, 350), (226, 351), (226, 352), (226, 353), (226, 354), (226, 355), (226, 356), (226, 357), (226, 358), (226, 359), (226, 360), (226, 361), (226, 362), (226, 363), (227, 269), (227, 271), (227, 272), (227, 273), (227, 274), (227, 275), (227, 276), (227, 277), (227, 278), (227, 279), (227, 280), (227, 281), (227, 282), (227, 283), (227, 284), (227, 285), (227, 286), (227, 287), (227, 288), (227, 289), (227, 290), (227, 291), (227, 292), (227, 293), (227, 294), (227, 295), (227, 296), (227, 297), (227, 298), (227, 299), (227, 300), (227, 301), (227, 302), (227, 303), (227, 304), (227, 305), (227, 306), (227, 307), (227, 308), (227, 309), (227, 310), (227, 311), (227, 312),
(227, 313), (227, 314), (227, 315), (227, 316), (227, 317), (227, 318), (227, 319), (227, 320), (227, 321), (227, 322), (227, 323), (227, 324), (227, 325), (227, 326), (227, 327), (227, 328), (227, 329), (227, 330), (227, 331), (227, 332), (227, 333), (227, 334), (227, 335), (227, 336), (227, 337), (227, 338), (227, 339), (227, 340), (227, 341), (227, 342), (227, 343), (227, 344), (227, 345), (227, 346), (227, 347), (227, 348), (227, 349), (227, 350), (227, 351), (227, 352), (227, 353), (227, 354), (227, 355), (227, 356), (227, 357), (227, 358), (227, 359), (227, 360), (227, 361), (227, 362), (227, 363), (227, 364), (228, 269), (228, 271), (228, 272), (228, 273), (228, 274), (228, 275), (228, 276), (228, 277), (228, 278), (228, 279), (228, 280), (228, 281), (228, 282), (228, 283), (228, 284), (228, 285), (228, 286), (228, 287), (228, 288), (228, 289),
(228, 290), (228, 291), (228, 292), (228, 293), (228, 294), (228, 295), (228, 296), (228, 297), (228, 298), (228, 299), (228, 300), (228, 301), (228, 302), (228, 303), (228, 304), (228, 305), (228, 306), (228, 307), (228, 308), (228, 309), (228, 310), (228, 311), (228, 312), (228, 313), (228, 314), (228, 315), (228, 316), (228, 317), (228, 318), (228, 319), (228, 320), (228, 321), (228, 322), (228, 323), (228, 324), (228, 325), (228, 326), (228, 327), (228, 328), (228, 329), (228, 330), (228, 331), (228, 332), (228, 333), (228, 334), (228, 335), (228, 336), (228, 337), (228, 338), (228, 339), (228, 340), (228, 341), (228, 342), (228, 343), (228, 344), (228, 345), (228, 346), (228, 347), (228, 348), (228, 349), (228, 350), (228, 351), (228, 352), (228, 353), (228, 354), (228, 355), (228, 356), (228, 357), (228, 358), (228, 359), (228, 360), (228, 361),
(228, 362), (228, 363), (228, 364), (228, 365), (229, 268), (229, 270), (229, 271), (229, 272), (229, 273), (229, 274), (229, 275), (229, 276), (229, 277), (229, 278), (229, 279), (229, 280), (229, 281), (229, 282), (229, 283), (229, 284), (229, 285), (229, 286), (229, 287), (229, 288), (229, 289), (229, 290), (229, 291), (229, 292), (229, 293), (229, 294), (229, 295), (229, 296), (229, 297), (229, 298), (229, 299), (229, 300), (229, 301), (229, 302), (229, 303), (229, 304), (229, 305), (229, 306), (229, 307), (229, 308), (229, 309), (229, 310), (229, 311), (229, 312), (229, 313), (229, 314), (229, 315), (229, 316), (229, 317), (229, 318), (229, 319), (229, 320), (229, 321), (229, 322), (229, 323), (229, 324), (229, 325), (229, 326), (229, 327), (229, 328), (229, 329), (229, 330), (229, 331), (229, 332), (229, 333), (229, 334), (229, 335), (229, 336),
(229, 337), (229, 338), (229, 339), (229, 340), (229, 341), (229, 342), (229, 343), (229, 344), (229, 345), (229, 346), (229, 347), (229, 348), (229, 349), (229, 350), (229, 351), (229, 352), (229, 353), (229, 354), (229, 355), (229, 356), (229, 357), (229, 358), (229, 359), (229, 360), (229, 361), (229, 362), (229, 363), (229, 364), (229, 365), (229, 366), (229, 368), (230, 268), (230, 270), (230, 271), (230, 272), (230, 273), (230, 274), (230, 275), (230, 276), (230, 277), (230, 278), (230, 279), (230, 280), (230, 281), (230, 282), (230, 283), (230, 284), (230, 285), (230, 286), (230, 287), (230, 288), (230, 289), (230, 290), (230, 291), (230, 292), (230, 293), (230, 294), (230, 295), (230, 296), (230, 297), (230, 298), (230, 299), (230, 300), (230, 301), (230, 302), (230, 303), (230, 304), (230, 305), (230, 306), (230, 307), (230, 308), (230, 309),
(230, 310), (230, 311), (230, 312), (230, 313), (230, 314), (230, 315), (230, 316), (230, 317), (230, 318), (230, 319), (230, 320), (230, 321), (230, 322), (230, 323), (230, 324), (230, 325), (230, 326), (230, 327), (230, 328), (230, 329), (230, 330), (230, 331), (230, 332), (230, 333), (230, 334), (230, 335), (230, 336), (230, 337), (230, 338), (230, 339), (230, 340), (230, 341), (230, 342), (230, 343), (230, 344), (230, 345), (230, 346), (230, 347), (230, 348), (230, 349), (230, 350), (230, 351), (230, 352), (230, 353), (230, 354), (230, 355), (230, 356), (230, 357), (230, 358), (230, 359), (230, 360), (230, 361), (230, 362), (230, 363), (230, 364), (230, 365), (230, 366), (230, 367), (230, 369), (231, 267), (231, 269), (231, 270), (231, 271), (231, 272), (231, 273), (231, 274), (231, 275), (231, 276), (231, 277), (231, 278), (231, 279), (231, 280),
(231, 281), (231, 282), (231, 283), (231, 284), (231, 285), (231, 286), (231, 287), (231, 288), (231, 289), (231, 290), (231, 291), (231, 292), (231, 293), (231, 294), (231, 295), (231, 296), (231, 297), (231, 298), (231, 299), (231, 300), (231, 301), (231, 302), (231, 303), (231, 304), (231, 305), (231, 306), (231, 307), (231, 308), (231, 309), (231, 310), (231, 311), (231, 312), (231, 313), (231, 314), (231, 315), (231, 316), (231, 317), (231, 318), (231, 319), (231, 320), (231, 321), (231, 322), (231, 323), (231, 324), (231, 325), (231, 326), (231, 327), (231, 328), (231, 329), (231, 330), (231, 331), (231, 332), (231, 333), (231, 334), (231, 335), (231, 336), (231, 337), (231, 338), (231, 339), (231, 340), (231, 341), (231, 342), (231, 343), (231, 344), (231, 345), (231, 346), (231, 347), (231, 348), (231, 349), (231, 350), (231, 351), (231, 352),
(231, 353), (231, 354), (231, 355), (231, 356), (231, 357), (231, 358), (231, 359), (231, 360), (231, 361), (231, 362), (231, 363), (231, 364), (231, 365), (231, 366), (231, 367), (231, 369), (232, 267), (232, 269), (232, 270), (232, 271), (232, 272), (232, 273), (232, 274), (232, 275), (232, 276), (232, 277), (232, 278), (232, 279), (232, 280), (232, 281), (232, 282), (232, 283), (232, 284), (232, 285), (232, 286), (232, 287), (232, 288), (232, 289), (232, 290), (232, 291), (232, 292), (232, 293), (232, 294), (232, 295), (232, 296), (232, 297), (232, 298), (232, 299), (232, 300), (232, 301), (232, 302), (232, 303), (232, 304), (232, 305), (232, 306), (232, 307), (232, 308), (232, 309), (232, 310), (232, 311), (232, 312), (232, 313), (232, 314), (232, 315), (232, 316), (232, 317), (232, 318), (232, 319), (232, 320), (232, 321), (232, 322), (232, 323),
(232, 324), (232, 325), (232, 326), (232, 327), (232, 328), (232, 329), (232, 330), (232, 331), (232, 332), (232, 333), (232, 334), (232, 335), (232, 336), (232, 337), (232, 338), (232, 339), (232, 340), (232, 341), (232, 342), (232, 343), (232, 344), (232, 345), (232, 346), (232, 347), (232, 348), (232, 349), (232, 350), (232, 351), (232, 352), (232, 353), (232, 354), (232, 355), (232, 356), (232, 357), (232, 358), (232, 359), (232, 360), (232, 361), (232, 362), (232, 363), (232, 364), (232, 365), (232, 366), (232, 367), (232, 369), (233, 266), (233, 268), (233, 269), (233, 270), (233, 271), (233, 272), (233, 273), (233, 274), (233, 275), (233, 276), (233, 277), (233, 278), (233, 279), (233, 280), (233, 281), (233, 282), (233, 283), (233, 284), (233, 285), (233, 286), (233, 287), (233, 288), (233, 289), (233, 290), (233, 291), (233, 292), (233, 293),
(233, 294), (233, 295), (233, 296), (233, 297), (233, 298), (233, 299), (233, 300), (233, 301), (233, 302), (233, 303), (233, 304), (233, 305), (233, 306), (233, 307), (233, 308), (233, 309), (233, 310), (233, 311), (233, 312), (233, 313), (233, 314), (233, 315), (233, 316), (233, 317), (233, 318), (233, 319), (233, 320), (233, 321), (233, 322), (233, 323), (233, 324), (233, 325), (233, 326), (233, 327), (233, 328), (233, 329), (233, 330), (233, 331), (233, 332), (233, 333), (233, 334), (233, 335), (233, 336), (233, 337), (233, 338), (233, 339), (233, 340), (233, 341), (233, 342), (233, 343), (233, 344), (233, 345), (233, 346), (233, 347), (233, 348), (233, 349), (233, 350), (233, 351), (233, 352), (233, 353), (233, 354), (233, 355), (233, 356), (233, 357), (233, 358), (233, 359), (233, 360), (233, 361), (233, 362), (233, 363), (233, 364), (233, 365),
(233, 366), (233, 367), (233, 369), (234, 266), (234, 268), (234, 269), (234, 270), (234, 271), (234, 272), (234, 273), (234, 274), (234, 275), (234, 276), (234, 277), (234, 278), (234, 279), (234, 280), (234, 281), (234, 282), (234, 283), (234, 284), (234, 285), (234, 286), (234, 287), (234, 288), (234, 289), (234, 290), (234, 291), (234, 292), (234, 293), (234, 294), (234, 295), (234, 296), (234, 297), (234, 298), (234, 299), (234, 300), (234, 301), (234, 302), (234, 303), (234, 304), (234, 305), (234, 306), (234, 307), (234, 308), (234, 309), (234, 310), (234, 311), (234, 312), (234, 313), (234, 314), (234, 315), (234, 316), (234, 317), (234, 318), (234, 319), (234, 320), (234, 321), (234, 322), (234, 323), (234, 324), (234, 325), (234, 326), (234, 327), (234, 328), (234, 329), (234, 330), (234, 331), (234, 332), (234, 333), (234, 334), (234, 335),
(234, 336), (234, 337), (234, 338), (234, 339), (234, 340), (234, 341), (234, 342), (234, 343), (234, 344), (234, 345), (234, 346), (234, 347), (234, 348), (234, 349), (234, 350), (234, 351), (234, 352), (234, 353), (234, 354), (234, 355), (234, 356), (234, 357), (234, 358), (234, 359), (234, 360), (234, 361), (234, 362), (234, 363), (234, 364), (234, 365), (234, 366), (234, 367), (234, 369), (235, 265), (235, 267), (235, 268), (235, 269), (235, 270), (235, 271), (235, 272), (235, 273), (235, 274), (235, 275), (235, 276), (235, 277), (235, 278), (235, 279), (235, 280), (235, 281), (235, 282), (235, 283), (235, 284), (235, 285), (235, 286), (235, 287), (235, 288), (235, 289), (235, 290), (235, 291), (235, 292), (235, 293), (235, 294), (235, 295), (235, 296), (235, 297), (235, 298), (235, 299), (235, 300), (235, 301), (235, 302), (235, 303), (235, 304),
(235, 305), (235, 306), (235, 307), (235, 308), (235, 309), (235, 310), (235, 311), (235, 312), (235, 313), (235, 314), (235, 315), (235, 316), (235, 317), (235, 318), (235, 319), (235, 320), (235, 321), (235, 322), (235, 323), (235, 324), (235, 325), (235, 326), (235, 327), (235, 328), (235, 329), (235, 330), (235, 331), (235, 332), (235, 333), (235, 334), (235, 335), (235, 336), (235, 337), (235, 338), (235, 339), (235, 340), (235, 341), (235, 342), (235, 343), (235, 344), (235, 345), (235, 346), (235, 347), (235, 348), (235, 349), (235, 350), (235, 351), (235, 352), (235, 353), (235, 354), (235, 355), (235, 356), (235, 357), (235, 358), (235, 359), (235, 360), (235, 361), (235, 362), (235, 363), (235, 364), (235, 365), (235, 366), (235, 367), (235, 369), (236, 265), (236, 267), (236, 268), (236, 269), (236, 270), (236, 271), (236, 272), (236, 273),
(236, 274), (236, 275), (236, 276), (236, 277), (236, 278), (236, 279), (236, 280), (236, 281), (236, 282), (236, 283), (236, 284), (236, 285), (236, 286), (236, 287), (236, 288), (236, 289), (236, 290), (236, 291), (236, 292), (236, 293), (236, 294), (236, 295), (236, 296), (236, 297), (236, 298), (236, 299), (236, 300), (236, 301), (236, 302), (236, 303), (236, 304), (236, 305), (236, 306), (236, 307), (236, 308), (236, 309), (236, 310), (236, 311), (236, 312), (236, 313), (236, 314), (236, 315), (236, 316), (236, 317), (236, 318), (236, 319), (236, 320), (236, 321), (236, 322), (236, 323), (236, 324), (236, 325), (236, 326), (236, 327), (236, 328), (236, 329), (236, 330), (236, 331), (236, 332), (236, 333), (236, 334), (236, 335), (236, 336), (236, 337), (236, 338), (236, 339), (236, 340), (236, 341), (236, 342), (236, 343), (236, 344), (236, 345),
(236, 346), (236, 347), (236, 348), (236, 349), (236, 350), (236, 351), (236, 352), (236, 353), (236, 354), (236, 355), (236, 356), (236, 357), (236, 358), (236, 359), (236, 360), (236, 361), (236, 362), (236, 363), (236, 364), (236, 365), (236, 366), (236, 367), (236, 369), (237, 264), (237, 266), (237, 267), (237, 268), (237, 269), (237, 270), (237, 271), (237, 272), (237, 273), (237, 274), (237, 275), (237, 276), (237, 277), (237, 278), (237, 279), (237, 280), (237, 281), (237, 282), (237, 283), (237, 284), (237, 285), (237, 286), (237, 287), (237, 288), (237, 289), (237, 290), (237, 291), (237, 292), (237, 293), (237, 294), (237, 295), (237, 296), (237, 297), (237, 298), (237, 299), (237, 300), (237, 301), (237, 302), (237, 303), (237, 304), (237, 305), (237, 306), (237, 307), (237, 308), (237, 309), (237, 310), (237, 311), (237, 312), (237, 313),
(237, 314), (237, 315), (237, 316), (237, 317), (237, 318), (237, 319), (237, 320), (237, 321), (237, 322), (237, 323), (237, 324), (237, 325), (237, 326), (237, 327), (237, 328), (237, 329), (237, 330), (237, 331), (237, 332), (237, 333), (237, 334), (237, 335), (237, 336), (237, 337), (237, 338), (237, 339), (237, 340), (237, 341), (237, 342), (237, 343), (237, 344), (237, 345), (237, 346), (237, 347), (237, 348), (237, 349), (237, 350), (237, 351), (237, 352), (237, 353), (237, 354), (237, 355), (237, 356), (237, 357), (237, 358), (237, 359), (237, 360), (237, 361), (237, 362), (237, 363), (237, 364), (237, 365), (237, 366), (237, 367), (237, 369), (238, 264), (238, 266), (238, 267), (238, 268), (238, 269), (238, 270), (238, 271), (238, 272), (238, 273), (238, 274), (238, 275), (238, 276), (238, 277), (238, 278), (238, 279), (238, 280), (238, 281),
(238, 282), (238, 283), (238, 284), (238, 285), (238, 286), (238, 287), (238, 288), (238, 289), (238, 290), (238, 291), (238, 292), (238, 293), (238, 294), (238, 295), (238, 296), (238, 297), (238, 298), (238, 299), (238, 300), (238, 301), (238, 302), (238, 303), (238, 304), (238, 305), (238, 306), (238, 307), (238, 308), (238, 309), (238, 310), (238, 311), (238, 312), (238, 313), (238, 314), (238, 315), (238, 316), (238, 317), (238, 318), (238, 319), (238, 320), (238, 321), (238, 322), (238, 323), (238, 324), (238, 325), (238, 326), (238, 327), (238, 328), (238, 329), (238, 330), (238, 331), (238, 332), (238, 333), (238, 334), (238, 335), (238, 336), (238, 337), (238, 338), (238, 339), (238, 340), (238, 341), (238, 342), (238, 343), (238, 344), (238, 345), (238, 346), (238, 347), (238, 348), (238, 349), (238, 350), (238, 351), (238, 352), (238, 353),
(238, 354), (238, 355), (238, 356), (238, 357), (238, 358), (238, 359), (238, 360), (238, 361), (238, 362), (238, 363), (238, 364), (238, 365), (238, 366), (238, 367), (238, 369), (239, 263), (239, 265), (239, 266), (239, 267), (239, 268), (239, 269), (239, 270), (239, 271), (239, 272), (239, 273), (239, 274), (239, 275), (239, 276), (239, 277), (239, 278), (239, 279), (239, 280), (239, 281), (239, 282), (239, 283), (239, 284), (239, 285), (239, 286), (239, 287), (239, 288), (239, 289), (239, 290), (239, 291), (239, 292), (239, 293), (239, 294), (239, 295), (239, 296), (239, 297), (239, 298), (239, 299), (239, 300), (239, 301), (239, 302), (239, 303), (239, 304), (239, 305), (239, 306), (239, 307), (239, 308), (239, 309), (239, 310), (239, 311), (239, 312), (239, 313), (239, 314), (239, 315), (239, 316), (239, 317), (239, 318), (239, 319), (239, 320),
(239, 321), (239, 322), (239, 323), (239, 324), (239, 325), (239, 326), (239, 327), (239, 328), (239, 329), (239, 330), (239, 331), (239, 332), (239, 333), (239, 334), (239, 335), (239, 336), (239, 337), (239, 338), (239, 339), (239, 340), (239, 341), (239, 342), (239, 343), (239, 344), (239, 345), (239, 346), (239, 347), (239, 348), (239, 349), (239, 350), (239, 351), (239, 352), (239, 353), (239, 354), (239, 355), (239, 356), (239, 357), (239, 358), (239, 359), (239, 360), (239, 361), (239, 362), (239, 363), (239, 364), (239, 365), (239, 366), (239, 367), (239, 369), (240, 263), (240, 265), (240, 266), (240, 267), (240, 268), (240, 269), (240, 270), (240, 271), (240, 272), (240, 273), (240, 274), (240, 275), (240, 276), (240, 277), (240, 278), (240, 279), (240, 280), (240, 281), (240, 282), (240, 283), (240, 284), (240, 285), (240, 286), (240, 287),
(240, 288), (240, 289), (240, 290), (240, 291), (240, 292), (240, 293), (240, 294), (240, 295), (240, 296), (240, 297), (240, 298), (240, 299), (240, 300), (240, 301), (240, 302), (240, 303), (240, 304), (240, 305), (240, 306), (240, 307), (240, 308), (240, 309), (240, 310), (240, 311), (240, 312), (240, 313), (240, 314), (240, 315), (240, 316), (240, 317), (240, 318), (240, 319), (240, 320), (240, 321), (240, 322), (240, 323), (240, 324), (240, 325), (240, 326), (240, 327), (240, 328), (240, 329), (240, 330), (240, 331), (240, 332), (240, 333), (240, 334), (240, 335), (240, 336), (240, 337), (240, 338), (240, 339), (240, 340), (240, 341), (240, 342), (240, 343), (240, 344), (240, 345), (240, 346), (240, 347), (240, 348), (240, 349), (240, 350), (240, 351), (240, 352), (240, 353), (240, 354), (240, 355), (240, 356), (240, 357), (240, 358), (240, 359),
(240, 360), (240, 361), (240, 362), (240, 363), (240, 364), (240, 365), (240, 366), (240, 367), (240, 369), (241, 262), (241, 264), (241, 265), (241, 266), (241, 267), (241, 268), (241, 269), (241, 270), (241, 271), (241, 272), (241, 273), (241, 274), (241, 275), (241, 276), (241, 277), (241, 278), (241, 279), (241, 280), (241, 281), (241, 282), (241, 283), (241, 284), (241, 285), (241, 286), (241, 287), (241, 288), (241, 289), (241, 290), (241, 291), (241, 292), (241, 293), (241, 294), (241, 295), (241, 296), (241, 297), (241, 298), (241, 299), (241, 300), (241, 301), (241, 302), (241, 303), (241, 304), (241, 305), (241, 306), (241, 307), (241, 308), (241, 309), (241, 310), (241, 311), (241, 312), (241, 313), (241, 314), (241, 315), (241, 316), (241, 317), (241, 318), (241, 319), (241, 320), (241, 321), (241, 322), (241, 323), (241, 324), (241, 325),
(241, 326), (241, 327), (241, 328), (241, 329), (241, 330), (241, 331), (241, 332), (241, 333), (241, 334), (241, 335), (241, 336), (241, 337), (241, 338), (241, 339), (241, 340), (241, 341), (241, 342), (241, 343), (241, 344), (241, 345), (241, 346), (241, 347), (241, 348), (241, 349), (241, 350), (241, 351), (241, 352), (241, 353), (241, 354), (241, 355), (241, 356), (241, 357), (241, 358), (241, 359), (241, 360), (241, 361), (241, 362), (241, 363), (241, 364), (241, 365), (241, 366), (241, 367), (241, 368), (241, 369), (241, 370), (242, 262), (242, 264), (242, 265), (242, 266), (242, 267), (242, 268), (242, 269), (242, 270), (242, 271), (242, 272), (242, 273), (242, 274), (242, 275), (242, 276), (242, 277), (242, 278), (242, 279), (242, 280), (242, 281), (242, 282), (242, 283), (242, 284), (242, 285), (242, 286), (242, 287), (242, 288), (242, 289),
(242, 290), (242, 291), (242, 292), (242, 293), (242, 294), (242, 295), (242, 296), (242, 297), (242, 298), (242, 299), (242, 300), (242, 301), (242, 302), (242, 303), (242, 304), (242, 305), (242, 306), (242, 307), (242, 308), (242, 309), (242, 310), (242, 311), (242, 312), (242, 313), (242, 314), (242, 315), (242, 316), (242, 317), (242, 318), (242, 319), (242, 320), (242, 321), (242, 322), (242, 323), (242, 324), (242, 325), (242, 326), (242, 327), (242, 328), (242, 329), (242, 330), (242, 331), (242, 332), (242, 333), (242, 334), (242, 335), (242, 336), (242, 337), (242, 338), (242, 339), (242, 340), (242, 341), (242, 342), (242, 343), (242, 344), (242, 345), (242, 346), (242, 347), (242, 348), (242, 349), (242, 350), (242, 351), (242, 352), (242, 353), (242, 354), (242, 355), (242, 356), (242, 357), (242, 358), (242, 359), (242, 360), (242, 361),
(242, 362), (242, 363), (242, 364), (242, 365), (242, 366), (242, 367), (242, 368), (242, 369), (242, 370), (243, 261), (243, 262), (243, 263), (243, 264), (243, 265), (243, 266), (243, 267), (243, 268), (243, 269), (243, 270), (243, 271), (243, 272), (243, 273), (243, 274), (243, 275), (243, 276), (243, 277), (243, 278), (243, 279), (243, 280), (243, 281), (243, 282), (243, 283), (243, 284), (243, 285), (243, 286), (243, 287), (243, 288), (243, 289), (243, 290), (243, 291), (243, 292), (243, 293), (243, 294), (243, 295), (243, 296), (243, 297), (243, 298), (243, 299), (243, 300), (243, 301), (243, 302), (243, 303), (243, 304), (243, 305), (243, 306), (243, 307), (243, 308), (243, 309), (243, 310), (243, 311), (243, 312), (243, 313), (243, 314), (243, 315), (243, 316), (243, 317), (243, 318), (243, 319), (243, 320), (243, 321), (243, 322), (243, 323),
(243, 324), (243, 325), (243, 326), (243, 327), (243, 328), (243, 329), (243, 330), (243, 331), (243, 332), (243, 333), (243, 334), (243, 335), (243, 336), (243, 337), (243, 338), (243, 339), (243, 340), (243, 341), (243, 342), (243, 343), (243, 344), (243, 345), (243, 346), (243, 347), (243, 348), (243, 349), (243, 350), (243, 351), (243, 352), (243, 353), (243, 354), (243, 355), (243, 356), (243, 357), (243, 358), (243, 359), (243, 360), (243, 361), (243, 362), (243, 363), (243, 364), (243, 365), (243, 366), (243, 367), (243, 368), (243, 370), (244, 261), (244, 263), (244, 264), (244, 265), (244, 266), (244, 267), (244, 268), (244, 269), (244, 270), (244, 271), (244, 272), (244, 273), (244, 274), (244, 275), (244, 276), (244, 277), (244, 278), (244, 279), (244, 280), (244, 281), (244, 282), (244, 283), (244, 284), (244, 285), (244, 286), (244, 287),
(244, 288), (244, 289), (244, 290), (244, 291), (244, 292), (244, 293), (244, 294), (244, 295), (244, 296), (244, 297), (244, 298), (244, 299), (244, 300), (244, 301), (244, 302), (244, 303), (244, 304), (244, 305), (244, 306), (244, 307), (244, 308), (244, 309), (244, 310), (244, 311), (244, 312), (244, 313), (244, 314), (244, 315), (244, 316), (244, 317), (244, 318), (244, 319), (244, 320), (244, 321), (244, 322), (244, 323), (244, 324), (244, 325), (244, 326), (244, 327), (244, 328), (244, 329), (244, 330), (244, 331), (244, 332), (244, 333), (244, 334), (244, 335), (244, 336), (244, 337), (244, 338), (244, 339), (244, 340), (244, 341), (244, 342), (244, 343), (244, 344), (244, 345), (244, 346), (244, 347), (244, 348), (244, 349), (244, 350), (244, 351), (244, 352), (244, 353), (244, 354), (244, 355), (244, 356), (244, 357), (244, 358), (244, 359),
(244, 360), (244, 361), (244, 362), (244, 363), (244, 364), (244, 365), (244, 366), (244, 367), (244, 368), (244, 370), (245, 261), (245, 263), (245, 264), (245, 265), (245, 266), (245, 267), (245, 268), (245, 269), (245, 270), (245, 271), (245, 272), (245, 273), (245, 274), (245, 275), (245, 276), (245, 277), (245, 278), (245, 279), (245, 280), (245, 281), (245, 282), (245, 283), (245, 284), (245, 285), (245, 286), (245, 287), (245, 288), (245, 289), (245, 290), (245, 291), (245, 292), (245, 293), (245, 294), (245, 295), (245, 296), (245, 297), (245, 298), (245, 299), (245, 300), (245, 301), (245, 302), (245, 303), (245, 304), (245, 305), (245, 306), (245, 307), (245, 308), (245, 309), (245, 310), (245, 311), (245, 312), (245, 313), (245, 314), (245, 315), (245, 316), (245, 317), (245, 318), (245, 319), (245, 320), (245, 321), (245, 322), (245, 323),
(245, 324), (245, 325), (245, 326), (245, 327), (245, 328), (245, 329), (245, 330), (245, 331), (245, 332), (245, 333), (245, 334), (245, 335), (245, 336), (245, 337), (245, 338), (245, 339), (245, 340), (245, 341), (245, 342), (245, 343), (245, 344), (245, 345), (245, 346), (245, 347), (245, 348), (245, 349), (245, 350), (245, 351), (245, 352), (245, 353), (245, 354), (245, 355), (245, 356), (245, 357), (245, 358), (245, 359), (245, 360), (245, 361), (245, 362), (245, 363), (245, 364), (245, 365), (245, 366), (245, 367), (245, 368), (245, 370), (246, 260), (246, 262), (246, 263), (246, 264), (246, 265), (246, 266), (246, 267), (246, 268), (246, 269), (246, 270), (246, 271), (246, 272), (246, 273), (246, 274), (246, 275), (246, 276), (246, 277), (246, 278), (246, 279), (246, 280), (246, 281), (246, 282), (246, 283), (246, 284), (246, 285), (246, 286),
(246, 287), (246, 288), (246, 289), (246, 290), (246, 291), (246, 292), (246, 293), (246, 294), (246, 295), (246, 296), (246, 297), (246, 298), (246, 299), (246, 300), (246, 301), (246, 302), (246, 303), (246, 304), (246, 305), (246, 306), (246, 307), (246, 308), (246, 309), (246, 310), (246, 311), (246, 312), (246, 313), (246, 314), (246, 315), (246, 316), (246, 317), (246, 318), (246, 319), (246, 320), (246, 321), (246, 322), (246, 323), (246, 324), (246, 325), (246, 326), (246, 327), (246, 328), (246, 329), (246, 330), (246, 331), (246, 332), (246, 333), (246, 334), (246, 335), (246, 336), (246, 337), (246, 338), (246, 339), (246, 340), (246, 341), (246, 342), (246, 343), (246, 344), (246, 345), (246, 346), (246, 347), (246, 348), (246, 349), (246, 350), (246, 351), (246, 352), (246, 353), (246, 354), (246, 355), (246, 356), (246, 357), (246, 358),
(246, 359), (246, 360), (246, 361), (246, 362), (246, 363), (246, 364), (246, 365), (246, 366), (246, 367), (246, 368), (246, 370), (247, 260), (247, 262), (247, 263), (247, 264), (247, 265), (247, 266), (247, 267), (247, 268), (247, 269), (247, 270), (247, 271), (247, 272), (247, 273), (247, 274), (247, 275), (247, 276), (247, 277), (247, 278), (247, 279), (247, 280), (247, 281), (247, 282), (247, 283), (247, 284), (247, 285), (247, 286), (247, 287), (247, 288), (247, 289), (247, 290), (247, 291), (247, 292), (247, 293), (247, 294), (247, 295), (247, 296), (247, 297), (247, 298), (247, 299), (247, 300), (247, 301), (247, 302), (247, 303), (247, 304), (247, 305), (247, 306), (247, 307), (247, 308), (247, 309), (247, 310), (247, 311), (247, 312), (247, 313), (247, 314), (247, 315), (247, 316), (247, 317), (247, 318), (247, 319), (247, 320), (247, 321),
(247, 322), (247, 323), (247, 324), (247, 325), (247, 326), (247, 327), (247, 328), (247, 329), (247, 330), (247, 331), (247, 332), (247, 333), (247, 334), (247, 335), (247, 336), (247, 337), (247, 338), (247, 339), (247, 340), (247, 341), (247, 342), (247, 343), (247, 344), (247, 345), (247, 346), (247, 347), (247, 348), (247, 349), (247, 350), (247, 351), (247, 352), (247, 353), (247, 354), (247, 355), (247, 356), (247, 357), (247, 358), (247, 359), (247, 360), (247, 361), (247, 362), (247, 363), (247, 364), (247, 365), (247, 366), (247, 367), (247, 368), (247, 370), (248, 260), (248, 262), (248, 263), (248, 264), (248, 265), (248, 266), (248, 267), (248, 268), (248, 269), (248, 270), (248, 271), (248, 272), (248, 273), (248, 274), (248, 275), (248, 276), (248, 277), (248, 278), (248, 279), (248, 280), (248, 281), (248, 282), (248, 283), (248, 284),
(248, 285), (248, 286), (248, 287), (248, 288), (248, 289), (248, 290), (248, 291), (248, 292), (248, 293), (248, 294), (248, 295), (248, 296), (248, 297), (248, 298), (248, 299), (248, 300), (248, 301), (248, 302), (248, 303), (248, 304), (248, 305), (248, 306), (248, 307), (248, 308), (248, 309), (248, 310), (248, 311), (248, 312), (248, 313), (248, 314), (248, 315), (248, 316), (248, 317), (248, 318), (248, 319), (248, 320), (248, 321), (248, 322), (248, 323), (248, 324), (248, 325), (248, 326), (248, 327), (248, 328), (248, 329), (248, 330), (248, 331), (248, 332), (248, 333), (248, 334), (248, 335), (248, 336), (248, 337), (248, 338), (248, 339), (248, 340), (248, 341), (248, 342), (248, 343), (248, 344), (248, 345), (248, 346), (248, 347), (248, 348), (248, 349), (248, 350), (248, 351), (248, 352), (248, 353), (248, 354), (248, 355), (248, 356),
(248, 357), (248, 358), (248, 359), (248, 360), (248, 361), (248, 362), (248, 363), (248, 364), (248, 365), (248, 366), (248, 367), (248, 368), (248, 370), (249, 260), (249, 262), (249, 263), (249, 264), (249, 265), (249, 266), (249, 267), (249, 268), (249, 269), (249, 270), (249, 271), (249, 272), (249, 273), (249, 274), (249, 275), (249, 276), (249, 277), (249, 278), (249, 279), (249, 280), (249, 281), (249, 282), (249, 283), (249, 284), (249, 285), (249, 286), (249, 287), (249, 288), (249, 289), (249, 290), (249, 291), (249, 292), (249, 293), (249, 294), (249, 295), (249, 296), (249, 297), (249, 298), (249, 299), (249, 300), (249, 301), (249, 302), (249, 303), (249, 304), (249, 305), (249, 306), (249, 307), (249, 308), (249, 309), (249, 310), (249, 311), (249, 312), (249, 313), (249, 314), (249, 315), (249, 316), (249, 317), (249, 318), (249, 319),
(249, 320), (249, 321), (249, 322), (249, 323), (249, 324), (249, 325), (249, 326), (249, 327), (249, 328), (249, 329), (249, 330), (249, 331), (249, 332), (249, 333), (249, 334), (249, 335), (249, 336), (249, 337), (249, 338), (249, 339), (249, 340), (249, 341), (249, 342), (249, 343), (249, 344), (249, 345), (249, 346), (249, 347), (249, 348), (249, 349), (249, 350), (249, 351), (249, 352), (249, 353), (249, 354), (249, 355), (249, 356), (249, 357), (249, 358), (249, 359), (249, 360), (249, 361), (249, 362), (249, 363), (249, 364), (249, 365), (249, 366), (249, 367), (249, 368), (249, 370), (250, 260), (250, 262), (250, 263), (250, 264), (250, 265), (250, 266), (250, 267), (250, 268), (250, 269), (250, 270), (250, 271), (250, 272), (250, 273), (250, 274), (250, 275), (250, 276), (250, 277), (250, 278), (250, 279), (250, 280), (250, 281), (250, 282),
(250, 283), (250, 284), (250, 285), (250, 286), (250, 287), (250, 288), (250, 289), (250, 290), (250, 291), (250, 292), (250, 293), (250, 294), (250, 295), (250, 296), (250, 297), (250, 298), (250, 299), (250, 300), (250, 301), (250, 302), (250, 303), (250, 304), (250, 305), (250, 306), (250, 307), (250, 308), (250, 309), (250, 310), (250, 311), (250, 312), (250, 313), (250, 314), (250, 315), (250, 316), (250, 317), (250, 318), (250, 319), (250, 320), (250, 321), (250, 322), (250, 323), (250, 324), (250, 325), (250, 326), (250, 327), (250, 328), (250, 329), (250, 330), (250, 331), (250, 332), (250, 333), (250, 334), (250, 335), (250, 336), (250, 337), (250, 338), (250, 339), (250, 340), (250, 341), (250, 342), (250, 343), (250, 344), (250, 345), (250, 346), (250, 347), (250, 348), (250, 349), (250, 350), (250, 351), (250, 352), (250, 353), (250, 354),
(250, 355), (250, 356), (250, 357), (250, 358), (250, 359), (250, 360), (250, 361), (250, 362), (250, 363), (250, 364), (250, 365), (250, 366), (250, 367), (250, 368), (250, 370), (251, 260), (251, 262), (251, 263), (251, 264), (251, 265), (251, 266), (251, 267), (251, 268), (251, 269), (251, 270), (251, 271), (251, 272), (251, 273), (251, 274), (251, 275), (251, 276), (251, 277), (251, 278), (251, 279), (251, 280), (251, 281), (251, 282), (251, 283), (251, 284), (251, 285), (251, 286), (251, 287), (251, 288), (251, 289), (251, 290), (251, 291), (251, 292), (251, 293), (251, 294), (251, 295), (251, 296), (251, 297), (251, 298), (251, 299), (251, 300), (251, 301), (251, 302), (251, 303), (251, 304), (251, 305), (251, 306), (251, 307), (251, 308), (251, 309), (251, 310), (251, 311), (251, 312), (251, 313), (251, 314), (251, 315), (251, 316), (251, 317),
(251, 318), (251, 319), (251, 320), (251, 321), (251, 322), (251, 323), (251, 324), (251, 325), (251, 326), (251, 327), (251, 328), (251, 329), (251, 330), (251, 331), (251, 332), (251, 333), (251, 334), (251, 335), (251, 336), (251, 337), (251, 338), (251, 339), (251, 340), (251, 341), (251, 342), (251, 343), (251, 344), (251, 345), (251, 346), (251, 347), (251, 348), (251, 349), (251, 350), (251, 351), (251, 352), (251, 353), (251, 354), (251, 355), (251, 356), (251, 357), (251, 358), (251, 359), (251, 360), (251, 361), (251, 362), (251, 363), (251, 364), (251, 365), (251, 366), (251, 367), (251, 368), (251, 370), (252, 260), (252, 262), (252, 263), (252, 264), (252, 265), (252, 266), (252, 267), (252, 268), (252, 269), (252, 270), (252, 271), (252, 272), (252, 273), (252, 274), (252, 275), (252, 276), (252, 277), (252, 278), (252, 279), (252, 280),
(252, 281), (252, 282), (252, 283), (252, 284), (252, 285), (252, 286), (252, 287), (252, 288), (252, 289), (252, 290), (252, 291), (252, 292), (252, 293), (252, 294), (252, 295), (252, 296), (252, 297), (252, 298), (252, 299), (252, 300), (252, 301), (252, 302), (252, 303), (252, 304), (252, 305), (252, 306), (252, 307), (252, 308), (252, 309), (252, 310), (252, 311), (252, 312), (252, 313), (252, 314), (252, 315), (252, 316), (252, 317), (252, 318), (252, 319), (252, 320), (252, 321), (252, 322), (252, 323), (252, 324), (252, 325), (252, 326), (252, 327), (252, 328), (252, 329), (252, 330), (252, 331), (252, 332), (252, 333), (252, 334), (252, 335), (252, 336), (252, 337), (252, 338), (252, 339), (252, 340), (252, 341), (252, 342), (252, 343), (252, 344), (252, 345), (252, 346), (252, 347), (252, 348), (252, 349), (252, 350), (252, 351), (252, 352),
(252, 353), (252, 354), (252, 355), (252, 356), (252, 357), (252, 358), (252, 359), (252, 360), (252, 361), (252, 362), (252, 363), (252, 364), (252, 365), (252, 366), (252, 367), (252, 368), (252, 370), (253, 260), (253, 262), (253, 263), (253, 264), (253, 265), (253, 266), (253, 267), (253, 268), (253, 269), (253, 270), (253, 271), (253, 272), (253, 273), (253, 274), (253, 275), (253, 276), (253, 277), (253, 278), (253, 279), (253, 280), (253, 281), (253, 282), (253, 283), (253, 284), (253, 285), (253, 286), (253, 287), (253, 288), (253, 289), (253, 290), (253, 291), (253, 292), (253, 293), (253, 294), (253, 295), (253, 296), (253, 297), (253, 298), (253, 299), (253, 300), (253, 301), (253, 302), (253, 303), (253, 304), (253, 305), (253, 306), (253, 307), (253, 308), (253, 309), (253, 310), (253, 311), (253, 312), (253, 313), (253, 314), (253, 315),
(253, 316), (253, 317), (253, 318), (253, 319), (253, 320), (253, 321), (253, 322), (253, 323), (253, 324), (253, 325), (253, 326), (253, 327), (253, 328), (253, 329), (253, 330), (253, 331), (253, 332), (253, 333), (253, 334), (253, 335), (253, 336), (253, 337), (253, 338), (253, 339), (253, 340), (253, 341), (253, 342), (253, 343), (253, 344), (253, 345), (253, 346), (253, 347), (253, 348), (253, 349), (253, 350), (253, 351), (253, 352), (253, 353), (253, 354), (253, 355), (253, 356), (253, 357), (253, 358), (253, 359), (253, 360), (253, 361), (253, 362), (253, 363), (253, 364), (253, 365), (253, 366), (253, 367), (253, 368), (253, 370), (254, 260), (254, 262), (254, 263), (254, 264), (254, 265), (254, 266), (254, 267), (254, 268), (254, 269), (254, 270), (254, 271), (254, 272), (254, 273), (254, 274), (254, 275), (254, 276), (254, 277), (254, 278),
(254, 279), (254, 280), (254, 281), (254, 282), (254, 283), (254, 284), (254, 285), (254, 286), (254, 287), (254, 288), (254, 289), (254, 290), (254, 291), (254, 292), (254, 293), (254, 294), (254, 295), (254, 296), (254, 297), (254, 298), (254, 299), (254, 300), (254, 301), (254, 302), (254, 303), (254, 304), (254, 305), (254, 306), (254, 307), (254, 308), (254, 309), (254, 310), (254, 311), (254, 312), (254, 313), (254, 314), (254, 315), (254, 316), (254, 317), (254, 318), (254, 319), (254, 320), (254, 321), (254, 322), (254, 323), (254, 324), (254, 325), (254, 326), (254, 327), (254, 328), (254, 329), (254, 330), (254, 331), (254, 332), (254, 333), (254, 334), (254, 335), (254, 336), (254, 337), (254, 338), (254, 339), (254, 340), (254, 341), (254, 342), (254, 343), (254, 344), (254, 345), (254, 346), (254, 347), (254, 348), (254, 349), (254, 350),
(254, 351), (254, 352), (254, 353), (254, 354), (254, 355), (254, 356), (254, 357), (254, 358), (254, 359), (254, 360), (254, 361), (254, 362), (254, 363), (254, 364), (254, 365), (254, 366), (254, 367), (254, 368), (254, 370), (255, 260), (255, 262), (255, 263), (255, 264), (255, 265), (255, 266), (255, 267), (255, 268), (255, 269), (255, 270), (255, 271), (255, 272), (255, 273), (255, 274), (255, 275), (255, 276), (255, 277), (255, 278), (255, 279), (255, 280), (255, 281), (255, 282), (255, 283), (255, 284), (255, 285), (255, 286), (255, 287), (255, 288), (255, 289), (255, 290), (255, 291), (255, 292), (255, 293), (255, 294), (255, 295), (255, 296), (255, 297), (255, 298), (255, 299), (255, 300), (255, 301), (255, 302), (255, 303), (255, 304), (255, 305), (255, 306), (255, 307), (255, 308), (255, 309), (255, 310), (255, 311), (255, 312), (255, 313),
(255, 314), (255, 315), (255, 316), (255, 317), (255, 318), (255, 319), (255, 320), (255, 321), (255, 322), (255, 323), (255, 324), (255, 325), (255, 326), (255, 327), (255, 328), (255, 329), (255, 330), (255, 331), (255, 332), (255, 333), (255, 334), (255, 335), (255, 336), (255, 337), (255, 338), (255, 339), (255, 340), (255, 341), (255, 342), (255, 343), (255, 344), (255, 345), (255, 346), (255, 347), (255, 348), (255, 349), (255, 350), (255, 351), (255, 352), (255, 353), (255, 354), (255, 355), (255, 356), (255, 357), (255, 358), (255, 359), (255, 360), (255, 361), (255, 362), (255, 363), (255, 364), (255, 365), (255, 366), (255, 367), (255, 368), (255, 370), (256, 260), (256, 262), (256, 263), (256, 264), (256, 265), (256, 266), (256, 267), (256, 268), (256, 269), (256, 270), (256, 271), (256, 272), (256, 273), (256, 274), (256, 275), (256, 276),
(256, 277), (256, 278), (256, 279), (256, 280), (256, 281), (256, 282), (256, 283), (256, 284), (256, 285), (256, 286), (256, 287), (256, 288), (256, 289), (256, 290), (256, 291), (256, 292), (256, 293), (256, 294), (256, 295), (256, 296), (256, 297), (256, 298), (256, 299), (256, 300), (256, 301), (256, 302), (256, 303), (256, 304), (256, 305), (256, 306), (256, 307), (256, 308), (256, 309), (256, 310), (256, 311), (256, 312), (256, 313), (256, 314), (256, 315), (256, 316), (256, 317), (256, 318), (256, 319), (256, 320), (256, 321), (256, 322), (256, 323), (256, 324), (256, 325), (256, 326), (256, 327), (256, 328), (256, 329), (256, 330), (256, 331), (256, 332), (256, 333), (256, 334), (256, 335), (256, 336), (256, 337), (256, 338), (256, 339), (256, 340), (256, 341), (256, 342), (256, 343), (256, 344), (256, 345), (256, 346), (256, 347), (256, 348),
(256, 349), (256, 350), (256, 351), (256, 352), (256, 353), (256, 354), (256, 355), (256, 356), (256, 357), (256, 358), (256, 359), (256, 360), (256, 361), (256, 362), (256, 363), (256, 364), (256, 365), (256, 366), (256, 367), (256, 368), (256, 370), (257, 260), (257, 262), (257, 263), (257, 264), (257, 265), (257, 266), (257, 267), (257, 268), (257, 269), (257, 270), (257, 271), (257, 272), (257, 273), (257, 274), (257, 275), (257, 276), (257, 277), (257, 278), (257, 279), (257, 280), (257, 281), (257, 282), (257, 283), (257, 284), (257, 285), (257, 286), (257, 287), (257, 288), (257, 289), (257, 290), (257, 291), (257, 292), (257, 293), (257, 294), (257, 295), (257, 296), (257, 297), (257, 298), (257, 299), (257, 300), (257, 301), (257, 302), (257, 303), (257, 304), (257, 305), (257, 306), (257, 307), (257, 308), (257, 309), (257, 310), (257, 311),
(257, 312), (257, 313), (257, 314), (257, 315), (257, 316), (257, 317), (257, 318), (257, 319), (257, 320), (257, 321), (257, 322), (257, 323), (257, 324), (257, 325), (257, 326), (257, 327), (257, 328), (257, 329), (257, 330), (257, 331), (257, 332), (257, 333), (257, 334), (257, 335), (257, 336), (257, 337), (257, 338), (257, 339), (257, 340), (257, 341), (257, 342), (257, 343), (257, 344), (257, 345), (257, 346), (257, 347), (257, 348), (257, 349), (257, 350), (257, 351), (257, 352), (257, 353), (257, 354), (257, 355), (257, 356), (257, 357), (257, 358), (257, 359), (257, 360), (257, 361), (257, 362), (257, 363), (257, 364), (257, 365), (257, 366), (257, 367), (257, 368), (257, 370), (258, 260), (258, 262), (258, 263), (258, 264), (258, 265), (258, 266), (258, 267), (258, 268), (258, 269), (258, 270), (258, 271), (258, 272), (258, 273), (258, 274),
(258, 275), (258, 276), (258, 277), (258, 278), (258, 279), (258, 280), (258, 281), (258, 282), (258, 283), (258, 284), (258, 285), (258, 286), (258, 287), (258, 288), (258, 289), (258, 290), (258, 291), (258, 292), (258, 293), (258, 294), (258, 295), (258, 296), (258, 297), (258, 298), (258, 299), (258, 300), (258, 301), (258, 302), (258, 303), (258, 304), (258, 305), (258, 306), (258, 307), (258, 308), (258, 309), (258, 310), (258, 311), (258, 312), (258, 313), (258, 314), (258, 315), (258, 316), (258, 317), (258, 318), (258, 319), (258, 320), (258, 321), (258, 322), (258, 323), (258, 324), (258, 325), (258, 326), (258, 327), (258, 328), (258, 329), (258, 330), (258, 331), (258, 332), (258, 333), (258, 334), (258, 335), (258, 336), (258, 337), (258, 338), (258, 339), (258, 340), (258, 341), (258, 342), (258, 343), (258, 344), (258, 345), (258, 346),
(258, 347), (258, 348), (258, 349), (258, 350), (258, 351), (258, 352), (258, 353), (258, 354), (258, 355), (258, 356), (258, 357), (258, 358), (258, 359), (258, 360), (258, 361), (258, 362), (258, 363), (258, 364), (258, 365), (258, 366), (258, 367), (258, 368), (258, 370), (259, 260), (259, 262), (259, 263), (259, 264), (259, 265), (259, 266), (259, 267), (259, 268), (259, 269), (259, 270), (259, 271), (259, 272), (259, 273), (259, 274), (259, 275), (259, 276), (259, 277), (259, 278), (259, 279), (259, 280), (259, 281), (259, 282), (259, 283), (259, 284), (259, 285), (259, 286), (259, 287), (259, 288), (259, 289), (259, 290), (259, 291), (259, 292), (259, 293), (259, 294), (259, 295), (259, 296), (259, 297), (259, 298), (259, 299), (259, 300), (259, 301), (259, 302), (259, 303), (259, 304), (259, 305), (259, 306), (259, 307), (259, 308), (259, 309),
(259, 310), (259, 311), (259, 312), (259, 313), (259, 314), (259, 315), (259, 316), (259, 317), (259, 318), (259, 319), (259, 320), (259, 321), (259, 322), (259, 323), (259, 324), (259, 325), (259, 326), (259, 327), (259, 328), (259, 329), (259, 330), (259, 331), (259, 332), (259, 333), (259, 334), (259, 335), (259, 336), (259, 337), (259, 338), (259, 339), (259, 340), (259, 341), (259, 342), (259, 343), (259, 344), (259, 345), (259, 346), (259, 347), (259, 348), (259, 349), (259, 350), (259, 351), (259, 352), (259, 353), (259, 354), (259, 355), (259, 356), (259, 357), (259, 358), (259, 359), (259, 360), (259, 361), (259, 362), (259, 363), (259, 364), (259, 365), (259, 366), (259, 367), (259, 368), (259, 370), (260, 259), (260, 261), (260, 262), (260, 263), (260, 264), (260, 265), (260, 266), (260, 267), (260, 268), (260, 269), (260, 270), (260, 271),
(260, 272), (260, 273), (260, 274), (260, 275), (260, 276), (260, 277), (260, 278), (260, 279), (260, 280), (260, 281), (260, 282), (260, 283), (260, 284), (260, 285), (260, 286), (260, 287), (260, 288), (260, 289), (260, 290), (260, 291), (260, 292), (260, 293), (260, 294), (260, 295), (260, 296), (260, 297), (260, 298), (260, 299), (260, 300), (260, 301), (260, 302), (260, 303), (260, 304), (260, 305), (260, 306), (260, 307), (260, 308), (260, 309), (260, 310), (260, 311), (260, 312), (260, 313), (260, 314), (260, 315), (260, 316), (260, 317), (260, 318), (260, 319), (260, 320), (260, 321), (260, 322), (260, 323), (260, 324), (260, 325), (260, 326), (260, 327), (260, 328), (260, 329), (260, 330), (260, 331), (260, 332), (260, 333), (260, 334), (260, 335), (260, 336), (260, 337), (260, 338), (260, 339), (260, 340), (260, 341), (260, 342), (260, 343),
(260, 344), (260, 345), (260, 346), (260, 347), (260, 348), (260, 349), (260, 350), (260, 351), (260, 352), (260, 353), (260, 354), (260, 355), (260, 356), (260, 357), (260, 358), (260, 359), (260, 360), (260, 361), (260, 362), (260, 363), (260, 364), (260, 365), (260, 366), (260, 367), (260, 368), (260, 370), (261, 259), (261, 261), (261, 262), (261, 263), (261, 264), (261, 265), (261, 266), (261, 267), (261, 268), (261, 269), (261, 270), (261, 271), (261, 272), (261, 273), (261, 274), (261, 275), (261, 276), (261, 277), (261, 278), (261, 279), (261, 280), (261, 281), (261, 282), (261, 283), (261, 284), (261, 285), (261, 286), (261, 287), (261, 288), (261, 289), (261, 290), (261, 291), (261, 292), (261, 293), (261, 294), (261, 295), (261, 296), (261, 297), (261, 298), (261, 299), (261, 300), (261, 301), (261, 302), (261, 303), (261, 304), (261, 305),
(261, 306), (261, 307), (261, 308), (261, 309), (261, 310), (261, 311), (261, 312), (261, 313), (261, 314), (261, 315), (261, 316), (261, 317), (261, 318), (261, 319), (261, 320), (261, 321), (261, 322), (261, 323), (261, 324), (261, 325), (261, 326), (261, 327), (261, 328), (261, 329), (261, 330), (261, 331), (261, 332), (261, 333), (261, 334), (261, 335), (261, 336), (261, 337), (261, 338), (261, 339), (261, 340), (261, 341), (261, 342), (261, 343), (261, 344), (261, 345), (261, 346), (261, 347), (261, 348), (261, 349), (261, 350), (261, 351), (261, 352), (261, 353), (261, 354), (261, 355), (261, 356), (261, 357), (261, 358), (261, 359), (261, 360), (261, 361), (261, 362), (261, 363), (261, 364), (261, 365), (261, 366), (261, 367), (261, 368), (261, 369), (261, 371), (262, 259), (262, 261), (262, 262), (262, 263), (262, 264), (262, 265), (262, 266),
(262, 267), (262, 268), (262, 269), (262, 270), (262, 271), (262, 272), (262, 273), (262, 274), (262, 275), (262, 276), (262, 277), (262, 278), (262, 279), (262, 280), (262, 281), (262, 282), (262, 283), (262, 284), (262, 285), (262, 286), (262, 287), (262, 288), (262, 289), (262, 290), (262, 291), (262, 292), (262, 293), (262, 294), (262, 295), (262, 296), (262, 297), (262, 298), (262, 299), (262, 300), (262, 301), (262, 302), (262, 303), (262, 304), (262, 305), (262, 306), (262, 307), (262, 308), (262, 309), (262, 310), (262, 311), (262, 312), (262, 313), (262, 314), (262, 315), (262, 316), (262, 317), (262, 318), (262, 319), (262, 320), (262, 321), (262, 322), (262, 323), (262, 324), (262, 325), (262, 326), (262, 327), (262, 328), (262, 329), (262, 330), (262, 331), (262, 332), (262, 333), (262, 334), (262, 335), (262, 336), (262, 337), (262, 338),
(262, 339), (262, 340), (262, 341), (262, 342), (262, 343), (262, 344), (262, 345), (262, 346), (262, 347), (262, 348), (262, 349), (262, 350), (262, 351), (262, 352), (262, 353), (262, 354), (262, 355), (262, 356), (262, 357), (262, 358), (262, 359), (262, 360), (262, 361), (262, 362), (262, 363), (262, 364), (262, 365), (262, 366), (262, 367), (262, 368), (262, 369), (262, 371), (263, 260), (263, 262), (263, 263), (263, 264), (263, 265), (263, 266), (263, 267), (263, 268), (263, 269), (263, 270), (263, 271), (263, 272), (263, 273), (263, 274), (263, 275), (263, 276), (263, 277), (263, 278), (263, 279), (263, 280), (263, 281), (263, 282), (263, 283), (263, 284), (263, 285), (263, 286), (263, 287), (263, 288), (263, 289), (263, 290), (263, 291), (263, 292), (263, 293), (263, 294), (263, 295), (263, 296), (263, 297), (263, 298), (263, 299), (263, 300),
(263, 301), (263, 302), (263, 303), (263, 304), (263, 305), (263, 306), (263, 307), (263, 308), (263, 309), (263, 310), (263, 311), (263, 312), (263, 313), (263, 314), (263, 315), (263, 316), (263, 317), (263, 318), (263, 319), (263, 320), (263, 321), (263, 322), (263, 323), (263, 324), (263, 325), (263, 326), (263, 327), (263, 328), (263, 329), (263, 330), (263, 331), (263, 332), (263, 333), (263, 334), (263, 335), (263, 336), (263, 337), (263, 338), (263, 339), (263, 340), (263, 341), (263, 342), (263, 343), (263, 344), (263, 345), (263, 346), (263, 347), (263, 348), (263, 349), (263, 350), (263, 351), (263, 352), (263, 353), (263, 354), (263, 355), (263, 356), (263, 357), (263, 358), (263, 359), (263, 360), (263, 361), (263, 362), (263, 363), (263, 364), (263, 365), (263, 366), (263, 367), (263, 368), (263, 369), (263, 371), (264, 260), (264, 262),
(264, 263), (264, 264), (264, 265), (264, 266), (264, 267), (264, 268), (264, 269), (264, 270), (264, 271), (264, 272), (264, 273), (264, 274), (264, 275), (264, 276), (264, 277), (264, 278), (264, 279), (264, 280), (264, 281), (264, 282), (264, 283), (264, 284), (264, 285), (264, 286), (264, 287), (264, 288), (264, 289), (264, 290), (264, 291), (264, 292), (264, 293), (264, 294), (264, 295), (264, 296), (264, 297), (264, 298), (264, 299), (264, 300), (264, 301), (264, 302), (264, 303), (264, 304), (264, 305), (264, 306), (264, 307), (264, 308), (264, 309), (264, 310), (264, 311), (264, 312), (264, 313), (264, 314), (264, 315), (264, 316), (264, 317), (264, 318), (264, 319), (264, 320), (264, 321), (264, 322), (264, 323), (264, 324), (264, 325), (264, 326), (264, 327), (264, 328), (264, 329), (264, 330), (264, 331), (264, 332), (264, 333), (264, 334),
(264, 335), (264, 336), (264, 337), (264, 338), (264, 339), (264, 340), (264, 341), (264, 342), (264, 343), (264, 344), (264, 345), (264, 346), (264, 347), (264, 348), (264, 349), (264, 350), (264, 351), (264, 352), (264, 353), (264, 354), (264, 355), (264, 356), (264, 357), (264, 358), (264, 359), (264, 360), (264, 361), (264, 362), (264, 363), (264, 364), (264, 365), (264, 366), (264, 367), (264, 368), (264, 369), (264, 371), (265, 258), (265, 260), (265, 261), (265, 262), (265, 263), (265, 264), (265, 265), (265, 266), (265, 267), (265, 268), (265, 269), (265, 270), (265, 271), (265, 272), (265, 273), (265, 274), (265, 275), (265, 276), (265, 277), (265, 278), (265, 279), (265, 280), (265, 281), (265, 282), (265, 283), (265, 284), (265, 285), (265, 286), (265, 287), (265, 288), (265, 289), (265, 290), (265, 291), (265, 292), (265, 293), (265, 294),
(265, 295), (265, 296), (265, 297), (265, 298), (265, 299), (265, 300), (265, 301), (265, 302), (265, 303), (265, 304), (265, 305), (265, 306), (265, 307), (265, 308), (265, 309), (265, 310), (265, 311), (265, 312), (265, 313), (265, 314), (265, 315), (265, 316), (265, 317), (265, 318), (265, 319), (265, 320), (265, 321), (265, 322), (265, 323), (265, 324), (265, 325), (265, 326), (265, 327), (265, 328), (265, 329), (265, 330), (265, 331), (265, 332), (265, 333), (265, 334), (265, 335), (265, 336), (265, 337), (265, 338), (265, 339), (265, 340), (265, 341), (265, 342), (265, 343), (265, 344), (265, 345), (265, 346), (265, 347), (265, 348), (265, 349), (265, 350), (265, 351), (265, 352), (265, 353), (265, 354), (265, 355), (265, 356), (265, 357), (265, 358), (265, 359), (265, 360), (265, 361), (265, 362), (265, 363), (265, 364), (265, 365), (265, 366),
(265, 367), (265, 368), (265, 369), (265, 370), (265, 372), (266, 259), (266, 261), (266, 262), (266, 263), (266, 264), (266, 265), (266, 266), (266, 267), (266, 268), (266, 269), (266, 270), (266, 271), (266, 272), (266, 273), (266, 274), (266, 275), (266, 276), (266, 277), (266, 278), (266, 279), (266, 280), (266, 281), (266, 282), (266, 283), (266, 284), (266, 285), (266, 286), (266, 287), (266, 288), (266, 289), (266, 290), (266, 291), (266, 292), (266, 293), (266, 294), (266, 295), (266, 296), (266, 297), (266, 298), (266, 299), (266, 300), (266, 301), (266, 302), (266, 303), (266, 304), (266, 305), (266, 306), (266, 307), (266, 308), (266, 309), (266, 310), (266, 311), (266, 312), (266, 313), (266, 314), (266, 315), (266, 316), (266, 317), (266, 318), (266, 319), (266, 320), (266, 321), (266, 322), (266, 323), (266, 324), (266, 325), (266, 326),
(266, 327), (266, 328), (266, 329), (266, 330), (266, 331), (266, 332), (266, 333), (266, 334), (266, 335), (266, 336), (266, 337), (266, 338), (266, 339), (266, 340), (266, 341), (266, 342), (266, 343), (266, 344), (266, 345), (266, 346), (266, 347), (266, 348), (266, 349), (266, 350), (266, 351), (266, 352), (266, 353), (266, 354), (266, 355), (266, 356), (266, 357), (266, 358), (266, 359), (266, 360), (266, 361), (266, 362), (266, 363), (266, 364), (266, 365), (266, 366), (266, 367), (266, 368), (266, 369), (266, 370), (266, 372), (267, 260), (267, 262), (267, 263), (267, 264), (267, 265), (267, 266), (267, 267), (267, 268), (267, 269), (267, 270), (267, 271), (267, 272), (267, 273), (267, 274), (267, 275), (267, 276), (267, 277), (267, 278), (267, 279), (267, 280), (267, 281), (267, 282), (267, 283), (267, 284), (267, 285), (267, 286), (267, 287),
(267, 288), (267, 289), (267, 290), (267, 291), (267, 292), (267, 293), (267, 294), (267, 295), (267, 296), (267, 297), (267, 298), (267, 299), (267, 300), (267, 301), (267, 302), (267, 303), (267, 304), (267, 305), (267, 306), (267, 307), (267, 308), (267, 309), (267, 310), (267, 311), (267, 312), (267, 313), (267, 314), (267, 315), (267, 316), (267, 317), (267, 318), (267, 319), (267, 320), (267, 321), (267, 322), (267, 323), (267, 324), (267, 325), (267, 326), (267, 327), (267, 328), (267, 329), (267, 330), (267, 331), (267, 332), (267, 333), (267, 334), (267, 335), (267, 336), (267, 337), (267, 338), (267, 339), (267, 340), (267, 341), (267, 342), (267, 343), (267, 344), (267, 345), (267, 346), (267, 347), (267, 348), (267, 349), (267, 350), (267, 351), (267, 352), (267, 353), (267, 354), (267, 355), (267, 356), (267, 357), (267, 358), (267, 359),
(267, 360), (267, 361), (267, 362), (267, 363), (267, 364), (267, 365), (267, 366), (267, 367), (267, 368), (267, 369), (267, 370), (267, 372), (268, 260), (268, 262), (268, 263), (268, 264), (268, 265), (268, 266), (268, 267), (268, 268), (268, 269), (268, 270), (268, 271), (268, 272), (268, 273), (268, 274), (268, 275), (268, 276), (268, 277), (268, 278), (268, 279), (268, 280), (268, 281), (268, 282), (268, 283), (268, 284), (268, 285), (268, 286), (268, 287), (268, 288), (268, 289), (268, 290), (268, 291), (268, 292), (268, 293), (268, 294), (268, 295), (268, 296), (268, 297), (268, 298), (268, 299), (268, 300), (268, 301), (268, 302), (268, 303), (268, 304), (268, 305), (268, 306), (268, 307), (268, 308), (268, 309), (268, 310), (268, 311), (268, 312), (268, 313), (268, 314), (268, 315), (268, 316), (268, 317), (268, 318), (268, 319), (268, 320),
(268, 321), (268, 322), (268, 323), (268, 324), (268, 325), (268, 326), (268, 327), (268, 328), (268, 329), (268, 330), (268, 331), (268, 332), (268, 333), (268, 334), (268, 335), (268, 336), (268, 337), (268, 338), (268, 339), (268, 340), (268, 341), (268, 342), (268, 343), (268, 344), (268, 345), (268, 346), (268, 347), (268, 348), (268, 349), (268, 350), (268, 351), (268, 352), (268, 353), (268, 354), (268, 355), (268, 356), (268, 357), (268, 358), (268, 359), (268, 360), (268, 361), (268, 362), (268, 363), (268, 364), (268, 365), (268, 366), (268, 367), (268, 368), (268, 369), (268, 370), (268, 371), (268, 373), (269, 261), (269, 263), (269, 264), (269, 265), (269, 266), (269, 267), (269, 268), (269, 269), (269, 270), (269, 271), (269, 272), (269, 273), (269, 274), (269, 275), (269, 276), (269, 277), (269, 278), (269, 279), (269, 280), (269, 281),
(269, 282), (269, 283), (269, 284), (269, 285), (269, 286), (269, 287), (269, 288), (269, 289), (269, 290), (269, 291), (269, 292), (269, 293), (269, 294), (269, 295), (269, 296), (269, 297), (269, 298), (269, 299), (269, 300), (269, 301), (269, 302), (269, 303), (269, 304), (269, 305), (269, 306), (269, 307), (269, 308), (269, 309), (269, 310), (269, 311), (269, 312), (269, 313), (269, 314), (269, 315), (269, 316), (269, 317), (269, 318), (269, 319), (269, 320), (269, 321), (269, 322), (269, 323), (269, 324), (269, 325), (269, 326), (269, 327), (269, 328), (269, 329), (269, 330), (269, 331), (269, 332), (269, 333), (269, 334), (269, 335), (269, 336), (269, 337), (269, 338), (269, 339), (269, 340), (269, 341), (269, 342), (269, 343), (269, 344), (269, 345), (269, 346), (269, 347), (269, 348), (269, 349), (269, 350), (269, 351), (269, 352), (269, 353),
(269, 354), (269, 355), (269, 356), (269, 357), (269, 358), (269, 359), (269, 360), (269, 361), (269, 362), (269, 363), (269, 364), (269, 365), (269, 366), (269, 367), (269, 368), (269, 369), (269, 370), (269, 371), (269, 373), (270, 261), (270, 263), (270, 264), (270, 265), (270, 266), (270, 267), (270, 268), (270, 269), (270, 270), (270, 271), (270, 272), (270, 273), (270, 274), (270, 275), (270, 276), (270, 277), (270, 278), (270, 279), (270, 280), (270, 281), (270, 282), (270, 283), (270, 284), (270, 285), (270, 286), (270, 287), (270, 288), (270, 289), (270, 290), (270, 291), (270, 292), (270, 293), (270, 294), (270, 295), (270, 296), (270, 297), (270, 298), (270, 299), (270, 300), (270, 301), (270, 302), (270, 303), (270, 304), (270, 305), (270, 306), (270, 307), (270, 308), (270, 309), (270, 310), (270, 311), (270, 312), (270, 313), (270, 314),
(270, 315), (270, 316), (270, 317), (270, 318), (270, 319), (270, 320), (270, 321), (270, 322), (270, 323), (270, 324), (270, 325), (270, 326), (270, 327), (270, 328), (270, 329), (270, 330), (270, 331), (270, 332), (270, 333), (270, 334), (270, 335), (270, 336), (270, 337), (270, 338), (270, 339), (270, 340), (270, 341), (270, 342), (270, 343), (270, 344), (270, 345), (270, 346), (270, 347), (270, 348), (270, 349), (270, 350), (270, 351), (270, 352), (270, 353), (270, 354), (270, 355), (270, 356), (270, 357), (270, 358), (270, 359), (270, 360), (270, 361), (270, 362), (270, 363), (270, 364), (270, 365), (270, 366), (270, 367), (270, 368), (270, 369), (270, 370), (270, 371), (270, 373), (271, 261), (271, 263), (271, 264), (271, 265), (271, 266), (271, 267), (271, 268), (271, 269), (271, 270), (271, 271), (271, 272), (271, 273), (271, 274), (271, 275),
(271, 276), (271, 277), (271, 278), (271, 279), (271, 280), (271, 281), (271, 282), (271, 283), (271, 284), (271, 285), (271, 286), (271, 287), (271, 288), (271, 289), (271, 290), (271, 291), (271, 292), (271, 293), (271, 294), (271, 295), (271, 296), (271, 297), (271, 298), (271, 299), (271, 300), (271, 301), (271, 302), (271, 303), (271, 304), (271, 305), (271, 306), (271, 307), (271, 308), (271, 309), (271, 310), (271, 311), (271, 312), (271, 313), (271, 314), (271, 315), (271, 316), (271, 317), (271, 318), (271, 319), (271, 320), (271, 321), (271, 322), (271, 323), (271, 324), (271, 325), (271, 326), (271, 327), (271, 328), (271, 329), (271, 330), (271, 331), (271, 332), (271, 333), (271, 334), (271, 335), (271, 336), (271, 337), (271, 338), (271, 339), (271, 340), (271, 341), (271, 342), (271, 343), (271, 344), (271, 345), (271, 346), (271, 347),
(271, 348), (271, 349), (271, 350), (271, 351), (271, 352), (271, 353), (271, 354), (271, 355), (271, 356), (271, 357), (271, 358), (271, 359), (271, 360), (271, 361), (271, 362), (271, 363), (271, 364), (271, 365), (271, 366), (271, 367), (271, 368), (271, 369), (271, 370), (271, 371), (271, 372), (271, 374), (272, 261), (272, 263), (272, 264), (272, 265), (272, 266), (272, 267), (272, 268), (272, 269), (272, 270), (272, 271), (272, 272), (272, 273), (272, 274), (272, 275), (272, 276), (272, 277), (272, 278), (272, 279), (272, 280), (272, 281), (272, 282), (272, 283), (272, 284), (272, 285), (272, 286), (272, 287), (272, 288), (272, 289), (272, 290), (272, 291), (272, 292), (272, 293), (272, 294), (272, 295), (272, 296), (272, 297), (272, 298), (272, 299), (272, 300), (272, 301), (272, 302), (272, 303), (272, 304), (272, 305), (272, 306), (272, 307),
(272, 308), (272, 309), (272, 310), (272, 311), (272, 312), (272, 313), (272, 314), (272, 315), (272, 316), (272, 317), (272, 318), (272, 319), (272, 320), (272, 321), (272, 322), (272, 323), (272, 324), (272, 325), (272, 326), (272, 327), (272, 328), (272, 329), (272, 330), (272, 331), (272, 332), (272, 333), (272, 334), (272, 335), (272, 336), (272, 337), (272, 338), (272, 339), (272, 340), (272, 341), (272, 342), (272, 343), (272, 344), (272, 345), (272, 346), (272, 347), (272, 348), (272, 349), (272, 350), (272, 351), (272, 352), (272, 353), (272, 354), (272, 355), (272, 356), (272, 357), (272, 358), (272, 359), (272, 360), (272, 361), (272, 362), (272, 363), (272, 364), (272, 365), (272, 366), (272, 367), (272, 368), (272, 369), (272, 370), (272, 371), (272, 372), (272, 373), (272, 376), (273, 261), (273, 263), (273, 264), (273, 265), (273, 266),
(273, 267), (273, 268), (273, 269), (273, 270), (273, 271), (273, 272), (273, 273), (273, 274), (273, 275), (273, 276), (273, 277), (273, 278), (273, 279), (273, 280), (273, 281), (273, 282), (273, 283), (273, 284), (273, 285), (273, 286), (273, 287), (273, 288), (273, 289), (273, 290), (273, 291), (273, 292), (273, 293), (273, 294), (273, 295), (273, 296), (273, 297), (273, 298), (273, 299), (273, 300), (273, 301), (273, 302), (273, 303), (273, 304), (273, 305), (273, 306), (273, 307), (273, 308), (273, 309), (273, 310), (273, 311), (273, 312), (273, 313), (273, 314), (273, 315), (273, 316), (273, 317), (273, 318), (273, 319), (273, 320), (273, 321), (273, 322), (273, 323), (273, 324), (273, 325), (273, 326), (273, 327), (273, 328), (273, 329), (273, 330), (273, 331), (273, 332), (273, 333), (273, 334), (273, 335), (273, 336), (273, 337), (273, 338),
(273, 339), (273, 340), (273, 341), (273, 342), (273, 343), (273, 344), (273, 345), (273, 346), (273, 347), (273, 348), (273, 349), (273, 350), (273, 351), (273, 352), (273, 353), (273, 354), (273, 355), (273, 356), (273, 357), (273, 358), (273, 359), (273, 360), (273, 361), (273, 362), (273, 363), (273, 364), (273, 365), (273, 366), (273, 367), (273, 368), (273, 369), (273, 370), (273, 371), (273, 372), (273, 373), (273, 374), (273, 377), (274, 261), (274, 263), (274, 264), (274, 265), (274, 266), (274, 267), (274, 268), (274, 269), (274, 270), (274, 271), (274, 272), (274, 273), (274, 274), (274, 275), (274, 276), (274, 277), (274, 278), (274, 279), (274, 280), (274, 281), (274, 282), (274, 283), (274, 284), (274, 285), (274, 286), (274, 287), (274, 288), (274, 289), (274, 290), (274, 291), (274, 292), (274, 293), (274, 294), (274, 295), (274, 296),
(274, 297), (274, 298), (274, 299), (274, 300), (274, 301), (274, 302), (274, 303), (274, 304), (274, 305), (274, 306), (274, 307), (274, 308), (274, 309), (274, 310), (274, 311), (274, 312), (274, 313), (274, 314), (274, 315), (274, 316), (274, 317), (274, 318), (274, 319), (274, 320), (274, 321), (274, 322), (274, 323), (274, 324), (274, 325), (274, 326), (274, 327), (274, 328), (274, 329), (274, 330), (274, 331), (274, 332), (274, 333), (274, 334), (274, 335), (274, 336), (274, 337), (274, 338), (274, 339), (274, 340), (274, 341), (274, 342), (274, 343), (274, 344), (274, 345), (274, 346), (274, 347), (274, 348), (274, 349), (274, 350), (274, 351), (274, 352), (274, 353), (274, 354), (274, 355), (274, 356), (274, 357), (274, 358), (274, 359), (274, 360), (274, 361), (274, 362), (274, 363), (274, 364), (274, 365), (274, 366), (274, 367), (274, 368),
(274, 369), (274, 370), (274, 371), (274, 372), (274, 373), (274, 374), (274, 375), (274, 378), (275, 261), (275, 263), (275, 264), (275, 265), (275, 266), (275, 267), (275, 268), (275, 269), (275, 270), (275, 271), (275, 272), (275, 273), (275, 274), (275, 275), (275, 276), (275, 277), (275, 278), (275, 279), (275, 280), (275, 281), (275, 282), (275, 283), (275, 284), (275, 285), (275, 286), (275, 287), (275, 288), (275, 289), (275, 290), (275, 291), (275, 292), (275, 293), (275, 294), (275, 295), (275, 296), (275, 297), (275, 298), (275, 299), (275, 300), (275, 301), (275, 302), (275, 303), (275, 304), (275, 305), (275, 306), (275, 307), (275, 308), (275, 309), (275, 310), (275, 311), (275, 312), (275, 313), (275, 314), (275, 315), (275, 316), (275, 317), (275, 318), (275, 319), (275, 320), (275, 321), (275, 322), (275, 323), (275, 324), (275, 325),
(275, 326), (275, 327), (275, 328), (275, 329), (275, 330), (275, 331), (275, 332), (275, 333), (275, 334), (275, 335), (275, 336), (275, 337), (275, 338), (275, 339), (275, 340), (275, 341), (275, 342), (275, 343), (275, 344), (275, 345), (275, 346), (275, 347), (275, 348), (275, 349), (275, 350), (275, 351), (275, 352), (275, 353), (275, 354), (275, 355), (275, 356), (275, 357), (275, 358), (275, 359), (275, 360), (275, 361), (275, 362), (275, 363), (275, 364), (275, 365), (275, 366), (275, 367), (275, 368), (275, 369), (275, 370), (275, 371), (275, 372), (275, 373), (275, 374), (275, 375), (275, 376), (275, 377), (275, 379), (276, 263), (276, 264), (276, 265), (276, 266), (276, 267), (276, 268), (276, 269), (276, 270), (276, 271), (276, 272), (276, 273), (276, 274), (276, 275), (276, 276), (276, 277), (276, 278), (276, 279), (276, 280), (276, 281),
(276, 282), (276, 283), (276, 284), (276, 285), (276, 286), (276, 287), (276, 288), (276, 289), (276, 290), (276, 291), (276, 292), (276, 293), (276, 294), (276, 295), (276, 296), (276, 297), (276, 298), (276, 299), (276, 300), (276, 301), (276, 302), (276, 303), (276, 304), (276, 305), (276, 306), (276, 307), (276, 308), (276, 309), (276, 310), (276, 311), (276, 312), (276, 313), (276, 314), (276, 315), (276, 316), (276, 317), (276, 318), (276, 319), (276, 320), (276, 321), (276, 322), (276, 323), (276, 324), (276, 325), (276, 326), (276, 327), (276, 328), (276, 329), (276, 330), (276, 331), (276, 332), (276, 333), (276, 334), (276, 335), (276, 336), (276, 337), (276, 338), (276, 339), (276, 340), (276, 341), (276, 342), (276, 343), (276, 344), (276, 345), (276, 346), (276, 347), (276, 348), (276, 349), (276, 350), (276, 351), (276, 352), (276, 353),
(276, 354), (276, 355), (276, 356), (276, 357), (276, 358), (276, 359), (276, 360), (276, 361), (276, 362), (276, 363), (276, 364), (276, 365), (276, 366), (276, 367), (276, 368), (276, 369), (276, 370), (276, 371), (276, 372), (276, 373), (276, 374), (276, 375), (276, 376), (276, 377), (276, 378), (276, 380), (277, 262), (277, 264), (277, 265), (277, 266), (277, 267), (277, 268), (277, 269), (277, 270), (277, 271), (277, 272), (277, 273), (277, 274), (277, 275), (277, 276), (277, 277), (277, 278), (277, 279), (277, 280), (277, 281), (277, 282), (277, 283), (277, 284), (277, 285), (277, 286), (277, 287), (277, 288), (277, 289), (277, 290), (277, 291), (277, 292), (277, 293), (277, 294), (277, 295), (277, 296), (277, 297), (277, 298), (277, 299), (277, 300), (277, 301), (277, 302), (277, 303), (277, 304), (277, 305), (277, 306), (277, 307), (277, 308),
(277, 309), (277, 310), (277, 311), (277, 312), (277, 313), (277, 314), (277, 315), (277, 316), (277, 317), (277, 318), (277, 319), (277, 320), (277, 321), (277, 322), (277, 323), (277, 324), (277, 325), (277, 326), (277, 327), (277, 328), (277, 329), (277, 330), (277, 331), (277, 332), (277, 333), (277, 334), (277, 335), (277, 336), (277, 337), (277, 338), (277, 339), (277, 340), (277, 341), (277, 342), (277, 343), (277, 344), (277, 345), (277, 346), (277, 347), (277, 348), (277, 349), (277, 350), (277, 351), (277, 352), (277, 353), (277, 354), (277, 355), (277, 356), (277, 357), (277, 358), (277, 359), (277, 360), (277, 361), (277, 362), (277, 363), (277, 364), (277, 365), (277, 366), (277, 367), (277, 368), (277, 369), (277, 370), (277, 371), (277, 372), (277, 373), (277, 374), (277, 375), (277, 376), (277, 377), (277, 378), (277, 379), (277, 381),
(278, 263), (278, 265), (278, 266), (278, 267), (278, 268), (278, 269), (278, 270), (278, 271), (278, 272), (278, 273), (278, 274), (278, 275), (278, 276), (278, 277), (278, 278), (278, 279), (278, 280), (278, 281), (278, 282), (278, 283), (278, 284), (278, 285), (278, 286), (278, 287), (278, 288), (278, 289), (278, 290), (278, 291), (278, 292), (278, 293), (278, 294), (278, 295), (278, 296), (278, 297), (278, 298), (278, 299), (278, 300), (278, 301), (278, 302), (278, 303), (278, 304), (278, 305), (278, 306), (278, 307), (278, 308), (278, 309), (278, 310), (278, 311), (278, 312), (278, 313), (278, 314), (278, 315), (278, 316), (278, 317), (278, 318), (278, 319), (278, 320), (278, 321), (278, 322), (278, 323), (278, 324), (278, 325), (278, 326), (278, 327), (278, 328), (278, 329), (278, 330), (278, 331), (278, 332), (278, 333), (278, 334), (278, 335),
(278, 336), (278, 337), (278, 338), (278, 339), (278, 340), (278, 341), (278, 342), (278, 343), (278, 344), (278, 345), (278, 346), (278, 347), (278, 348), (278, 349), (278, 350), (278, 351), (278, 352), (278, 353), (278, 354), (278, 355), (278, 356), (278, 357), (278, 358), (278, 359), (278, 360), (278, 361), (278, 362), (278, 363), (278, 364), (278, 365), (278, 366), (278, 367), (278, 368), (278, 369), (278, 370), (278, 371), (278, 372), (278, 373), (278, 374), (278, 375), (278, 376), (278, 377), (278, 378), (278, 379), (278, 381), (279, 266), (279, 267), (279, 268), (279, 269), (279, 270), (279, 271), (279, 272), (279, 273), (279, 274), (279, 275), (279, 276), (279, 277), (279, 278), (279, 279), (279, 280), (279, 281), (279, 282), (279, 283), (279, 284), (279, 285), (279, 286), (279, 287), (279, 288), (279, 289), (279, 290), (279, 291), (279, 292),
(279, 293), (279, 294), (279, 295), (279, 296), (279, 297), (279, 298), (279, 299), (279, 300), (279, 301), (279, 302), (279, 303), (279, 304), (279, 305), (279, 306), (279, 307), (279, 308), (279, 309), (279, 310), (279, 311), (279, 312), (279, 313), (279, 314), (279, 315), (279, 316), (279, 317), (279, 318), (279, 319), (279, 320), (279, 321), (279, 322), (279, 323), (279, 324), (279, 325), (279, 326), (279, 327), (279, 328), (279, 329), (279, 330), (279, 331), (279, 332), (279, 333), (279, 334), (279, 335), (279, 336), (279, 337), (279, 338), (279, 339), (279, 340), (279, 341), (279, 342), (279, 343), (279, 344), (279, 345), (279, 346), (279, 347), (279, 348), (279, 349), (279, 350), (279, 351), (279, 352), (279, 353), (279, 354), (279, 355), (279, 356), (279, 357), (279, 358), (279, 359), (279, 360), (279, 361), (279, 362), (279, 363), (279, 364),
(279, 365), (279, 366), (279, 367), (279, 368), (279, 369), (279, 370), (279, 371), (279, 372), (279, 373), (279, 374), (279, 375), (279, 376), (279, 377), (279, 378), (279, 379), (279, 381), (280, 264), (280, 266), (280, 267), (280, 268), (280, 269), (280, 270), (280, 271), (280, 272), (280, 273), (280, 274), (280, 275), (280, 276), (280, 277), (280, 278), (280, 279), (280, 280), (280, 281), (280, 282), (280, 283), (280, 284), (280, 285), (280, 286), (280, 287), (280, 288), (280, 289), (280, 290), (280, 291), (280, 292), (280, 293), (280, 294), (280, 295), (280, 296), (280, 297), (280, 298), (280, 299), (280, 300), (280, 301), (280, 302), (280, 303), (280, 304), (280, 305), (280, 306), (280, 307), (280, 308), (280, 309), (280, 310), (280, 311), (280, 312), (280, 313), (280, 314), (280, 315), (280, 316), (280, 317), (280, 318), (280, 319), (280, 320),
(280, 321), (280, 322), (280, 323), (280, 324), (280, 325), (280, 326), (280, 327), (280, 328), (280, 329), (280, 330), (280, 331), (280, 332), (280, 333), (280, 334), (280, 335), (280, 336), (280, 337), (280, 338), (280, 339), (280, 340), (280, 341), (280, 342), (280, 343), (280, 344), (280, 345), (280, 346), (280, 347), (280, 348), (280, 349), (280, 350), (280, 351), (280, 352), (280, 353), (280, 354), (280, 355), (280, 356), (280, 357), (280, 358), (280, 359), (280, 360), (280, 361), (280, 362), (280, 363), (280, 364), (280, 365), (280, 366), (280, 367), (280, 368), (280, 369), (280, 370), (280, 371), (280, 372), (280, 373), (280, 374), (280, 375), (280, 376), (280, 377), (280, 378), (280, 379), (280, 381), (281, 265), (281, 267), (281, 268), (281, 269), (281, 270), (281, 271), (281, 272), (281, 273), (281, 274), (281, 275), (281, 276), (281, 277),
(281, 278), (281, 279), (281, 280), (281, 281), (281, 282), (281, 283), (281, 284), (281, 285), (281, 286), (281, 287), (281, 288), (281, 289), (281, 290), (281, 291), (281, 292), (281, 293), (281, 294), (281, 295), (281, 296), (281, 297), (281, 298), (281, 299), (281, 300), (281, 301), (281, 302), (281, 303), (281, 304), (281, 305), (281, 306), (281, 307), (281, 308), (281, 309), (281, 310), (281, 311), (281, 312), (281, 313), (281, 314), (281, 315), (281, 316), (281, 317), (281, 318), (281, 319), (281, 320), (281, 321), (281, 322), (281, 323), (281, 324), (281, 325), (281, 326), (281, 327), (281, 328), (281, 329), (281, 330), (281, 331), (281, 332), (281, 333), (281, 334), (281, 335), (281, 336), (281, 337), (281, 338), (281, 339), (281, 340), (281, 341), (281, 342), (281, 343), (281, 344), (281, 345), (281, 346), (281, 347), (281, 348), (281, 349),
(281, 350), (281, 351), (281, 352), (281, 353), (281, 354), (281, 355), (281, 356), (281, 357), (281, 358), (281, 359), (281, 360), (281, 361), (281, 362), (281, 363), (281, 364), (281, 365), (281, 366), (281, 367), (281, 368), (281, 369), (281, 370), (281, 371), (281, 372), (281, 373), (281, 374), (281, 375), (281, 376), (281, 377), (281, 378), (281, 379), (281, 381), (282, 266), (282, 268), (282, 269), (282, 270), (282, 271), (282, 272), (282, 273), (282, 274), (282, 275), (282, 276), (282, 277), (282, 278), (282, 279), (282, 280), (282, 281), (282, 282), (282, 283), (282, 284), (282, 285), (282, 286), (282, 287), (282, 288), (282, 289), (282, 290), (282, 291), (282, 292), (282, 293), (282, 294), (282, 295), (282, 296), (282, 297), (282, 298), (282, 299), (282, 300), (282, 301), (282, 302), (282, 303), (282, 304), (282, 305), (282, 306), (282, 307),
(282, 308), (282, 309), (282, 310), (282, 311), (282, 312), (282, 313), (282, 314), (282, 315), (282, 316), (282, 317), (282, 318), (282, 319), (282, 320), (282, 321), (282, 322), (282, 323), (282, 324), (282, 325), (282, 326), (282, 327), (282, 328), (282, 329), (282, 330), (282, 331), (282, 332), (282, 333), (282, 334), (282, 335), (282, 336), (282, 337), (282, 338), (282, 339), (282, 340), (282, 341), (282, 342), (282, 343), (282, 344), (282, 345), (282, 346), (282, 347), (282, 348), (282, 349), (282, 350), (282, 351), (282, 352), (282, 353), (282, 354), (282, 355), (282, 356), (282, 357), (282, 358), (282, 359), (282, 360), (282, 361), (282, 362), (282, 363), (282, 364), (282, 365), (282, 366), (282, 367), (282, 368), (282, 369), (282, 370), (282, 371), (282, 372), (282, 373), (282, 374), (282, 375), (282, 376), (282, 377), (282, 378), (282, 379),
(282, 381), (283, 267), (283, 269), (283, 270), (283, 271), (283, 272), (283, 273), (283, 274), (283, 275), (283, 276), (283, 277), (283, 278), (283, 279), (283, 280), (283, 281), (283, 282), (283, 283), (283, 284), (283, 285), (283, 286), (283, 287), (283, 288), (283, 289), (283, 290), (283, 291), (283, 292), (283, 293), (283, 294), (283, 295), (283, 296), (283, 297), (283, 298), (283, 299), (283, 300), (283, 301), (283, 302), (283, 303), (283, 304), (283, 305), (283, 306), (283, 307), (283, 308), (283, 309), (283, 310), (283, 311), (283, 312), (283, 313), (283, 314), (283, 315), (283, 316), (283, 317), (283, 318), (283, 319), (283, 320), (283, 321), (283, 322), (283, 323), (283, 324), (283, 325), (283, 326), (283, 327), (283, 328), (283, 329), (283, 330), (283, 331), (283, 332), (283, 333), (283, 334), (283, 335), (283, 336), (283, 337), (283, 338),
(283, 339), (283, 340), (283, 341), (283, 342), (283, 343), (283, 344), (283, 345), (283, 346), (283, 347), (283, 348), (283, 349), (283, 350), (283, 351), (283, 352), (283, 353), (283, 354), (283, 355), (283, 356), (283, 357), (283, 358), (283, 359), (283, 360), (283, 361), (283, 362), (283, 363), (283, 364), (283, 365), (283, 366), (283, 367), (283, 368), (283, 369), (283, 370), (283, 371), (283, 372), (283, 373), (283, 374), (283, 375), (283, 376), (283, 377), (283, 378), (283, 379), (283, 381), (284, 268), (284, 270), (284, 271), (284, 272), (284, 273), (284, 274), (284, 275), (284, 276), (284, 277), (284, 278), (284, 279), (284, 280), (284, 281), (284, 282), (284, 283), (284, 284), (284, 285), (284, 286), (284, 287), (284, 288), (284, 289), (284, 290), (284, 291), (284, 292), (284, 293), (284, 294), (284, 295), (284, 296), (284, 297), (284, 298),
(284, 299), (284, 300), (284, 301), (284, 302), (284, 303), (284, 304), (284, 305), (284, 306), (284, 307), (284, 308), (284, 309), (284, 310), (284, 311), (284, 312), (284, 313), (284, 314), (284, 315), (284, 316), (284, 317), (284, 318), (284, 319), (284, 320), (284, 321), (284, 322), (284, 323), (284, 324), (284, 325), (284, 326), (284, 327), (284, 328), (284, 329), (284, 330), (284, 331), (284, 332), (284, 333), (284, 334), (284, 335), (284, 336), (284, 337), (284, 338), (284, 339), (284, 340), (284, 341), (284, 342), (284, 343), (284, 344), (284, 345), (284, 346), (284, 347), (284, 348), (284, 349), (284, 350), (284, 351), (284, 352), (284, 353), (284, 354), (284, 355), (284, 356), (284, 357), (284, 358), (284, 359), (284, 360), (284, 361), (284, 362), (284, 363), (284, 364), (284, 365), (284, 366), (284, 367), (284, 368), (284, 369), (284, 370),
(284, 371), (284, 372), (284, 373), (284, 374), (284, 375), (284, 376), (284, 377), (284, 378), (284, 379), (284, 381), (285, 269), (285, 271), (285, 272), (285, 273), (285, 274), (285, 275), (285, 276), (285, 277), (285, 278), (285, 279), (285, 280), (285, 281), (285, 282), (285, 283), (285, 284), (285, 285), (285, 286), (285, 287), (285, 288), (285, 289), (285, 290), (285, 291), (285, 292), (285, 293), (285, 294), (285, 295), (285, 296), (285, 297), (285, 298), (285, 299), (285, 300), (285, 301), (285, 302), (285, 303), (285, 304), (285, 305), (285, 306), (285, 307), (285, 308), (285, 309), (285, 310), (285, 311), (285, 312), (285, 313), (285, 314), (285, 315), (285, 316), (285, 317), (285, 318), (285, 319), (285, 320), (285, 321), (285, 322), (285, 323), (285, 324), (285, 325), (285, 326), (285, 327), (285, 328), (285, 329), (285, 330), (285, 331),
(285, 332), (285, 333), (285, 334), (285, 335), (285, 336), (285, 337), (285, 338), (285, 339), (285, 340), (285, 341), (285, 342), (285, 343), (285, 344), (285, 345), (285, 346), (285, 347), (285, 348), (285, 349), (285, 350), (285, 351), (285, 352), (285, 353), (285, 354), (285, 355), (285, 356), (285, 357), (285, 358), (285, 359), (285, 360), (285, 361), (285, 362), (285, 363), (285, 364), (285, 365), (285, 366), (285, 367), (285, 368), (285, 369), (285, 370), (285, 371), (285, 372), (285, 373), (285, 374), (285, 375), (285, 376), (285, 377), (285, 378), (285, 379), (285, 381), (286, 270), (286, 272), (286, 273), (286, 274), (286, 275), (286, 276), (286, 277), (286, 278), (286, 279), (286, 280), (286, 281), (286, 282), (286, 283), (286, 284), (286, 285), (286, 286), (286, 287), (286, 288), (286, 289), (286, 290), (286, 291), (286, 292), (286, 293),
(286, 294), (286, 295), (286, 296), (286, 297), (286, 298), (286, 299), (286, 300), (286, 301), (286, 302), (286, 303), (286, 304), (286, 305), (286, 306), (286, 307), (286, 308), (286, 309), (286, 310), (286, 311), (286, 312), (286, 313), (286, 314), (286, 315), (286, 316), (286, 317), (286, 318), (286, 319), (286, 320), (286, 321), (286, 322), (286, 323), (286, 324), (286, 325), (286, 326), (286, 327), (286, 328), (286, 329), (286, 330), (286, 331), (286, 332), (286, 333), (286, 334), (286, 335), (286, 336), (286, 337), (286, 338), (286, 339), (286, 340), (286, 341), (286, 342), (286, 343), (286, 344), (286, 345), (286, 346), (286, 347), (286, 348), (286, 349), (286, 350), (286, 351), (286, 352), (286, 353), (286, 354), (286, 355), (286, 356), (286, 357), (286, 358), (286, 359), (286, 360), (286, 361), (286, 362), (286, 363), (286, 364), (286, 365),
(286, 366), (286, 367), (286, 368), (286, 369), (286, 370), (286, 371), (286, 372), (286, 373), (286, 374), (286, 375), (286, 376), (286, 377), (286, 378), (286, 379), (286, 381), (287, 271), (287, 273), (287, 274), (287, 275), (287, 276), (287, 277), (287, 278), (287, 279), (287, 280), (287, 281), (287, 282), (287, 283), (287, 284), (287, 285), (287, 286), (287, 287), (287, 288), (287, 289), (287, 290), (287, 291), (287, 292), (287, 293), (287, 294), (287, 295), (287, 296), (287, 297), (287, 298), (287, 299), (287, 300), (287, 301), (287, 302), (287, 303), (287, 304), (287, 305), (287, 306), (287, 307), (287, 308), (287, 309), (287, 310), (287, 311), (287, 312), (287, 313), (287, 314), (287, 315), (287, 316), (287, 317), (287, 318), (287, 319), (287, 320), (287, 321), (287, 322), (287, 323), (287, 324), (287, 325), (287, 326), (287, 327), (287, 328),
(287, 329), (287, 330), (287, 331), (287, 332), (287, 333), (287, 334), (287, 335), (287, 336), (287, 337), (287, 338), (287, 339), (287, 340), (287, 341), (287, 342), (287, 343), (287, 344), (287, 345), (287, 346), (287, 347), (287, 348), (287, 349), (287, 350), (287, 351), (287, 352), (287, 353), (287, 354), (287, 355), (287, 356), (287, 357), (287, 358), (287, 359), (287, 360), (287, 361), (287, 362), (287, 363), (287, 364), (287, 365), (287, 366), (287, 367), (287, 368), (287, 369), (287, 370), (287, 371), (287, 372), (287, 373), (287, 374), (287, 375), (287, 376), (287, 377), (287, 378), (287, 379), (287, 381), (288, 272), (288, 274), (288, 275), (288, 276), (288, 277), (288, 278), (288, 279), (288, 280), (288, 281), (288, 282), (288, 283), (288, 284), (288, 285), (288, 286), (288, 287), (288, 288), (288, 289), (288, 290), (288, 291), (288, 292),
(288, 293), (288, 294), (288, 295), (288, 296), (288, 297), (288, 298), (288, 299), (288, 300), (288, 301), (288, 302), (288, 303), (288, 304), (288, 305), (288, 306), (288, 307), (288, 308), (288, 309), (288, 310), (288, 311), (288, 312), (288, 313), (288, 314), (288, 315), (288, 316), (288, 317), (288, 318), (288, 319), (288, 320), (288, 321), (288, 322), (288, 323), (288, 324), (288, 325), (288, 326), (288, 327), (288, 328), (288, 329), (288, 330), (288, 331), (288, 332), (288, 333), (288, 334), (288, 335), (288, 336), (288, 337), (288, 338), (288, 339), (288, 340), (288, 341), (288, 342), (288, 343), (288, 344), (288, 345), (288, 346), (288, 347), (288, 348), (288, 349), (288, 350), (288, 351), (288, 352), (288, 353), (288, 354), (288, 355), (288, 356), (288, 357), (288, 358), (288, 359), (288, 360), (288, 361), (288, 362), (288, 363), (288, 364),
(288, 365), (288, 366), (288, 367), (288, 368), (288, 369), (288, 370), (288, 371), (288, 372), (288, 373), (288, 374), (288, 375), (288, 376), (288, 377), (288, 378), (288, 379), (288, 381), (289, 273), (289, 275), (289, 276), (289, 277), (289, 278), (289, 279), (289, 280), (289, 281), (289, 282), (289, 283), (289, 284), (289, 285), (289, 286), (289, 287), (289, 288), (289, 289), (289, 290), (289, 291), (289, 292), (289, 293), (289, 294), (289, 295), (289, 296), (289, 297), (289, 298), (289, 299), (289, 300), (289, 301), (289, 302), (289, 303), (289, 304), (289, 305), (289, 306), (289, 307), (289, 308), (289, 309), (289, 310), (289, 311), (289, 312), (289, 313), (289, 314), (289, 315), (289, 316), (289, 317), (289, 318), (289, 319), (289, 320), (289, 321), (289, 322), (289, 323), (289, 324), (289, 325), (289, 326), (289, 327), (289, 328), (289, 329),
(289, 330), (289, 331), (289, 332), (289, 333), (289, 334), (289, 335), (289, 336), (289, 337), (289, 338), (289, 339), (289, 340), (289, 341), (289, 342), (289, 343), (289, 344), (289, 345), (289, 346), (289, 347), (289, 348), (289, 349), (289, 350), (289, 351), (289, 352), (289, 353), (289, 354), (289, 355), (289, 356), (289, 357), (289, 358), (289, 359), (289, 360), (289, 361), (289, 362), (289, 363), (289, 364), (289, 365), (289, 366), (289, 367), (289, 368), (289, 369), (289, 370), (289, 371), (289, 372), (289, 373), (289, 374), (289, 375), (289, 376), (289, 377), (289, 378), (289, 379), (289, 381), (290, 274), (290, 276), (290, 277), (290, 278), (290, 279), (290, 280), (290, 281), (290, 282), (290, 283), (290, 284), (290, 285), (290, 286), (290, 287), (290, 288), (290, 289), (290, 290), (290, 291), (290, 292), (290, 293), (290, 294), (290, 295),
(290, 296), (290, 297), (290, 298), (290, 299), (290, 300), (290, 301), (290, 302), (290, 303), (290, 304), (290, 305), (290, 306), (290, 307), (290, 308), (290, 309), (290, 310), (290, 311), (290, 312), (290, 313), (290, 314), (290, 315), (290, 316), (290, 317), (290, 318), (290, 319), (290, 320), (290, 321), (290, 322), (290, 323), (290, 324), (290, 325), (290, 326), (290, 327), (290, 328), (290, 329), (290, 330), (290, 331), (290, 332), (290, 333), (290, 334), (290, 335), (290, 336), (290, 337), (290, 338), (290, 339), (290, 340), (290, 341), (290, 342), (290, 343), (290, 344), (290, 345), (290, 346), (290, 347), (290, 348), (290, 349), (290, 350), (290, 351), (290, 352), (290, 353), (290, 354), (290, 355), (290, 356), (290, 357), (290, 358), (290, 359), (290, 360), (290, 361), (290, 362), (290, 363), (290, 364), (290, 365), (290, 366), (290, 367),
(290, 368), (290, 369), (290, 370), (290, 371), (290, 372), (290, 373), (290, 374), (290, 375), (290, 376), (290, 377), (290, 378), (290, 379), (290, 381), (291, 275), (291, 277), (291, 278), (291, 279), (291, 280), (291, 281), (291, 282), (291, 283), (291, 284), (291, 285), (291, 286), (291, 287), (291, 288), (291, 289), (291, 290), (291, 291), (291, 292), (291, 293), (291, 294), (291, 295), (291, 296), (291, 297), (291, 298), (291, 299), (291, 300), (291, 301), (291, 302), (291, 303), (291, 304), (291, 305), (291, 306), (291, 307), (291, 308), (291, 309), (291, 310), (291, 311), (291, 312), (291, 313), (291, 314), (291, 315), (291, 316), (291, 317), (291, 318), (291, 319), (291, 320), (291, 321), (291, 322), (291, 323), (291, 324), (291, 325), (291, 326), (291, 327), (291, 328), (291, 329), (291, 330), (291, 331), (291, 332), (291, 333), (291, 334),
(291, 335), (291, 336), (291, 337), (291, 338), (291, 339), (291, 340), (291, 341), (291, 342), (291, 343), (291, 344), (291, 345), (291, 346), (291, 347), (291, 348), (291, 349), (291, 350), (291, 351), (291, 352), (291, 353), (291, 354), (291, 355), (291, 356), (291, 357), (291, 358), (291, 359), (291, 360), (291, 361), (291, 362), (291, 363), (291, 364), (291, 365), (291, 366), (291, 367), (291, 368), (291, 369), (291, 370), (291, 371), (291, 372), (291, 373), (291, 374), (291, 375), (291, 376), (291, 377), (291, 378), (291, 379), (291, 381), (292, 276), (292, 278), (292, 279), (292, 280), (292, 281), (292, 282), (292, 283), (292, 284), (292, 285), (292, 286), (292, 287), (292, 288), (292, 289), (292, 290), (292, 291), (292, 292), (292, 293), (292, 294), (292, 295), (292, 296), (292, 297), (292, 298), (292, 299), (292, 300), (292, 301), (292, 302),
(292, 303), (292, 304), (292, 305), (292, 306), (292, 307), (292, 308), (292, 309), (292, 310), (292, 311), (292, 312), (292, 313), (292, 314), (292, 315), (292, 316), (292, 317), (292, 318), (292, 319), (292, 320), (292, 321), (292, 322), (292, 323), (292, 324), (292, 325), (292, 326), (292, 327), (292, 328), (292, 329), (292, 330), (292, 331), (292, 332), (292, 333), (292, 334), (292, 335), (292, 336), (292, 337), (292, 338), (292, 339), (292, 340), (292, 341), (292, 342), (292, 343), (292, 344), (292, 345), (292, 346), (292, 347), (292, 348), (292, 349), (292, 350), (292, 351), (292, 352), (292, 353), (292, 354), (292, 355), (292, 356), (292, 357), (292, 358), (292, 359), (292, 360), (292, 361), (292, 362), (292, 363), (292, 364), (292, 365), (292, 366), (292, 367), (292, 368), (292, 369), (292, 370), (292, 371), (292, 372), (292, 373), (292, 374),
(292, 375), (292, 376), (292, 377), (292, 378), (292, 379), (292, 381), (293, 277), (293, 279), (293, 280), (293, 281), (293, 282), (293, 283), (293, 284), (293, 285), (293, 286), (293, 287), (293, 288), (293, 289), (293, 290), (293, 291), (293, 292), (293, 293), (293, 294), (293, 295), (293, 296), (293, 297), (293, 298), (293, 299), (293, 300), (293, 301), (293, 302), (293, 303), (293, 304), (293, 305), (293, 306), (293, 307), (293, 308), (293, 309), (293, 310), (293, 311), (293, 312), (293, 313), (293, 314), (293, 315), (293, 316), (293, 317), (293, 318), (293, 319), (293, 320), (293, 321), (293, 322), (293, 323), (293, 324), (293, 325), (293, 326), (293, 327), (293, 328), (293, 329), (293, 330), (293, 331), (293, 332), (293, 333), (293, 334), (293, 335), (293, 336), (293, 337), (293, 338), (293, 339), (293, 340), (293, 341), (293, 342), (293, 343),
(293, 344), (293, 345), (293, 346), (293, 347), (293, 348), (293, 349), (293, 350), (293, 351), (293, 352), (293, 353), (293, 354), (293, 355), (293, 356), (293, 357), (293, 358), (293, 359), (293, 360), (293, 361), (293, 362), (293, 363), (293, 364), (293, 365), (293, 366), (293, 367), (293, 368), (293, 369), (293, 370), (293, 371), (293, 372), (293, 373), (293, 374), (293, 375), (293, 376), (293, 377), (293, 378), (293, 379), (293, 381), (294, 278), (294, 280), (294, 281), (294, 282), (294, 283), (294, 284), (294, 285), (294, 286), (294, 287), (294, 288), (294, 289), (294, 290), (294, 291), (294, 292), (294, 293), (294, 294), (294, 295), (294, 296), (294, 297), (294, 298), (294, 299), (294, 300), (294, 301), (294, 302), (294, 303), (294, 304), (294, 305), (294, 306), (294, 307), (294, 308), (294, 309), (294, 310), (294, 311), (294, 312), (294, 313),
(294, 314), (294, 315), (294, 316), (294, 317), (294, 318), (294, 319), (294, 320), (294, 321), (294, 322), (294, 323), (294, 324), (294, 325), (294, 326), (294, 327), (294, 328), (294, 329), (294, 330), (294, 331), (294, 332), (294, 333), (294, 334), (294, 335), (294, 336), (294, 337), (294, 338), (294, 339), (294, 340), (294, 341), (294, 342), (294, 343), (294, 344), (294, 345), (294, 346), (294, 347), (294, 348), (294, 349), (294, 350), (294, 351), (294, 352), (294, 353), (294, 354), (294, 355), (294, 356), (294, 357), (294, 358), (294, 359), (294, 360), (294, 361), (294, 362), (294, 363), (294, 364), (294, 365), (294, 366), (294, 367), (294, 368), (294, 369), (294, 370), (294, 371), (294, 372), (294, 373), (294, 374), (294, 375), (294, 376), (294, 377), (294, 378), (294, 380), (295, 279), (295, 281), (295, 282), (295, 283), (295, 284), (295, 285),
(295, 286), (295, 287), (295, 288), (295, 289), (295, 290), (295, 291), (295, 292), (295, 293), (295, 294), (295, 295), (295, 296), (295, 297), (295, 298), (295, 299), (295, 300), (295, 301), (295, 302), (295, 303), (295, 304), (295, 305), (295, 306), (295, 307), (295, 308), (295, 309), (295, 310), (295, 311), (295, 312), (295, 313), (295, 314), (295, 315), (295, 316), (295, 317), (295, 318), (295, 319), (295, 320), (295, 321), (295, 322), (295, 323), (295, 324), (295, 325), (295, 326), (295, 327), (295, 328), (295, 329), (295, 330), (295, 331), (295, 332), (295, 333), (295, 334), (295, 335), (295, 336), (295, 337), (295, 338), (295, 339), (295, 340), (295, 341), (295, 342), (295, 343), (295, 344), (295, 345), (295, 346), (295, 347), (295, 348), (295, 349), (295, 350), (295, 351), (295, 352), (295, 353), (295, 354), (295, 355), (295, 356), (295, 357),
(295, 358), (295, 359), (295, 360), (295, 361), (295, 362), (295, 363), (295, 364), (295, 365), (295, 366), (295, 367), (295, 368), (295, 369), (295, 370), (295, 371), (295, 372), (295, 373), (295, 374), (295, 375), (295, 376), (295, 377), (295, 378), (295, 380), (296, 281), (296, 282), (296, 283), (296, 284), (296, 285), (296, 286), (296, 287), (296, 288), (296, 289), (296, 290), (296, 291), (296, 292), (296, 293), (296, 294), (296, 295), (296, 296), (296, 297), (296, 298), (296, 299), (296, 300), (296, 301), (296, 302), (296, 303), (296, 304), (296, 305), (296, 306), (296, 307), (296, 308), (296, 309), (296, 310), (296, 311), (296, 312), (296, 313), (296, 314), (296, 315), (296, 316), (296, 317), (296, 318), (296, 319), (296, 320), (296, 321), (296, 322), (296, 323), (296, 324), (296, 325), (296, 326), (296, 327), (296, 328), (296, 329), (296, 330),
(296, 331), (296, 332), (296, 333), (296, 334), (296, 335), (296, 336), (296, 337), (296, 338), (296, 339), (296, 340), (296, 341), (296, 342), (296, 343), (296, 344), (296, 345), (296, 346), (296, 347), (296, 348), (296, 349), (296, 350), (296, 351), (296, 352), (296, 353), (296, 354), (296, 355), (296, 356), (296, 357), (296, 358), (296, 359), (296, 360), (296, 361), (296, 362), (296, 363), (296, 364), (296, 365), (296, 366), (296, 367), (296, 368), (296, 369), (296, 370), (296, 371), (296, 372), (296, 373), (296, 374), (296, 375), (296, 376), (296, 377), (296, 378), (296, 380), (297, 280), (297, 282), (297, 283), (297, 284), (297, 285), (297, 286), (297, 287), (297, 288), (297, 289), (297, 290), (297, 291), (297, 292), (297, 293), (297, 294), (297, 295), (297, 296), (297, 297), (297, 298), (297, 299), (297, 300), (297, 301), (297, 302), (297, 303),
(297, 304), (297, 305), (297, 306), (297, 307), (297, 308), (297, 309), (297, 310), (297, 311), (297, 312), (297, 313), (297, 314), (297, 315), (297, 316), (297, 317), (297, 318), (297, 319), (297, 320), (297, 321), (297, 322), (297, 323), (297, 324), (297, 325), (297, 326), (297, 327), (297, 328), (297, 329), (297, 330), (297, 331), (297, 332), (297, 333), (297, 334), (297, 335), (297, 336), (297, 337), (297, 338), (297, 339), (297, 340), (297, 341), (297, 342), (297, 343), (297, 344), (297, 345), (297, 346), (297, 347), (297, 348), (297, 349), (297, 350), (297, 351), (297, 352), (297, 353), (297, 354), (297, 355), (297, 356), (297, 357), (297, 358), (297, 359), (297, 360), (297, 361), (297, 362), (297, 363), (297, 364), (297, 365), (297, 366), (297, 367), (297, 368), (297, 369), (297, 370), (297, 371), (297, 372), (297, 373), (297, 374), (297, 375),
(297, 376), (297, 377), (297, 378), (297, 380), (298, 281), (298, 283), (298, 284), (298, 285), (298, 286), (298, 287), (298, 288), (298, 289), (298, 290), (298, 291), (298, 292), (298, 293), (298, 294), (298, 295), (298, 296), (298, 297), (298, 298), (298, 299), (298, 300), (298, 301), (298, 302), (298, 303), (298, 304), (298, 305), (298, 306), (298, 307), (298, 308), (298, 309), (298, 310), (298, 311), (298, 312), (298, 313), (298, 314), (298, 315), (298, 316), (298, 317), (298, 318), (298, 319), (298, 320), (298, 321), (298, 322), (298, 323), (298, 324), (298, 325), (298, 326), (298, 327), (298, 328), (298, 329), (298, 330), (298, 331), (298, 332), (298, 333), (298, 334), (298, 335), (298, 336), (298, 337), (298, 338), (298, 339), (298, 340), (298, 341), (298, 342), (298, 343), (298, 344), (298, 345), (298, 346), (298, 347), (298, 348), (298, 349),
(298, 350), (298, 351), (298, 352), (298, 353), (298, 354), (298, 355), (298, 356), (298, 357), (298, 358), (298, 359), (298, 360), (298, 361), (298, 362), (298, 363), (298, 364), (298, 365), (298, 366), (298, 367), (298, 368), (298, 369), (298, 370), (298, 371), (298, 372), (298, 373), (298, 374), (298, 375), (298, 376), (298, 377), (298, 378), (298, 380), (299, 281), (299, 284), (299, 285), (299, 286), (299, 287), (299, 288), (299, 289), (299, 290), (299, 291), (299, 292), (299, 293), (299, 294), (299, 295), (299, 296), (299, 297), (299, 298), (299, 299), (299, 300), (299, 301), (299, 302), (299, 303), (299, 304), (299, 305), (299, 306), (299, 307), (299, 308), (299, 309), (299, 310), (299, 311), (299, 312), (299, 313), (299, 314), (299, 315), (299, 316), (299, 317), (299, 318), (299, 319), (299, 320), (299, 321), (299, 322), (299, 323), (299, 324),
(299, 325), (299, 326), (299, 327), (299, 328), (299, 329), (299, 330), (299, 331), (299, 332), (299, 333), (299, 334), (299, 335), (299, 336), (299, 337), (299, 338), (299, 339), (299, 340), (299, 341), (299, 342), (299, 343), (299, 344), (299, 345), (299, 346), (299, 347), (299, 348), (299, 349), (299, 350), (299, 351), (299, 352), (299, 353), (299, 354), (299, 355), (299, 356), (299, 357), (299, 358), (299, 359), (299, 360), (299, 361), (299, 362), (299, 363), (299, 364), (299, 365), (299, 366), (299, 367), (299, 368), (299, 369), (299, 370), (299, 371), (299, 372), (299, 373), (299, 374), (299, 375), (299, 376), (299, 377), (299, 378), (299, 379), (299, 380), (300, 282), (300, 286), (300, 287), (300, 288), (300, 289), (300, 290), (300, 291), (300, 292), (300, 293), (300, 294), (300, 295), (300, 296), (300, 297), (300, 298), (300, 299), (300, 300),
(300, 301), (300, 302), (300, 303), (300, 304), (300, 305), (300, 306), (300, 307), (300, 308), (300, 309), (300, 310), (300, 311), (300, 312), (300, 313), (300, 314), (300, 315), (300, 316), (300, 317), (300, 318), (300, 319), (300, 320), (300, 321), (300, 322), (300, 323), (300, 324), (300, 325), (300, 326), (300, 327), (300, 328), (300, 329), (300, 330), (300, 331), (300, 332), (300, 333), (300, 334), (300, 335), (300, 336), (300, 337), (300, 338), (300, 339), (300, 340), (300, 341), (300, 342), (300, 343), (300, 344), (300, 345), (300, 346), (300, 347), (300, 348), (300, 349), (300, 350), (300, 351), (300, 352), (300, 353), (300, 354), (300, 355), (300, 356), (300, 357), (300, 358), (300, 359), (300, 360), (300, 361), (300, 362), (300, 363), (300, 364), (300, 365), (300, 366), (300, 367), (300, 368), (300, 369), (300, 370), (300, 371), (300, 372),
(300, 373), (300, 374), (300, 375), (300, 376), (300, 377), (300, 379), (301, 284), (301, 287), (301, 288), (301, 289), (301, 290), (301, 291), (301, 292), (301, 293), (301, 294), (301, 295), (301, 296), (301, 297), (301, 298), (301, 299), (301, 300), (301, 301), (301, 302), (301, 303), (301, 304), (301, 305), (301, 306), (301, 307), (301, 308), (301, 309), (301, 310), (301, 311), (301, 312), (301, 313), (301, 314), (301, 315), (301, 316), (301, 317), (301, 318), (301, 319), (301, 320), (301, 321), (301, 322), (301, 323), (301, 324), (301, 325), (301, 326), (301, 327), (301, 328), (301, 329), (301, 330), (301, 331), (301, 332), (301, 333), (301, 334), (301, 335), (301, 336), (301, 337), (301, 338), (301, 339), (301, 340), (301, 341), (301, 342), (301, 343), (301, 344), (301, 345), (301, 346), (301, 347), (301, 348), (301, 349), (301, 350), (301, 351),
(301, 352), (301, 353), (301, 354), (301, 355), (301, 356), (301, 357), (301, 358), (301, 359), (301, 360), (301, 361), (301, 362), (301, 363), (301, 364), (301, 365), (301, 366), (301, 367), (301, 368), (301, 369), (301, 370), (301, 371), (301, 372), (301, 373), (301, 374), (301, 375), (301, 376), (301, 377), (301, 379), (302, 286), (302, 289), (302, 290), (302, 291), (302, 292), (302, 293), (302, 294), (302, 295), (302, 296), (302, 297), (302, 298), (302, 299), (302, 300), (302, 301), (302, 302), (302, 303), (302, 304), (302, 305), (302, 306), (302, 307), (302, 308), (302, 309), (302, 310), (302, 311), (302, 312), (302, 313), (302, 314), (302, 315), (302, 316), (302, 317), (302, 318), (302, 319), (302, 320), (302, 321), (302, 322), (302, 323), (302, 324), (302, 325), (302, 326), (302, 327), (302, 328), (302, 329), (302, 330), (302, 331), (302, 332),
(302, 333), (302, 334), (302, 335), (302, 336), (302, 337), (302, 338), (302, 339), (302, 340), (302, 341), (302, 342), (302, 343), (302, 344), (302, 345), (302, 346), (302, 347), (302, 348), (302, 349), (302, 350), (302, 351), (302, 352), (302, 353), (302, 354), (302, 355), (302, 356), (302, 357), (302, 358), (302, 359), (302, 360), (302, 361), (302, 362), (302, 363), (302, 364), (302, 365), (302, 366), (302, 367), (302, 368), (302, 369), (302, 373), (302, 374), (302, 375), (302, 376), (302, 377), (302, 379), (303, 287), (303, 290), (303, 291), (303, 292), (303, 293), (303, 294), (303, 295), (303, 296), (303, 297), (303, 298), (303, 299), (303, 300), (303, 301), (303, 302), (303, 303), (303, 304), (303, 305), (303, 306), (303, 307), (303, 308), (303, 309), (303, 310), (303, 311), (303, 312), (303, 313), (303, 314), (303, 315), (303, 316), (303, 317),
(303, 318), (303, 319), (303, 320), (303, 321), (303, 322), (303, 323), (303, 324), (303, 325), (303, 326), (303, 327), (303, 328), (303, 329), (303, 330), (303, 331), (303, 332), (303, 333), (303, 334), (303, 335), (303, 336), (303, 337), (303, 338), (303, 339), (303, 340), (303, 341), (303, 342), (303, 343), (303, 344), (303, 345), (303, 346), (303, 347), (303, 348), (303, 349), (303, 350), (303, 351), (303, 352), (303, 353), (303, 354), (303, 355), (303, 356), (303, 357), (303, 358), (303, 359), (303, 360), (303, 361), (303, 362), (303, 363), (303, 364), (303, 365), (303, 366), (303, 367), (303, 368), (303, 371), (303, 374), (303, 375), (303, 376), (303, 378), (304, 291), (304, 292), (304, 293), (304, 294), (304, 295), (304, 296), (304, 297), (304, 298), (304, 299), (304, 300), (304, 301), (304, 302), (304, 303), (304, 304), (304, 305), (304, 306),
(304, 307), (304, 308), (304, 309), (304, 310), (304, 311), (304, 312), (304, 313), (304, 314), (304, 315), (304, 316), (304, 317), (304, 318), (304, 319), (304, 320), (304, 321), (304, 322), (304, 323), (304, 324), (304, 325), (304, 326), (304, 327), (304, 328), (304, 329), (304, 330), (304, 331), (304, 332), (304, 333), (304, 334), (304, 335), (304, 336), (304, 337), (304, 338), (304, 339), (304, 340), (304, 341), (304, 342), (304, 343), (304, 344), (304, 345), (304, 346), (304, 347), (304, 348), (304, 349), (304, 350), (304, 351), (304, 352), (304, 353), (304, 354), (304, 355), (304, 356), (304, 357), (304, 358), (304, 359), (304, 360), (304, 361), (304, 362), (304, 363), (304, 364), (304, 365), (304, 366), (304, 367), (304, 369), (304, 373), (304, 375), (304, 377), (305, 290), (305, 292), (305, 293), (305, 294), (305, 295), (305, 296), (305, 297),
(305, 298), (305, 299), (305, 300), (305, 301), (305, 302), (305, 303), (305, 304), (305, 305), (305, 306), (305, 307), (305, 308), (305, 309), (305, 310), (305, 311), (305, 312), (305, 313), (305, 314), (305, 315), (305, 316), (305, 317), (305, 318), (305, 319), (305, 320), (305, 321), (305, 322), (305, 323), (305, 324), (305, 325), (305, 326), (305, 327), (305, 328), (305, 329), (305, 330), (305, 331), (305, 332), (305, 333), (305, 334), (305, 335), (305, 336), (305, 337), (305, 338), (305, 339), (305, 340), (305, 341), (305, 342), (305, 343), (305, 344), (305, 345), (305, 346), (305, 347), (305, 348), (305, 349), (305, 350), (305, 351), (305, 352), (305, 353), (305, 354), (305, 355), (305, 356), (305, 357), (305, 358), (305, 359), (305, 360), (305, 361), (305, 362), (305, 363), (305, 364), (305, 365), (305, 366), (305, 374), (305, 376), (306, 291),
(306, 293), (306, 294), (306, 295), (306, 296), (306, 297), (306, 298), (306, 299), (306, 300), (306, 301), (306, 302), (306, 303), (306, 304), (306, 305), (306, 306), (306, 307), (306, 308), (306, 309), (306, 310), (306, 311), (306, 312), (306, 313), (306, 314), (306, 315), (306, 316), (306, 317), (306, 318), (306, 319), (306, 320), (306, 321), (306, 322), (306, 323), (306, 324), (306, 325), (306, 326), (306, 327), (306, 328), (306, 329), (306, 330), (306, 331), (306, 332), (306, 333), (306, 334), (306, 335), (306, 336), (306, 337), (306, 338), (306, 339), (306, 340), (306, 341), (306, 342), (306, 343), (306, 344), (306, 345), (306, 346), (306, 347), (306, 348), (306, 349), (306, 350), (306, 351), (306, 352), (306, 353), (306, 354), (306, 355), (306, 356), (306, 357), (306, 358), (306, 359), (306, 360), (306, 361), (306, 362), (306, 363), (306, 364),
(306, 365), (306, 375), (307, 292), (307, 294), (307, 295), (307, 296), (307, 297), (307, 298), (307, 299), (307, 300), (307, 301), (307, 302), (307, 303), (307, 304), (307, 305), (307, 306), (307, 307), (307, 308), (307, 309), (307, 310), (307, 311), (307, 312), (307, 313), (307, 314), (307, 315), (307, 316), (307, 317), (307, 318), (307, 319), (307, 320), (307, 321), (307, 322), (307, 323), (307, 324), (307, 325), (307, 326), (307, 327), (307, 328), (307, 329), (307, 330), (307, 331), (307, 332), (307, 333), (307, 334), (307, 335), (307, 336), (307, 337), (307, 338), (307, 339), (307, 340), (307, 341), (307, 342), (307, 343), (307, 344), (307, 345), (307, 346), (307, 347), (307, 348), (307, 349), (307, 350), (307, 351), (307, 352), (307, 353), (307, 354), (307, 355), (307, 356), (307, 357), (307, 358), (307, 359), (307, 360), (307, 361), (307, 362),
(307, 363), (307, 364), (308, 293), (308, 295), (308, 296), (308, 297), (308, 298), (308, 299), (308, 300), (308, 301), (308, 302), (308, 303), (308, 304), (308, 305), (308, 306), (308, 307), (308, 308), (308, 309), (308, 310), (308, 311), (308, 312), (308, 313), (308, 314), (308, 315), (308, 316), (308, 317), (308, 318), (308, 319), (308, 320), (308, 321), (308, 322), (308, 323), (308, 324), (308, 325), (308, 326), (308, 327), (308, 328), (308, 329), (308, 330), (308, 331), (308, 332), (308, 333), (308, 334), (308, 335), (308, 336), (308, 337), (308, 338), (308, 339), (308, 340), (308, 341), (308, 342), (308, 343), (308, 344), (308, 345), (308, 346), (308, 347), (308, 348), (308, 349), (308, 350), (308, 351), (308, 352), (308, 353), (308, 354), (308, 355), (308, 356), (308, 357), (308, 358), (308, 359), (308, 360), (308, 361), (308, 362), (308, 363),
(309, 294), (309, 296), (309, 297), (309, 298), (309, 299), (309, 300), (309, 301), (309, 302), (309, 303), (309, 304), (309, 305), (309, 306), (309, 307), (309, 308), (309, 309), (309, 310), (309, 311), (309, 312), (309, 313), (309, 314), (309, 315), (309, 316), (309, 317), (309, 318), (309, 319), (309, 320), (309, 321), (309, 322), (309, 323), (309, 324), (309, 325), (309, 326), (309, 327), (309, 328), (309, 329), (309, 330), (309, 331), (309, 332), (309, 333), (309, 334), (309, 335), (309, 336), (309, 337), (309, 338), (309, 339), (309, 340), (309, 341), (309, 342), (309, 343), (309, 344), (309, 345), (309, 346), (309, 347), (309, 348), (309, 349), (309, 350), (309, 351), (309, 352), (309, 353), (309, 354), (309, 355), (309, 356), (309, 357), (309, 358), (309, 359), (309, 360), (309, 361), (309, 362), (310, 294), (310, 296), (310, 297), (310, 298),
(310, 299), (310, 300), (310, 301), (310, 302), (310, 303), (310, 304), (310, 305), (310, 306), (310, 307), (310, 308), (310, 309), (310, 310), (310, 311), (310, 312), (310, 313), (310, 314), (310, 315), (310, 316), (310, 317), (310, 318), (310, 319), (310, 320), (310, 321), (310, 322), (310, 323), (310, 324), (310, 325), (310, 326), (310, 327), (310, 328), (310, 329), (310, 330), (310, 331), (310, 332), (310, 333), (310, 334), (310, 335), (310, 336), (310, 337), (310, 338), (310, 339), (310, 340), (310, 341), (310, 342), (310, 343), (310, 344), (310, 345), (310, 346), (310, 347), (310, 348), (310, 349), (310, 350), (310, 351), (310, 352), (310, 353), (310, 354), (310, 355), (310, 356), (310, 357), (310, 358), (310, 359), (311, 295), (311, 297), (311, 298), (311, 299), (311, 300), (311, 301), (311, 302), (311, 303), (311, 304), (311, 305), (311, 306),
(311, 307), (311, 308), (311, 309), (311, 310), (311, 311), (311, 312), (311, 313), (311, 314), (311, 315), (311, 316), (311, 317), (311, 318), (311, 319), (311, 320), (311, 321), (311, 322), (311, 323), (311, 324), (311, 325), (311, 326), (311, 327), (311, 328), (311, 329), (311, 330), (311, 331), (311, 332), (311, 333), (311, 334), (311, 335), (311, 336), (311, 337), (311, 338), (311, 339), (311, 340), (311, 341), (311, 342), (311, 343), (311, 344), (311, 345), (311, 346), (311, 347), (311, 348), (311, 360), (311, 362), (312, 295), (312, 297), (312, 298), (312, 299), (312, 300), (312, 301), (312, 302), (312, 303), (312, 304), (312, 305), (312, 306), (312, 307), (312, 308), (312, 309), (312, 310), (312, 311), (312, 312), (312, 313), (312, 314), (312, 315), (312, 316), (312, 317), (312, 318), (312, 319), (312, 320), (312, 321), (312, 322), (312, 323),
(312, 324), (312, 325), (312, 326), (312, 327), (312, 328), (312, 329), (312, 330), (312, 331), (312, 332), (312, 333), (312, 334), (312, 335), (312, 336), (312, 337), (312, 338), (312, 339), (312, 340), (312, 341), (312, 342), (312, 343), (312, 344), (312, 349), (312, 350), (312, 351), (312, 352), (312, 353), (312, 354), (312, 355), (312, 356), (312, 357), (312, 358), (312, 359), (313, 296), (313, 299), (313, 300), (313, 301), (313, 302), (313, 303), (313, 304), (313, 305), (313, 306), (313, 307), (313, 308), (313, 309), (313, 310), (313, 311), (313, 312), (313, 313), (313, 314), (313, 315), (313, 316), (313, 317), (313, 318), (313, 319), (313, 320), (313, 321), (313, 322), (313, 323), (313, 324), (313, 325), (313, 326), (313, 327), (313, 328), (313, 329), (313, 330), (313, 331), (313, 332), (313, 333), (313, 334), (313, 335), (313, 336), (313, 337),
(313, 345), (313, 346), (313, 347), (314, 296), (314, 298), (314, 299), (314, 338), (314, 339), (314, 340), (314, 341), (314, 342), (314, 343), (314, 344), (315, 300), (315, 301), (315, 302), (315, 303), (315, 304), (315, 305), (315, 306), (315, 307), (315, 308), (315, 309), (315, 310), (315, 311), (315, 312), (315, 313), (315, 314), (315, 315), (315, 316), (315, 317), (315, 318), (315, 319), (315, 320), (315, 321), (315, 322), (315, 323), (315, 324), (315, 325), (315, 326), (315, 327), (315, 328), (315, 329), (315, 330), (315, 331), (315, 332), (315, 333), (315, 334), (315, 335), (315, 336), (315, 337), )
|
py | 1a3945703c7e9d64bd9b920ee503808d83544e2f | import numpy as np
def genSinusoid(amp, frq, phs, start, stop):
#phase in radians
n = np.arange(start, stop + 1)
x = amp * np.sin((frq * np.pi * n) + phs)
return n, x
|
py | 1a3945a13ca60964141c65596dc29ddc9b18266e | from django.apps import AppConfig
class CustomerCareConfig(AppConfig):
name = 'kitsune.customercare'
def ready(self):
from kitsune.customercare.badges import register_signals
# register signals for badges
register_signals()
|
py | 1a3947d3b56338e4d02de37948daf978e2ac2bed | import numpy as np
import time
import sys
from ServoMotor import *
from fns import *
import pandas as pd
import math as m
# Initialize motor control library & USB Port
filename = "/dev/ttyUSB0"
motor = ServoMotor(filename)
IO = motor.IO_Init()
if IO < 0:
print('IO exit')
sys.exit()
version= "0.2.0"
# Read v values from those saved from simulation
df = pd.read_csv('V/Hardcoded_{}.csv'.format(version))
v = np.array(df['Best Values'])
# Call corresponding function to convert sim2real/real2sim
def convFns(pos, convType):
conv = [left_armpit, left_elbow, left_shoulder, right_armpit, right_elbow, right_shoulder,
left_armpit, left_elbow, left_shoulder, right_armpit, right_elbow, right_shoulder]
targ = np.zeros(12)
for i in range(len(pos)):
if i==0:
targ[i] = conv[i](pos[i], convType, "front")
elif i==6:
targ[i] = conv[i](pos[i], convType, "back")
else:
targ[i] = conv[i](pos[i], convType)
return targ
# Return position to take
def get_action(i):
nextPos = [ 0, (v[6] + v[7]*m.sin(i*v[36] + v[8])), (v[3] + v[4]*m.sin(i*v[36] + v[5])),
0, (v[15] + v[16]*m.sin(i*v[36] + v[17])), (v[12] + v[13]*m.sin(i*v[36] + v[14])),
0, (v[24] + v[25]*m.sin(i*v[36] + v[26])), (v[21] + v[22]*m.sin(i*v[36] + v[23])),
0, (v[33] + v[34]*m.sin(i*v[36] + v[35])), (v[30] + v[31]*m.sin(i*v[36] + v[32]))]
return convFns(nextPos, 'sim2real')
# MOVE MOTOR TO GIVEN POSITION
def walk(pos):
h = 0
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
motor.move(i, int(pos[h]), 0)
h+=1
#time.sleep(0.01)
# Read motor positions
def get_state():
state = []
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
state.append(motor.readPosition(i))
return state
# Initialize motors as servos and set offset
offsets = [30, 0, 64, 0, 70, 50, 26, 0, 55, 80, 90, 35]
h = 0
# Set servo mode to all servos with their offset
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
motor.setServoMode(i)
if offsets[h]!=0:
motor.setPositionOffset(i,offsets[h])
h+=1
# RESET position and stand down & up before walking
pos = [500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500]
h = 0
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
motor.move(i, int(pos[h]), 1000)
h+=1
time.sleep(3)
pos = [500, 750, 583, 500, 250, 417, 500, 750, 583, 500, 250, 417]
h = 0
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
if h>5:
motor.move(i, int(pos[h]), 700)
else:
motor.move(i, int(pos[h]), 1000)
h+=1
time.sleep(3)
pos = get_action(0)
h = 0
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
motor.move(i, int(pos[h]), 1000)
h+=1
time.sleep(3)
error = []
j=0
# WALK
while j < 400:
# Get current position of motors
state = get_state()
'''
if j>1:
diff = []
for i in range(len(state)):
diff.append(abs(state[i]-pos[i]))
error.append(diff)
'''
# Get target position
pos = get_action(j)
# Move robot to target position
walk(pos)
j += 1
#error_df = pd.DataFrame(error, columns=[10, 11, 12, 20, 21, 22, 30, 31, 32, 40, 41, 42])
#error_df.to_csv('Errors.csv')
print('donio')
|
py | 1a394831e756693195c37d48cfd8a5d94fa50fe2 | from enum import Enum
from glTF_editor.common.utils_py import \
UnicodeType
from glTF_editor.common.data_serializer import \
serializer
class Buffer(object):
"""
...
"""
def __init__(self,
byteLength=None,
uri=None,
name=None,
extensions=None,
extras=None):
self._byteLength = 0
self._uri = None
self._name = None
self._extensions = None
self._extras = None
if byteLength is not None:
self.byteLength = byteLength
if uri is not None:
self.uri = uri
if name is not None:
self.name = name
if extensions is not None:
self.extensions = extensions
if extras is not None:
self.extras = extras
@classmethod
def cast(cls, obj):
if isinstance(obj, cls):
return cls(
byteLength=obj.byteLength,
uri=obj.uri,
name=obj.name,
extensions=obj.extensions,
extras=obj.extras)
elif isinstance(obj, Mapping):
return cls(
byteLength=obj.get("byteLength"),
uri=obj.get("uri"),
name=obj.get("name"),
extensions=obj.get("extensions"),
extras=obj.get("extras"))
elif isinstance(obj, UnicodeType):
unserialized = serializer.loads(obj)
if not isinstance(unserialized, UnicodeType):
return cls.cast(unserialized)
elif isinstance(obj, Iterable):
it = iter(obj)
return cls(
byteLength=next(it),
uri=next(it),
name=next(it),
extensions=next(it),
extras=next(it))
raise RuntimeError("Unable to Cast {obj!r} to {cls} instance.".format(**locals()))
def getByteLength(self):
if self._byteLength is None:
return None
else:
return iter(self._byteLength)
def setByteLength(self, newByteLength):
if newByteLength is None:
self._byteLength = None
else:
self._byteLength = [UnicodeType(n) for n in newByteLength]
byteLength = property(getByteLength, setByteLength)
def getUri(self):
if self._uri is None:
return None
else:
return iter(self._uri)
def setUri(self, newUri):
if newUri is None:
self._uri = None
else:
self._uri = [UnicodeType(n) for n in newUri]
uri = property(getUri, setUri)
def getName(self):
if self._name is None:
return None
else:
return iter(self._name)
def setName(self, newName):
if newName is None:
self._name = None
else:
self._name = [UnicodeType(n) for n in newName]
name = property(getName, setName)
def getExtensions(self):
if self._extensions is None:
return None
else:
return iter(self._extensions)
def setExtensions(self, newExtensions):
if newExtensions is None:
self._extensions = None
else:
self._extensions = [UnicodeType(n) for n in newExtensions]
extensions = property(getExtensions, setExtensions)
def getExtras(self):
if self._extras is None:
return None
else:
return iter(self._extras)
def setExtras(self, newExtras):
if newExtras is None:
self._extras = None
else:
self._extras = [UnicodeType(n) for n in newExtras]
extras = property(getExtras, setExtras)
def __str__(self):
return serializer.dumps(self, type_hints=False)
def __repr__(self):
cls = type(self)
return ("{cls.__name__}("
"byteLength={self.byteLength!r}, "
"uri={self.uri!r}, "
"name={self.name!r}, "
"extensions={self.extensions!r}, "
"extras={self.extras!r}")").format(**locals())
def to_dict(obj):
result = oDict()
def add_valid(attr_name):
value = getattr(obj, attr_name, None)
if value is not None:
result[attr_name] = value.value if isinstance(value, Enum) else value
add_valid("byteLength")
add_valid("uri")
add_valid("name")
add_valid("extensions")
add_valid("extras")
return result
serializer.register(
data_cls=Buffer,
from_dict=lambda dct: Buffer.cast(dct),
to_dict=to_dict)
|
py | 1a39485e161b5ca2eb8b263415b5641f20191228 | """HTTP server base class.
Note: the class in this module doesn't implement any HTTP request; see
SimpleHTTPServer for simple implementations of GET, HEAD and POST
(including CGI scripts). It does, however, optionally implement HTTP/1.1
persistent connections, as of version 0.3.
Contents:
- BaseHTTPRequestHandler: HTTP request handler base class
- test: test function
XXX To do:
- log requests even later (to capture byte count)
- log user-agent header and other interesting goodies
- send error log to separate file
"""
# See also:
#
# HTTP Working Group T. Berners-Lee
# INTERNET-DRAFT R. T. Fielding
# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
# Expires September 8, 1995 March 8, 1995
#
# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
#
# and
#
# Network Working Group R. Fielding
# Request for Comments: 2616 et al
# Obsoletes: 2068 June 1999
# Category: Standards Track
#
# URL: http://www.faqs.org/rfcs/rfc2616.html
# Log files
# ---------
#
# Here's a quote from the NCSA httpd docs about log file format.
#
# | The logfile format is as follows. Each line consists of:
# |
# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
# |
# | host: Either the DNS name or the IP number of the remote client
# | rfc931: Any information returned by identd for this person,
# | - otherwise.
# | authuser: If user sent a userid for authentication, the user name,
# | - otherwise.
# | DD: Day
# | Mon: Month (calendar name)
# | YYYY: Year
# | hh: hour (24-hour format, the machine's timezone)
# | mm: minutes
# | ss: seconds
# | request: The first line of the HTTP request as sent by the client.
# | ddd: the status code returned by the server, - if not available.
# | bbbb: the total number of bytes sent,
# | *not including the HTTP/1.0 header*, - if not available
# |
# | You can determine the name of the file accessed through request.
#
# (Actually, the latter is only true if you know the server configuration
# at the time the request was made!)
__version__ = "0.3"
__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
import sys
import time
import socket # For gethostbyaddr()
from warnings import filterwarnings, catch_warnings
with catch_warnings():
if sys.py3kwarning:
filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
import mimetools
import SocketServer
# Default error message template
DEFAULT_ERROR_MESSAGE = """\
<head>
<title>Error response</title>
</head>
<body>
<h1>Error response</h1>
<p>Error code %(code)d.
<p>Message: %(message)s.
<p>Error code explanation: %(code)s = %(explain)s.
</body>
"""
DEFAULT_ERROR_CONTENT_TYPE = "text/html"
def _quote_html(html):
return html.replace("&", "&").replace("<", "<").replace(">", ">")
class HTTPServer(SocketServer.TCPServer):
allow_reuse_address = 1 # Seems to make sense in testing environment
def server_bind(self):
"""Override server_bind to store the server name."""
SocketServer.TCPServer.server_bind(self)
host, port = self.socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
"""HTTP request handler base class.
The following explanation of HTTP serves to guide you through the
code as well as to expose any misunderstandings I may have about
HTTP (so you don't need to read the code to figure out I'm wrong
:-).
HTTP (HyperText Transfer Protocol) is an extensible protocol on
top of a reliable stream transport (e.g. TCP/IP). The protocol
recognizes three parts to a request:
1. One line identifying the request type and path
2. An optional set of RFC-822-style headers
3. An optional data part
The headers and data are separated by a blank line.
The first line of the request has the form
<command> <path> <version>
where <command> is a (case-sensitive) keyword such as GET or POST,
<path> is a string containing path information for the request,
and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
<path> is encoded using the URL encoding scheme (using %xx to signify
the ASCII character with hex code xx).
The specification specifies that lines are separated by CRLF but
for compatibility with the widest range of clients recommends
servers also handle LF. Similarly, whitespace in the request line
is treated sensibly (allowing multiple spaces between components
and allowing trailing whitespace).
Similarly, for output, lines ought to be separated by CRLF pairs
but most clients grok LF characters just fine.
If the first line of the request has the form
<command> <path>
(i.e. <version> is left out) then this is assumed to be an HTTP
0.9 request; this form has no optional headers and data part and
the reply consists of just the data.
The reply form of the HTTP 1.x protocol again has three parts:
1. One line giving the response code
2. An optional set of RFC-822-style headers
3. The data
Again, the headers and data are separated by a blank line.
The response code line has the form
<version> <responsecode> <responsestring>
where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
<responsecode> is a 3-digit response code indicating success or
failure of the request, and <responsestring> is an optional
human-readable string explaining what the response code means.
This server parses the request and the headers, and then calls a
function specific to the request type (<command>). Specifically,
a request SPAM will be handled by a method do_SPAM(). If no
such method exists the server sends an error response to the
client. If it exists, it is called with no arguments:
do_SPAM()
Note that the request name is case sensitive (i.e. SPAM and spam
are different requests).
The various request details are stored in instance variables:
- client_address is the client IP address in the form (host,
port);
- command, path and version are the broken-down request line;
- headers is an instance of mimetools.Message (or a derived
class) containing the header information;
- rfile is a file object open for reading positioned at the
start of the optional input data part;
- wfile is a file object open for writing.
IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
The first thing to be written must be the response line. Then
follow 0 or more header lines, then a blank line, and then the
actual data (if any). The meaning of the header lines depends on
the command executed by the server; in most cases, when data is
returned, there should be at least one header line of the form
Content-type: <type>/<subtype>
where <type> and <subtype> should be registered MIME types,
e.g. "text/html" or "text/plain".
"""
# The Python system version, truncated to its first component.
sys_version = "Python/" + sys.version.split()[0]
# The server software version. You may want to override this.
# The format is multiple whitespace-separated strings,
# where each string is of the form name[/version].
server_version = "BaseHTTP/" + __version__
# The default request version. This only affects responses up until
# the point where the request line is parsed, so it mainly decides what
# the client gets back when sending a malformed request line.
# Most web servers default to HTTP 0.9, i.e. don't send a status line.
default_request_version = "HTTP/0.9"
def parse_request(self):
"""Parse a request (internal).
The request should be stored in self.raw_requestline; the results
are in self.command, self.path, self.request_version and
self.headers.
Return True for success, False for failure; on failure, an
error is sent back.
"""
self.command = None # set in case of error on the first line
self.request_version = version = self.default_request_version
self.close_connection = 1
requestline = self.raw_requestline
requestline = requestline.rstrip('\r\n')
self.requestline = requestline
words = requestline.split()
if len(words) == 3:
command, path, version = words
if version[:5] != 'HTTP/':
self.send_error(400, "Bad request version (%r)" % version)
return False
try:
base_version_number = version.split('/', 1)[1]
version_number = base_version_number.split(".")
# RFC 2145 section 3.1 says there can be only one "." and
# - major and minor numbers MUST be treated as
# separate integers;
# - HTTP/2.4 is a lower version than HTTP/2.13, which in
# turn is lower than HTTP/12.3;
# - Leading zeros MUST be ignored by recipients.
if len(version_number) != 2:
raise ValueError
version_number = int(version_number[0]), int(version_number[1])
except (ValueError, IndexError):
self.send_error(400, "Bad request version (%r)" % version)
return False
if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
self.close_connection = 0
if version_number >= (2, 0):
self.send_error(505,
"Invalid HTTP Version (%s)" % base_version_number)
return False
elif len(words) == 2:
command, path = words
self.close_connection = 1
if command != 'GET':
self.send_error(400,
"Bad HTTP/0.9 request type (%r)" % command)
return False
elif not words:
return False
else:
self.send_error(400, "Bad request syntax (%r)" % requestline)
return False
self.command, self.path, self.request_version = command, path, version
# Examine the headers and look for a Connection directive
self.headers = self.MessageClass(self.rfile, 0)
conntype = self.headers.get('Connection', "")
if conntype.lower() == 'close':
self.close_connection = 1
elif (conntype.lower() == 'keep-alive' and
self.protocol_version >= "HTTP/1.1"):
self.close_connection = 0
return True
def handle_one_request(self):
"""Handle a single HTTP request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific HTTP
commands such as GET and POST.
"""
try:
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(414)
return
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request():
# An error code has been sent, just exit
return
mname = 'do_' + self.command
if not hasattr(self, mname):
self.send_error(501, "Unsupported method (%r)" % self.command)
return
method = getattr(self, mname)
method()
self.wfile.flush() #actually send the response if not already done.
except socket.timeout, e:
#a read or a write timed out. Discard this connection
self.log_error("Request timed out: %r", e)
self.close_connection = 1
return
def handle(self):
"""Handle multiple requests if necessary."""
self.close_connection = 1
self.handle_one_request()
while not self.close_connection:
self.handle_one_request()
def send_error(self, code, message=None):
"""Send and log an error reply.
Arguments are the error code, and a detailed message.
The detailed message defaults to the short entry matching the
response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user.
"""
try:
short, long = self.responses[code]
except KeyError:
short, long = '???', '???'
if message is None:
message = short
explain = long
self.log_error("code %d, message %s", code, message)
# using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
content = (self.error_message_format %
{'code': code, 'message': _quote_html(message), 'explain': explain})
self.send_response(code, message)
self.send_header("Content-Type", self.error_content_type)
self.send_header('Connection', 'close')
self.end_headers()
if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
self.wfile.write(content)
error_message_format = DEFAULT_ERROR_MESSAGE
error_content_type = DEFAULT_ERROR_CONTENT_TYPE
def send_response(self, code, message=None):
"""Send the response header and log the response code.
Also send two standard headers with the server software
version and the current date.
"""
self.log_request(code)
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s %d %s\r\n" %
(self.protocol_version, code, message))
# print (self.protocol_version, code, message)
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string())
def send_header(self, keyword, value):
"""Send a MIME header."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s: %s\r\n" % (keyword, value))
if keyword.lower() == 'connection':
if value.lower() == 'close':
self.close_connection = 1
elif value.lower() == 'keep-alive':
self.close_connection = 0
def end_headers(self):
"""Send the blank line ending the MIME headers."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("\r\n")
def log_request(self, code='-', size='-'):
"""Log an accepted request.
This is called by send_response().
"""
self.log_message('"%s" %s %s',
self.requestline, str(code), str(size))
def log_error(self, format, *args):
"""Log an error.
This is called when a request cannot be fulfilled. By
default it passes the message on to log_message().
Arguments are the same as for log_message().
XXX This should go to the separate error log.
"""
self.log_message(format, *args)
def log_message(self, format, *args):
"""Log an arbitrary message.
This is used by all other logging functions. Override
it if you have specific logging wishes.
The first argument, FORMAT, is a format string for the
message to be logged. If the format string contains
any % escapes requiring parameters, they should be
specified as subsequent arguments (it's just like
printf!).
The client host and current date/time are prefixed to
every message.
"""
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
def version_string(self):
"""Return the server software version string."""
return self.server_version + ' ' + self.sys_version
def date_time_string(self, timestamp=None):
"""Return the current date and time formatted for a message header."""
if timestamp is None:
timestamp = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
def log_date_time_string(self):
"""Return the current time formatted for logging."""
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (
day, self.monthname[month], year, hh, mm, ss)
return s
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def address_string(self):
"""Return the client address formatted for logging.
This version looks up the full hostname using gethostbyaddr(),
and tries to find a name that contains at least one dot.
"""
host, port = self.client_address[:2]
return socket.getfqdn(host)
# Essentially static class variables
# The version of the HTTP protocol we support.
# Set this to HTTP/1.1 to enable automatic keepalive
protocol_version = "HTTP/1.0"
# The Message-like class used to parse headers
MessageClass = mimetools.Message
# Table mapping response codes to messages; entries have the
# form {code: (shortmessage, longmessage)}.
# See RFC 2616.
responses = {
100: ('Continue', 'Request received, please continue'),
101: ('Switching Protocols',
'Switching to new protocol; obey Upgrade header'),
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted',
'Request accepted, processing continues off-line'),
203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
204: ('No Content', 'Request fulfilled, nothing follows'),
205: ('Reset Content', 'Clear input form for further input.'),
206: ('Partial Content', 'Partial content follows.'),
300: ('Multiple Choices',
'Object has several resources -- see URI list'),
301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('See Other', 'Object moved -- see Method and URL list'),
304: ('Not Modified',
'Document has not changed since given time'),
305: ('Use Proxy',
'You must use proxy specified in Location to access this '
'resource.'),
307: ('Temporary Redirect',
'Object moved temporarily -- see URI list'),
400: ('Bad Request',
'Bad request syntax or unsupported method'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment Required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed',
'Specified method is invalid for this resource.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with '
'this proxy before proceeding.'),
408: ('Request Timeout', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone',
'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable',
'Cannot satisfy request range.'),
417: ('Expectation Failed',
'Expect condition could not be satisfied.'),
500: ('Internal Server Error', 'Server got itself in trouble'),
501: ('Not Implemented',
'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service Unavailable',
'The server cannot process the request due to a high load'),
504: ('Gateway Timeout',
'The gateway server did not receive a timely response'),
505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}
def test(HandlerClass = BaseHTTPRequestHandler,
ServerClass = HTTPServer, protocol="HTTP/1.0"):
"""Test the HTTP request handler class.
This runs an HTTP server on port 8000 (or the first command line
argument).
"""
if sys.argv[1:]:
port = int(sys.argv[1])
else:
port = 8000
server_address = ('', port)
HandlerClass.protocol_version = protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
test()
|
py | 1a3949b321459b377cc6d0ebb8b03949387f8e76 | a = 5
a -= 7
|
py | 1a394b625c29e75b3a92dc2266f61aacf7314bbc | from bika.lims.jsonapi.read import read
from plone.jsonapi.core import router
from plone.jsonapi.core.interfaces import IRouteProvider
from Products.CMFCore.utils import getToolByName
from zExceptions import BadRequest
from zope import interface
import json
import transaction
class doActionFor(object):
interface.implements(IRouteProvider)
def initialize(self, context, request):
pass
@property
def routes(self):
return (
("/doActionFor", "doActionFor", self.do_action_for, dict(methods=['GET', 'POST'])),
("/doActionFor_many", "doActionFor_many", self.do_action_for_many, dict(methods=['GET', 'POST'])),
)
def do_action_for(self, context, request):
"""/@@API/doActionFor: Perform workflow transition on values returned
by jsonapi "read" function.
Required parameters:
- action: The workflow transition to apply to found objects.
Parameters used to locate objects are the same as used for the "read"
method.
"""
savepoint = transaction.savepoint()
workflow = getToolByName(context, 'portal_workflow')
uc = getToolByName(context, 'uid_catalog')
action = request.get('action', '')
if not action:
raise BadRequest("No action specified in request")
ret = {
"url": router.url_for("doActionFor", force_external=True),
"success": True,
"error": False,
}
data = read(context, request)
objects = data.get('objects', [])
if len(objects) == 0:
raise BadRequest("No matching objects found")
for obj_dict in objects:
try:
obj = uc(UID=obj_dict['UID'])[0].getObject()
workflow.doActionFor(obj, action)
obj.reindexObject()
except Exception as e:
savepoint.rollback()
msg = "Cannot execute '{0}' on {1} ({2})".format(
action, obj, e.message)
msg = msg.replace("${action_id}", action)
raise BadRequest(msg)
return ret
def do_action_for_many(self, context, request):
"""/@@API/doActionFor: Perform workflow transition on a list of objects.
required parameters:
- obj_paths: a json encoded list of objects to transition.
- action: the id of the transition
"""
savepoint = transaction.savepoint()
workflow = getToolByName(context, 'portal_workflow')
site_path = request['PATH_INFO'].replace("/@@API/doActionFor_many", "")
obj_paths = json.loads(request.get('f', '[]'))
action = request.get('action', '')
if not action:
raise BadRequest("No action specified in request")
ret = {
"url": router.url_for("doActionFor_many", force_external=True),
"success": True,
"error": False,
}
for obj_path in obj_paths:
if not obj_path.startswith("/"):
obj_path = "/" + obj_path
obj = context.restrictedTraverse(str(site_path + obj_path))
if obj_path.startswith(site_path):
obj_path = obj_path[len(site_path):]
try:
workflow.doActionFor(obj, action)
obj.reindexObject()
except Exception as e:
savepoint.rollback()
msg = "Cannot execute '{0}' on {1} ({2})".format(
action, obj, e.message)
msg = msg.replace("${action_id}", action)
raise BadRequest(msg)
return ret
|
py | 1a394bae1ed92ec9b5beb5cf982f8ab0b6b8e582 | import os
import sys
from setuptools import setup
if sys.platform == 'darwin':
import py2app
elif sys.platform == 'win32':
import py2exe
sys.setrecursionlimit(100000)
VERSION = os.environ['DEEPN_VERSION']
BUNDLE_VERSION = VERSION.replace(".", "")
APP = ['gc_jm.py']
INCLUDES = ['sys', 'subprocess']
OPTIONS = {'argv_emulation': True,
'iconfile' : 'icon/Icon1.icns',
'plist': {'CFBundleGetInfoString': 'GCJM',
'CFBundleIdentifier': 'edu.uiowa.robertpiper.deepn.gcjm',
'CFBundleShortVersionString': VERSION,
'CFBundleName': 'GCJM',
'CFBundleVersion': BUNDLE_VERSION,
'NSHumanReadableCopyright': '(c) 2016 Venkatramanan Krishnamani, Robert C. Piper, Mark Stammnes'},
'includes': INCLUDES,
'excludes': ['PyQt4.QtDesigner', 'PyQt4.QtNetwork', 'PyQt4.QtOpenGL', 'PyQt4.QtScript', 'PyQt4.QtSql', 'PyQt4.QtTest', 'PyQt4.QtWebKit', 'PyQt4.QtXml', 'PyQt4.phonon'],
}
if sys.platform == 'darwin':
setup(
app=APP,
name='GCJM',
options={'py2app': OPTIONS},
setup_requires=['py2app'],
author='Venkatramanan Krishnamani, Robert C. Piper, Mark Stammnes',
data_files=[],
)
elif sys.platform == 'win32':
origIsSystemDLL = py2exe.build_exe.isSystemDLL
def isSystemDLL(pathname):
if os.path.basename(pathname).lower() in ("msvcp71.dll", "dwmapi.dll", "'msvcp90.dll'"):
return 0
return origIsSystemDLL(pathname)
py2exe.build_exe.isSystemDLL = isSystemDLL
setup(
console=APP,
version=VERSION,
description='GCJM',
author='Venkatramanan Krishnamani, Robert C. Piper, Mark Stammnes',
windows=[{"script":'gc_jm.py',
"icon_resources": [(1, "icon/Icon1.ico")],
"dest_base":"GCJM"
}],
data_files=[],
options={"py2exe": {'includes': INCLUDES,
"optimize": 2,
"compressed": 2,
"bundle_files": 1,
"dist_dir": "dist\GCJM"
}}
)
|
py | 1a394bf47dd92f1ac3da360644640ede4384a29a | from __future__ import absolute_import
from .bot import TelegramComm # NOQA
|
py | 1a394bf717b4712a23549ec1ea274a5b3352a652 | from .jarv import app
|
py | 1a394cf9c7eb99717e2514108e5f1a318701bbde | from typing import Callable
from jax import lax
from flax import linen as nn
class MultiTaskDense(nn.Module):
features: int
n_tasks: int
kernel_init: Callable = nn.initializers.lecun_normal()
bias_init: Callable = nn.initializers.zeros
@nn.compact
def __call__(self, inputs):
kernel = self.param(
"kernel", self.kernel_init, (self.n_tasks, inputs.shape[-1], self.features)
)
y = lax.dot_general(
inputs, kernel, dimension_numbers=(((2,), (1,)), ((0,), (0,)))
)
bias = self.param("bias", self.bias_init, (self.n_tasks, 1, self.features))
y = y + bias
return y
|
py | 1a394e0ae77290c8fcd67674496d66aa5bf24760 | """
Generalized vs Standard Lomb-Scargle
------------------------------------
Figure 10.16
A comparison of the standard and generalized Lomb-Scargle periodograms for a
signal y(t) = 10 + sin(2pi t/P) with P = 0.3, corresponding to omega_0 ~ 21.
This example is, in some sense, a worst-case scenario for the standard
Lomb-Scargle algorithm because there are no sampled points during the times
when ytrue < 10, which leads to a gross overestimation of the mean. The bottom
panel shows the Lomb-Scargle and generalized Lomb-Scargle periodograms for
these data; the generalized method recovers the expected peak as the highest
peak, while the standard method incorrectly chooses the peak at omega ~ 17.6
(because it is higher than the true peak at omega_0 ~ 21). The dotted lines
show the 1% and 5% significance levels for the highest peak in the generalized
periodogram, determined by 1000 bootstrap resamplings (see Section 10.3.2).
Note: This Plot Contains an Error
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
After the book was in press, a reader pointed out that this plot contains a
typo. Instead of passing the noisy data to the Lomb-Scargle routine, we
had passed the underlying, non-noisy data. This caused an over-estimate
of the Lomb-Scargle power.
Because of this, we add two extra plots to this script: the first reproduces
the current plot without the typo. In it, we see that for the noisy data,
the period is not detected for just ~30 points within ten periods.
In the second additional plot, we increase the baseline and the number of
points by a factor of ten. With this configuration, the peak is detected,
and the qualitative aspects of the above discussion hold true.
We regret the error!
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.time_series import \
lomb_scargle, lomb_scargle_BIC, lomb_scargle_bootstrap
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
if "setup_text_plots" not in globals():
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Generate data where y is positive
np.random.seed(0)
N = 30
P = 0.3
t = P / 2 * np.random.random(N) + P * np.random.randint(100, size=N)
y = 10 + np.sin(2 * np.pi * t / P)
dy = 0.5 + 0.5 * np.random.random(N)
y_obs = y + np.random.normal(dy)
omega_0 = 2 * np.pi / P
#######################################################################
# Generate the plot with and without the original typo
for typo in [True, False]:
#------------------------------------------------------------
# Compute the Lomb-Scargle Periodogram
sig = np.array([0.1, 0.01, 0.001])
omega = np.linspace(17, 22, 1000)
# Notice the typo: we used y rather than y_obs
if typo is True:
P_S = lomb_scargle(t, y, dy, omega, generalized=False)
P_G = lomb_scargle(t, y, dy, omega, generalized=True)
else:
P_S = lomb_scargle(t, y_obs, dy, omega, generalized=False)
P_G = lomb_scargle(t, y_obs, dy, omega, generalized=True)
#------------------------------------------------------------
# Get significance via bootstrap
D = lomb_scargle_bootstrap(t, y_obs, dy, omega, generalized=True,
N_bootstraps=1000, random_state=0)
sig1, sig5 = np.percentile(D, [99, 95])
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
# First panel: input data
ax = fig.add_subplot(211)
ax.errorbar(t, y_obs, dy, fmt='.k', lw=1, ecolor='gray')
ax.plot([-2, 32], [10, 10], ':k', lw=1)
ax.set_xlim(-2, 32)
ax.set_xlabel('$t$')
ax.set_ylabel('$y(t)$')
if typo is False:
ax.set_title('Corrected version')
# Second panel: periodogram
ax = fig.add_subplot(212)
ax.plot(omega, P_S, '--k', lw=1, label='standard')
ax.plot(omega, P_G, '-k', lw=1, label='generalized')
ax.legend(loc=2)
# plot the significance lines.
xlim = (omega[0], omega[-1])
ax.plot(xlim, [sig1, sig1], ':', c='black')
ax.plot(xlim, [sig5, sig5], ':', c='black')
# label BIC on the right side
ax2 = ax.twinx()
ax2.set_ylim(tuple(lomb_scargle_BIC(ax.get_ylim(), y_obs, dy)))
ax2.set_ylabel(r'$\Delta BIC$')
ax.set_xlabel('$\omega$')
ax.set_ylabel(r'$P_{\rm LS}(\omega)$')
ax.set_xlim(xlim)
ax.set_ylim(0, 1.1)
#######################################################################
# Redo the plot without the typo
# We need a larger data range to actually get significant power
# with actual noisy data
#------------------------------------------------------------
# Generate data where y is positive
np.random.seed(0)
N = 300
P = 0.3
t = P / 2 * np.random.random(N) + P * np.random.randint(1000, size=N)
y = 10 + np.sin(2 * np.pi * t / P)
dy = 0.5 + 0.5 * np.random.random(N)
y_obs = y + np.random.normal(dy)
omega_0 = 2 * np.pi / P
#------------------------------------------------------------
# Compute the Lomb-Scargle Periodogram
sig = np.array([0.1, 0.01, 0.001])
omega = np.linspace(20.5, 21.1, 1000)
P_S = lomb_scargle(t, y_obs, dy, omega, generalized=False)
P_G = lomb_scargle(t, y_obs, dy, omega, generalized=True)
#------------------------------------------------------------
# Get significance via bootstrap
D = lomb_scargle_bootstrap(t, y_obs, dy, omega, generalized=True,
N_bootstraps=1000, random_state=0)
sig1, sig5 = np.percentile(D, [99, 95])
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
# First panel: input data
ax = fig.add_subplot(211)
ax.errorbar(t, y_obs, dy, fmt='.k', lw=1, ecolor='gray')
ax.plot([-20, 320], [10, 10], ':k', lw=1)
ax.set_xlim(-20, 320)
ax.set_xlabel('$t$')
ax.set_ylabel('$y(t)$')
# Second panel: periodogram
ax = fig.add_subplot(212)
ax.plot(omega, P_S, '--k', lw=1, label='standard')
ax.plot(omega, P_S, '-', c='gray', lw=1)
ax.plot(omega, P_G, '-k', lw=1, label='generalized')
ax.legend(loc=2)
# plot the significance lines.
xlim = (omega[0], omega[-1])
ax.plot(xlim, [sig1, sig1], ':', c='black')
ax.plot(xlim, [sig5, sig5], ':', c='black')
# label BIC on the right side
ax2 = ax.twinx()
ax2.set_ylim(tuple(lomb_scargle_BIC(ax.get_ylim(), y_obs, dy)))
ax2.set_ylabel(r'$\Delta BIC$')
ax.set_xlabel('$\omega$')
ax.set_ylabel(r'$P_{\rm LS}(\omega)$')
ax.set_xlim(xlim)
ax.set_ylim(0, 0.12)
plt.show()
|
py | 1a394eb6c5cc6e245d516401c9b29f43efd171f0 | import math
def mirror_numbers_graphing(n, factor, mid, width):
# https://stackoverflow.com/questions/38130895/find-middle-of-a-list/38131003
middle = math.floor(float(n) / 2)
if n % 2 != 0:
fudge_array_minus = []
fudge_array_plus = []
if middle < 1:
adj = 1
else:
adj = 2
for i in range(1, middle + 1):
fudge_array_minus.append(-i * adj * width * factor + mid)
fudge_array_plus.append(i * adj * factor * width + mid)
x = fudge_array_minus[::-1] + [0.0 + mid] + fudge_array_plus
# x = [-factor * i + mid for i in reversed(range(1, middle + 1))] + [0.0 + mid] + [factor * i + mid for i in range(1, middle + 1)]
return x
else:
acc = 0
fudge_array_minus = []
fudge_array_plus = []
for i in range(1, middle + 1):
fudge_array_minus.append((-i - acc) * width * factor + mid)
fudge_array_plus.append((i + acc) * width * factor + mid)
acc += 1
x = fudge_array_minus[::-1] + fudge_array_plus
# x = [-factor * i + mid for i in reversed(range(1, middle + 1))] + [factor * i + mid for i in range(1, middle + 1)]
return x
|
py | 1a394fcc763f71dedd023be7d884cde52b8998c5 | """
Defines the class used for server_utils sync
"""
from time import sleep
from colorama import Back, Fore, Style
from .serverprocess import ServerProcess
from .sharedvars import SharedVariables
class ServerSync(ServerProcess):
"""
Class used for server_utils synchronization
2 modes supported : auto for auto run (time interval)
and manual, waiting for the user to manually advance in time
"""
def __init__(self, shared_variables: SharedVariables, time_interval: int):
super().__init__(shared_variables)
self.time_interval = time_interval
self.turn = 0
def update(self):
"""
Used to sync every other subprocess, waiting the barrier
when timer expired OR when received the instruction to do so
"""
print(
f"\n\n{Back.LIGHTBLUE_EX}{Fore.BLACK}***** Turn {self.turn} ended, "
f"begin turn {self.turn + 1} *****{Style.RESET_ALL}"
)
def write(self):
"""
Used to begin the next turn once all houses have finished their exchanges
"""
self.turn += 1
sleep(self.time_interval)
print("Timer expired, begin next turn")
def kill(self) -> None:
"""
Kills softly the process
"""
print(f"{Fore.RED}Stopping sync{Style.RESET_ALL}")
super().kill()
|
py | 1a3950c8b3cead50a01fc7d7ba6979f8a49835fe | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import hashlib
import uuid
from pyramid.httpexceptions import (
HTTPMovedPermanently, HTTPSeeOther, HTTPTooManyRequests
)
from pyramid.security import Authenticated, remember, forget
from pyramid.view import view_config
from sqlalchemy.orm.exc import NoResultFound
from warehouse.accounts import REDIRECT_FIELD_NAME
from warehouse.accounts.forms import (
LoginForm, RegistrationForm, RequestPasswordResetForm, ResetPasswordForm,
)
from warehouse.accounts.interfaces import (
IUserService, ITokenService, TokenExpired, TokenInvalid, TokenMissing,
TooManyFailedLogins,
)
from warehouse.accounts.models import Email
from warehouse.cache.origin import origin_cache
from warehouse.email import (
send_password_reset_email, send_email_verification_email,
)
from warehouse.packaging.models import Project, Release
from warehouse.utils.http import is_safe_url
USER_ID_INSECURE_COOKIE = "user_id__insecure"
@view_config(context=TooManyFailedLogins)
def failed_logins(exc, request):
resp = HTTPTooManyRequests(
"There have been too many unsuccessful login attempts. Please try "
"again later.",
retry_after=exc.resets_in.total_seconds(),
)
# TODO: This is kind of gross, but we need it for as long as the legacy
# upload API exists and is supported. Once we get rid of that we can
# get rid of this as well.
resp.status = "{} {}".format(
resp.status_code,
"Too Many Failed Login Attempts",
)
return resp
@view_config(
route_name="accounts.profile",
renderer="accounts/profile.html",
decorator=[
origin_cache(
1 * 24 * 60 * 60, # 1 day
stale_while_revalidate=5 * 60, # 5 minutes
stale_if_error=1 * 24 * 60 * 60, # 1 day
),
],
)
def profile(user, request):
if user.username != request.matchdict.get("username", user.username):
return HTTPMovedPermanently(
request.current_route_path(username=user.username),
)
projects = (
request.db.query(Project)
.filter(Project.users.contains(user))
.join(Project.releases)
.order_by(Release.created.desc())
.all()
)
return {"user": user, "projects": projects}
@view_config(
route_name="accounts.login",
renderer="accounts/login.html",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def login(request, redirect_field_name=REDIRECT_FIELD_NAME,
_form_class=LoginForm):
# TODO: Logging in should reset request.user
# TODO: Configure the login view as the default view for not having
# permission to view something.
if request.authenticated_userid is not None:
return HTTPSeeOther(request.route_path('manage.projects'))
user_service = request.find_service(IUserService, context=None)
redirect_to = request.POST.get(redirect_field_name,
request.GET.get(redirect_field_name))
form = _form_class(request.POST, user_service=user_service)
if request.method == "POST":
request.registry.datadog.increment('warehouse.authentication.start',
tags=['auth_method:login_form'])
if form.validate():
# Get the user id for the given username.
username = form.username.data
userid = user_service.find_userid(username)
# If the user-originating redirection url is not safe, then
# redirect to the index instead.
if (not redirect_to or
not is_safe_url(url=redirect_to, host=request.host)):
redirect_to = request.route_path('manage.projects')
# Actually perform the login routine for our user.
headers = _login_user(request, userid)
# Now that we're logged in we'll want to redirect the user to
# either where they were trying to go originally, or to the default
# view.
resp = HTTPSeeOther(redirect_to, headers=dict(headers))
# We'll use this cookie so that client side javascript can
# Determine the actual user ID (not username, user ID). This is
# *not* a security sensitive context and it *MUST* not be used
# where security matters.
#
# We'll also hash this value just to avoid leaking the actual User
# IDs here, even though it really shouldn't matter.
resp.set_cookie(
USER_ID_INSECURE_COOKIE,
hashlib.blake2b(
str(userid).encode("ascii"),
person=b"warehouse.userid",
).hexdigest().lower(),
)
request.registry.datadog.increment(
'warehouse.authentication.complete',
tags=['auth_method:login_form']
)
return resp
else:
request.registry.datadog.increment(
'warehouse.authentication.failure',
tags=['auth_method:login_form']
)
return {
"form": form,
"redirect": {
"field": REDIRECT_FIELD_NAME,
"data": redirect_to,
},
}
@view_config(
route_name="accounts.logout",
renderer="accounts/logout.html",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def logout(request, redirect_field_name=REDIRECT_FIELD_NAME):
# TODO: If already logged out just redirect to ?next=
# TODO: Logging out should reset request.user
redirect_to = request.POST.get(redirect_field_name,
request.GET.get(redirect_field_name))
if request.method == "POST":
# A POST to the logout view tells us to logout. There's no form to
# validate here because there's no data. We should be protected against
# CSRF attacks still because of the CSRF framework, so users will still
# need a post body that contains the CSRF token.
headers = forget(request)
# When crossing an authentication boundary we want to create a new
# session identifier. We don't want to keep any information in the
# session when going from authenticated to unauthenticated because
# user's generally expect that logging out is a destructive action
# that erases all of their private data. However, if we don't clear the
# session then another user can use the computer after them, log in to
# their account, and then gain access to anything sensitive stored in
# the session for the original user.
request.session.invalidate()
# If the user-originating redirection url is not safe, then redirect to
# the index instead.
if (not redirect_to or
not is_safe_url(url=redirect_to, host=request.host)):
redirect_to = "/"
# Now that we're logged out we'll want to redirect the user to either
# where they were originally, or to the default view.
resp = HTTPSeeOther(redirect_to, headers=dict(headers))
# Ensure that we delete our user_id__insecure cookie, since the user is
# no longer logged in.
resp.delete_cookie(USER_ID_INSECURE_COOKIE)
return resp
return {"redirect": {"field": REDIRECT_FIELD_NAME, "data": redirect_to}}
@view_config(
route_name="accounts.register",
renderer="accounts/register.html",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def register(request, _form_class=RegistrationForm):
if request.authenticated_userid is not None:
return HTTPSeeOther(request.route_path('manage.projects'))
# Check if the honeypot field has been filled
if request.method == "POST" and request.POST.get('confirm_form'):
return HTTPSeeOther(request.route_path("index"))
if request.flags.enabled('disallow-new-user-registration'):
request.session.flash(
("New User Registration Temporarily Disabled "
"See https://pypi.org/help#admin-intervention for details"),
queue="error",
)
return HTTPSeeOther(request.route_path("index"))
user_service = request.find_service(IUserService, context=None)
form = _form_class(data=request.POST, user_service=user_service,)
if request.method == "POST" and form.validate():
user = user_service.create_user(
form.username.data, form.full_name.data, form.new_password.data,
)
email = user_service.add_email(user.id, form.email.data, primary=True)
send_email_verification_email(request, user, email)
return HTTPSeeOther(
request.route_path("index"),
headers=dict(_login_user(request, user.id))
)
return {"form": form}
@view_config(
route_name="accounts.request-password-reset",
renderer="accounts/request-password-reset.html",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def request_password_reset(request, _form_class=RequestPasswordResetForm):
if request.authenticated_userid is not None:
return HTTPSeeOther(request.route_path('index'))
user_service = request.find_service(IUserService, context=None)
form = _form_class(request.POST, user_service=user_service)
if request.method == "POST" and form.validate():
user = user_service.get_user_by_username(form.username_or_email.data)
if user is None:
user = user_service.get_user_by_email(form.username_or_email.data)
send_password_reset_email(request, user)
token_service = request.find_service(ITokenService, name='password')
n_hours = token_service.max_age // 60 // 60
return {"n_hours": n_hours}
return {"form": form}
@view_config(
route_name="accounts.reset-password",
renderer="accounts/reset-password.html",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def reset_password(request, _form_class=ResetPasswordForm):
if request.authenticated_userid is not None:
return HTTPSeeOther(request.route_path('index'))
user_service = request.find_service(IUserService, context=None)
token_service = request.find_service(ITokenService, name="password")
def _error(message):
request.session.flash(message, queue="error")
return HTTPSeeOther(
request.route_path("accounts.request-password-reset"),
)
try:
token = request.params.get('token')
data = token_service.loads(token)
except TokenExpired:
return _error("Expired token - Request a new password reset link")
except TokenInvalid:
return _error("Invalid token - Request a new password reset link")
except TokenMissing:
return _error("Invalid token - No token supplied")
# Check whether this token is being used correctly
if data.get('action') != "password-reset":
return _error("Invalid token - Not a password reset token")
# Check whether a user with the given user ID exists
user = user_service.get_user(uuid.UUID(data.get("user.id")))
if user is None:
return _error("Invalid token - User not found")
# Check whether the user has logged in since the token was created
last_login = data.get("user.last_login")
if str(user.last_login) > last_login:
# TODO: track and audit this, seems alertable
return _error(
"Invalid token - User has logged in since this token was requested"
)
# Check whether the password has been changed since the token was created
password_date = data.get("user.password_date")
if str(user.password_date) > password_date:
return _error(
"Invalid token - Password has already been changed since this "
"token was requested"
)
form = _form_class(
**request.params,
username=user.username,
full_name=user.name,
email=user.email,
user_service=user_service
)
if request.method == "POST" and form.validate():
# Update password.
user_service.update_user(user.id, password=form.new_password.data)
# Flash a success message
request.session.flash(
"You have successfully reset your password", queue="success"
)
# Perform login just after reset password and redirect to default view.
return HTTPSeeOther(
request.route_path("index"),
headers=dict(_login_user(request, user.id))
)
return {"form": form}
@view_config(
route_name="accounts.verify-email",
uses_session=True,
effective_principals=Authenticated,
)
def verify_email(request):
token_service = request.find_service(ITokenService, name="email")
def _error(message):
request.session.flash(message, queue="error")
return HTTPSeeOther(request.route_path("manage.account"))
try:
token = request.params.get('token')
data = token_service.loads(token)
except TokenExpired:
return _error("Expired token - Request a new verification link")
except TokenInvalid:
return _error("Invalid token - Request a new verification link")
except TokenMissing:
return _error("Invalid token - No token supplied")
# Check whether this token is being used correctly
if data.get('action') != "email-verify":
return _error("Invalid token - Not an email verification token")
try:
email = (
request.db.query(Email)
.filter(Email.id == data['email.id'], Email.user == request.user)
.one()
)
except NoResultFound:
return _error("Email not found")
if email.verified:
return _error("Email already verified")
email.verified = True
email.unverify_reason = None
email.transient_bounces = 0
request.user.is_active = True
request.session.flash(
f'Email address {email.email} verified. ' +
'You can now set this email as your primary address.',
queue='success'
)
return HTTPSeeOther(request.route_path("manage.account"))
def _login_user(request, userid):
# We have a session factory associated with this request, so in order
# to protect against session fixation attacks we're going to make sure
# that we create a new session (which for sessions with an identifier
# will cause it to get a new session identifier).
# We need to protect against session fixation attacks, so make sure
# that we create a new session (which will cause it to get a new
# session identifier).
if (request.unauthenticated_userid is not None and
request.unauthenticated_userid != userid):
# There is already a userid associated with this request and it is
# a different userid than the one we're trying to remember now. In
# this case we want to drop the existing session completely because
# we don't want to leak any data between authenticated userids.
request.session.invalidate()
else:
# We either do not have an associated userid with this request
# already, or the userid is the same one we're trying to remember
# now. In either case we want to keep all of the data but we want
# to make sure that we create a new session since we're crossing
# a privilege boundary.
data = dict(request.session.items())
request.session.invalidate()
request.session.update(data)
# Remember the userid using the authentication policy.
headers = remember(request, str(userid))
# Cycle the CSRF token since we've crossed an authentication boundary
# and we don't want to continue using the old one.
request.session.new_csrf_token()
# Whenever we log in the user, we want to update their user so that it
# records when the last login was.
user_service = request.find_service(IUserService, context=None)
user_service.update_user(userid, last_login=datetime.datetime.utcnow())
return headers
@view_config(
route_name="includes.current-user-profile-callout",
renderer="includes/accounts/profile-callout.html",
uses_session=True,
)
def profile_callout(user, request):
return {"user": user}
@view_config(
route_name="includes.edit-profile-button",
renderer="includes/accounts/edit-profile-button.html",
uses_session=True,
)
def edit_profile_button(user, request):
return {'user': user}
|
py | 1a39513f3e6fa499ad6251fa96214230c35f42c5 | """
Django settings for blog project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^+5#)r7c*q8(4x+bvyty5%w)6$9r5^s96bhy3%b)1k-v_j(%u&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
py | 1a39515c913f1b25cabcfd0fef8f117b0e6c277b | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from lgbserver import LightGBMModelRepository
model_dir = os.path.join(os.path.dirname(__file__), "example_model")
invalid_model_dir = os.path.join(os.path.dirname(__file__), "model_not_exist", "model")
@pytest.mark.asyncio
async def test_load():
repo = LightGBMModelRepository(model_dir=model_dir, nthread=1)
model_name = "model"
await repo.load(model_name)
assert repo.get_model(model_name) is not None
assert repo.is_model_ready(model_name)
@pytest.mark.asyncio
async def test_load_fail():
repo = LightGBMModelRepository(model_dir=model_dir, nthread=1)
model_name = "model"
with pytest.raises(Exception):
await repo.load(model_name)
assert repo.get_model(model_name) is None
assert not repo.is_model_ready(model_name)
|
py | 1a39519bec274e4a9858ef2dba286ebcf6be95b5 | import requests,json
def get_random_quotes():
r = requests.get('http://quotes.stormconsultancy.co.uk/random.json')
if r.status_code == 200:
quote = r.json()
return quote |
py | 1a39528505d50b7178c355e42d7f6998c99ba30d | #!/usr/bin/python
import sys
import os
sys.path.append( "." )
import argparse
from src import align
from src import score
from src import merge
EPILOG = "Commands:\n\
align Align the FASTQ sequencing files into bait and prey sequences to generate interaction matrix\n\
score Usging two interaction matries to generate an interaction score matrix\n\
merge Merge several interaction score matries and apply quartile correction to generate a final average interaction score matrix\n\n\
Run 'recYnH.py COMMAND --help' for more information on a command."
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='recYnH program',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog = EPILOG )
#parser.add_argument('cmd', metavar='COMMAND', help="set COMMAND ('align'|'merge')", default="" )
subparsers = parser.add_subparsers(dest='cmd', metavar="COMMAND", help='sub-command help')
align_parser = subparsers.add_parser('align', help='a help for align')
align_parser.add_argument('-p', '--program', default='Y2H', help="set the experiments type ('Y2H'|'Y3H') (default 'Y2H')" ) # Y2H or Y3H
align_parser.add_argument('-r', '--relaxed', action='store_true', help="set relaxed alignment mode" ) # Y2H or Y3H
align_parser.add_argument('-i1', '--fasta1', required=True, help="set the sequence of baits and preys; if i2 is set then it is for baits; RNA baits in Y3H" ) # Y2H or Y3H
align_parser.add_argument('-i2', '--fasta2', required=False, help="set the sequence of preys (OPTIONAL in Y2H; protein preys in Y3H)" ) # Y2H or Y3H
align_parser.add_argument('-l1', '--lastnt1', default=150, type=int, required=False, help="set the sequence of length of baits (default 150nt); In Y3H, it is not used" ) # Y2H or Y3H
align_parser.add_argument('-l2', '--lastnt2', default=150, type=int, required=False, help="set the sequence of length of preys (default 150nt)" ) # Y2H or Y3H
align_parser.add_argument('-f1', '--fastq1', required=True, help="set the FASTQ file (read 1 = baits)" ) # Y2H or Y3H
align_parser.add_argument('-f2', '--fastq2', required=True, help="set the FASTQ file (read 2 = preys)" ) # Y2H or Y3H
align_parser.add_argument('-o', '--output', required=False, help="set the output directory path (default = same folder as FASTQ file 1)" ) # Y2H or Y3H
align_parser.add_argument('-n', '--name', default='recYnH.raw', required=False, help="set the output filename (default 'recYnH.raw')" ) # Y2H or Y3H
score_parser = subparsers.add_parser('score', help='a help for score') #, epilog = "Run 'recYnH.py score --help' for more information on a command.")
score_parser.add_argument('-p', '--program', default='Y2H', help="set the experiments type ('Y2H'|'Y3H') (default 'Y2H')" ) # Y2H or Y3H
score_parser.add_argument('-m1', '--matrix1', required=True, help="set the interaction matrix of non-selection condition" ) # Y2H or Y3H
score_parser.add_argument('-m2', '--matrix2', required=True, help="set the interaction matrix of selection condition" ) # Y2H or Y3H
score_parser.add_argument('-o', '--output', required=False, help="set the output folder name (default = same folder as interaction matrix file)" ) # Y2H or Y3H
score_parser.add_argument('-n', '--name', default='recYnH.IS', required=False, help="set the output filename (default 'recYnH.IS')" ) # Y2H or Y3H
merge_parser = subparsers.add_parser('merge', help='a help for merge') #, epilog = "Run 'recYnH.py merge --help' for more information on a command.")
merge_parser.add_argument('-i', '--input', required = True, nargs = '*', help="list of interaction score matrices" ) # Y2H or Y3H
merge_parser.add_argument('-q', '--quartile', default=0.75, required=False, help="set the percentile auto-activation signal correction value (default 0.75)" ) # Y2H or Y3H
merge_parser.add_argument('-o', '--output', required=False, help="set the output folder name (default = same folder as interaction matrix file 1)" ) # Y2H or Y3H
merge_parser.add_argument('-n', '--name', default='recYnH.avgIS', required=False, help="set the output filename (default 'recYnH.avgIS')" ) # Y2H or Y3H
args = parser.parse_args()
if args.cmd not in [ "align", "score", "merge" ]:
exit(0 )
print "[ Starting rec-YnH Pipeline ]"
if ( args.cmd == "align" ):
align.run( args )
elif ( args.cmd == "score" ):
score.run( args )
elif ( args.cmd == "merge" ):
merge.run( args )
print "[ Finishing rec-YnH Pipeline ]"
|
py | 1a3952c071030e449e96c1e1924d30c0aa922be6 | __version__ = '4.19.8' |
py | 1a3952fc6de813873ea9b7ff7c63b6bc64d831d1 | from flask import session, redirect, url_for, render_template, request
from flask import make_response
from . import main
from .forms import LoginForm, RequestKeyForm
import secrets
import sys
from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Protocol.KDF import PBKDF2
import binascii
import uuid
from .serverm import ServerM
from .cryptographicHash import HashOperation
#import base64
from base64 import b64decode
sessionKey = secrets.token_hex(32)
secret = 'bil548'
roomCounter = 5
peopleCounter = 0
allRooms = {} #dictionary of all rooms, room name - room key pairs
for i in range(roomCounter):
roomName = 'room' + str(i)
allRooms[roomName] = secrets.token_hex(16)
print( allRooms)
peoplePerRoom = {} #dictionary of all people in each room
allPeople = [] #array of all people
allSessionKeys = {} #dictionary of all session keys, person - session key pairs
@main.route('/', methods=['GET', 'POST'])
def index():
"""Login form to enter a room."""
form = LoginForm()
if form.validate_on_submit():
session['name'] = form.name.data
session['room'] = form.room.data
hashedPassword = HashOperation.hashBasedOnPassword(form.room.data)
userName = form.name.data
challangeFromServer = ServerM.insertUser(userName, hashedPassword)
# Encrpyt challenge with the hash of the password
iv = b'Sixteen byte key'
cipher = AES.new(hashedPassword, AES.MODE_CFB, iv)
msg = cipher.encrypt(challangeFromServer)
status = ServerM.getEncrytedMessage(msg, userName)
print(status)
if status == True:
sessionKeyTrue = uuid.uuid1()
################################################# SESSION KEY BURADA DEVREYE GİRİYOR#################
session['key'] = form.key.data
if( session['key'] != allRooms[session['room']]):
session['error'] = 'invalid_key'
return redirect(url_for('.error'))
if( session['name'] not in allPeople):
allPeople.append(session['name'])
allSessionKeys[session['name']] = '123456'
return render_template('chat.html', username=session['name'], room=session['room'], key=session['key'])
elif request.method == 'GET':
form.name.data = session.get('name', '')
form.room.data = session.get('room', '')
form.key.data = session.get('key', '')
return render_template('index.html', form=form)
@main.route('/chat')
def chat():
"""Chat room. The user's name and room must be stored in
the session."""
name = session.get('name', '')
room = session.get('room', '')
key = session.get('key', '')
if name == '' or room == '':
return redirect(url_for('.index'))
return render_template('chat.html', username=name, room=room, key=key)
@main.route('/initiateSession')
def initiateSession():
form = LoginForm()
if form.validate_on_submit():
session['name'] = form.name.data
session['room'] = form.room.data
session['key'] = form.key.data
return redirect(url_for('.chat'))
elif request.method == 'GET':
form.name.data = session.get('name', '')
form.room.data = session.get('room', '')
form.key.data = session.get('key', '')
return render_template('initiateSession.html', form = form)
@main.route('/requestKey', methods=['GET', 'POST'])
def requestKey():
##THESE VALUES ARE USED IN HTML FILES##
name = session.get('name', '')
room = session.get('room', '')
key = allRooms[room]
form = RequestKeyForm()
if form.validate_on_submit():
session['name'] = form.name.data
session['room'] = form.room.data
return redirect(url_for('.requestKey'))
elif request.method == 'GET':
form.name.data = session.get('name', '')
form.room.data = session.get('room', '')
return render_template('requestKey.html', form = form, key=key, room=room, username=name)
@main.route('/error')
def error():
error = session.get('error', '')
if error == 'invalid_key':
return render_template('errorPage.html', note='Room key is incorrect.')
return render_template('errorPage.html')
@main.route('/aes', methods=['GET', 'POST'])
def aes():
message = None
if request.method == 'POST':
key = request.form['_key']
iv = request.form['_iv']
text = request.form['_text']
result = decrypt(key, iv, text)
resp = make_response(result)
resp.headers['Content-Type'] = "application/json"
return resp
@main.route('/decrypt2', methods=['GET', 'POST'])
def decrypt2():
message = None
if request.method == 'POST':
data = request.form['data']
roomkey = request.form['key']
result = decrypt2(data, roomkey)
resp = make_response(result)
resp.headers['Content-Type'] = "application/json"
return resp
def decrypt(key, iv, encrypted_text):
aes = AES.new(key, AES.MODE_CBC, iv, segment_size=128)
encrypted_text_bytes = binascii.a2b_hex(encrypted_text)
decrypted_text = aes.decrypt(encrypted_text_bytes)
return decrypted_text.decode('ascii')
def decrypt2(data, roomkey):
data = b64decode(data)
byte = PBKDF2( roomkey.encode("utf-8"), "1234salt".encode("utf-8"), 48, 128)
iv = byte[0:16]
key = byte[16:48]
cipher = AES.new(key, AES.MODE_CBC, iv)
text = cipher.decrypt(data)
text = text[:-text[-1]].decode("utf-8")
return text
|
py | 1a39533807c48e3dbeed95a11fa8dbf1746a1d68 | from matrix import utils
from matrix import corpus |
py | 1a3953dd34e86e7c40d8719b3523768bad2568ba | ''' Utils module '''
import os
import re
import json
import shutil
import subprocess
from colorama import Fore, Style, init
from lxml import etree
import __main__
from modules.utils.exceptions import NotCreatedDescribeLog
from modules.utils.models import MetadataType, MetadataTypeFromJSON
init(autoreset=True)
INFO_TAG = f'{Fore.YELLOW}[INFO]{Fore.RESET}'
ERROR_TAG = f'{Fore.RED}[ERROR]{Fore.RESET}'
WARNING_TAG = f'{Fore.MAGENTA}[WARNING]{Fore.RESET}'
FATAL_LINE = f'{Fore.RED}[FATAL]'
SUCCESS_LINE = f'{Fore.GREEN}[SUCCESS]'
WARNING_LINE = f'{Fore.MAGENTA}[WARNING]'
API_VERSION = '44.0'
DELTA_FOLDER = 'srcToDeploy'
SOURCE_FOLDER = 'src'
TEMPLATE_FILE = "expansionPanels.html"
PWD = os.path.dirname(os.path.realpath(__main__.__file__))
FOLDER_PATTERN = ['│ ', ' ']
FILE_PATTERN = ['├─ ', '└─ ']
ENV_PROJECT_ID = 'gitMergeRequestTargetProjectId'
ENV_GITLAB_ACCESS_TOKEN = 'GITLAB_ACCESS_TOKEN'
def write_file(folder, filename, content, print_log=False):
''' Writes into a file, creating the folders if not exists '''
if print_log:
print(f'\t- Writting \'{filename}\' in \'{folder}\''
f'{Style.NORMAL}')
if not os.path.exists(folder):
os.makedirs(folder)
with open(f'{folder}/{filename}', 'w', encoding='utf-8') as output_file:
output_file.write(content)
def call_subprocess(command, verbose=True):
''' Calls subprocess, returns output and return code,
if verbose flag is active it will print the output '''
try:
stdout = subprocess.check_output(command, stderr=subprocess.STDOUT,
shell=True).decode('utf-8')
if verbose:
print_output(f'{Style.DIM}{stdout}{Style.NORMAL}')
return stdout, 0
except subprocess.CalledProcessError as exc:
output = exc.output.decode('utf-8')
returncode = exc.returncode
if verbose:
print(f'{ERROR_TAG} Subprocess returned non-zero exit '
f'status {returncode}')
print_output(output, color=Fore.RED)
return output, returncode
def pprint_xml(xml, declaration=True):
''' Pretty print the passed xml '''
return etree.tostring(xml, pretty_print=True,
encoding='utf-8',
xml_declaration=declaration).decode('utf-8')
def print_apiname(apiname, package_name):
''' Print a warning message '''
indent = ' ' * 3
print(f'{Style.DIM}{indent}▶︎ {Fore.GREEN}[{package_name}] {Fore.MAGENTA}'
f'{apiname} {Fore.RESET}')
def print_differences(child_xml_name, added, modified, erased):
''' Pretty print differences '''
if added or modified or erased:
added_string = __difference_line(f'Added ({len(added)})',
sorted(added))
modified_string = __difference_line(f'Modified ({len(modified)})',
sorted(modified))
erased_string = __difference_line(f'Erased ({len(erased)})',
sorted(erased))
indent = ' ' * 6
print(f'{Style.DIM}{indent}► {Fore.MAGENTA}{child_xml_name}'
f'{Fore.RESET}:\n{added_string}{modified_string}{erased_string}',
end='')
def print_warning(message):
''' Print a warning message '''
indent = ' ' * 6
print(f'{Style.DIM}{indent}⚠ {Fore.MAGENTA}{message}{Fore.RESET}')
def __difference_line(name, values):
''' Returns a formated string with the Difference type and values '''
if values:
indent = ' ' * 9
return f'{indent}▹ {Fore.YELLOW}{name}{Fore.RESET}: {values}\n'
return ''
def print_output(output, color='', tab_level=1):
''' Prints output in the color passed '''
formated = '\t' * tab_level + output.replace('\n', '\n' + '\t' * tab_level)
print(f'{color}{formated}{Fore.RESET}')
def truncate_string(value, size, fill=False):
''' Truncates a tring to passed size '''
string_value = str(value)
if len(string_value) > size:
return string_value[:size].strip() + (string_value[size:] and '...')
if fill:
return string_value.ljust(size, ' ')
return string_value
def copy_parents(src, dest_folder, dir_offset=0):
''' Copies src tree into dest, offset (optional) omits n
folders of the src path'''
if src.endswith('-meta.xml'): # if its meta file, erase meta part
src = src[:-len('-meta.xml')]
prev_offset = (0 if dir_offset == 0 else
src.replace('/', '%', dir_offset - 1).find('/') + 1)
post_offset = src.rfind('/')
src_dirs = '' if post_offset == -1 else src[prev_offset:post_offset]
src_filename = src[post_offset + 1:]
os.makedirs(f'{dest_folder}/{src_dirs}', exist_ok=True)
dest_file_path = f'{dest_folder}/{src_dirs}/{src_filename}'
copy_file(src, dest_file_path, True)
copy_file(f'{src}-meta.xml', f'{dest_file_path}-meta.xml', True)
def copy_file(src, dest, handle_errors):
''' Copy a file from source to dest, if handle flag is activated,
an an exception is launch while trying to copy it will not fail '''
try:
shutil.copy(src, dest)
except Exception as exception: # noqa # pylint: disable=W0703,W0612
if not handle_errors:
raise exception
else:
pass # TODO log verbose level
def get_xml_names(filepath):
''' Extracts the xml names from a describe '''
if not os.path.isfile(filepath):
raise NotCreatedDescribeLog(filepath)
with open(filepath, 'r') as file:
try:
data = json.load( file )
isJSON = True
except:
data = file.read()
isJSON = False
if isJSON:
dictionary = getXmlNamesFromJSON( data )
else:
dictionary = getXmlNamesFromLog( data )
return dictionary
def getXmlNamesFromJSON(data):
dictionary = {}
for metadataInfo in data[ 'metadataObjects' ]:
inFolder = metadataInfo[ 'inFolder' ]
hasMetadata = metadataInfo[ 'metaFile' ]
childObjects = metadataInfo[ 'childXmlNames' ] if 'childXmlNames' in metadataInfo else []
suffix = metadataInfo[ 'suffix' ] if 'suffix' in metadataInfo else ''
xmlName = metadataInfo[ 'xmlName' ]
dirName = metadataInfo[ 'directoryName' ]
dictKey = dirName
if 'territory2Models' == dirName and 'territory2Model' != suffix:
dictKey = suffix
dictionary[ dictKey ] = MetadataTypeFromJSON( xmlName, dirName, suffix, hasMetadata, inFolder, childObjects )
return dictionary
def getXmlNamesFromLog( data ):
regex_string = (r'\*+\nXMLName: ([a-zA-Z0-9]+)\nDirName: ([a-zA-Z0-9]+)\nSuffix:'
r' ([a-zA-Z0-9]+)\nHasMetaFile: ([a-zA-Z]+)\nInFolder:'
r' ([a-zA-Z]+)\nChildObjects: (?:([a-zA-Z,]+),|)\*+')
xml_names = re.findall(regex_string, data, re.MULTILINE)
dictionary = dict()
for (xml_name, dir_name, suffix, has_metadata,
in_folder, child_objects) in xml_names:
in_folder = castToBoolean( in_folder )
has_metadata = castToBoolean( has_metadata )
dict_key = dir_name
if 'territory2Models' == dir_name and 'territory2Model' != suffix:
dict_key = suffix
dictionary[dict_key] = MetadataType(xml_name, dir_name, suffix,
has_metadata, in_folder,
child_objects)
return dictionary
def castToBoolean( value ):
boolValue = False
if 'true' == value:
boolValue = True
return boolValue
def tree(path, do_output=True, print_hidden=False, max_depth=100, margin=1):
"""Print file and directory tree starting at path.
By default, it prints upto a depth of 100 and doesn't print hidden files,
ie. files whose name begin with a '.'. It can be modified to only return
the count of files and directories, and not print anything.
Returns the tuple of number of files and number of directories
"""
def _tree(path, depth, margin, output):
file_count, directory_count = 0, 0
files = sorted((os.path.join(path, filename)
for filename in os.listdir(path)
if print_hidden or not filename.startswith('.')),
key=lambda s: s.lower())
files_count = len(files)
for i, filepath in enumerate(files, start=1):
# Print current file, based on previously gathered info
if do_output:
folder_lines = ''.join(FOLDER_PATTERN[folder]
for folder in parent_folders)
corner = FILE_PATTERN[i == files_count]
file_name = os.path.basename(filepath)
margin_str = '\t' * margin
output += f'{margin_str}{folder_lines}{corner}{file_name}\n'
# Recurse if we find a new subdirectory
if os.path.isdir(filepath) and depth < max_depth:
# Append whether current directory is last in current list
parent_folders.append(i == files_count)
# Print subdirectory and get numbers
output, subdir_file_count, subdir_directory_count = \
_tree(os.path.join(filepath), depth + 1, margin, output)
# Back in current directory, remove the newly added directory
parent_folders.pop()
# Update counters
file_count += subdir_file_count
directory_count += subdir_directory_count + 1
elif os.path.isdir(filepath):
directory_count += 1
else:
file_count += 1
return output, file_count, directory_count
parent_folders = []
output, file_count, directory_count = _tree(path, 1, margin, '')
output += f'\n\t{file_count} files, {directory_count} directories\n'
print(f'{Style.DIM}{output}{Style.NORMAL}')
def remove_file(file_path):
''' Removes file if exists '''
if os.path.exists(file_path):
os.remove(file_path)
def check_exist(path):
''' Detects if a file exists '''
if not os.path.exists(path):
print(f"{INFO_TAG} The path {path} didn't exists.")
return False
return True
def print_key_value_list(top_message, items):
''' Prints a key value list '''
message = f'{top_message}\n'
for key, value in items:
message += f'{key_value_list(key, value)}\n'
print(message)
def key_value_list(key, value):
''' Returns a pretty formated list, with key in cyan '''
return f'\t- {Fore.CYAN}{key}{Fore.RESET}: {value}'
def get_first_set_value(values):
''' Extracts the first value of the passed set '''
value = values.pop()
values.add(value)
return value
|
py | 1a39541438b0a5377534ea98a3ccb62762f73cb7 | # Copyright 2019 U.C. Berkeley RISE Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modifications copyright (C) 2021 Taras Lykhenko, Rafael Soares
from anna.anna_pb2 import (
# Anna's lattice types as an enum
LWW, SET, ORDERED_SET, SINGLE_CAUSAL, MULTI_CAUSAL, PRIORITY, WREN,
# Serialized representations of Anna's lattices
LWWValue, SetValue, SingleKeyCausalValue, MultiKeyCausalValue, PriorityValue, MultiKeyWrenValue
)
class Lattice:
def __init__(self):
raise NotImplementedError
def __str__(self):
return str(self.reveal())
def __eq__(self, other):
if other is None:
return False
if type(other) != type(self):
return False
return self.reveal() == other.reveal()
def reveal(self):
'''
The reveal method returns an unwrapped version of the data underlying
data structure stored by the lattice.
'''
raise NotImplementedError
def assign(self, value):
'''
Assigns a new value to the lattice -- this must be the same as the type
expected when creating an instance of a particular lattice.
'''
raise NotImplementedError
def merge(self, other):
'''
Merge two lattices into one. How the merge function works is contingent
on what the underlying data structure us.
'''
raise NotImplementedError
def serialize(self):
'''
Serializes the underlying data structure, including metadata relevant
to the lattice, into a protobuf and returns a protobuf object along
with an enum tag indicating the type of this lattice.
'''
raise NotImplementedError
class LWWPairLattice(Lattice):
def __init__(self, timestamp, value, promise = 0):
if type(timestamp) != int or type(value) != bytes or type(promise) != int:
raise ValueError('LWWPairLattice must be a timestamp-bytes pair.')
self.ts = timestamp
self.val = value
self.promise = promise
def reveal(self):
return self.val
def assign(self, value):
if type(value) == str:
value = bytes(value, 'utf-8')
if type(value) != tuple or type(value[0]) != int \
or type(value[2]) != bytes or type(value[1]) != int:
raise ValueError('LWWPairLattice must be a timestamp-bytes pair.')
self.ts = value[0]
self.promise = value[1]
self.val = value[2]
def merge(self, other):
if other.ts > self.ts:
return other
else:
return self
def serialize(self):
res = LWWValue()
res.timestamp = self.ts
res.value = self.val
res.promise = self.promise
return res, LWW
class WrenLattice(Lattice):
def __init__(self, timestamp, value, promise = 0):
if type(timestamp) != int or type(value) != bytes or type(promise) != int:
raise ValueError('LWWPairLattice must be a timestamp-bytes pair.')
self.ts = timestamp
self.val = value
self.promise = promise
def reveal(self):
return self.val
def assign(self, value):
if type(value) == str:
value = bytes(value, 'utf-8')
if type(value) != tuple or type(value[0]) != int \
or type(value[2]) != bytes or type(value[1]) != int:
raise ValueError('LWWPairLattice must be a timestamp-bytes pair.')
self.ts = value[0]
self.promise = value[1]
self.val = value[2]
def merge(self, other):
if other.ts > self.ts:
return other
else:
return self
def serialize(self):
res = LWWValue()
res.timestamp = self.ts
res.value = self.val
res.promise = self.promise
return res, WREN
class SetLattice(Lattice):
def __init__(self, value=set()):
if type(value) != set:
raise ValueError('SetLattice can only be formed from a set.')
self.val = value
def reveal(self):
return self.val
def assign(self, value):
if type(value) != set:
raise ValueError('SetLattice can only be formed from a set.')
self.val = value
def merge(self, other):
if type(other) != SetLattice:
raise ValueError('Cannot merge SetLattice with invalid type ' +
str(type(other)) + '.')
new_set = set()
for v in other.val:
new_set.insert(v)
for v in self.val:
new_set.insert(v)
return SetLattice(new_set)
def serialize(self):
res = SetValue()
for v in self.val:
if type(v) != bytes:
raise ValueError('Unsupported type %s in SetLattice!' %
(str(type(v))))
res.values.append(v)
return res, SET
# A wrapper class that implements some convenience OrderedSet operations on
# top of a list. # We use this because it is way cheaper to deserialize into,
# at the cost of having expensive reordering operations (e.g. random insert),
# which we expect to be rare for our use cases (we will almost always be
# inserting at the end).
class ListBasedOrderedSet:
# Preconditions: iterable's elements are unique and sorted ascending.
# Behaviour is undefined if it is not.
def __init__(self, iterable=[]):
self.lst = []
for val in iterable:
self.insert(val)
# Inserts a value, maintaining sorted order.
def insert(self, value):
# Microoptimization for the common case.
if len(self.lst) == 0:
self.lst.append(value)
elif value > self.lst[-1]:
self.lst.append(value)
else:
idx, present = self._index_of(value)
if not present:
self.lst.insert(idx, value)
# Finds the index of an element, or where to insert it if you want to
# maintain sorted order.
# Returns (int index, bool present).
# E.g. _index_of(lst, 'my-value') -> (42, true)
# => lst[42] = 'my-value'
# _index_of(lst, 'my-value') -> (42, false)
# => lst[41] < 'my-value' < lst[42]
def _index_of(self, value):
low = 0
high = len(self.lst)
while low < high:
middle = low + int((high - low) / 2)
pivot = self.lst[middle]
if value == pivot:
return (middle, True)
elif value < pivot:
high = middle
elif pivot < value:
low = middle + 1
return (low, False)
class OrderedSetLattice(Lattice):
def __init__(self, value=ListBasedOrderedSet()):
if type(value) != ListBasedOrderedSet:
raise ValueError('OrderedSetLattice can only be formed from a '
+ 'ListBasedOrderedSet.')
self.val = value
def reveal(self):
return self.val.lst
def assign(self, value):
if type(value) != ListBasedOrderedSet:
raise ValueError('OrderedSetLattice can only be formed from a' +
' ListBasedOrderedSet.')
self.val = value
def merge(self, other):
if type(other) != OrderedSetLattice:
raise ValueError('Cannot merge OrderedSetLattice with type ' +
str(type(other)) + '.')
# Merge the two sorted lists by lockstep merge.
# Note that reconstruction is faster than in-place merge.
new_lst = []
other = other.reveal().lst
us = self.val.lst
i, j = 0, 0 # Earliest unmerged indices.
while i < len(us) or j < len(other):
if i == len(us):
new_lst.extend(other[j:])
break
elif j == len(other):
new_lst.extend(us[i:])
break
else:
a = us[i]
b = other[j]
if a == b:
new_lst.append(a)
i += 1
j += 1
elif a < b:
new_lst.append(a)
i += 1
elif b < a:
new_lst.append(b)
j += 1
return OrderedSetLattice(ListBasedOrderedSet(new_lst))
def serialize(self):
res = SetValue()
res.values.extend(self.val.lst)
return res, ORDERED_SET
class MaxIntLattice(Lattice):
def __init__(self, value):
if type(value) != int:
raise ValueError('MaxIntLattice only accepts integers.')
self.value = value
def reveal(self):
return self.value
def assign(self, value):
if type(value) != int:
raise ValueError('MaxIntLattice only accepts integers.')
self.value = value
def merge(self, other):
if type(other) != MaxIntLattice:
raise ValueError('Cannot merge MaxIntLattice with type ' +
str(type(other)) + '.')
if other.value > self.value:
self.value = other.value
class MapLattice(Lattice):
def __init__(self, mp):
if type(mp) != dict:
raise ValueError('MapLattice only accepts dict data structures.')
self.mp = mp
def reveal(self):
return self.mp
def assign(self, mp):
if type(mp) != dict:
raise ValueError('MapLattice only accepts dict data structures.')
self.mp = mp
def merge(self, other):
if type(other) != MapLattice:
raise ValueError('Cannot merge MapLattice with type ' +
str(type(other)) + '.')
for key in other.mp.keys:
if key in self.mp:
if (not isinstance(self.mp[key], Lattice) or not
isinstance(other.mp[key], Lattice)):
raise ValueError('Cannot merge a MapLattice with values' +
' that are not lattice types.')
self.mp[key].merge(other.mp[key])
else:
self.mp[key] = other.mp[key]
def copy(self):
return MapLattice(self.mp.copy())
class VectorClock(MapLattice):
def __init__(self, mp, deserialize=False):
if type(mp) != dict:
raise ValueError('VectorClock must be a dict, not {type(mp)}.')
if deserialize:
self.mp = VectorClock._deserialize(mp)
else:
VectorClock._validate_vc(mp)
self.mp = mp
def _deserialize(mp):
result = {}
for key in mp:
if type(mp[key]) != int:
raise ValueError('Cannot deserialize VectorClock from'
+ ' non-integer values.')
result[key] = MaxIntLattice(mp[key])
return result
def _validate_vc(mp):
for val in mp.values():
if type(val) != MaxIntLattice:
raise ValueError(('VectorClock values must be MaxIntLattices,'
+ ' not %s.') % str(type(val)))
def assign(self, mp):
if type(mp) != dict:
raise ValueError('VectorClock must be a dict.')
VectorClock._validate_vc(mp)
self.mp = mp
def update(self, key, count):
if key in self.mp:
lattice = MaxIntLattice(count)
self.mp[key].merge(lattice)
def serialize(self, pobj):
for key in self.mp:
pobj[key] = self.mp[key].reveal()
class SingleKeyCausalLattice(Lattice):
def __init__(self, vector_clock, value):
if type(vector_clock) != VectorClock:
raise ValueError('Vector clock of SingleKeyCausalLattice must be a'
+ ' VectorClock.')
if type(value) != SetLattice:
raise ValueError('Value of SingleKeyCausalLattice must be a'
+ ' SetLattice.')
self.vector_clock = vector_clock
self.value = value
def reveal(self):
return list(self.value.reveal())
def assign(self, value):
if type(value) != SetLattice:
raise ValueError('Value of SingleKeyCausalLattice must be a'
+ ' SetLattice.')
self.value = value
def merge(self, other):
if type(other) != SingleKeyCausalLattice:
raise ValueError('Cannot merge SingleKeyCausalLattice with type ' +
str(type(other)) + '.')
previous = self.vector_clock.copy()
self.vector_clock.merge(other.vector_clock)
if self.vector_clock == other.vector_clock:
# The other version dominates this version.
self.value = other.value
elif self.vector_clock != previous:
# The versions are concurrent.
self.value.merge(other.value)
else:
# This version dominates, so we do nothing.
pass
def serialize(self):
skcv = SingleKeyCausalValue()
# Serialize the vector clock for this particular lattice by adding each
# key-counter pair.
self.vector_clock.serialize(skcv.vector_clock)
# Add the value(s) stored by this lattice.
for v in self.value:
skcv.values.add(v)
return skcv, SINGLE_CAUSAL
class MultiKeyCausalLattice(Lattice):
def __init__(self, vector_clock, dependencies, value):
if type(vector_clock) != VectorClock:
raise ValueError('Vector clock of MultiKeyCausalLattice must be a'
+ ' VectorClock.')
if type(dependencies) != MapLattice:
raise ValueError('Dependency set of MultiKeyCausalLattice must be'
+ ' a MapLattice.')
if type(value) != SetLattice:
raise ValueError('Value of MultiKeyCausalLattice must be a'
+ ' SetLattice.')
self.vector_clock = vector_clock
self.dependencies = dependencies
self.value = value
def reveal(self):
return list(self.value.reveal())
def assign(self, value):
if type(value) != SetLattice:
raise ValueError('Value of MultiKeyCausalLattice must be a'
+ ' SetLattice.')
self.value = value
def merge(self, other):
if type(other) != MultiKeyCausalLattice:
raise ValueError('Cannot merge MultiKeyCausalLattice with type ' +
str(type(other)) + '.')
previous = self.vector_clock.copy()
self.vector_clock.merge(other.vector_clock)
if self.vector_clock == other.vector_clock:
# other version dominates this version
self.dependencies = other.dependencies
self.value = other.value
elif self.vector_clock != previous:
# versions are concurrent
self.dependencies.merge(other.dependencies)
self.value.merge(other.value)
else:
# this version dominates, so we do nothing
pass
def serialize(self):
mkcv = MultiKeyCausalValue()
# Serialize the vector clock for this particular lattice by adding each
# key-counter pair.
self.vector_clock.serialize(mkcv.vector_clock)
# Serialize the vector clocks for each of the keys this lattice depends
# on.
for key in self.dependencies:
kv = mkcv.add_dependences()
kv.key = key
self.dependencies[key].serialize(kv.vector_clock)
# Add the value(s) stored by this lattice.
for v in self.value:
mkcv.values.add(v)
return mkcv, MULTI_CAUSAL
class PriorityLattice(Lattice):
def __init__(self, priority, value):
if type(priority) != float or type(value) != bytes:
raise ValueError('PriorityLattice must be a double-bytes pair.')
self.priority = priority
self.value = value
def reveal(self):
return self.value
def assign(self, value):
if type(value) != str:
value = bytes(value, 'utf-8')
if type(value) != tuple or type(value[0]) != float or type(value[1]) != bytes:
raise ValueError('PriorityLattice must be a double-bytes pair.')
self.priority = value[0]
self.value = value[1]
def merge(self, other):
if other.priority < self.priority:
return other
else:
return self
def serialize(self):
res = PriorityValue()
res.priority = self.priority
res.value = self.value
return res, PRIORITY
|
py | 1a39542b481588bc8fe3f8accd1d1048f8d05a05 | import random
def shuffle(string):
temporarylist = list(string)
random.shuffle(temporarylist)
return ''.join(temporarylist)
uppercaseLetter1=chr(random.randint(65,90))
lowercaseLetter1=chr(random.randint(ord('a'), ord('z')))
uppercaseLetter2=chr(random.randint(65,90))
lowercaseLetter2=chr(random.randint(ord('a'), ord('z')))
uppercaseLetter3=chr(random.randint(65,90))
lowercaseLetter3=chr(random.randint(ord('a'), ord('z')))
uppercaseLetter4=chr(random.randint(65,90))
lowercaseLetter4=chr(random.randint(ord('a'), ord('z')))
uppercaseLetter5=chr(random.randint(65,90))
lowercaseLetter5=chr(random.randint(ord('a'), ord('z')))
uppercaseLetter6=chr(random.randint(65,90))
lowercaseLetter6=chr(random.randint(ord('a'), ord('z')))
uppercaseLetter7=chr(random.randint(65,90))
lowercaseLetter7=chr(random.randint(ord('a'), ord('z')))
number_1 = random.randint(0,9)
number_2 = random.randint(0,9)
number_3 = random.randint(0,9)
number_4 = random.randint(0,9)
number_5 = random.randint(0,9)
password = uppercaseLetter7+lowercaseLetter1+uppercaseLetter1+lowercaseLetter2+uppercaseLetter2+lowercaseLetter3+uppercaseLetter3+lowercaseLetter4+uppercaseLetter4+lowercaseLetter5+uppercaseLetter5+lowercaseLetter6+uppercaseLetter6+lowercaseLetter7+str(number_1)+str(number_2)+str(number_3)+str(number_4)+str(number_5)
password = shuffle(password)
print(password) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.