max_stars_repo_path (stringlengths 4–245) | max_stars_repo_name (stringlengths 7–115) | max_stars_count (int64 101–368k) | id (stringlengths 2–8) | content (stringlengths 6–1.03M)
---|---|---|---|---|
netbox/extras/api/nested_serializers.py | TheFlyingCorpse/netbox | 4,994 | 11071626 | <reponame>TheFlyingCorpse/netbox<filename>netbox/extras/api/nested_serializers.py<gh_stars>1000+
from rest_framework import serializers
from extras import choices, models
from netbox.api import ChoiceField, WritableNestedSerializer
from netbox.api.serializers import NestedTagSerializer
from users.api.nested_serializers import NestedUserSerializer
__all__ = [
'NestedConfigContextSerializer',
'NestedCustomFieldSerializer',
'NestedCustomLinkSerializer',
'NestedExportTemplateSerializer',
'NestedImageAttachmentSerializer',
'NestedJobResultSerializer',
'NestedJournalEntrySerializer',
'NestedTagSerializer', # Defined in netbox.api.serializers
'NestedWebhookSerializer',
]
class NestedWebhookSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='extras-api:webhook-detail')
class Meta:
model = models.Webhook
fields = ['id', 'url', 'display', 'name']
class NestedCustomFieldSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='extras-api:customfield-detail')
class Meta:
model = models.CustomField
fields = ['id', 'url', 'display', 'name']
class NestedCustomLinkSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='extras-api:customlink-detail')
class Meta:
model = models.CustomLink
fields = ['id', 'url', 'display', 'name']
class NestedConfigContextSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='extras-api:configcontext-detail')
class Meta:
model = models.ConfigContext
fields = ['id', 'url', 'display', 'name']
class NestedExportTemplateSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='extras-api:exporttemplate-detail')
class Meta:
model = models.ExportTemplate
fields = ['id', 'url', 'display', 'name']
class NestedImageAttachmentSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='extras-api:imageattachment-detail')
class Meta:
model = models.ImageAttachment
fields = ['id', 'url', 'display', 'name', 'image']
class NestedJournalEntrySerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='extras-api:journalentry-detail')
class Meta:
model = models.JournalEntry
fields = ['id', 'url', 'display', 'created']
class NestedJobResultSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='extras-api:jobresult-detail')
status = ChoiceField(choices=choices.JobResultStatusChoices)
user = NestedUserSerializer(
read_only=True
)
class Meta:
model = models.JobResult
fields = ['url', 'created', 'completed', 'user', 'status']
|
AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_codecmaps_cn.py | CEOALT1/RefindPlusUDK | 2,757 | 11071636 |
#!/usr/bin/env python
#
# test_codecmaps_cn.py
# Codec mapping tests for PRC encodings
#
from test import test_support
from test import test_multibytecodec_support
import unittest
class TestGB2312Map(test_multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'gb2312'
mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-CN.TXT'
class TestGBKMap(test_multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'gbk'
mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/VENDORS/' \
'MICSFT/WINDOWS/CP936.TXT'
class TestGB18030Map(test_multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'gb18030'
mapfileurl = 'http://source.icu-project.org/repos/icu/data/' \
'trunk/charset/data/xml/gb-18030-2000.xml'
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
orator/dbal/column_diff.py | wjzero/orator | 1,484 | 11071637 |
# -*- coding: utf-8 -*-
from .identifier import Identifier
class ColumnDiff(object):
def __init__(
self, old_column_name, column, changed_properties=None, from_column=None
):
self.old_column_name = old_column_name
self.column = column
self.changed_properties = changed_properties
self.from_column = from_column
def has_changed(self, property_name):
return property_name in self.changed_properties
def get_old_column_name(self):
return Identifier(self.old_column_name)
|
lldb/packages/Python/lldbsuite/test/lldbplatformutil.py | uber-common/llvm-project | 456 | 11071641 |
""" This module contains functions used by the test cases to hide the
architecture and/or the platform dependent nature of the tests. """
from __future__ import absolute_import
# System modules
import itertools
import re
import subprocess
import sys
import os
# Third-party modules
import six
from six.moves.urllib import parse as urlparse
# LLDB modules
from . import configuration
import lldb
import lldbsuite.test.lldbplatform as lldbplatform
def check_first_register_readable(test_case):
arch = test_case.getArchitecture()
if arch in ['x86_64', 'i386']:
test_case.expect("register read eax", substrs=['eax = 0x'])
elif arch in ['arm', 'armv7', 'armv7k']:
test_case.expect("register read r0", substrs=['r0 = 0x'])
elif arch in ['aarch64', 'arm64', 'arm64e', 'arm64_32']:
test_case.expect("register read x0", substrs=['x0 = 0x'])
elif re.match("mips", arch):
test_case.expect("register read zero", substrs=['zero = 0x'])
elif arch in ['s390x']:
test_case.expect("register read r0", substrs=['r0 = 0x'])
elif arch in ['powerpc64le']:
test_case.expect("register read r0", substrs=['r0 = 0x'])
else:
# TODO: Add check for other architectures
test_case.fail(
"Unsupported architecture for test case (arch: %s)" %
test_case.getArchitecture())
def _run_adb_command(cmd, device_id):
device_id_args = []
if device_id:
device_id_args = ["-s", device_id]
full_cmd = ["adb"] + device_id_args + cmd
p = subprocess.Popen(
full_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return p.returncode, stdout, stderr
def target_is_android():
if not hasattr(target_is_android, 'result'):
triple = lldb.DBG.GetSelectedPlatform().GetTriple()
match = re.match(".*-.*-.*-android", triple)
target_is_android.result = match is not None
return target_is_android.result
def android_device_api():
if not hasattr(android_device_api, 'result'):
assert configuration.lldb_platform_url is not None
device_id = None
parsed_url = urlparse.urlparse(configuration.lldb_platform_url)
host_name = parsed_url.netloc.split(":")[0]
if host_name != 'localhost':
device_id = host_name
if device_id.startswith('[') and device_id.endswith(']'):
device_id = device_id[1:-1]
retcode, stdout, stderr = _run_adb_command(
["shell", "getprop", "ro.build.version.sdk"], device_id)
if retcode == 0:
android_device_api.result = int(stdout)
else:
raise LookupError(
">>> Unable to determine the API level of the Android device.\n"
">>> stdout:\n%s\n"
">>> stderr:\n%s\n" %
(stdout, stderr))
return android_device_api.result
def match_android_device(device_arch, valid_archs=None, valid_api_levels=None):
if not target_is_android():
return False
if valid_archs is not None and device_arch not in valid_archs:
return False
if valid_api_levels is not None and android_device_api() not in valid_api_levels:
return False
return True
def finalize_build_dictionary(dictionary):
if target_is_android():
if dictionary is None:
dictionary = {}
dictionary["OS"] = "Android"
dictionary["PIE"] = 1
return dictionary
def getHostPlatform():
"""Returns the host platform running the test suite."""
# Attempts to return a platform name matching a target Triple platform.
if sys.platform.startswith('linux'):
return 'linux'
elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
return 'windows'
elif sys.platform.startswith('darwin'):
return 'darwin'
elif sys.platform.startswith('freebsd'):
return 'freebsd'
elif sys.platform.startswith('netbsd'):
return 'netbsd'
else:
return sys.platform
def getDarwinOSTriples():
return ['darwin', 'macosx', 'ios', 'watchos', 'tvos', 'bridgeos']
def getPlatform():
"""Returns the target platform which the tests are running on."""
platform = lldb.DBG.GetSelectedPlatform().GetTriple().split('-')[2]
if platform.startswith('freebsd'):
platform = 'freebsd'
elif platform.startswith('netbsd'):
platform = 'netbsd'
return platform
def platformIsDarwin():
"""Returns true if the OS triple for the selected platform is any valid apple OS"""
return getPlatform() in getDarwinOSTriples()
def findMainThreadCheckerDylib():
if not platformIsDarwin():
return ""
if getPlatform() in lldbplatform.translate(lldbplatform.darwin_embedded):
return "/Developer/usr/lib/libMainThreadChecker.dylib"
with os.popen('xcode-select -p') as output:
xcode_developer_path = output.read().strip()
mtc_dylib_path = '%s/usr/lib/libMainThreadChecker.dylib' % xcode_developer_path
if os.path.isfile(mtc_dylib_path):
return mtc_dylib_path
return ""
class _PlatformContext(object):
"""Value object class which contains platform-specific options."""
def __init__(self, shlib_environment_var, shlib_prefix, shlib_extension):
self.shlib_environment_var = shlib_environment_var
self.shlib_prefix = shlib_prefix
self.shlib_extension = shlib_extension
def createPlatformContext():
if platformIsDarwin():
return _PlatformContext('DYLD_LIBRARY_PATH', 'lib', 'dylib')
elif getPlatform() in ("freebsd", "linux", "netbsd"):
return _PlatformContext('LD_LIBRARY_PATH', 'lib', 'so')
else:
return None
def hasChattyStderr(test_case):
"""Some targets produce garbage on the standard error output. This utility function
determines whether the tests can be strict about the expected stderr contents."""
if match_android_device(test_case.getArchitecture(), ['aarch64'], range(22, 25+1)):
return True # The dynamic linker on the device will complain about unknown DT entries
return False
|
tests/testrun.py | yeti-threatintel/yeti | 1,250 | 11071677 |
import sys
from os import path
from datetime import timedelta
from mongoengine import connect
YETI_ROOT = path.normpath(path.dirname(path.dirname(path.abspath(__file__))))
sys.path.append(YETI_ROOT)
from core.config.config import yeti_config
from core.entities.malware import MalwareFamily, Malware
from core.indicators import Regex, Indicator
from core.database import Link
from core.entities import TTP, Exploit, ExploitKit
from core.observables import Observable
from core.observables import Tag
from core.exports import Export, ExportTemplate
## Clean slate
db = connect("yeti", host=yeti_config.mongodb.host)
db.drop_database("yeti")
## Populate database with initial values
mailer = MalwareFamily("mailer").save()
banker = MalwareFamily("banker").save()
worm = MalwareFamily("worm").save()
ransomware = MalwareFamily("ransomware").save()
backdoor = MalwareFamily("backdoor").save()
stealer = MalwareFamily("stealer").save()
passwordstealer = MalwareFamily("passwordstealer").save()
rootkit = MalwareFamily("rootkit").save()
trojan = MalwareFamily("trojan").save()
dropper = MalwareFamily("dropper").save()
# Malware
e = ExploitKit(name="Angler").save()
e = ExploitKit(name="Neutrino").save()
e = Malware(name="Pony").save()
e.family = dropper
e.save()
e = ExploitKit(name="Magnitude").save()
e = ExploitKit(name="Fiesta").save()
e = ExploitKit(name="Nuclear").save()
e = Malware(name="Asprox").save()
e.family = dropper
e.save()
e = Malware(name="Neverquest").save()
e.family = trojan
e.save()
e = ExploitKit(name="Sweet Orange").save()
e = Malware(name="DarkComet").save()
e.family = trojan
e.save()
e = Malware(name="Upatre").save()
e.family = trojan
e.save()
e = ExploitKit(name="RIG").save()
e = Malware(name="CryptoWall").save()
e.family = ransomware
e.save()
e = Malware(name="Dridex").save()
e.family = trojan
e.save()
e = ExploitKit(name="BlackHole").save()
e = Malware(name="AlienSpy").save()
e.family = trojan
e.save()
e = Malware(name="Andromeda").save()
e.family = dropper
e.save()
e = Malware(name="Dyre").save()
e.family = trojan
e.save()
e = Exploit(name="CVE-2015-3113").save()
e = Malware(name="Teslacrypt").save()
e.family = ransomware
e.save()
e = Malware(name="Alphacrypt").save()
e.family = ransomware
e.save()
e = Malware(name="Locky").save()
e.family = ransomware
e.save()
t1 = Tag.get_or_create(name="zeus").add_produces(["crimeware", "banker", "malware"])
t2 = Tag.get_or_create(name="banker").add_produces(["crimeware", "malware"])
t3 = Tag.get_or_create(name="c2")
t3.add_replaces(["c&c", "cc"])
Tag.get_or_create(name="crimeware").add_produces("malware")
et = ExportTemplate(name="Default")
et.template = "{{ obs.value }}\n"
et.save()
et = ExportTemplate(name="Bluecoat")
et.template = """define category cert_blocklist
{% for obs in elements %}{{ obs.value }}
{% endfor %}end
"""
et.save()
Export(
name="TestExport",
acts_on="Url",
description="Test description",
frequency=timedelta(hours=1),
include_tags=[t1, t2],
template=et,
).save()
url = Observable.add_text("hxxp://zeuscpanel.com/gate.php")
url.tag(["zeus", "banker", "cc", "c2"])
print(url.tags)
# print url.find_tags()
# import pdb; pdb.set_trace()
## Create some instances of malware & co
bartalex = Malware.get_or_create(name="Bartalex")
bartalex.family = MalwareFamily.objects.get(name="dropper")
bartalex.killchain = "3"
bartalex.tags = ["bartalex"]
bartalex.save()
dridex = Malware.get_or_create(name="Dridex")
dridex.aliases = ["Cridex", "Drixed"]
dridex.family = MalwareFamily.objects.get(name="banker")
dridex.killchain = "7"
dridex.tags = ["dridex"]
dridex.save()
zeus = Malware.get_or_create(name="Zeus")
zeus.family = MalwareFamily.objects.get(name="banker")
zeus.killchain = "7"
zeus.tags = ["zeus"]
zeus.save()
## Create initial intelligence
# Indicators
bartalex_callback = Regex(name="Bartalex callback", pattern="/mg.jpg$")
bartalex_callback.description = "Bartalex [stage2] callback (extracted from macros)"
bartalex_callback.diamond = "capability"
bartalex_callback.location = "network"
bartalex_callback.save()
bartalex_callback.action(bartalex, "testrun", verb="indicates")
bartalex_callback2 = Regex(
name="Bartalex callback", pattern="/[0-9a-z]{7,8}/[0-9a-z]{7,8}.exe$"
)
bartalex_callback2.description = "Bartalex [stage2] callback (extracted from macros)"
bartalex_callback2.diamond = "capability"
bartalex_callback2.location = "network"
bartalex_callback2.save()
bartalex_callback2.action(bartalex, "testrun", verb="indicates")
bartalex_callback.action(dridex, "testrun", verb="hosts")
bartalex_callback2.action(dridex, "testrun", verb="hosts")
bartalex.action(dridex, "testrun", verb="drops")
zeus_callback = Regex(name="Zeus C2 check-in", pattern="/gate.php$")
zeus_callback.description = "ZeuS post-infection callback"
zeus_callback.diamond = "capability"
zeus_callback.location = "network"
zeus_callback.save()
zeus_callback.action(zeus, "testrun", verb="indicates")
# TTP
macrodoc = TTP(name="Macro-dropper")
macrodoc.killchain = "3"
macrodoc.description = "Macro-enabled MS Office document"
macrodoc.save()
bartalex.action(macrodoc, "testrun", verb="leverages")
bartalex.action(macrodoc, "testrun", verb="leverages")
bartalex.action(macrodoc, "testrun", verb="leverages")
bartalex_callback.action(macrodoc, "testrun", verb="seen in")
bartalex_callback2.action(macrodoc, "testrun", verb="seen in")
payload_download = TTP(name="Payload retrieval (HTTP)")
payload_download.killchain = "3"
payload_download.description = "Payload is retrieved from an external URL"
payload_download.save()
macrodoc.action(payload_download, "testrun", verb="leverages")
bartalex_callback.action(payload_download, "testrun", verb="indicates")
bartalex_callback2.action(payload_download, "testrun", verb="indicates")
# add observables
o1 = Observable.add_text("172.16.58.3")
# o2 = Observable.add_text("http://soccersisters.net/mg.jpg")
o3 = Observable.add_text("http://agentseek.com/mg.jpg")
o4 = Observable.add_text("http://www.delianfoods.com/5t546523/lhf3f334f.exe")
o5 = Observable.add_text("http://sanoko.jp/5t546523/lhf3f334f.exe")
o6 = Observable.add_text("http://hrakrue-home.de/87yte55/6t45eyv.exe")
Link.connect(o6, bartalex_callback2)
Link.connect(o6, bartalex).add_history("testrun", "Queries")
Link.connect(o6, dridex).add_history("testrun", "Drops")
o7 = Observable.add_text("http://kdojinyhb.wz.cz/87yte55/6t45eyv.exe")
o8 = Observable.add_text("http://kdojinyhb.wz.cz/87yte55/6t45eyv.exe2")
o9 = Observable.add_text("http://zeuscpanel.com/gate.php")
o9.tag("zeus")
t1 = Observable.add_text("http://toto.com")
t2 = Observable.add_text("Http://tata.com")
t3 = Observable.add_text("hxxp://tomchop[.]me")
l = Link.connect(t1, t2)
print("Links", Link.objects(src=t1))
t2.delete()
print("Links", Link.objects(src=t1))
test = "http://soccersisters.net/mg.jpg"
for i in Indicator.objects():
if i.match(test):
for type, nodes in i.neighbors().items():
print(" {}".format(type))
for l, node in nodes:
print({"type": type, "link": l.info(), "node": node.info()})
print("Test with the following:")
print(o3.value)
print(o7.value)
print(t1.value)
|
p2p/cancellable.py | jin10086/py-evm | 137 | 11071681 | <gh_stars>100-1000
from typing import (
Awaitable,
TypeVar,
)
from cancel_token import CancelToken
class CancellableMixin:
cancel_token: CancelToken = None
_TReturn = TypeVar('_TReturn')
async def wait(self,
awaitable: Awaitable[_TReturn],
token: CancelToken = None,
timeout: float = None) -> _TReturn:
"""See wait_first()"""
return await self.wait_first(awaitable, token=token, timeout=timeout)
async def wait_first(self,
*awaitables: Awaitable[_TReturn],
token: CancelToken = None,
timeout: float = None) -> _TReturn:
"""
Wait for the first awaitable to complete, unless we timeout or the token chain is triggered.
The given token is chained with this service's token, so triggering either will cancel
this.
Returns the result of the first one to complete.
Raises TimeoutError if we timeout or OperationCancelled if the token chain is triggered.
All pending futures are cancelled before returning.
"""
if token is None:
token_chain = self.cancel_token
else:
token_chain = token.chain(self.cancel_token)
return await token_chain.cancellable_wait(*awaitables, timeout=timeout)
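# Minimal usage sketch (illustrative only): it assumes the cancel_token
# package's CancelToken(name) constructor and a running asyncio event loop;
# the service and coroutine names below are hypothetical.
import asyncio
class ExamplePingService(CancellableMixin):
    def __init__(self) -> None:
        self.cancel_token = CancelToken('example-ping-service')
    async def _remote_ping(self) -> str:
        await asyncio.sleep(0.1)
        return 'pong'
    async def ping(self) -> str:
        # Resolves to 'pong' unless the token chain is triggered or 2 seconds
        # elapse, in which case wait() raises OperationCancelled or TimeoutError.
        return await self.wait(self._remote_ping(), timeout=2.0)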
|
testing/utils/client.py | FerrySchuller/remme-core | 129 | 11071686 | <filename>testing/utils/client.py
"""
Provide utilities that clients use to send transactions.
This is vendored code from the client library written in Python.
The core has the same utilities, but it will be easier to substitute these functions with
the client library functions (once it is published to PyPI) than to keep using the core ones.
References:
- https://github.com/Remmeauth/remme-client-python/blob/develop/remme/remme_utils.py
- https://github.com/Remmeauth/remme-client-python/blob/develop/remme/remme_public_key_storage.py
"""
import hashlib
import binascii
import ed25519
from sawtooth_signing import create_context, CryptoFactory
from cryptography.hazmat.primitives import (
hashes,
serialization,
)
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import (
padding,
rsa,
)
def proto_error_msg(proto, errors):
return f"Invalid protobuf data of \"{proto.__name__}\", detailed: {errors}"
def sha256_hexdigest(data):
return hashlib.sha256(data.encode('utf-8') if isinstance(data, str) else data).hexdigest()
def sha512_hexdigest(data):
return hashlib.sha512(data.encode('utf-8') if isinstance(data, str) else data).hexdigest()
def generate_address(_family_name, _public_key_to):
return sha512_hexdigest(_family_name)[:6] + sha512_hexdigest(_public_key_to)[:64]
def generate_rsa_keys():
private_key = rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=default_backend(),
)
public_key = private_key.public_key().public_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
return private_key, public_key
def generate_rsa_signature(data, private_key):
try:
data = data.encode('utf-8')
except AttributeError:
pass
return private_key.sign(data, padding.PKCS1v15(), hashes.SHA512())
def generate_ed25519_keys():
sk, vk = ed25519.create_keypair()
return sk, vk.to_bytes()
def generate_ed25519_signature(data, private_key):
try:
data = data.encode('utf-8')
except AttributeError:
pass
return private_key.sign(hashlib.sha512(data).digest())
def generate_ecdsa_keys():
context = create_context('secp256k1')
sk = CryptoFactory(context).new_signer(context.new_random_private_key())
return sk, sk.get_public_key().as_bytes()
def generate_ecdsa_signature(data, private_key):
try:
data = data.encode('utf-8')
except AttributeError:
pass
data = binascii.unhexlify(private_key.sign(data).encode('utf-8'))
return data
def generate_message(data):
return sha512_hexdigest(data)
def generate_entity_hash(message):
return message.encode('utf-8')
def generate_settings_address(key):
key_parts = key.split(".")[:4]
address_parts = [sha256_hexdigest(x)[0:16] for x in key_parts]
while (4 - len(address_parts)) != 0:
address_parts.append(sha256_hexdigest("")[0:16])
return "000000" + "".join(address_parts)
|
auctioning_platform/auctions_infrastructure/auctions_infrastructure/queries/__init__.py | nhdinh/smp-modulith | 299 | 11071688 | <reponame>nhdinh/smp-modulith<filename>auctioning_platform/auctions_infrastructure/auctions_infrastructure/queries/__init__.py
__all__ = ["SqlGetActiveAuctions", "SqlGetSingleAuction"]
from auctions_infrastructure.queries.auctions import SqlGetActiveAuctions, SqlGetSingleAuction
|
skompiler/toskast/sklearn/ensemble/forest.py | odinsemvosem/SKompiler | 112 | 11071689 | <filename>skompiler/toskast/sklearn/ensemble/forest.py
"""
Decision trees to SKAST
"""
from skompiler.dsl import sum_
from ..common import classifier
from ..tree.base import decision_tree
def random_forest_classifier(model, inputs, method="predict_proba"):
"""
Creates a SKAST expression corresponding to a given random forest classifier
"""
trees = [decision_tree(estimator.tree_, inputs, method="predict_proba", value_transform=lambda v: v/len(model.estimators_))
for estimator in model.estimators_]
return classifier(sum_(trees), method)
def random_forest_regressor(model, inputs):
"""
Creates a SKAST expression corresponding to a given random forest regressor
"""
return sum_([decision_tree(estimator.tree_, inputs=inputs, method="predict", value_transform=lambda v: v/len(model.estimators_))
for estimator in model.estimators_])
|
tests/artificial/gen_makefile.py | jmabry/pyaf | 377 | 11071699 |
import os
import glob
def mkdir_p(path):
try:
os.makedirs(path)
except:
pass
subdirs = glob.glob("tests/artificial/transf_*");
print("PYTHON=python3\n\n");
lAllTarget = "";
for subdir1 in sorted(subdirs):
lBase = os.path.basename(subdir1);
test_target = "";
for filename in sorted(glob.glob(subdir1 + "/*/*/*/*.py")):
bn = os.path.basename(filename);
dirnames = os.path.dirname(filename).split("/");
logdir = "tests/references/artificial/" + dirnames[2] + "/" + dirnames[3] + "/" + dirnames[4]
mkdir_p(logdir)
logname = bn.replace("/" , "_");
logname = logname.replace(".py" , ".log");
logfile = "logs/" + logname;
reflogfile = logdir + "/" + logname;
difffile = logfile + ".diff"
print("#PROCESSING FILE : " , filename, bn , logfile);
print(bn , " : " , "\n\t", "-$(PYTHON) " , filename , " > " , logfile , " 2>&1");
print("\t", "$(PYTHON) scripts/num_diff.py " , reflogfile , logfile, " > " , difffile);
print("\t", "tail -10 " , difffile, "\n");
test_target = bn + " " + test_target;
lAllTarget = lAllTarget + " " + lBase;
print("\n\n", lBase , ": ", test_target, "\n" , "\n");
print("\n# ********************************************** \n");
print("all: " , lAllTarget , "\n\t\n");
|
glance/db/sqlalchemy/alembic_migrations/versions/ocata_expand01_add_visibility.py | Steap/glance | 309 | 11071742 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add visibility to images
Revision ID: ocata_expand01
Revises: mitaka02
Create Date: 2017-01-27 12:58:16.647499
"""
from alembic import op
from sqlalchemy import Column, Enum, MetaData
from glance.cmd import manage
from glance.db import migration
from glance.db.sqlalchemy.schema import Boolean
# revision identifiers, used by Alembic.
revision = 'ocata_expand01'
down_revision = 'mitaka02'
branch_labels = migration.EXPAND_BRANCH
depends_on = None
ERROR_MESSAGE = 'Invalid visibility value'
MYSQL_INSERT_TRIGGER = """
CREATE TRIGGER insert_visibility BEFORE INSERT ON images
FOR EACH ROW
BEGIN
-- NOTE(abashmak):
-- The following IF/ELSE block implements a priority decision tree.
-- Strict order MUST be followed to correctly cover all the edge cases.
-- Edge case: neither is_public nor visibility specified
-- (or both specified as NULL):
IF NEW.is_public <=> NULL AND NEW.visibility <=> NULL THEN
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s';
-- Edge case: both is_public and visibility specified:
ELSEIF NOT(NEW.is_public <=> NULL OR NEW.visibility <=> NULL) THEN
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s';
-- Inserting with is_public, set visibility accordingly:
ELSEIF NOT NEW.is_public <=> NULL THEN
IF NEW.is_public = 1 THEN
SET NEW.visibility = 'public';
ELSE
SET NEW.visibility = 'shared';
END IF;
-- Inserting with visibility, set is_public accordingly:
ELSEIF NOT NEW.visibility <=> NULL THEN
IF NEW.visibility = 'public' THEN
SET NEW.is_public = 1;
ELSE
SET NEW.is_public = 0;
END IF;
-- Edge case: either one of: is_public or visibility,
-- is explicitly set to NULL:
ELSE
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s';
END IF;
END;
"""
MYSQL_UPDATE_TRIGGER = """
CREATE TRIGGER update_visibility BEFORE UPDATE ON images
FOR EACH ROW
BEGIN
-- Case: new value specified for is_public:
IF NOT NEW.is_public <=> OLD.is_public THEN
-- Edge case: is_public explicitly set to NULL:
IF NEW.is_public <=> NULL THEN
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s';
-- Edge case: new value also specified for visibility
ELSEIF NOT NEW.visibility <=> OLD.visibility THEN
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s';
-- Case: visibility not specified or specified as OLD value:
-- NOTE(abashmak): There is no way to reliably determine which
-- of the above two cases occurred, but allowing to proceed with
-- the update in either case does not break the model for both
-- N and N-1 services.
ELSE
-- Set visibility according to the value of is_public:
IF NEW.is_public <=> 1 THEN
SET NEW.visibility = 'public';
ELSE
SET NEW.visibility = 'shared';
END IF;
END IF;
-- Case: new value specified for visibility:
ELSEIF NOT NEW.visibility <=> OLD.visibility THEN
-- Edge case: visibility explicitly set to NULL:
IF NEW.visibility <=> NULL THEN
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s';
-- Edge case: new value also specified for is_public
ELSEIF NOT NEW.is_public <=> OLD.is_public THEN
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s';
-- Case: is_public not specified or specified as OLD value:
-- NOTE(abashmak): There is no way to reliably determine which
-- of the above two cases occurred, but allowing to proceed with
-- the update in either case does not break the model for both
-- N and N-1 services.
ELSE
-- Set is_public according to the value of visibility:
IF NEW.visibility <=> 'public' THEN
SET NEW.is_public = 1;
ELSE
SET NEW.is_public = 0;
END IF;
END IF;
END IF;
END;
"""
def _add_visibility_column(meta):
enum = Enum('private', 'public', 'shared', 'community', metadata=meta,
name='image_visibility')
enum.create()
v_col = Column('visibility', enum, nullable=True, server_default=None)
op.add_column('images', v_col)
op.create_index('visibility_image_idx', 'images', ['visibility'])
def _add_triggers(engine):
if engine.engine.name == 'mysql':
op.execute(MYSQL_INSERT_TRIGGER % (ERROR_MESSAGE, ERROR_MESSAGE,
ERROR_MESSAGE))
op.execute(MYSQL_UPDATE_TRIGGER % (ERROR_MESSAGE, ERROR_MESSAGE,
ERROR_MESSAGE, ERROR_MESSAGE))
def _change_nullability_and_default_on_is_public(meta):
# NOTE(hemanthm): we mark is_public as nullable so that when new versions
# add data only to be visibility column, is_public can be null.
with op.batch_alter_table('images') as batch_op:
batch_op.alter_column(
'is_public', nullable=True, server_default=None,
existing_type=Boolean())
def upgrade():
migrate_engine = op.get_bind()
meta = MetaData(bind=migrate_engine)
_add_visibility_column(meta)
_change_nullability_and_default_on_is_public(meta)
if manage.USE_TRIGGERS:
_add_triggers(migrate_engine)
|
odps/udf/tests/test_usercounter.py | Emersonxuelinux/aliyun-odps-python-sdk | 412 | 11071754 | <reponame>Emersonxuelinux/aliyun-odps-python-sdk
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from odps.compat import unittest, six
from odps.counters import *
from odps.tests.core import TestBase
class TestUserCounter(TestBase):
def test_counter(self):
counter = Counter("test", 12)
self.assertEqual("test", counter.get_name())
self.assertEqual(12, counter.get_value())
counter = Counter("test2")
self.assertEqual("test2", counter.get_name())
self.assertEqual(0, counter.get_value())
def test_counter_group(self):
counter_group = CounterGroup("test_group")
self.assertEqual("test_group", counter_group.get_name())
counter_group.get_counter("test")
counter = Counter("test2")
counter_group.add_counter(counter)
self.assertEqual(2, counter_group.size())
def test_counters(self):
def _normalize_counter(json_str):
obj = json.loads(json_str)
for v in six.itervalues(obj):
if 'counters' not in v:
continue
v['counters'] = sorted(v['counters'], key=lambda item: item['name'])
return json.dumps(obj, sort_keys=True)
result_json = '''
{
"group1" : {
"name" : "group1",
"counters" : [
{
"name" : "test1",
"value" : 1
},
{
"name" : "test2",
"value" : 2
}
]},
"group2" : {
"name" : "group2",
"counters" : [
{
"name" : "test3",
"value" : 3
}
]
}
}
'''
counters = Counters()
c1 = counters.get_group("group1").get_counter("test1")
c1.increment(1)
c2 = counters.get_group("group1").get_counter("test2")
c2.increment(2)
c3 = counters.get_group("group2").get_counter("test3")
c3.increment(3)
self.assertEqual(2, counters.size())
self.assertEqual(_normalize_counter(result_json), _normalize_counter(counters.to_json_string()))
if __name__ == '__main__':
unittest.main()
|
experimental/lo/conv2d.py | kshithijiyer/qkeras | 388 | 11071762 |
# Copyright 2020 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements convolutional (?, h, w, c) facing input layer optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing as mp
import os
import shutil
from .compress import Compressor
import numpy as np
import six
from tensorflow.keras.models import Model
from .utils import get_padding_value
DEBUG = int(os.getenv("DEBUG", 0))
OG_IS_SYMBOLIC = 0
def parallel_index_table(
p, ni, size, idx_height, idx_width, i_dict, o_dict,
kernel, strides, padding, generate_pla):
"""Processes the table in parallel and use espresso to optimize it."""
print("... indexing table from {} to {} ({} => {})".format(
ni, ni+size, p[0].shape, p[1].shape))
table_ins = []
table_ous = []
table_set = Compressor(hash_only_input=True)
if DEBUG:
table_set_line = {}
for n in range(size):
# we need to traverse the outputs to compute the input coordinates
for ho in idx_height:
min_hi = strides[0]*ho - 2*padding[0]
max_hi = strides[0]*ho - 2*padding[0] + kernel[0]
if min_hi < 0 or max_hi > p[0].shape[0]:
continue
for wo in idx_width:
min_wi = strides[1]*wo - 2*padding[1]
max_wi = strides[1]*wo - 2*padding[1] + kernel[1]
if min_wi < 0 or max_wi > p[0].shape[1]:
continue
i_values = p[0][n, min_hi:max_hi, min_wi:max_wi].flatten()
# o_values has dimension (1, 1, C_O)
o_values = p[1][n, ho, wo]
# if we generate a pla entry, we care about a list of
# bits. Otherwise, we care about a list of floating point
# values.
table_i = "".join([i_dict[v] for v in i_values])
table_o = "".join([o_dict[v] for v in o_values])
if generate_pla:
table_s = "".join([str(v) for v in table_i])
bit_str = table_s
else:
table_s = ",".join([str(v) for v in table_i])
table_i = table_s
bit_str = "".join(i_dict[v] for v in i_values)
is_table_zero = bit_str != "0"*len(bit_str)
if table_set.has_entry(table_s) and not is_table_zero:
# if table is already stored, we do not store it again.
# from time to time, we may want to check if we have found
# diverging output values.
if DEBUG:
(table_o_old, (old_n, old_ho, old_wo)) = table_set_line[table_s]
if table_o != table_o_old:
print(
"contradicting outputs n={} old_n={} out_p={} out={}".format(
(n, ho, wo), (old_n, old_ho, old_wo), table_o_old,
table_o))
print(" I:", table_s)
print(" I:", i_values)
print("<<<", table_o_old)
print(">>>", table_o)
return (None, None)
continue
# these are unique table entries
table_ins.append(table_i)
table_ous.append(table_o)
# we store this information in order to be able to debug
# and discard information.
table_set.add_entry(table_s)
if DEBUG:
table_set_line[table_s] = (table_o, (n, ho, wo))
print("... indexing table from {} to {} completed".format(ni, ni+size))
return (table_ins, table_ous)
def parallel_compress_output_table(
filename, header, table_ins, table_ous, output_group, generate_pla,
n_bits_og, o, o_bits):
"""Processes in parallel compression of table and writes it to a disk."""
f = open(filename, "w")
f.write("".join(header))
c = Compressor()
for n in range(len(table_ins)):
for og in range(output_group):
if output_group > 1:
if generate_pla:
if OG_IS_SYMBOLIC:
og_l = ["0"] * n_bits_og
og_l[n_bits_og - 1 - og] = "1"
og_b = "".join(og_l)
table_i_suffix = " " + og_b
else:
og_b = bin(og)[2:]
table_i_suffix = " " + "0" * (n_bits_og - len(og_b)) + og_b
else:
table_i_suffix = "," + str(og)
else:
table_i_suffix = ""
table_i = table_ins[n] + table_i_suffix
table_o = table_ous[n][(o+og)*o_bits:(o+og+1)*o_bits]
if generate_pla:
c.add_entry(table_i + " " + table_o)
else:
c.add_entry(table_i + "," + str(table_o[0]))
for line in c():
f.write("{}\n".format(line[0]))
if generate_pla:
f.write(".e\n")
f.close()
print("... file {} generated".format(filename))
def optimize_conv2d_logic(
model, i_name, o_name, x_train,
i_dict=None, o_dict=None,
kernel=None, strides=None, padding=None,
output_group=1, samples=2000,
randomize=None, generate_pla=True, prefix=""):
"""Generates table for logic synthesis for conv2d or conv2d-like shape.
Generates table in either espresso format or csv format to be optimized
for logic synthesis. The parameters kernel, strides and padding usually
do not require any values, unless we want to embed a maxpooling layer or
multiple convolutional layers between i_name and o_name. In that case,
we require the user to compute the proper kernel, strides, and padding
that will correspond to the combined layer, as Keras and tensorflow do not
provide a way to compute the receptive field between two layers.
Arguments:
model: Keras model
i_name: name of convolutional layer (input to this layer must be
quantized).
o_name: name of quantized output layer.
x_train: training set to be used to dump table.
i_dict: dictionary mapping floating point input values to their encodings.
o_dict: dictionary mapping floating point output values to their encodings.
kernel: kernel size, to be specified if we want to override convolution
kernel.
strides: strides, to be specified if we want to override first convolution
strides.
padding: padding, to be specified if we want to override first convolution
padding.
output_group: by default, we compute one PE per channel output. The user
can override that by specifying how many output channels should be
bundled into the same PE.
samples: how many images from x_train should be sampled when generating the
tables.
randomize: if specified, it should be the number of coordinates within the
same image we will use to derive the convolution table.
generate_pla: if true, we generate table in pla format. Otherwise, we
generate a csv file.
prefix: prefix name to create directory.
Returns:
list of files generated.
"""
# if no i_dict or no o_dict, we do not know how to encode, so we generate
# csv file.
if not i_dict or not o_dict:
generate_pla = False
# extract layer from i_name and o_name
i_layer = model.get_layer(i_name)
o_layer = model.get_layer(o_name)
# if kernel is not specified, use the kernel size from i_layer
if not kernel:
kernel = i_layer.kernel_size
# if strides is not specified, use the strides from i_layer
if not strides:
strides = i_layer.strides
# if padding is not specified, use the padding from i_layer
if not padding:
padding = i_layer.padding
# for conv2d, we want a list for kernel, strides and padding
if not isinstance(kernel, list) and not isinstance(kernel, tuple):
kernel = [kernel, kernel]
if not isinstance(strides, list) and not isinstance(strides, tuple):
strides = [strides, strides]
if not isinstance(padding, list) and not isinstance(padding, tuple):
padding = [padding, padding]
# compute the padding value
padding[0] = get_padding_value(padding[0], kernel[0])
padding[1] = get_padding_value(padding[1], kernel[1])
# resample inputs
skip = min(2000, samples)
indexes = np.array(range(x_train.shape[0]))
np.random.shuffle(indexes)
x_train = x_train[indexes[:samples]]
# we want to create a smaller model that from inputs generate
# i_layer.output + o_layer.output tensors, so that we can predict
# its values.
outputs = []
x = i_layer.input
y = o_layer.output
if not isinstance(x, list):
x = [x]
outputs = x + [y]
mo = Model(inputs=model.inputs, outputs=outputs)
p = mo.predict(x_train)
# in csv mode, each entry has "1" value, for PLA,
# we encode the floating point into multiple bits.
if not generate_pla:
i_bits = 1
# i_dict = {v:v for v in i_dict.keys()}
else:
i_bits = len(six.next(six.itervalues(i_dict)))
if not generate_pla:
o_bits = 1
# o_dict = {v:v for v in o_dict.keys()}
else:
o_bits = len(six.next(six.itervalues(o_dict)))
# if randomize is specified, we will sample sqrt(randomize)
# from each image, as the conv2d performs the filter everywhere
# in the image. Because the same image may contain a lot of
# redundant information, we may want to restrict the number of
# samples.
if randomize:
idx_height = np.random.choice(
p[-1].shape[1],
int(np.round(np.sqrt(randomize))))
idx_width = np.random.choice(
p[-1].shape[2],
int(np.round(np.sqrt(randomize))))
else:
idx_height = range(p[-1].shape[1])
idx_width = range(p[-1].shape[2])
# this is just to inspect that the inputs and outputs are really quantized.
print("inputs:")
for i in range(len(x)):
print(i, np.min(p[i]), np.max(p[i]))
print("outputs:")
print(np.min(p[-1]), np.max(p[-1]))
# i_size and o_size are the channel sizes of the inputs and outputs
o_size = y.shape[-1]
i_size = p[0].shape[-1]
if generate_pla:
suffix = "pla"
else:
suffix = "csv"
prefix = prefix + "/" if prefix else ""
# lets try to remove the directory and create a new one
try:
shutil.rmtree(prefix + i_layer.name + "." + suffix)
except OSError:
pass
try:
os.makedirs(prefix + i_layer.name + "." + suffix)
except OSError:
pass
table_ins = list()
table_ous = list()
print("...indexing inputs")
# for each image in sampled x_train
# on Intel processors, mp.cpu_count() returns number of threads
number_of_processes = mp.cpu_count() // 2
pool = mp.Pool(number_of_processes)
results = []
for n in range(0, x_train.shape[0], skip):
res = pool.apply_async(
parallel_index_table,
args=((p[0][n:n+skip], p[1][n:n+skip]), n, skip, idx_height,
idx_width, i_dict, o_dict, kernel, strides, padding,
generate_pla))
results.append(res)
pool.close()
pool.join()
all_pools = [res.get(timeout=1) for res in results]
table_ins = sum([ap[0] for ap in all_pools], [])
table_ous = sum([ap[1] for ap in all_pools], [])
# input and output size
ni = len(table_ins[0])
no = len(table_ous[0])
print("... generating tables {} outputs, {} entries".format(
o_size, len(table_ins)))
# this step should be very fast
files = []
if OG_IS_SYMBOLIC:
if output_group > 1:
n_bits_og = output_group
else:
n_bits_og = 1
else:
if output_group == 2:
n_bits_og = 1
else:
n_bits_og = int(np.ceil(np.log2(output_group)))
# sometimes linux get very grumpy with too many files opened.
# let's limit to 20.
number_of_processes = min(20, mp.cpu_count() // 2)
pool = mp.Pool(number_of_processes)
for o in range(0, o_size, output_group):
filename = "{}{}.{}/{}_{}.raw.{}".format(
prefix, i_name, suffix, i_name, o, suffix)
files.append(filename)
header = []
if generate_pla:
header.append(".i {}\n".format(ni + n_bits_og))
header.append(".o {}\n".format(no // o_size))
header.append(".type fr\n")
if OG_IS_SYMBOLIC and output_group > 1:
header.append(".mv {} {} {} {}\n".format(
3, ni, n_bits_og, no // o_size))
# let's generate some labels
header.append(".ob " + " ".join([
"o_" + str(o) + "_" + str(o_bits - 1 - v)
for v in range(o_bits)]) + "\n")
i_names = []
# name is i_<channel>_<kernel_row>_<kernel_col>_bit
assert ni == (i_size * kernel[0] * kernel[1] * i_bits)
for channel in range(i_size):
for row in range(kernel[0]):
for col in range(kernel[1]):
for bit in range(i_bits):
i_names.append("i_{}_{}_{}_{}".format(
channel, row, col, (i_bits - 1 - bit)))
# if we are grouping multiple channels, these will be the inputs
for c in range(n_bits_og):
i_names.append("og_{}".format(n_bits_og - 1 - c))
header.append(".ilb " + " ".join(i_names) + "\n")
pool.apply_async(
parallel_compress_output_table,
args=((filename, header, table_ins, table_ous, output_group,
generate_pla, n_bits_og, o, o_bits)))
pool.close()
pool.join()
return files
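# A hedged usage sketch: the layer names and the 2-bit/1-bit encodings below
# are hypothetical, and i_dict/o_dict must map every quantized value observed
# at the chosen layer boundary to a fixed-width bit string.
def example_usage(model, x_train):
  i_dict = {-1.0: "00", 0.0: "01", 1.0: "11"}
  o_dict = {0.0: "0", 1.0: "1"}
  return optimize_conv2d_logic(
      model, "conv2d_1", "act_1", x_train,
      i_dict=i_dict, o_dict=o_dict,
      output_group=1, samples=500, generate_pla=True, prefix="tables")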
|
basex-api/src/main/python/QueryBindExample.py | mrdziuban/basex | 441 | 11071777 | <reponame>mrdziuban/basex<gh_stars>100-1000
# -*- coding: utf-8 -*-
# This example shows how external variables can be bound to XQuery expressions.
#
# Documentation: https://docs.basex.org/wiki/Clients
#
# (C) BaseX Team 2005-21, BSD License
from BaseXClient import BaseXClient
# create session
session = BaseXClient.Session('localhost', 1984, 'admin', 'admin')
try:
# create query instance
input = "declare variable $name external; for $i in 1 to 10 return element { $name } { $i }"
query = session.query(input)
# bind variable
query.bind("$name", "number")
# print result
print(query.execute())
# close query object
query.close()
finally:
# close session
if session:
session.close()
|
pgoapi/auth_ptc.py | xDyN/api1311 | 2,557 | 11071791 | <reponame>xDyN/api1311<filename>pgoapi/auth_ptc.py<gh_stars>1000+
"""
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
from __future__ import absolute_import
from future.standard_library import install_aliases
install_aliases()
import re
import six
import json
import logging
import requests
from urllib.parse import parse_qs
from pgoapi.auth import Auth
from pgoapi.utilities import get_time
from pgoapi.exceptions import AuthException
from requests.exceptions import ConnectionError
class AuthPtc(Auth):
PTC_LOGIN_URL = 'https://sso.pokemon.com/sso/login?service=https%3A%2F%2Fsso.pokemon.com%2Fsso%2Foauth2.0%2FcallbackAuthorize'
PTC_LOGIN_OAUTH = 'https://sso.pokemon.com/sso/oauth2.0/accessToken'
PTC_LOGIN_CLIENT_SECRET = '<KEY>'
def __init__(self):
Auth.__init__(self)
self._auth_provider = 'ptc'
self._session = requests.session()
self._session.verify = True
def set_proxy(self, proxy_config):
self._session.proxies = proxy_config
def user_login(self, username, password):
self.log.info('PTC User Login for: {}'.format(username))
if not isinstance(username, six.string_types) or not isinstance(password, six.string_types):
raise AuthException("Username/password not correctly specified")
head = {'User-Agent': 'niantic'}
try:
r = self._session.get(self.PTC_LOGIN_URL, headers=head)
except ConnectionError as e:
raise AuthException("Caught ConnectionError: %s", e)
try:
jdata = json.loads(r.content.decode('utf-8'))
data = {
'lt': jdata['lt'],
'execution': jdata['execution'],
'_eventId': 'submit',
'username': username,
'password': password,
}
except ValueError as e:
self.log.error('PTC User Login Error - Field missing in response: %s', e)
return False
except KeyError as e:
self.log.error('PTC User Login Error - Field missing in response.content: %s', e)
return False
r1 = self._session.post(self.PTC_LOGIN_URL, data=data, headers=head)
ticket = None
try:
ticket = re.sub('.*ticket=', '', r1.history[0].headers['Location'])
except Exception as e:
try:
self.log.error('Could not retrieve token: %s', r1.json()['errors'][0])
except Exception as e:
self.log.error('Could not retrieve token! (%s)', e)
return False
self._refresh_token = ticket
self.log.info('PTC User Login successful.')
self.get_access_token()
return self._login
def set_refresh_token(self, refresh_token):
self.log.info('PTC Refresh Token provided by user')
self._refresh_token = refresh_token
def get_access_token(self, force_refresh = False):
token_validity = self.check_access_token()
if token_validity is True and force_refresh is False:
self.log.debug('Using cached PTC Access Token')
return self._access_token
else:
if force_refresh:
self.log.info('Forced request of PTC Access Token!')
else:
self.log.info('Request PTC Access Token...')
data1 = {
'client_id': 'mobile-app_pokemon-go',
'redirect_uri': 'https://www.nianticlabs.com/pokemongo/error',
'client_secret': self.PTC_LOGIN_CLIENT_SECRET,
'grant_type': 'refresh_token',
'code': self._refresh_token,
}
r2 = self._session.post(self.PTC_LOGIN_OAUTH, data=data1)
qs = r2.content.decode('utf-8')
token_data = parse_qs(qs)
access_token = token_data.get('access_token', None)
if access_token is not None:
self._access_token = access_token[0]
now_s = get_time()
# set expiration to an hour less than value received because Pokemon OAuth
# login servers return an access token with an explicit expiry time of
# three hours, however, the token stops being valid after two hours.
# See issue #86
expires = int(token_data.get('expires', [0])[0]) - 3600
if expires > 0:
self._access_token_expiry = expires + now_s
else:
self._access_token_expiry = 0
self._login = True
self.log.info('PTC Access Token successfully retrieved.')
self.log.debug('PTC Access Token: %s...', self._access_token[:25])
else:
self._access_token = None
self._login = False
raise AuthException("Could not retrieve a PTC Access Token")
|
torch/onnx/symbolic_opset12.py | Hacky-DH/pytorch | 60,067 | 11071794 |
import torch
import torch.onnx.symbolic_helper as sym_help
from torch.onnx.symbolic_helper import parse_args, _parse_arg, _unimplemented
from torch.onnx.utils import _add_block, _add_input_to_block, _add_output_to_block
from sys import maxsize
from torch.onnx.symbolic_opset9 import permute, _reshape_from_tensor
import warnings
# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in symbolic_helper.py
# This file exports ONNX ops for opset 12
@parse_args("s", "v")
def einsum(g, equation, tensor_list):
tensors = sym_help._unpack_list(tensor_list)
return g.op("Einsum", *tensors, equation_s=equation)
@parse_args("v", "v")
def outer(g, input, other):
# make sure to cast other to self's type
if other.type().scalarType() != input.type().scalarType():
other = g.op("Cast", other, to_i=sym_help.cast_pytorch_to_onnx[input.type().scalarType()])
return g.op("Einsum", input, other, equation_s="i,j->ij")
@parse_args("v", "f", "i")
def dropout(g, input, p, train):
sym_help.check_training_mode(train, "dropout")
# in eval mode, dropout is non-op - if the node's train param is set to False, dropout is non-op
if not train:
return input
warnings.warn("Dropout is a training op and should not be exported in inference mode. "
"For inference, make sure to call eval() on the model and to export it with param training=False.")
p = g.op("Constant", value_t=torch.tensor(p))
t = g.op("Constant", value_t=torch.tensor(True))
r, _ = g.op("Dropout", input, p, t, outputs=2)
return r
def nll_loss(g, self, target, weight, reduction, ignore_index):
# none reduction : onnx::Constant[value={0}]
# mean reduction : onnx::Constant[value={1}]
# sum reduction : onnx::Constant[value={2}]
reduction = sym_help._maybe_get_const(reduction, "i")
reduction_vals = ["none", "mean", "sum"]
reduction = reduction_vals[reduction]
# in onnx NegativeLogLikelihoodLoss specification, ignore_index is optional without default value.
# therefore we need to set ignore_index attribute even if it is not specified (e.g. ignore_index=-100).
ignore_index = sym_help._maybe_get_const(ignore_index, "i")
if weight.node().mustBeNone():
nllloss = g.op("NegativeLogLikelihoodLoss", self, target, reduction_s=reduction, ignore_index_i=ignore_index)
else:
nllloss = g.op("NegativeLogLikelihoodLoss", self, target, weight, reduction_s=reduction, ignore_index_i=ignore_index)
return nllloss
def nll_loss2d(g, self, target, weight, reduction, ignore_index):
return nll_loss(g, self, target, weight, reduction, ignore_index)
def nll_loss_nd(g, self, target, weight, reduction, ignore_index):
return nll_loss(g, self, target, weight, reduction, ignore_index)
def cross_entropy_loss(g, self, target, weight, reduction, ignore_index, label_smoothing):
# none reduction : onnx::Constant[value={0}]
# mean reduction : onnx::Constant[value={1}]
# sum reduction : onnx::Constant[value={2}]
reduction = sym_help._maybe_get_const(reduction, "i")
reduction_vals = ["none", "mean", "sum"]
reduction = reduction_vals[reduction]
label_smoothing = sym_help._maybe_get_const(label_smoothing, "f")
if label_smoothing > 0.0:
raise RuntimeError("Unsupported: ONNX does not support label_smoothing")
# in onnx SoftmaxCrossEntropyLoss specification, ignore_index is optional without default value.
# therefore we need to set ignore_index attribute even if it is not specified (e.g. ignore_index=-100).
ignore_index = sym_help._maybe_get_const(ignore_index, "i")
if weight.node().mustBeNone():
celoss = g.op("SoftmaxCrossEntropyLoss", self, target, reduction_s=reduction, ignore_index_i=ignore_index)
else:
celoss = g.op("SoftmaxCrossEntropyLoss", self, target, weight, reduction_s=reduction, ignore_index_i=ignore_index)
return celoss
@parse_args("v", "v", "v", "v", "i")
def binary_cross_entropy_with_logits(g, input, target, weight, pos_weight, reduction):
from torch.onnx.symbolic_opset9 import sigmoid, log, sub, neg, mul, add
p = g.op("Constant", value_t=torch.tensor([1]))
sig_x = sigmoid(g, input)
log_sig_x = log(g, sig_x)
sub_1_x = sub(g, p, sig_x)
sub_1_y = sub(g, p, target)
log_1_x = log(g, sub_1_x)
if pos_weight is None or sym_help._is_none(pos_weight):
output = neg(g, add(g, mul(g, target, log_sig_x), mul(g, sub_1_y, log_1_x)))
else:
output = neg(g, add(g, mul(g, mul(g, target, log_sig_x), pos_weight), mul(g, sub_1_y, log_1_x)))
if weight is not None and not sym_help._is_none(weight):
output = mul(g, weight, output)
reduction = sym_help._maybe_get_const(reduction, "i")
if reduction == 0:
return output
elif reduction == 1:
return g.op("ReduceMean", output)
elif reduction == 2:
return g.op("ReduceSum", output)
else:
return sym_help._onnx_unsupported("binary_cross_entropy_with_logits with reduction other than none, mean, or sum")
def celu(g, self, alpha):
alpha = sym_help._maybe_get_const(alpha, "f")
# if the input is of type double cast it to float
if self.type().scalarType() == "Double":
self = g.op("Cast", self, to_i=sym_help.cast_pytorch_to_onnx["Float"])
out = g.op("Celu", self, alpha_f=alpha)
return g.op("Cast", out, to_i=sym_help.cast_pytorch_to_onnx["Double"])
return g.op("Celu", self, alpha_f=alpha)
def argmax(g, input, dim, keepdim):
if sym_help._is_none(dim):
flattened = sym_help._reshape_helper(g, input, g.op("Constant", value_t=torch.tensor([-1])))
return g.op("ArgMax", flattened, axis_i=0, keepdims_i=False, select_last_index_i=False)
else:
dim = _parse_arg(dim, "i")
keepdim = _parse_arg(keepdim, "i")
return g.op("ArgMax", input, axis_i=dim, keepdims_i=keepdim, select_last_index_i=False)
def argmin(g, input, dim, keepdim):
if sym_help._is_none(dim):
flattened = sym_help._reshape_helper(g, input, g.op("Constant", value_t=torch.tensor([-1])))
return g.op("ArgMin", flattened, axis_i=0, keepdims_i=False, select_last_index_i=False)
else:
dim = _parse_arg(dim, "i")
keepdim = _parse_arg(keepdim, "i")
return g.op("ArgMin", input, axis_i=dim, keepdims_i=keepdim, select_last_index_i=False)
def pow(g, self, exponent):
return g.op("Pow", self, exponent)
def ge(g, input, other):
return g.op("GreaterOrEqual", input, other)
def le(g, input, other):
return g.op("LessOrEqual", input, other)
@parse_args("v", "i", "v", "v")
def unfold(g, input, dimension, size, step):
const_size = sym_help._maybe_get_const(size, "i")
const_step = sym_help._maybe_get_const(step, "i")
if not sym_help._is_value(const_size) and not sym_help._is_value(const_step):
from torch.onnx.symbolic_opset9 import unfold as _unfold
return _unfold(g, input, dimension, const_size, const_step)
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return g.op("ATen", input, operator_s="unfold", dimension_i=dimension, size_i=size, step_i=step)
sizedim = sym_help._get_tensor_dim_size(input, dimension)
if sizedim is not None:
low_start = g.op("Constant", value_t=torch.tensor(0))
low_end = g.op("Constant", value_t=torch.tensor(sizedim))
hi_end = g.op("Constant", value_t=torch.tensor(sizedim + 1))
low_indices = g.op("Range", low_start, low_end, step)
hi_indices = g.op("Range", size, hi_end, step)
low_size = sym_help._size_helper(g, low_indices, g.op("Constant", value_t=torch.tensor(0)))
hi_size = sym_help._size_helper(g, hi_indices, g.op("Constant", value_t=torch.tensor(0)))
ndim = sym_help._get_tensor_rank(input)
perm = list(range(0, ndim))
perm.append(perm.pop(dimension))
unsqueeze_list = []
loop_condition = g.op("Constant", value_t=torch.tensor(1))
loop_condition = g.op("Cast", loop_condition, to_i=9)
loop_len = g.op("Min", low_size, hi_size)
loop = g.op("Loop", loop_len, loop_condition)
loop_block = _add_block(loop.node())
block_input_iter = _add_input_to_block(loop_block)
cond = _add_input_to_block(loop_block)
starts = loop_block.op("Gather", low_indices, block_input_iter)
ends = loop_block.op("Gather", hi_indices, block_input_iter)
axes = loop_block.op("Constant", value_t=torch.tensor([2]))
starts = sym_help._unsqueeze_helper(loop_block, starts, [0])
ends = sym_help._unsqueeze_helper(loop_block, ends, [0])
stack = loop_block.op("Slice", input, starts, ends, axes)
unsqueeze = sym_help._unsqueeze_helper(loop_block, loop_block.op("Transpose", stack, perm_i=perm), [dimension])
unsqueeze_list.append(unsqueeze)
concat = loop_block.op("Concat", *unsqueeze_list, axis_i=0)
cond_out = loop_block.op("Cast", loop_condition, to_i=9)
_add_output_to_block(loop_block, cond_out)
_add_output_to_block(loop_block, concat)
loop_output = loop.node().output()
perm = [0, 1, 2, 3, 4]
perm[0], perm[dimension + 1] = perm[dimension + 1], perm[0]
transpose = g.op("Transpose", loop_output, perm_i=perm)
squeeze = sym_help._squeeze_helper(g, transpose, [0])
return squeeze
else:
return _unimplemented("Unfold", "input size not accessible")
@parse_args("v", "v", "is", "is", "v")
def tensordot(g, input_a, input_b, dims_a, dims_b, out=None):
if out is not None:
_unimplemented("Tensordot", "Out parameter is not supported for tensordot.")
dim_count_a = sym_help._get_tensor_rank(input_a)
if dim_count_a is None:
raise RuntimeError("Unsupported: ONNX export of tensordot for tensor(input_a) of unknown rank.")
dim_count_b = sym_help._get_tensor_rank(input_b)
if dim_count_b is None:
raise RuntimeError("Unsupported: ONNX export of tensordot for tensor(input_b) of unknown rank.")
dims_a = [(dims_a[i] + dim_count_a) if (dims_a[i] < 0) else dims_a[i] for i in range(len(dims_a))]
dims_b = [(dims_b[i] + dim_count_b) if (dims_b[i] < 0) else dims_b[i] for i in range(len(dims_b))]
left_dims_a = [i for i in range(dim_count_a) if (i not in dims_a)]
left_dims_b = [i for i in range(dim_count_b) if (i not in dims_b)]
new_input_a = permute(g, input_a, left_dims_a + dims_a)
new_input_b = permute(g, input_b, dims_b + left_dims_b)
input_shape = g.op("Shape", new_input_a)
left_sizes_a = sym_help._slice_helper(g, input_shape, axes=[0], starts=[0], ends=[len(left_dims_a)])
shape_sizes = [left_sizes_a, g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long))]
output_a = _reshape_from_tensor(g, new_input_a, shape_sizes)
input_shape = g.op("Shape", output_a)
slices = sym_help._slice_helper(g, input_shape, axes=[0], starts=[-1], ends=[maxsize])
shape_sizes = [g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)), slices]
output_a = _reshape_from_tensor(g, new_input_a, shape_sizes)
input_shape = g.op("Shape", new_input_b)
left_sizes_b = sym_help._slice_helper(g, input_shape, axes=[0], starts=[len(dims_b)], ends=[maxsize])
slices = sym_help._slice_helper(g, input_shape, axes=[0], starts=[0], ends=[len(dims_b)])
shape_sizes = [slices, g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long))]
output_b = _reshape_from_tensor(g, new_input_b, shape_sizes)
input_shape = g.op("Shape", output_b)
slices = sym_help._slice_helper(g, input_shape, axes=[0], starts=[-1], ends=[maxsize])
shape_sizes = [g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)), slices]
output_b = _reshape_from_tensor(g, new_input_b, shape_sizes)
output = einsum(g, "ij,jk->ik", g.op("prim::ListConstruct", *[output_a, output_b]))
shape_sizes = [left_sizes_a, left_sizes_b]
return _reshape_from_tensor(g, output, shape_sizes)
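# --- Illustrative sketch (not part of the opset file above) ---
# A minimal export example, assuming a standard torch/onnx install, whose forward
# relies on aten::unfold so that the `unfold` symbolic defined above is exercised.
# The module and output file names are made up for illustration only.
if __name__ == "__main__":
    import torch

    class _UnfoldDemo(torch.nn.Module):
        def forward(self, x):
            # Sliding windows of length 2 with stride 1 along dim 1.
            return x.unfold(1, 2, 1)

    torch.onnx.export(
        _UnfoldDemo(), torch.randn(3, 5), "unfold_demo.onnx", opset_version=12
    )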
|
amazon_paapi/sdk/models/item.py | frenners/python-amazon-paapi | 121 | 11071809 | # coding: utf-8
"""
Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
"""
"""
ProductAdvertisingAPI
https://webservices.amazon.com/paapi5/documentation/index.html # noqa: E501
"""
import pprint
import re # noqa: F401
import six
from .browse_node_info import BrowseNodeInfo # noqa: F401,E501
from .customer_reviews import CustomerReviews # noqa: F401,E501
from .images import Images # noqa: F401,E501
from .item_info import ItemInfo # noqa: F401,E501
from .offers import Offers # noqa: F401,E501
from .rental_offers import RentalOffers # noqa: F401,E501
from .variation_attribute import VariationAttribute # noqa: F401,E501
class Item(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'asin': 'str',
'browse_node_info': 'BrowseNodeInfo',
'customer_reviews': 'CustomerReviews',
'detail_page_url': 'str',
'images': 'Images',
'item_info': 'ItemInfo',
'offers': 'Offers',
'parent_asin': 'str',
'rental_offers': 'RentalOffers',
'score': 'float',
'variation_attributes': 'list[VariationAttribute]'
}
attribute_map = {
'asin': 'ASIN',
'browse_node_info': 'BrowseNodeInfo',
'customer_reviews': 'CustomerReviews',
'detail_page_url': 'DetailPageURL',
'images': 'Images',
'item_info': 'ItemInfo',
'offers': 'Offers',
'parent_asin': 'ParentASIN',
'rental_offers': 'RentalOffers',
'score': 'Score',
'variation_attributes': 'VariationAttributes'
}
def __init__(self, asin=None, browse_node_info=None, customer_reviews=None, detail_page_url=None, images=None, item_info=None, offers=None, parent_asin=None, rental_offers=None, score=None, variation_attributes=None): # noqa: E501
"""Item - a model defined in Swagger""" # noqa: E501
self._asin = None
self._browse_node_info = None
self._customer_reviews = None
self._detail_page_url = None
self._images = None
self._item_info = None
self._offers = None
self._parent_asin = None
self._rental_offers = None
self._score = None
self._variation_attributes = None
self.discriminator = None
if asin is not None:
self.asin = asin
if browse_node_info is not None:
self.browse_node_info = browse_node_info
if customer_reviews is not None:
self.customer_reviews = customer_reviews
if detail_page_url is not None:
self.detail_page_url = detail_page_url
if images is not None:
self.images = images
if item_info is not None:
self.item_info = item_info
if offers is not None:
self.offers = offers
if parent_asin is not None:
self.parent_asin = parent_asin
if rental_offers is not None:
self.rental_offers = rental_offers
if score is not None:
self.score = score
if variation_attributes is not None:
self.variation_attributes = variation_attributes
@property
def asin(self):
"""Gets the asin of this Item. # noqa: E501
:return: The asin of this Item. # noqa: E501
:rtype: str
"""
return self._asin
@asin.setter
def asin(self, asin):
"""Sets the asin of this Item.
:param asin: The asin of this Item. # noqa: E501
:type: str
"""
self._asin = asin
@property
def browse_node_info(self):
"""Gets the browse_node_info of this Item. # noqa: E501
:return: The browse_node_info of this Item. # noqa: E501
:rtype: BrowseNodeInfo
"""
return self._browse_node_info
@browse_node_info.setter
def browse_node_info(self, browse_node_info):
"""Sets the browse_node_info of this Item.
:param browse_node_info: The browse_node_info of this Item. # noqa: E501
:type: BrowseNodeInfo
"""
self._browse_node_info = browse_node_info
@property
def customer_reviews(self):
"""Gets the customer_reviews of this Item. # noqa: E501
:return: The customer_reviews of this Item. # noqa: E501
:rtype: CustomerReviews
"""
return self._customer_reviews
@customer_reviews.setter
def customer_reviews(self, customer_reviews):
"""Sets the customer_reviews of this Item.
:param customer_reviews: The customer_reviews of this Item. # noqa: E501
:type: CustomerReviews
"""
self._customer_reviews = customer_reviews
@property
def detail_page_url(self):
"""Gets the detail_page_url of this Item. # noqa: E501
:return: The detail_page_url of this Item. # noqa: E501
:rtype: str
"""
return self._detail_page_url
@detail_page_url.setter
def detail_page_url(self, detail_page_url):
"""Sets the detail_page_url of this Item.
:param detail_page_url: The detail_page_url of this Item. # noqa: E501
:type: str
"""
self._detail_page_url = detail_page_url
@property
def images(self):
"""Gets the images of this Item. # noqa: E501
:return: The images of this Item. # noqa: E501
:rtype: Images
"""
return self._images
@images.setter
def images(self, images):
"""Sets the images of this Item.
:param images: The images of this Item. # noqa: E501
:type: Images
"""
self._images = images
@property
def item_info(self):
"""Gets the item_info of this Item. # noqa: E501
:return: The item_info of this Item. # noqa: E501
:rtype: ItemInfo
"""
return self._item_info
@item_info.setter
def item_info(self, item_info):
"""Sets the item_info of this Item.
:param item_info: The item_info of this Item. # noqa: E501
:type: ItemInfo
"""
self._item_info = item_info
@property
def offers(self):
"""Gets the offers of this Item. # noqa: E501
:return: The offers of this Item. # noqa: E501
:rtype: Offers
"""
return self._offers
@offers.setter
def offers(self, offers):
"""Sets the offers of this Item.
:param offers: The offers of this Item. # noqa: E501
:type: Offers
"""
self._offers = offers
@property
def parent_asin(self):
"""Gets the parent_asin of this Item. # noqa: E501
:return: The parent_asin of this Item. # noqa: E501
:rtype: str
"""
return self._parent_asin
@parent_asin.setter
def parent_asin(self, parent_asin):
"""Sets the parent_asin of this Item.
:param parent_asin: The parent_asin of this Item. # noqa: E501
:type: str
"""
self._parent_asin = parent_asin
@property
def rental_offers(self):
"""Gets the rental_offers of this Item. # noqa: E501
:return: The rental_offers of this Item. # noqa: E501
:rtype: RentalOffers
"""
return self._rental_offers
@rental_offers.setter
def rental_offers(self, rental_offers):
"""Sets the rental_offers of this Item.
:param rental_offers: The rental_offers of this Item. # noqa: E501
:type: RentalOffers
"""
self._rental_offers = rental_offers
@property
def score(self):
"""Gets the score of this Item. # noqa: E501
:return: The score of this Item. # noqa: E501
:rtype: float
"""
return self._score
@score.setter
def score(self, score):
"""Sets the score of this Item.
:param score: The score of this Item. # noqa: E501
:type: float
"""
self._score = score
@property
def variation_attributes(self):
"""Gets the variation_attributes of this Item. # noqa: E501
:return: The variation_attributes of this Item. # noqa: E501
:rtype: list[VariationAttribute]
"""
return self._variation_attributes
@variation_attributes.setter
def variation_attributes(self, variation_attributes):
"""Sets the variation_attributes of this Item.
:param variation_attributes: The variation_attributes of this Item. # noqa: E501
:type: list[VariationAttribute]
"""
self._variation_attributes = variation_attributes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Item, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Item):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
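# --- Illustrative usage sketch (not part of the generated model) ---
# Shows how the generated Item model is typically built and serialized; the ASIN,
# URL and score below are placeholders, not real catalogue data.
if __name__ == "__main__":
    demo_item = Item(
        asin="B000000000",
        detail_page_url="https://www.amazon.com/dp/B000000000",
        score=1.0,
    )
    print(demo_item.to_dict())
    print(demo_item == Item(asin="B000000000"))  # False: other attributes differ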
|
env/Lib/site-packages/OpenGL/GLES2/OES/texture_cube_map_array.py | 5gconnectedbike/Navio2 | 210 | 11071822 | '''OpenGL extension OES.texture_cube_map_array
This module customises the behaviour of the
OpenGL.raw.GLES2.OES.texture_cube_map_array to provide a more
Python-friendly API
Overview (from the spec)
OpenGL ES 3.1 supports two-dimensional array textures. An array texture
is an ordered set of images with the same size and format. Each image in
an array texture has a unique level. This extension expands texture
array support to include cube map textures.
A cube map array texture is a two-dimensional array texture that may
contain many cube map layers. Each cube map layer is a unique cube map
image set. Images in a cube map array have the same size and format
limitations as two-dimensional array textures. A cube map array texture
is specified using TexImage3D or TexStorage3D in a similar manner to
two-dimensional arrays. Cube map array textures can be bound to a render
targets of a frame buffer object just as two-dimensional arrays are,
using FramebufferTextureLayer.
When accessed by a shader, a cube map array texture acts as a single
unit. The "s", "t", "r" texture coordinates are treated as a regular
cube map texture fetch. The "q" texture is treated as an unnormalized
floating-point value identifying the layer of the cube map array
texture. Cube map array texture lookups do not filter between layers.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/texture_cube_map_array.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.texture_cube_map_array import *
from OpenGL.raw.GLES2.OES.texture_cube_map_array import _EXTENSION_NAME
def glInitTextureCubeMapArrayOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
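# --- Illustrative sketch (not part of the autogenerated wrapper) ---
# Checking the extension requires a current GLES context; creating that context
# (e.g. via EGL or a windowing toolkit) is assumed to have happened elsewhere.
if __name__ == "__main__":
    if glInitTextureCubeMapArrayOES():
        print("OES_texture_cube_map_array is available")
    else:
        print("OES_texture_cube_map_array is NOT available")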
### END AUTOGENERATED SECTION |
backend/test/integration/test_comment.py | restato/bunnybook | 131 | 11071825 | <filename>backend/test/integration/test_comment.py
import pytest
@pytest.mark.asyncio
async def test_create_comment(ben):
content = "Test"
post_id = (await ben.conn.post("/posts", json={"content": content})) \
.json()["id"]
new_comment_request = await ben.conn.post(f"/posts/{post_id}/comments",
json={"content": "Test"})
assert new_comment_request.status_code == 201
new_comment = new_comment_request.json()
assert new_comment["content"] == content
comments_request = await ben.conn.get(f"/posts/{post_id}/comments")
assert comments_request.status_code == 200
|
mach_nix/generators/__init__.py | fiksn/mach-nix | 490 | 11071826 | <reponame>fiksn/mach-nix
from abc import ABC, abstractmethod
from typing import List
from mach_nix.requirements import Requirement
from mach_nix.resolver import Resolver
class ExpressionGenerator(ABC):
def __init__(self, resolver: Resolver):
self.resolver = resolver
@abstractmethod
def generate(self, reqs: List[Requirement]) -> str:
pass
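# --- Illustrative sketch (not part of the package) ---
# A minimal, hypothetical concrete generator showing the contract of the ABC above;
# it only echoes the requirement names instead of emitting a real Nix expression.
class _EchoExpressionGenerator(ExpressionGenerator):
    def generate(self, reqs: List[Requirement]) -> str:
        # Real generators would resolve `reqs` through self.resolver first.
        return "\n".join(str(req) for req in reqs)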
|
insights/parsers/tests/test_postconf.py | haithcockce/insights-core | 121 | 11071836 | <filename>insights/parsers/tests/test_postconf.py
import pytest
import doctest
from insights.core import ContentException
from insights.parsers import postconf, SkipException
from insights.parsers.postconf import PostconfBuiltin, Postconf, _Postconf
from insights.tests import context_wrap
V_OUT1 = """
""".strip()
V_OUT2 = """
smtpd_tls_loglevel = 0
smtpd_tls_mandatory_ciphers = medium
smtpd_tls_mandatory_exclude_ciphers =
smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1
""".strip()
V_OUT3 = """
command not found
""".strip()
def test_PostconfBuiltin():
with pytest.raises(SkipException):
PostconfBuiltin(context_wrap(V_OUT1))
with pytest.raises(ContentException):
PostconfBuiltin(context_wrap(V_OUT3))
p = PostconfBuiltin(context_wrap(V_OUT2))
assert p['smtpd_tls_loglevel'] == '0'
assert p['smtpd_tls_mandatory_ciphers'] == 'medium'
assert p['smtpd_tls_mandatory_exclude_ciphers'] == ''
assert p['smtpd_tls_mandatory_protocols'] == '!SSLv2, !SSLv3, !TLSv1'
def test_Postconf():
with pytest.raises(SkipException):
Postconf(context_wrap(V_OUT1))
with pytest.raises(ContentException):
Postconf(context_wrap(V_OUT3))
p = Postconf(context_wrap(V_OUT2))
assert p['smtpd_tls_loglevel'] == '0'
assert p['smtpd_tls_mandatory_ciphers'] == 'medium'
assert p['smtpd_tls_mandatory_exclude_ciphers'] == ''
assert p['smtpd_tls_mandatory_protocols'] == '!SSLv2, !SSLv3, !TLSv1'
def test_empty():
with pytest.raises(SkipException):
PostconfBuiltin(context_wrap(""))
with pytest.raises(SkipException):
Postconf(context_wrap(""))
def test_invalid():
with pytest.raises(SkipException):
PostconfBuiltin(context_wrap("asdf"))
with pytest.raises(SkipException):
Postconf(context_wrap("asdf"))
def test_doc_examples():
env = {
'postconfb': PostconfBuiltin(context_wrap(V_OUT2)),
'postconf': Postconf(context_wrap(V_OUT2)),
'_postconf': _Postconf(context_wrap(V_OUT2)),
}
failed, total = doctest.testmod(postconf, globs=env)
assert failed == 0
# TODO
# env = {
# 'postconf': Postconf(context_wrap(V_OUT2)),
# }
# failed, total = doctest.testmod(postconf, globs=env)
# assert failed == 0
|
homeassistant/components/wiffi/__init__.py | andersop91/core | 22,481 | 11071845 | """Component for wiffi support."""
from datetime import timedelta
import errno
import logging
from wiffi import WiffiTcpServer
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PORT, CONF_TIMEOUT, Platform
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util.dt import utcnow
from .const import (
CHECK_ENTITIES_SIGNAL,
CREATE_ENTITY_SIGNAL,
DEFAULT_TIMEOUT,
DOMAIN,
UPDATE_ENTITY_SIGNAL,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = [Platform.BINARY_SENSOR, Platform.SENSOR]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up wiffi from a config entry, config_entry contains data from config entry database."""
if not entry.update_listeners:
entry.add_update_listener(async_update_options)
# create api object
api = WiffiIntegrationApi(hass)
api.async_setup(entry)
# store api object
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = api
try:
await api.server.start_server()
except OSError as exc:
if exc.errno != errno.EADDRINUSE:
_LOGGER.error("Start_server failed, errno: %d", exc.errno)
return False
_LOGGER.error("Port %s already in use", entry.data[CONF_PORT])
raise ConfigEntryNotReady from exc
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_update_options(hass: HomeAssistant, entry: ConfigEntry):
"""Update options."""
await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
api: WiffiIntegrationApi = hass.data[DOMAIN][entry.entry_id]
await api.server.close_server()
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
api = hass.data[DOMAIN].pop(entry.entry_id)
api.shutdown()
return unload_ok
def generate_unique_id(device, metric):
"""Generate a unique string for the entity."""
return f"{device.mac_address.replace(':', '')}-{metric.name}"
class WiffiIntegrationApi:
"""API object for wiffi handling. Stored in hass.data."""
def __init__(self, hass):
"""Initialize the instance."""
self._hass = hass
self._server = None
self._known_devices = {}
self._periodic_callback = None
def async_setup(self, config_entry):
"""Set up api instance."""
self._server = WiffiTcpServer(config_entry.data[CONF_PORT], self)
self._periodic_callback = async_track_time_interval(
self._hass, self._periodic_tick, timedelta(seconds=10)
)
def shutdown(self):
"""Shutdown wiffi api.
Remove listener for periodic callbacks.
"""
if (remove_listener := self._periodic_callback) is not None:
remove_listener()
async def __call__(self, device, metrics):
"""Process callback from TCP server if new data arrives from a device."""
if device.mac_address not in self._known_devices:
# add empty set for new device
self._known_devices[device.mac_address] = set()
for metric in metrics:
if metric.id not in self._known_devices[device.mac_address]:
self._known_devices[device.mac_address].add(metric.id)
async_dispatcher_send(self._hass, CREATE_ENTITY_SIGNAL, device, metric)
else:
async_dispatcher_send(
self._hass,
f"{UPDATE_ENTITY_SIGNAL}-{generate_unique_id(device, metric)}",
device,
metric,
)
@property
def server(self):
"""Return TCP server instance for start + close."""
return self._server
@callback
def _periodic_tick(self, now=None):
"""Check if any entity has timed out because it has not been updated."""
async_dispatcher_send(self._hass, CHECK_ENTITIES_SIGNAL)
class WiffiEntity(Entity):
"""Common functionality for all wiffi entities."""
def __init__(self, device, metric, options):
"""Initialize the base elements of a wiffi entity."""
self._id = generate_unique_id(device, metric)
self._device_info = DeviceInfo(
connections={(device_registry.CONNECTION_NETWORK_MAC, device.mac_address)},
identifiers={(DOMAIN, device.mac_address)},
manufacturer="stall.biz",
model=device.moduletype,
name=f"{device.moduletype} {device.mac_address}",
sw_version=device.sw_version,
configuration_url=device.configuration_url,
)
self._name = metric.description
self._expiration_date = None
self._value = None
self._timeout = options.get(CONF_TIMEOUT, DEFAULT_TIMEOUT)
async def async_added_to_hass(self):
"""Entity has been added to hass."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{UPDATE_ENTITY_SIGNAL}-{self._id}",
self._update_value_callback,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, CHECK_ENTITIES_SIGNAL, self._check_expiration_date
)
)
@property
def should_poll(self):
"""Disable polling because data driven ."""
return False
@property
def device_info(self):
"""Return wiffi device info which is shared between all entities of a device."""
return self._device_info
@property
def unique_id(self):
"""Return unique id for entity."""
return self._id
@property
def name(self):
"""Return entity name."""
return self._name
@property
def available(self):
"""Return true if value is valid."""
return self._value is not None
def reset_expiration_date(self):
"""Reset value expiration date.
Will be called by derived classes after a value update has been received.
"""
self._expiration_date = utcnow() + timedelta(minutes=self._timeout)
@callback
def _update_value_callback(self, device, metric):
"""Update the value of the entity."""
@callback
def _check_expiration_date(self):
"""Periodically check if entity value has been updated.
If there are no more updates from the wiffi device, the value will be
set to unavailable.
"""
if (
self._value is not None
and self._expiration_date is not None
and utcnow() > self._expiration_date
):
self._value = None
self.async_write_ha_state()
def _is_measurement_entity(self):
"""Measurement entities have a value in present time."""
return not self._name.endswith("_gestern") and not self._is_metered_entity()
def _is_metered_entity(self):
"""Metered entities have a value that keeps increasing until reset."""
return self._name.endswith("_pro_h") or self._name.endswith("_heute")
|
xsssniper/core/scanner.py | Marzooq13579/Hack-Gadgets | 370 | 11071863 | <gh_stars>100-1000
#!/usr/bin/env python
try:
from mechanize import Request, urlopen, URLError, HTTPError,ProxyHandler, build_opener, install_opener, Browser
except ImportError:
print "\n[X] Please install mechanize module:"
print " http://wwwsearch.sourceforge.net/mechanize/\n"
exit()
import random
import threading
import string
from core.constants import USER_AGENTS
from core.result import Result
from core.target import Target
from core.payload import Payload
class Scanner(threading.Thread):
def __init__(self, engine, queue):
threading.Thread.__init__(self)
self.queue = queue
self.engine = engine
self.results = []
self.errors = {}
def _addError(self, key, value):
if self.errors.has_key(key):
self.errors[key].append(value)
else:
self.errors[key] = [value]
def processResponse(self, response, payload):
"""
        Given a response body, search for and return the XSS injections found in it.
        How it works: the response is parsed sequentially looking for the seed,
        while keeping a state of the current position to determine whether we
        have a valid injection and where it landed.
        This is based on ratproxy's XSS scanning technique, so
        all the props to @lcamtuf for this.
"""
# It only works for payloads of type taint (temporary)
if payload.taint:
htmlstate = 0
htmlurl = 0
index = 0
result = []
# Building the taint and the response
# I want everything lowercase because I don't want to handle
# cases when the payload is upper/lowercased by the webserver
seed_len = payload.seed_len
seed = payload.seed
# htmlstate legend:
# - 1 index is in tag
# - 2 index is inside double quotes
# - 4 index is inside single quotes
# - 8 index is inside html comment
# - 16 index is inside cdata
while index <= len(response)-1:
# Exit cases for a match against the taint
# If conditions are a little messy...
# TODO: utf-7 xss
if response[index:index+seed_len] == seed:
# XSS found in tag
# <tag foo=bar onload=...>
# type 1
if htmlstate == 1 and response[index+seed_len:index+seed_len+seed_len+1] == " " + seed + "=":
index = index + seed_len
result.append([1, "Payload found inside tag"])
continue
# XSS found in url
# <tag src=foo:bar ...>
# type 2
if htmlurl and response[index+seed_len:index+seed_len+seed_len+1] == ":" + seed:
index = index + seed_len
result.append([2, "Payload found inside url tag"])
continue
# XSS found freely in response
# <tag><script>...
# type 3
if htmlstate == 0 and response[index+seed_len:index+seed_len+seed_len+1] == "<" + seed:
index = index + seed_len
result.append([3, "Payload found free in html"])
continue
# XSS found inside double quotes
# <tag foo="bar"onload=...>
# type 4
if (htmlstate == 1 or htmlstate == 2) and response[index+seed_len:index+seed_len+seed_len] == "\"" + seed:
index = index + seed_len
result.append([4, "Payload found inside tag escaped from double quotes"])
continue
# XSS found inside single quotes
# <tag foo='bar'onload=...>
# type 5
if (htmlstate == 1 or htmlstate == 4) and response[index+seed_len:index+seed_len+seed_len] == "'" + seed:
index = index + seed_len
result.append([5, "Payload found inside tag escaped from single quotes"])
continue
else:
# We are in a CDATA block
if htmlstate == 0 and response[index:index+9] == "<![CDATA[":
htmlstate = 16
index = index + 9
continue
if htmlstate == 16 and response[index:index+3] == "]]>":
htmlstate = 0
index = index + 3
continue
# We are in a html comment
if htmlstate == 0 and response[index:index+4] == "<!--":
htmlstate = 8
index = index + 4
continue
if htmlstate == 8 and response[index:index+3] == "-->":
htmlstate = 0
index = index + 3
continue
# We are in a tag
if htmlstate == 0 and response[index] == "<" and (response[index+1] == "!" or response[index+1] == "?" or response[index+1].isalpha()):
htmlstate = 1
index = index + 1
continue
if htmlstate == 1 and response[index] == ">":
htmlstate = 0
htmlurl = 0
index = index + 1
continue
# We are inside a double quote
if htmlstate == 1 and response[index] == '"' and response[index-1] == '=':
htmlstate = 2
index = index + 1
continue
if (htmlstate == 1 or htmlstate == 2) and response[index] == '"':
htmlstate = 1
index = index + 1
continue
# We are inside a single quote
if htmlstate == 1 and response[index] == '\'' and response[index-1] == '=':
htmlstate = 4
index = index + 1
continue
if (htmlstate == 1 or htmlstate == 4) and response[index] == '\'':
htmlstate = 1
index = index + 1
continue
# We are inside an url
if htmlstate == 1 and response[index-1] == " " and response[index:index+5] == "href=":
htmlurl = 1
index = index + 5
continue
if htmlstate == 1 and response[index-1] == " " and response[index:index+5] == "src=":
htmlurl = 1
index = index + 4
continue
# In case the url isn't correctly closed
if htmlurl == 1:
htmlurl = 0
# Move on
index = index +1
# End of response parsing
return result
else:
# No a taint payload
return None
def _performInjections(self, target):
# Check every parameter
for k, v in target.params.iteritems():
pl = Payload(taint=True)
url, data = target.getPayloadedUrl(k, pl.payload)
# In case of proxy
if self.engine.getOption('http-proxy') is not None:
proxy = ProxyHandler({'http': self.engine.getOption('http-proxy')})
opener = build_opener(proxy)
install_opener(opener)
# Some headers
if self.engine.getOption('ua') is not None:
if self.engine.getOption('ua') is "RANDOM":
headers = {'User-Agent': random.choice(USER_AGENTS)}
else:
headers = {'User-Agent': self.engine.getOption('ua')}
else:
headers = {}
if self.engine.getOption("cookie") is not None:
headers["Cookie"] = self.engine.getOption("cookie")
# Build the request
req = Request(url, data, headers)
try:
to = 10 if self.engine.getOption('http-proxy') is None else 20
response = urlopen(req, timeout=to)
except HTTPError, e:
self._addError(e.code, target.getAbsoluteUrl())
return
except URLError, e:
self._addError(e.reason, target.getAbsoluteUrl())
return
except:
self._addError('Unknown', target.getAbsoluteUrl())
return
else:
result = self.processResponse(response.read().lower(), pl)
for r in result:
self.results.append(Result(target, k, pl, r))
def _checkStoredInjections(self):
for r in self.results:
# At this state injections in Result obj are not
# compacted yet so it will only be 1st injected param
url, data = r.target.getPayloadedUrl(r.first_param, "")
# In case of proxy
if self.engine.getOption('http-proxy') is not None:
proxy = ProxyHandler({'http': self.engine.getOption('http-proxy')})
opener = build_opener(proxy)
install_opener(opener)
# Some headers
if self.engine.getOption('ua') is not None:
if self.engine.getOption('ua') is "RANDOM":
headers = {'User-Agent': random.choice(USER_AGENTS)}
else:
headers = {'User-Agent': self.engine.getOption('ua')}
else:
headers = {}
if self.engine.getOption("cookie") is not None:
headers["Cookie"] = self.engine.getOption("cookie")
# Build the request
req = Request(url, data, headers)
try:
to = 10 if self.engine.getOption('http-proxy') is None else 20
response = urlopen(req, timeout=to)
except HTTPError, e:
self._addError(e.code, r.target.getAbsoluteUrl())
continue
except URLError, e:
self._addError(e.reason, r.target.getAbsoluteUrl())
continue
except:
self._addError('Unknown', r.target.getAbsoluteUrl())
continue
else:
result = self.processResponse(response.read().lower(), r.first_pl)
                if len(result) != 0:
# looks like it's stored
oldinjtype = r.injections[r.first_param]
oldinjtype[0][0][0] = "stored"
r.injections[r.first_param] = oldinjtype
def run(self):
""" Main code of the thread """
while True:
try:
target = self.queue.get(timeout = 1)
except:
try:
self.queue.task_done()
except ValueError:
pass
else:
# No GET/POST parameters? Skip to next url
if len(target.params) == 0:
# print "[X] No paramaters to inject"
self.queue.task_done()
continue
self._performInjections(target)
self._checkStoredInjections()
# Scan complete
try:
self.queue.task_done()
except ValueError:
pass
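# --- Illustrative sketch (not part of the original scanner) ---
# Shows how processResponse() classifies a taint reflected freely in the HTML body.
# It assumes Payload.seed is a lowercase token (the parser compares lowercased
# responses against it); engine/queue are not needed by this method, so None
# placeholders are used purely for demonstration.
if __name__ == "__main__":
    _pl = Payload(taint=True)
    _scanner = Scanner(None, None)
    _body = "<p>%s<%s</p>" % (_pl.seed, _pl.seed)
    # Expected to report the "Payload found free in html" case (type 3).
    print(_scanner.processResponse(_body, _pl))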
|
django_grpc/serializers/__init__.py | nielsvaneck/django-grpc | 164 | 11071869 | <filename>django_grpc/serializers/__init__.py
from .base import BaseModelSerializer, message_to_python
def serialize_model(message_class, instance, serializers):
"""
Shortcut
"""
return BaseModelSerializer.serialize_model(message_class, instance, serializers)
def deserialize_message(message) -> dict:
return message_to_python(message)
|
qmpy/analysis/database.py | tachyontraveler/qmpy | 103 | 11071878 | import qmpy
import qmpy.data as data
import logging
logger = logging.getLogger(__name__)
def apply_filter(words, keyword):
"""
Assign a keyword to all entries whose references contain any of the given
list of words.
Examples:
>>> apply_filter(["pressure", "mpa", "gpa", "kbar"], "high pressure")
"""
entries = qmpy.Entry.objects.none()
for word in words:
entries |= qmpy.Entry.objects.filter(title__contains=word)
kw = qmpy.MetaData.get("keyword", keyword)
kw.entry_set.add(*entries)
def is_likely_high_pressure(structure):
pressure_words = ["pressure", "anvil cell", "dac", "mpa", "gpa", "mbar", "kbar"]
    # add test for volume deviation from Vegard's law
if structure.pressure > 102:
return True
if structure.reference:
if structure.reference.title:
title = structure.reference.title.lower()
for w in pressure_words:
if w in title:
return True
return False
|
benchmark/torch/AlphaZero/Coach.py | lp2333/PARL | 3,172 | 11071884 | <reponame>lp2333/PARL
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import pickle
from pickle import Pickler, Unpickler
from random import shuffle
from parl.utils import tensorboard
import numpy as np
from tqdm import tqdm
import parl
from parl.utils import logger
from actor import Actor
from utils import split_group, get_test_dataset
from alphazero_agent import create_agent
class Coach():
"""
This class executes the self-play, learning and evaluating.
"""
def __init__(self, game, args):
self.game = game
self.args = args
# neural network of current generation
self.current_agent = create_agent(self.game)
# neural network of previous generation
self.previous_agent = create_agent(self.game)
# history of examples from args.numItersForTrainExamplesHistory latest iterations
self.trainExamplesHistory = []
self.test_dataset = get_test_dataset()
def _create_remote_actors(self):
# connect to xparl cluster to submit jobs
parl.connect(self.args.master_address)
        # create the actors synchronously.
self.remote_actors = [Actor(self.game, self.args, seed) \
for seed in range(self.args.actors_num)]
def learn(self):
"""Each iteration:
1. Performs numEps episodes of self-play.
2. Retrains neural network with examples in trainExamplesHistory
(which has a maximum length of numItersForTrainExamplesHistory).
3. Evaluates the new neural network with the test dataset.
4. Pits the new neural network against the old one and accepts it
only if it wins >= updateThreshold fraction of games.
"""
# create remote actors to run tasks (self-play/pitting/evaluate_test_dataset) in parallel.
self._create_remote_actors()
for iteration in range(1, self.args.numIters + 1):
logger.info('Starting Iter #{} ...'.format(iteration))
####################
logger.info('Step1: self-play in parallel...')
iterationTrainExamples = []
# update weights of remote actors to the latest weights, and ask them to run self-play task
episode_num_each_actor = self.args.numEps // self.args.actors_num
weights = self.current_agent.get_weights()
future_object_ids = [remote_actor.self_play(
weights, episode_num_each_actor) \
for remote_actor in self.remote_actors]
results = [
future_object.get() for future_object in future_object_ids
]
for result in results:
iterationTrainExamples.extend(result)
# save the iteration examples to the history
self.trainExamplesHistory.append(iterationTrainExamples)
if len(self.trainExamplesHistory
) > self.args.numItersForTrainExamplesHistory:
logger.warning("Removing the oldest entry in trainExamples.")
self.trainExamplesHistory.pop(0)
self.saveTrainExamples(iteration) # backup history to a file
####################
logger.info('Step2: train neural network...')
# shuffle examples before training
trainExamples = []
for e in self.trainExamplesHistory:
trainExamples.extend(e)
shuffle(trainExamples)
# training new network, keeping a copy of the old one
self.current_agent.save(
os.path.join(self.args.checkpoint, 'temp.pth.tar'))
self.previous_agent.restore(
os.path.join(self.args.checkpoint, 'temp.pth.tar'))
self.current_agent.learn(trainExamples)
####################
logger.info('Step3: evaluate test dataset in parallel...')
cnt = 0
# update weights of remote actors to the latest weights, and ask them to evaluate assigned test dataset
split_datas = []
for i, data in enumerate(
split_group(
self.test_dataset,
len(self.test_dataset) // self.args.actors_num)):
split_datas.append(data)
cnt += len(data)
weights = self.current_agent.get_weights()
future_object_ids = [remote_actor.evaluate_test_dataset(
weights, data) \
for data, remote_actor in zip(split_datas, self.remote_actors)]
results = [
future_object.get() for future_object in future_object_ids
]
perfect_moves_cnt, good_moves_cnt = 0, 0
# wait for all remote actors (a total of self.args.actors_num) to return the evaluating results
for result in results:
(perfect_moves, good_moves) = result
perfect_moves_cnt += perfect_moves
good_moves_cnt += good_moves
logger.info('perfect moves rate: {}, good moves rate: {}'.format(
perfect_moves_cnt / cnt, good_moves_cnt / cnt))
tensorboard.add_scalar('perfect_moves_rate',
perfect_moves_cnt / cnt, iteration)
tensorboard.add_scalar('good_moves_rate', good_moves_cnt / cnt,
iteration)
####################
logger.info(
'Step4: pitting against previous generation in parallel...')
# transfer weights of previous generation and current generation to the remote actors, and ask them to pit.
games_num_each_actor = self.args.arenaCompare // self.args.actors_num
pre_weights = self.previous_agent.get_weights()
cur_weights = self.current_agent.get_weights()
future_object_ids = [remote_actor.pitting(
pre_weights,
cur_weights, games_num_each_actor) \
for remote_actor in self.remote_actors]
results = [
future_object.get() for future_object in future_object_ids
]
previous_wins, current_wins, draws = 0, 0, 0
for result in results:
(pwins_, cwins_, draws_) = result
previous_wins += pwins_
current_wins += cwins_
draws += draws_
logger.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' %
(current_wins, previous_wins, draws))
if previous_wins + current_wins == 0 or float(current_wins) / (
previous_wins + current_wins) < self.args.updateThreshold:
logger.info('REJECTING NEW MODEL')
self.current_agent.restore(
os.path.join(self.args.checkpoint, 'temp.pth.tar'))
else:
logger.info('ACCEPTING NEW MODEL')
self.current_agent.save(
os.path.join(self.args.checkpoint, 'best.pth.tar'))
self.current_agent.save(
os.path.join(self.args.checkpoint,
self.getCheckpointFile(iteration)))
def getCheckpointFile(self, iteration):
return 'checkpoint_' + str(iteration) + '.pth.tar'
def saveTrainExamples(self, iteration):
folder = self.args.checkpoint
if not os.path.exists(folder):
os.makedirs(folder)
filename = os.path.join(
folder,
self.getCheckpointFile(iteration) + ".examples")
with open(filename, "wb+") as f:
Pickler(f).dump(self.trainExamplesHistory)
f.closed
def loadModel(self):
self.current_agent.restore(
os.path.join(self.args.load_folder_file[0],
self.args.load_folder_file[1]))
def loadTrainExamples(self):
modelFile = os.path.join(self.args.load_folder_file[0],
self.args.load_folder_file[1])
examplesFile = modelFile + ".examples"
if not os.path.isfile(examplesFile):
logger.warning(
"File {} with trainExamples not found!".format(examplesFile))
r = input("Continue? [y|n]")
if r != "y":
sys.exit()
else:
logger.info("File with trainExamples found. Loading it...")
with open(examplesFile, "rb") as f:
self.trainExamplesHistory = Unpickler(f).load()
logger.info('Loading done!')
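# --- Illustrative sketch (not part of the training pipeline) ---
# The fields Coach reads from `args`; every value below is a placeholder and the
# real entry point is expected to build this configuration itself.
if __name__ == "__main__":
    from types import SimpleNamespace
    example_args = SimpleNamespace(
        master_address="localhost:8010",  # xparl cluster address
        actors_num=4,                     # number of parallel remote actors
        numIters=10,                      # outer training iterations
        numEps=100,                       # self-play episodes per iteration
        numItersForTrainExamplesHistory=20,
        arenaCompare=50,                  # games when pitting new vs. previous agent
        updateThreshold=0.6,              # win rate needed to accept the new agent
        checkpoint="./saved_model",
        load_folder_file=("./saved_model", "best.pth.tar"),
    )
    # Coach(game, example_args).learn() would start training, given a `game` object.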
|
pororo/tasks/morph_inflection.py | jayten42/pororo | 1,137 | 11071885 | """Morphological Inflection related modeling class"""
import pickle
from typing import Optional
from pororo.tasks.utils.base import PororoFactoryBase, PororoSimpleBase
from pororo.tasks.utils.download_utils import download_or_load
class PororoInflectionFactory(PororoFactoryBase):
"""
Conduct Morphological inflection
English (`enparadigm`)
- dataset: TBU
- metric: N/A
Korean (`koparadigm`)
- dataset: KoParadigm (Park et al. 2020)
- metric: N/A
Japanese (`japaradigm`)
- dataset: TBU
- metric: N/A
Args:
text (str): input text to be morphologically inflected
Returns:
List[Tuple[str, Tuple[str, str]]]: morphogical inflection token list
Examples:
>>> inflection = Pororo(task="inflection", lang="ko")
>>> inflection("곱")
[['Action Verb', [('거나', '곱거나'), ('거늘', '곱거늘'), ('거니', '곱거니') ...]]]
>>> inflection = Pororo(task="inflection", lang="en")
>>> inflection("love")
{'NN': [('loves', 'NNS')], 'VB': [('loves', 'VBZ'), ('loved', 'VBD'), ('loved', 'VBN'), ('loving', 'VBG')]}
>>> inflection = Pororo(task="inflection", lang="ja")
>>> inflection("あえぐ")
{'verb': [('あえが', '未然形'), ('あえご', '未然ウ接続'), ('あえぎ', '連用形'), ('あえい', '連用タ接続'), ('あえげ', '仮定形'), ('あえげ', '命令e'), ('あえぎゃ', '仮定縮約1')]}
"""
def __init__(self, task: str, lang: str, model: Optional[str]):
super().__init__(task, lang, model)
@staticmethod
def get_available_langs():
return ["en", "ko", "ja"]
@staticmethod
def get_available_models():
return {
"en": ["enparadigm"],
"ko": ["koparadigm"],
"ja": ["japaradigm"],
}
def load(self, device: int):
"""
Load user-selected task-specific model
Args:
device (str): device information
Returns:
object: User-selected task-specific model
"""
if self.config.n_model == "koparadigm":
try:
from koparadigm import Paradigm
except ModuleNotFoundError as error:
raise error.__class__(
"Please install koparadigm with: `pip install koparadigm`")
model = Paradigm()
return PororoKoParadigm(model, self.config)
if self.config.n_model in ["enparadigm", "japaradigm"]:
model_path = download_or_load(
f"misc/inflection.{self.config.lang}.pickle",
self.config.lang,
)
with open(model_path, "rb") as handle:
model = dict(pickle.load(handle))
return PororoParadigm(model, self.config)
class PororoKoParadigm(PororoSimpleBase):
def __init__(self, model, config):
super().__init__(config)
self._model = model
def predict(self, text: str):
"""
Conduct korean morphological inflection
Args:
text (str): input text to be morphologically inflected
Returns:
List[Tuple[str, Tuple[str, str]]]: morphogical inflection token list
"""
return self._model.conjugate(text)
class PororoParadigm(PororoSimpleBase):
def __init__(self, model, config):
super().__init__(config)
self._model = model
def predict(self, text: str, **kwargs):
"""
Conduct morphological inflection
Args:
text (str): input text to be morphologically inflected
Returns:
List[Tuple[str, Tuple[str, str]]]: morphogical inflection token list
"""
try:
return self._model[text]
except KeyError:
raise KeyError("Un-registered key !")
|
Crypto Git Repository/print_repo_log.py | DazEB2/SimplePyScripts | 117 | 11071894 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
if __name__ == '__main__':
import api
api.print_log(reverse=True)
|
src/test/python/test_preinits.py | seletz/jep | 210 | 11071913 | import unittest
import sys
from jep_pipe import jep_pipe
from jep_pipe import build_java_process_cmd
def containsBug46006():
# cpython 3.10.0 and 3.10.1 are affected by https://bugs.python.org/issue46006.
# Since this bug should not occur in most environments we do not want to stop
# jep from building by failing.
# More discussion can be found at https://github.com/ninia/jep/issues/358
# At the time of this writing the bug has not been fixed and may affect
# upcoming releases such as 3.10.2 and 3.11.
if sys.version_info.major == 3 and sys.version_info.minor == 10:
return sys.version_info.micro == 0 or sys.version_info.micro == 1
class TestPreInits(unittest.TestCase):
    @unittest.skipIf(containsBug46006(), 'Python version contains cpython bug 46006 and may not work with DontWriteBytecodeFlag')
def test_inits(self):
jep_pipe(build_java_process_cmd('jep.test.TestPreInitVariables'))
|
tests/test_dcc.py | gtmanfred/irc3 | 178 | 11071914 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from irc3.testing import BotTestCase
from irc3.compat import asyncio
from irc3.dcc.client import DCCSend
from irc3.dcc.optim import DCCSend as DCCSendOptim
from irc3.plugins.dcc import dcc_command
from irc3 import dcc_event
from irc3 import utils
import tempfile
import pytest
import shutil
import os
log = {'in': [], 'out': []}
def get_extra_info(*args):
return ('127.0.0.1', 4567)
@dcc_event('(?P<data>.*)')
def log_in(bot, client=None, data=None):
log['in'].append((client, data))
@dcc_event('(?P<data>.*)', iotype='out')
def log_out(bot, client=None, data=None):
log['out'].append((client, data))
@dcc_command
def syn(bot, mask, client, args):
"""Ok
%%syn
"""
client.send_line('ack')
def chat_ready(client):
client = client.result()
client.actions(client.mask)
client.send('\x01ACTION syn\x01')
client.send('\x01ACTION help\x01')
client.loop.call_later(.1, client.idle_timeout_reached)
@pytest.mark.usefixtures('cls_event_loop')
class TestChat(BotTestCase):
config = dict(includes=['irc3.plugins.dcc'],
dcc={'ip': '127.0.0.1'})
mask = utils.IrcString('gawel@gawel!<EMAIL>')
dmask = utils.IrcString('gawel@gawel!127.0.0.1')
def callDCCFTU(self, *args, **kwargs):
self.bot = self.callFTU()
self.bot.protocol.transport.get_extra_info = get_extra_info
self.bot.dispatch(':%s PRIVMSG irc3 :!chat' % self.mask)
self.future = asyncio.Future(loop=self.loop)
self.loop.call_later(.1, self.created)
def created(self):
servers = self.bot.dcc.connections['chat']['masks'][self.mask]
self.server = list(servers.values())[0]
print(self.server)
self.client = self.bot.dcc.create(
'chat', self.dmask,
host='127.0.0.1', port=self.server.port)
self.client.ready.add_done_callback(chat_ready)
self.client.closed.add_done_callback(self.future.set_result)
def test_create(self):
self.callDCCFTU()
self.bot.include('irc3.plugins.dcc')
self.bot.include(__name__)
self.loop.run_until_complete(self.future)
proto = self.client
assert proto.transport is not None
info = self.bot.dcc.connections['chat']['masks']['gawel']
assert proto not in info.values()
assert proto.started.result() is proto
assert proto.closed.done()
# the timeout message is sent or not regarding python version.
# we tolerate both
assert len(log['in']) in (5, 6)
assert len(log['out']) == 6
@pytest.mark.usefixtures('cls_event_loop')
class DCCTestCase(BotTestCase):
dmask = utils.IrcString('gawel@gawel!127.0.0.1')
def callDCCFTU(self, *args, **kwargs):
bot = self.callFTU()
self.future = asyncio.Future(loop=self.loop)
bot.protocol.transport.get_extra_info = get_extra_info
self.manager = manager = bot.dcc
self.server = manager.create(*args, **kwargs)
self.server.ready.add_done_callback(self.created)
def createFiles(self):
self.wd = tempfile.mkdtemp(prefix='irc3dcc')
self.addCleanup(shutil.rmtree, self.wd)
self.dst = os.path.join(self.wd, 'dst')
self.src = os.path.join(self.wd, 'src')
with open(self.src, 'wb') as fd:
fd.write(('start%ssend' % ('---' * (1024 * 1024))).encode('ascii'))
def assertFileSent(self):
getsize = os.path.getsize
assert getsize(self.dst), getsize(self.src)
assert getsize(self.dst), getsize(self.src)
with open(self.src, 'rb') as fd:
src = fd.read()
with open(self.dst, 'rb') as fd:
dest = fd.read()
assert src == dest
class TestSend(DCCTestCase):
send_class = DCCSend
def created(self, f):
self.client = self.manager.create(
'get', utils.IrcString('gawel!gawel@host'),
host='127.0.0.1', port=self.server.port,
idle_timeout=10, filepath=self.dst)
self.client.closed.add_done_callback(self.future.set_result)
def test_create(self):
self.createFiles()
self.callDCCFTU(self.send_class, self.dmask, filepath=self.src)
self.loop.run_until_complete(self.future)
proto = self.client
assert proto.transport is not None
info = self.manager.connections['get']['masks'][self.dmask]
assert proto not in info.values()
assert proto.started.result() is proto
assert proto.closed.done()
self.assertFileSent()
class TestSendOptim(TestSend):
send_class = DCCSendOptim
class TestResume(DCCTestCase):
send_class = DCCSend
def created(self, f):
with open(self.dst, 'wb') as fd:
with open(self.src, 'rb') as fdd:
fd.write(fdd.read(1345))
self.client = self.manager.create(
'get', self.dmask,
host='127.0.0.1', port=self.server.port,
idle_timeout=10, filepath=self.dst)
self.client.resume = True
self.manager.resume(self.dmask, self.server.filename_safe,
self.server.port, self.client.offset)
self.client.closed.add_done_callback(self.future.set_result)
def test_create(self):
self.createFiles()
self.callDCCFTU(self.send_class, self.dmask, filepath=self.src)
self.loop.run_until_complete(self.future)
proto = self.client
assert proto.transport is not None
info = self.manager.connections['get']['masks'][self.dmask]
assert proto not in info.values()
assert proto.started.result() is proto
assert proto.closed.done()
self.assertFileSent()
class TestResumeOptim(TestResume):
send_class = DCCSendOptim
class TestSendWithLimit(DCCTestCase):
send_class = DCCSend
def created(self, f):
self.client = self.manager.create(
'get', self.dmask,
host='127.0.0.1', port=self.server.port,
idle_timeout=10, filepath=self.dst)
self.client.closed.add_done_callback(self.future.set_result)
def test_create(self):
self.createFiles()
self.callDCCFTU(self.send_class, self.dmask,
filepath=self.src, limit_rate=64)
self.loop.run_until_complete(self.future)
proto = self.client
assert proto.transport is not None
info = self.manager.connections['get']['masks'][self.dmask]
assert proto not in info.values()
assert proto.started.result() is proto
assert proto.closed.done()
self.assertFileSent()
class TestSendWithLimitOptim(TestSendWithLimit):
send_class = DCCSendOptim
|
models/official/detection/utils/shape_utils.py | GerinTang/tpu | 5,098 | 11071923 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils to manipulate tensor shapes."""
import tensorflow.compat.v1 as tf
def get_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_shape = tensor.get_shape().as_list()
if all(static_shape):
return static_shape
combined_shape = []
dynamic_shape = tf.shape(tensor)
for i, d in enumerate(static_shape):
if d is not None:
combined_shape.append(d)
else:
combined_shape.append(dynamic_shape[i])
return combined_shape
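if __name__ == "__main__":
  # Illustrative sketch (not part of the original utility), assuming TF1-style graph
  # mode: dimension 0 is dynamic, the remaining dimensions are static.
  tf.disable_v2_behavior()
  images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
  # Expected: [<dynamic batch-size tensor>, 224, 224, 3]
  print(get_shape(images))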
|
prepare_distill_dataset.py | CaimdotAIAccount/youtube8mchallenge | 144 | 11071965 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary for combine model output and model input into one set of files."""
import os
import time
import numpy as np
import tensorflow as tf
from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging
import json
import utils
import readers
FLAGS = flags.FLAGS
if __name__ == '__main__':
flags.DEFINE_string("output_dir", "",
"The file to save the predictions to.")
flags.DEFINE_string(
"input_data_pattern", "",
"File globs defining the input dataset in tensorflow.SequenceExample format.")
flags.DEFINE_string("input_feature_names", "mean_rgb,mean_audio", "Name of the feature "
"to use for training.")
flags.DEFINE_string("input_feature_sizes", "1024,128", "Length of the feature vectors.")
flags.DEFINE_string("prediction_feature_names", "predictions", "Name of the feature "
"to use for training.")
flags.DEFINE_integer("batch_size", 256,
"How many examples to process per batch.")
flags.DEFINE_integer("file_size", 512,
"Number of samples per record file.")
flags.DEFINE_string("model_file", "", "Seed model used to do inference.")
flags.DEFINE_integer("num_readers", 12,
"How many threads to use for reading input files.")
def get_input_evaluation_tensors(reader,
data_pattern,
batch_size=1024,
num_readers=1):
"""Creates the section of the graph which reads the evaluation data.
Args:
reader: A class which parses the training data.
data_pattern: A 'glob' style path to the data files.
batch_size: How many examples to process at a time.
num_readers: How many I/O threads to use.
Returns:
A tuple containing the features tensor, labels tensor, and optionally a
tensor containing the number of frames per video. The exact dimensions
depend on the reader being used.
Raises:
IOError: If no files matching the given pattern were found.
"""
logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
with tf.name_scope("eval_input"):
files = gfile.Glob(data_pattern)
if not files:
raise IOError("Unable to find the evaluation files.")
logging.info("number of evaluation files: " + str(len(files)))
filename_queue = tf.train.string_input_producer(
files, shuffle=False, num_epochs=1)
eval_data = [
reader.prepare_reader(filename_queue) for _ in range(num_readers)
]
return tf.train.batch_join(
eval_data,
batch_size=batch_size,
capacity=4 * batch_size,
allow_smaller_final_batch=True,
enqueue_many=True)
# Prepare the inputs
def fetc_inputs(reader,
eval_data_pattern,
batch_size=1024,
num_readers=1):
video_id_batch, model_input_raw, labels_batch, num_frames, quant_feats = get_input_evaluation_tensors(reader,
eval_data_pattern,
batch_size=batch_size,
num_readers=num_readers)
return video_id_batch, model_input_raw, labels_batch, num_frames, quant_feats
# Builds the record strucutre
def get_output_feature(video_id, video_label, video_rgb, video_audio, video_prediction, video_num_frame):
_bytes_feature_list = lambda x: tf.train.Feature(bytes_list=tf.train.BytesList(value=[x.tobytes()]))
example = tf.train.SequenceExample(
context = tf.train.Features(feature={
"video_id": tf.train.Feature(bytes_list=tf.train.BytesList(value=[video_id])),
"labels": tf.train.Feature(int64_list=tf.train.Int64List(value=video_label)),
"predictions": tf.train.Feature(float_list=tf.train.FloatList(value=video_prediction))
}),
feature_lists = tf.train.FeatureLists(feature_list={
"rgb": tf.train.FeatureList(feature=map(_bytes_feature_list, video_rgb[:video_num_frame])),
"audio": tf.train.FeatureList(feature=map(_bytes_feature_list, video_audio[:video_num_frame])),
})
)
return example
# Write the records
def write_to_record(video_ids, video_labels, video_rgbs, video_audios, video_predictions,
video_num_frames, filenum, num_examples_processed):
writer = tf.python_io.TFRecordWriter(FLAGS.output_dir + '/' + 'predictions-%05d.tfrecord' % filenum)
for i in range(num_examples_processed):
video_id = video_ids[i]
video_label = np.nonzero(video_labels[i,:])[0]
video_rgb = video_rgbs[i,:]
video_audio = video_audios[i,:]
video_prediction = video_predictions[i]
video_num_frame = video_num_frames[i]
example = get_output_feature(video_id, video_label, video_rgb, video_audio, video_prediction, video_num_frame)
serialized = example.SerializeToString()
writer.write(serialized)
writer.close()
def inference_loop():
model_path = FLAGS.model_file
assert os.path.isfile(model_path + ".meta"), "Specified model does not exist."
model_flags_path = os.path.join(os.path.dirname(model_path), "model_flags.json")
directory = FLAGS.output_dir # We will store the predictions here.
if not os.path.exists(directory):
os.makedirs(directory)
else:
raise IOError("Output path exists! path='" + directory + "'")
if not os.path.exists(model_flags_path):
raise IOError(("Cannot find file %s. Did you run train.py on the same "
"--train_dir?") % model_flags_path)
flags_dict = json.loads(open(model_flags_path).read())
feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(flags_dict["feature_names"],
flags_dict["feature_sizes"])
if flags_dict["frame_features"]:
reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
feature_sizes=feature_sizes,
prepare_distill=True)
else:
raise NotImplementedError
video_ids_batch, inputs_batch, labels_batch, num_frames, quant_inpt = fetc_inputs(reader,
FLAGS.input_data_pattern,
FLAGS.batch_size,
FLAGS.num_readers)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
meta_graph_location = model_path + ".meta"
logging.info("loading meta-graph: " + meta_graph_location)
with tf.device("/gpu:0"):
saver = tf.train.import_meta_graph(meta_graph_location, clear_devices=True)
saver.restore(sess, model_path)
input_tensor = tf.get_collection("input_batch_raw")[0]
num_frames_tensor = tf.get_collection("num_frames")[0]
predictions_batch = tf.get_collection("predictions")[0]
# Workaround for num_epochs issue.
def set_up_init_ops(variables):
init_op_list = []
for variable in list(variables):
if "train_input" in variable.name:
init_op_list.append(tf.assign(variable, 1))
variables.remove(variable)
init_op_list.append(tf.variables_initializer(variables))
return init_op_list
sess.run(set_up_init_ops(tf.get_collection_ref(tf.GraphKeys.LOCAL_VARIABLES)))
# Start the queue runners.
fetches1 = [video_ids_batch, labels_batch, inputs_batch, num_frames, quant_inpt]
fetches2 = [predictions_batch]
coord = tf.train.Coordinator()
start_time = time.time()
video_ids = []
video_labels = []
video_rgbs = []
video_audios = []
video_predictions = []
video_num_frames = []
filenum = 0
num_examples_processed = 0
total_num_examples_processed = 0
try:
threads = []
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(qr.create_threads(
sess, coord=coord, daemon=True,
start=True))
while not coord.should_stop():
ids_val = None
ids_val, labels_val, inputs_val, num_frames_val, quant_inpt_val = sess.run(fetches1)
rgbs_val, audios_val = quant_inpt_val[:, :, :1024].copy(), quant_inpt_val[:, :, 1024:].copy()
predictions_val = sess.run(fetches2, feed_dict={input_tensor: inputs_val,
num_frames_tensor: num_frames_val})[0]
video_ids.append(ids_val)
video_labels.append(labels_val)
video_rgbs.append(rgbs_val)
video_audios.append(audios_val)
video_predictions.append(predictions_val)
video_num_frames.append(num_frames_val)
num_examples_processed += len(ids_val)
ids_shape = ids_val.shape[0]
inputs_shape = inputs_val.shape[0]
predictions_shape = predictions_val.shape[0]
assert ids_shape == inputs_shape == predictions_shape, "tensor ids(%d), inputs(%d) and predictions(%d) should have equal rows" % (
ids_shape, inputs_shape, predictions_shape)
ids_val = None
if num_examples_processed >= FLAGS.file_size:
assert num_examples_processed == FLAGS.file_size, "num_examples_processed should be equal to %d" % FLAGS.file_size
video_ids = np.concatenate(video_ids, axis=0)
video_labels = np.concatenate(video_labels, axis=0)
video_rgbs = np.concatenate(video_rgbs, axis=0)
video_audios = np.concatenate(video_audios, axis=0)
video_num_frames = np.concatenate(video_num_frames, axis=0)
video_predictions = np.concatenate(video_predictions, axis=0)
write_to_record(video_ids, video_labels, video_rgbs, video_audios, video_predictions,
video_num_frames, filenum, num_examples_processed)
video_ids = []
video_labels = []
video_rgbs = []
video_audios = []
video_predictions = []
video_num_frames = []
filenum += 1
total_num_examples_processed += num_examples_processed
now = time.time()
logging.info("num examples processed: " + str(
num_examples_processed) + " elapsed seconds: " + "{0:.2f}".format(now - start_time))
num_examples_processed = 0
except tf.errors.OutOfRangeError as e:
if ids_val is not None:
video_ids.append(ids_val)
video_labels.append(labels_val)
video_rgbs.append(rgbs_val)
video_audios.append(audios_val)
video_predictions.append(predictions_val)
video_num_frames.append(num_frames_val)
num_examples_processed += len(ids_val)
if 0 < num_examples_processed <= FLAGS.file_size:
video_ids = np.concatenate(video_ids, axis=0)
video_labels = np.concatenate(video_labels, axis=0)
video_rgbs = np.concatenate(video_rgbs, axis=0)
video_audios = np.concatenate(video_audios, axis=0)
video_num_frames = np.concatenate(video_num_frames, axis=0)
video_predictions = np.concatenate(video_predictions, axis=0)
write_to_record(video_ids, video_labels, video_rgbs, video_audios, video_predictions,
video_num_frames, filenum, num_examples_processed)
total_num_examples_processed += num_examples_processed
now = time.time()
logging.info(
"num examples processed: " + str(total_num_examples_processed) + " elapsed seconds: " + "{0:.2f}".format(
now - start_time))
      logging.info(
          "Done with inference. %d samples were written to %s" % (total_num_examples_processed, FLAGS.output_dir))
# except Exception as e: # pylint: disable=broad-except
# logging.info("Unexpected exception: " + str(e))
finally:
coord.request_stop()
coord.join(threads, stop_grace_period_secs=10)
def main(unused_argv):
logging.set_verbosity(tf.logging.INFO)
inference_loop()
if __name__ == "__main__":
app.run()
|
payment_lib_examples/alipay-sdk-python/virtual_environment/lib/python3.4/site-packages/alipay/aop/api/domain/OpenApiRefundFundDetailPojo.py | cuhk-mobitec/S3KVetter | 213 | 11071983 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class OpenApiRefundFundDetailPojo(object):
def __init__(self):
self._funds = None
self._trans_in = None
self._trans_in_type = None
self._type = None
@property
def funds(self):
return self._funds
@funds.setter
def funds(self, value):
if isinstance(value, list):
self._funds = list()
for i in value:
self._funds.append(i)
@property
def trans_in(self):
return self._trans_in
@trans_in.setter
def trans_in(self, value):
self._trans_in = value
@property
def trans_in_type(self):
return self._trans_in_type
@trans_in_type.setter
def trans_in_type(self, value):
self._trans_in_type = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
def to_alipay_dict(self):
params = dict()
if self.funds:
if isinstance(self.funds, list):
for i in range(0, len(self.funds)):
element = self.funds[i]
if hasattr(element, 'to_alipay_dict'):
self.funds[i] = element.to_alipay_dict()
if hasattr(self.funds, 'to_alipay_dict'):
params['funds'] = self.funds.to_alipay_dict()
else:
params['funds'] = self.funds
if self.trans_in:
if hasattr(self.trans_in, 'to_alipay_dict'):
params['trans_in'] = self.trans_in.to_alipay_dict()
else:
params['trans_in'] = self.trans_in
if self.trans_in_type:
if hasattr(self.trans_in_type, 'to_alipay_dict'):
params['trans_in_type'] = self.trans_in_type.to_alipay_dict()
else:
params['trans_in_type'] = self.trans_in_type
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = OpenApiRefundFundDetailPojo()
if 'funds' in d:
o.funds = d['funds']
if 'trans_in' in d:
o.trans_in = d['trans_in']
if 'trans_in_type' in d:
o.trans_in_type = d['trans_in_type']
if 'type' in d:
o.type = d['type']
return o
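# Illustrative round trip (hypothetical values, not part of the SDK):
#   pojo = OpenApiRefundFundDetailPojo.from_alipay_dict(
#       {"funds": ["refund"], "trans_in": "2088101117955611",
#        "trans_in_type": "userId", "type": "TRANS_ACCOUNT"})
#   pojo.to_alipay_dict()
#   # -> {'funds': ['refund'], 'trans_in': '2088101117955611',
#   #     'trans_in_type': 'userId', 'type': 'TRANS_ACCOUNT'}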
|
sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2018_09_30_preview/models/_models_py3.py | rsdoherty/azure-sdk-for-python | 2,728 | 11072009 | <reponame>rsdoherty/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Dict, List, Optional, Union
import msrest.serialization
from ._container_service_client_enums import *
class CloudErrorBody(msrest.serialization.Model):
"""An error response from the Container service.
:param code: An identifier for the error. Codes are invariant and are intended to be consumed
programmatically.
:type code: str
:param message: A message describing the error, intended to be suitable for display in a user
interface.
:type message: str
:param target: The target of the particular error. For example, the name of the property in
error.
:type target: str
:param details: A list of additional details about the error.
:type details: list[~azure.mgmt.containerservice.v2018_09_30_preview.models.CloudErrorBody]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CloudErrorBody]'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
target: Optional[str] = None,
details: Optional[List["CloudErrorBody"]] = None,
**kwargs
):
super(CloudErrorBody, self).__init__(**kwargs)
self.code = code
self.message = message
self.target = target
self.details = details
class NetworkProfile(msrest.serialization.Model):
"""Represents the OpenShift networking configuration.
:param vnet_cidr: CIDR for the OpenShift Vnet.
:type vnet_cidr: str
:param peer_vnet_id: CIDR of the Vnet to peer.
:type peer_vnet_id: str
"""
_attribute_map = {
'vnet_cidr': {'key': 'vnetCidr', 'type': 'str'},
'peer_vnet_id': {'key': 'peerVnetId', 'type': 'str'},
}
def __init__(
self,
*,
vnet_cidr: Optional[str] = "10.0.0.0/8",
peer_vnet_id: Optional[str] = None,
**kwargs
):
super(NetworkProfile, self).__init__(**kwargs)
self.vnet_cidr = vnet_cidr
self.peer_vnet_id = peer_vnet_id
class Resource(msrest.serialization.Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
class OpenShiftManagedCluster(Resource):
"""OpenShift Managed cluster.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param plan: Define the resource plan as required by ARM for billing purposes.
:type plan: ~azure.mgmt.containerservice.v2018_09_30_preview.models.PurchasePlan
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:param open_shift_version: Version of OpenShift specified when creating the cluster.
:type open_shift_version: str
:param public_hostname: Optional user-specified FQDN for OpenShift API server.
:type public_hostname: str
:param fqdn: User-specified FQDN for OpenShift API server loadbalancer internal hostname.
:type fqdn: str
:param network_profile: Configuration for OpenShift networking.
:type network_profile: ~azure.mgmt.containerservice.v2018_09_30_preview.models.NetworkProfile
:param router_profiles: Configuration for OpenShift router(s).
:type router_profiles:
list[~azure.mgmt.containerservice.v2018_09_30_preview.models.OpenShiftRouterProfile]
:param master_pool_profile: Configuration for OpenShift master VMs.
:type master_pool_profile:
~azure.mgmt.containerservice.v2018_09_30_preview.models.OpenShiftManagedClusterMasterPoolProfile
:param agent_pool_profiles: Configuration of OpenShift cluster VMs.
:type agent_pool_profiles:
list[~azure.mgmt.containerservice.v2018_09_30_preview.models.OpenShiftManagedClusterAgentPoolProfile]
:param auth_profile: Configures OpenShift authentication.
:type auth_profile:
~azure.mgmt.containerservice.v2018_09_30_preview.models.OpenShiftManagedClusterAuthProfile
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'plan': {'key': 'plan', 'type': 'PurchasePlan'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'open_shift_version': {'key': 'properties.openShiftVersion', 'type': 'str'},
'public_hostname': {'key': 'properties.publicHostname', 'type': 'str'},
'fqdn': {'key': 'properties.fqdn', 'type': 'str'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
'router_profiles': {'key': 'properties.routerProfiles', 'type': '[OpenShiftRouterProfile]'},
'master_pool_profile': {'key': 'properties.masterPoolProfile', 'type': 'OpenShiftManagedClusterMasterPoolProfile'},
'agent_pool_profiles': {'key': 'properties.agentPoolProfiles', 'type': '[OpenShiftManagedClusterAgentPoolProfile]'},
'auth_profile': {'key': 'properties.authProfile', 'type': 'OpenShiftManagedClusterAuthProfile'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
plan: Optional["PurchasePlan"] = None,
open_shift_version: Optional[str] = None,
public_hostname: Optional[str] = None,
fqdn: Optional[str] = None,
network_profile: Optional["NetworkProfile"] = None,
router_profiles: Optional[List["OpenShiftRouterProfile"]] = None,
master_pool_profile: Optional["OpenShiftManagedClusterMasterPoolProfile"] = None,
agent_pool_profiles: Optional[List["OpenShiftManagedClusterAgentPoolProfile"]] = None,
auth_profile: Optional["OpenShiftManagedClusterAuthProfile"] = None,
**kwargs
):
super(OpenShiftManagedCluster, self).__init__(location=location, tags=tags, **kwargs)
self.plan = plan
self.provisioning_state = None
self.open_shift_version = open_shift_version
self.public_hostname = public_hostname
self.fqdn = fqdn
self.network_profile = network_profile
self.router_profiles = router_profiles
self.master_pool_profile = master_pool_profile
self.agent_pool_profiles = agent_pool_profiles
self.auth_profile = auth_profile
class OpenShiftManagedClusterBaseIdentityProvider(msrest.serialization.Model):
"""Structure for any Identity provider.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: OpenShiftManagedClusterAADIdentityProvider.
All required parameters must be populated in order to send to Azure.
    :param kind: Required. The kind of the provider. Constant filled by server.
:type kind: str
"""
_validation = {
'kind': {'required': True},
}
_attribute_map = {
'kind': {'key': 'kind', 'type': 'str'},
}
_subtype_map = {
'kind': {'AADIdentityProvider': 'OpenShiftManagedClusterAADIdentityProvider'}
}
def __init__(
self,
**kwargs
):
super(OpenShiftManagedClusterBaseIdentityProvider, self).__init__(**kwargs)
self.kind = None # type: Optional[str]
class OpenShiftManagedClusterAADIdentityProvider(OpenShiftManagedClusterBaseIdentityProvider):
"""Defines the Identity provider for MS AAD.
All required parameters must be populated in order to send to Azure.
    :param kind: Required. The kind of the provider. Constant filled by server.
:type kind: str
:param client_id: The clientId password associated with the provider.
:type client_id: str
:param secret: The secret password associated with the provider.
:type secret: str
:param tenant_id: The tenantId associated with the provider.
:type tenant_id: str
:param customer_admin_group_id: The groupId to be granted cluster admin role.
:type customer_admin_group_id: str
"""
_validation = {
'kind': {'required': True},
}
_attribute_map = {
'kind': {'key': 'kind', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
'secret': {'key': 'secret', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'customer_admin_group_id': {'key': 'customerAdminGroupId', 'type': 'str'},
}
def __init__(
self,
*,
client_id: Optional[str] = None,
secret: Optional[str] = None,
tenant_id: Optional[str] = None,
customer_admin_group_id: Optional[str] = None,
**kwargs
):
super(OpenShiftManagedClusterAADIdentityProvider, self).__init__(**kwargs)
self.kind = 'AADIdentityProvider' # type: str
self.client_id = client_id
self.secret = secret
self.tenant_id = tenant_id
self.customer_admin_group_id = customer_admin_group_id
class OpenShiftManagedClusterAgentPoolProfile(msrest.serialization.Model):
"""Defines the configuration of the OpenShift cluster VMs.
All required parameters must be populated in order to send to Azure.
:param name: Required. Unique name of the pool profile in the context of the subscription and
resource group.
:type name: str
:param count: Required. Number of agents (VMs) to host docker containers.
:type count: int
:param vm_size: Required. Size of agent VMs. Possible values include: "Standard_D2s_v3",
"Standard_D4s_v3", "Standard_D8s_v3", "Standard_D16s_v3", "Standard_D32s_v3",
"Standard_D64s_v3", "Standard_DS4_v2", "Standard_DS5_v2", "Standard_F8s_v2",
"Standard_F16s_v2", "Standard_F32s_v2", "Standard_F64s_v2", "Standard_F72s_v2", "Standard_F8s",
"Standard_F16s", "Standard_E4s_v3", "Standard_E8s_v3", "Standard_E16s_v3", "Standard_E20s_v3",
"Standard_E32s_v3", "Standard_E64s_v3", "Standard_GS2", "Standard_GS3", "Standard_GS4",
"Standard_GS5", "Standard_DS12_v2", "Standard_DS13_v2", "Standard_DS14_v2", "Standard_DS15_v2",
"Standard_L4s", "Standard_L8s", "Standard_L16s", "Standard_L32s".
:type vm_size: str or
~azure.mgmt.containerservice.v2018_09_30_preview.models.OpenShiftContainerServiceVMSize
:param subnet_cidr: Subnet CIDR for the peering.
:type subnet_cidr: str
    :param os_type: OsType to be used to specify os type. Choose from Linux and Windows. Defaults to
Linux. Possible values include: "Linux", "Windows". Default value: "Linux".
:type os_type: str or ~azure.mgmt.containerservice.v2018_09_30_preview.models.OSType
:param role: Define the role of the AgentPoolProfile. Possible values include: "compute",
"infra".
:type role: str or
~azure.mgmt.containerservice.v2018_09_30_preview.models.OpenShiftAgentPoolProfileRole
"""
_validation = {
'name': {'required': True},
'count': {'required': True},
'vm_size': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'count': {'key': 'count', 'type': 'int'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'subnet_cidr': {'key': 'subnetCidr', 'type': 'str'},
'os_type': {'key': 'osType', 'type': 'str'},
'role': {'key': 'role', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
count: int,
vm_size: Union[str, "OpenShiftContainerServiceVMSize"],
subnet_cidr: Optional[str] = "10.0.0.0/24",
os_type: Optional[Union[str, "OSType"]] = "Linux",
role: Optional[Union[str, "OpenShiftAgentPoolProfileRole"]] = None,
**kwargs
):
super(OpenShiftManagedClusterAgentPoolProfile, self).__init__(**kwargs)
self.name = name
self.count = count
self.vm_size = vm_size
self.subnet_cidr = subnet_cidr
self.os_type = os_type
self.role = role
class OpenShiftManagedClusterAuthProfile(msrest.serialization.Model):
"""Defines all possible authentication profiles for the OpenShift cluster.
:param identity_providers: Type of authentication profile to use.
:type identity_providers:
list[~azure.mgmt.containerservice.v2018_09_30_preview.models.OpenShiftManagedClusterIdentityProvider]
"""
_attribute_map = {
'identity_providers': {'key': 'identityProviders', 'type': '[OpenShiftManagedClusterIdentityProvider]'},
}
def __init__(
self,
*,
identity_providers: Optional[List["OpenShiftManagedClusterIdentityProvider"]] = None,
**kwargs
):
super(OpenShiftManagedClusterAuthProfile, self).__init__(**kwargs)
self.identity_providers = identity_providers
class OpenShiftManagedClusterIdentityProvider(msrest.serialization.Model):
"""Defines the configuration of the identity providers to be used in the OpenShift cluster.
:param name: Name of the provider.
:type name: str
:param provider: Configuration of the provider.
:type provider:
~azure.mgmt.containerservice.v2018_09_30_preview.models.OpenShiftManagedClusterBaseIdentityProvider
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'provider': {'key': 'provider', 'type': 'OpenShiftManagedClusterBaseIdentityProvider'},
}
def __init__(
self,
*,
name: Optional[str] = None,
provider: Optional["OpenShiftManagedClusterBaseIdentityProvider"] = None,
**kwargs
):
super(OpenShiftManagedClusterIdentityProvider, self).__init__(**kwargs)
self.name = name
self.provider = provider
class OpenShiftManagedClusterListResult(msrest.serialization.Model):
"""The response from the List OpenShift Managed Clusters operation.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The list of OpenShift managed clusters.
:type value:
list[~azure.mgmt.containerservice.v2018_09_30_preview.models.OpenShiftManagedCluster]
:ivar next_link: The URL to get the next set of OpenShift managed cluster results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[OpenShiftManagedCluster]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["OpenShiftManagedCluster"]] = None,
**kwargs
):
super(OpenShiftManagedClusterListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class OpenShiftManagedClusterMasterPoolProfile(msrest.serialization.Model):
"""OpenShiftManagedClusterMaterPoolProfile contains configuration for OpenShift master VMs.
All required parameters must be populated in order to send to Azure.
:param name: Unique name of the master pool profile in the context of the subscription and
resource group.
:type name: str
:param count: Required. Number of masters (VMs) to host docker containers. The default value is
3.
:type count: int
:param vm_size: Required. Size of agent VMs. Possible values include: "Standard_D2s_v3",
"Standard_D4s_v3", "Standard_D8s_v3", "Standard_D16s_v3", "Standard_D32s_v3",
"Standard_D64s_v3", "Standard_DS4_v2", "Standard_DS5_v2", "Standard_F8s_v2",
"Standard_F16s_v2", "Standard_F32s_v2", "Standard_F64s_v2", "Standard_F72s_v2", "Standard_F8s",
"Standard_F16s", "Standard_E4s_v3", "Standard_E8s_v3", "Standard_E16s_v3", "Standard_E20s_v3",
"Standard_E32s_v3", "Standard_E64s_v3", "Standard_GS2", "Standard_GS3", "Standard_GS4",
"Standard_GS5", "Standard_DS12_v2", "Standard_DS13_v2", "Standard_DS14_v2", "Standard_DS15_v2",
"Standard_L4s", "Standard_L8s", "Standard_L16s", "Standard_L32s".
:type vm_size: str or
~azure.mgmt.containerservice.v2018_09_30_preview.models.OpenShiftContainerServiceVMSize
:param subnet_cidr: Subnet CIDR for the peering.
:type subnet_cidr: str
    :param os_type: OsType to be used to specify os type. Choose from Linux and Windows. Defaults to
Linux. Possible values include: "Linux", "Windows". Default value: "Linux".
:type os_type: str or ~azure.mgmt.containerservice.v2018_09_30_preview.models.OSType
"""
_validation = {
'count': {'required': True},
'vm_size': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'count': {'key': 'count', 'type': 'int'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'subnet_cidr': {'key': 'subnetCidr', 'type': 'str'},
'os_type': {'key': 'osType', 'type': 'str'},
}
def __init__(
self,
*,
count: int,
vm_size: Union[str, "OpenShiftContainerServiceVMSize"],
name: Optional[str] = None,
subnet_cidr: Optional[str] = None,
os_type: Optional[Union[str, "OSType"]] = "Linux",
**kwargs
):
super(OpenShiftManagedClusterMasterPoolProfile, self).__init__(**kwargs)
self.name = name
self.count = count
self.vm_size = vm_size
self.subnet_cidr = subnet_cidr
self.os_type = os_type
class OpenShiftRouterProfile(msrest.serialization.Model):
"""Represents an OpenShift router.
Variables are only populated by the server, and will be ignored when sending a request.
:param name: Name of the router profile.
:type name: str
:param public_subdomain: DNS subdomain for OpenShift router.
:type public_subdomain: str
:ivar fqdn: Auto-allocated FQDN for the OpenShift router.
:vartype fqdn: str
"""
_validation = {
'fqdn': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'public_subdomain': {'key': 'publicSubdomain', 'type': 'str'},
'fqdn': {'key': 'fqdn', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
public_subdomain: Optional[str] = None,
**kwargs
):
super(OpenShiftRouterProfile, self).__init__(**kwargs)
self.name = name
self.public_subdomain = public_subdomain
self.fqdn = None
class PurchasePlan(msrest.serialization.Model):
"""Used for establishing the purchase context of any 3rd Party artifact through MarketPlace.
:param name: The plan ID.
:type name: str
:param product: Specifies the product of the image from the marketplace. This is the same value
as Offer under the imageReference element.
:type product: str
:param promotion_code: The promotion code.
:type promotion_code: str
    :param publisher: The publisher ID.
:type publisher: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'product': {'key': 'product', 'type': 'str'},
'promotion_code': {'key': 'promotionCode', 'type': 'str'},
'publisher': {'key': 'publisher', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
product: Optional[str] = None,
promotion_code: Optional[str] = None,
publisher: Optional[str] = None,
**kwargs
):
super(PurchasePlan, self).__init__(**kwargs)
self.name = name
self.product = product
self.promotion_code = promotion_code
self.publisher = publisher
class TagsObject(msrest.serialization.Model):
"""Tags object for patch operations.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(TagsObject, self).__init__(**kwargs)
self.tags = tags
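# Illustrative construction of the models above (hypothetical values; only the
# required location/count/vm_size fields are validated by these classes):
#   cluster = OpenShiftManagedCluster(
#       location="eastus",
#       open_shift_version="v3.11",
#       network_profile=NetworkProfile(vnet_cidr="10.0.0.0/8"),
#       agent_pool_profiles=[OpenShiftManagedClusterAgentPoolProfile(
#           name="compute", count=3, vm_size="Standard_D4s_v3", role="compute")],
#   )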
|
tools/cygprofile/patch_orderfile.py | google-ar/chromium | 777 | 11072028 | <gh_stars>100-1000
#!/usr/bin/python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Patch an orderfile.
Starting with a list of symbols in a binary and an orderfile (ordered list of
sections), matches the symbols in the orderfile and augments each symbol with
the symbols residing at the same address (due to having identical code). The
output is a list of section matching rules appropriate for the linker option
-section-ordering-file. These section matching rules include both actual
section names and names with wildcard (*) suffixes.
Note: It is possible to have:
- Several symbols mapping to the same offset in the binary.
- Several offsets for a given symbol (because we strip the ".clone." and other
suffixes)
The general pipeline is:
1. Get the symbol infos (name, offset, size, section) from the binary
2. Get the symbol names from the orderfile
3. Find the orderfile symbol names in the symbols coming from the binary
4. For each symbol found, get all the symbols at the same address
5. Output them to an updated orderfile, with several different prefixes
and suffixes
6. Output catch-all section matching rules for unprofiled methods.
"""
import collections
import logging
import optparse
import sys
import cyglog_to_orderfile
import cygprofile_utils
import symbol_extractor
# Prefixes for the symbols. We strip them from the incoming symbols, and add
# them back in the output file.
_PREFIXES = ('.text.startup.', '.text.hot.', '.text.unlikely.', '.text.')
# Suffixes for the symbols. These are due to method splitting for inlining and
# method cloning for various reasons including constant propagation and
# inter-procedural optimization.
_SUFFIXES = ('.clone.', '.part.', '.isra.', '.constprop.')
def RemoveSuffixes(name):
"""Strips method name suffixes from cloning and splitting.
.clone. comes from cloning in -O3.
.part. comes from partial method splitting for inlining.
.isra. comes from inter-procedural optimizations.
.constprop. is cloning for constant propagation.
"""
for suffix in _SUFFIXES:
name = name.split(suffix)[0]
return name
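# Illustrative examples (added for clarity; symbol names are hypothetical):
#   RemoveSuffixes('_ZN3FooD2Ev.part.1')        -> '_ZN3FooD2Ev'
#   RemoveSuffixes('FooBar.constprop.4.isra.0') -> 'FooBar'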
def _UniqueGenerator(generator):
"""Converts a generator to skip yielding elements already seen.
Example:
@_UniqueGenerator
def Foo():
yield 1
yield 2
yield 1
yield 3
Foo() yields 1,2,3.
"""
def _FilteringFunction(*args, **kwargs):
returned = set()
for item in generator(*args, **kwargs):
if item in returned:
continue
returned.add(item)
yield item
return _FilteringFunction
def _GroupSymbolInfos(symbol_infos):
"""Groups the symbol infos by name and offset.
Args:
symbol_infos: an iterable of SymbolInfo
Returns:
The same output as _GroupSymbolInfosFromBinary.
"""
# Map the addresses to symbols.
offset_to_symbol_infos = collections.defaultdict(list)
name_to_symbol_infos = collections.defaultdict(list)
for symbol in symbol_infos:
symbol = symbol_extractor.SymbolInfo(name=RemoveSuffixes(symbol.name),
offset=symbol.offset,
size=symbol.size,
section=symbol.section)
offset_to_symbol_infos[symbol.offset].append(symbol)
name_to_symbol_infos[symbol.name].append(symbol)
return (dict(offset_to_symbol_infos), dict(name_to_symbol_infos))
def _GroupSymbolInfosFromBinary(binary_filename):
"""Group all the symbols from a binary by name and offset.
Args:
binary_filename: path to the binary.
Returns:
A tuple of dict:
(offset_to_symbol_infos, name_to_symbol_infos):
- offset_to_symbol_infos: {offset: [symbol_info1, ...]}
- name_to_symbol_infos: {name: [symbol_info1, ...]}
"""
symbol_infos = symbol_extractor.SymbolInfosFromBinary(binary_filename)
return _GroupSymbolInfos(symbol_infos)
def _StripPrefix(line):
"""Strips the linker section name prefix from a symbol line.
Args:
line: a line from an orderfile, usually in the form:
.text.SymbolName
Returns:
The symbol, SymbolName in the example above.
"""
for prefix in _PREFIXES:
if line.startswith(prefix):
return line[len(prefix):]
return line # Unprefixed case
def _SectionNameToSymbols(section_name, section_to_symbols_map):
"""Yields all symbols which could be referred to by section_name.
If the section name is present in the map, the names in the map are returned.
Otherwise, any clone annotations and prefixes are stripped from the section
name and the remainder is returned.
"""
if (not section_name or
section_name == '.text' or
section_name.endswith('*')):
return # Don't return anything for catch-all sections
if section_name in section_to_symbols_map:
for symbol in section_to_symbols_map[section_name]:
yield symbol
else:
name = _StripPrefix(section_name)
if name:
yield name
def GetSectionsFromOrderfile(filename):
"""Yields the sections from an orderfile.
Args:
filename: The name of the orderfile.
Yields:
A list of symbol names.
"""
with open(filename, 'r') as f:
for line in f.xreadlines():
line = line.rstrip('\n')
if line:
yield line
@_UniqueGenerator
def GetSymbolsFromOrderfile(filename, section_to_symbols_map):
"""Yields the symbols from an orderfile. Output elements do not repeat.
Args:
filename: The name of the orderfile.
section_to_symbols_map: The mapping from section to symbol names. If a
section name is missing from the mapping, the
symbol name is assumed to be the section name with
prefixes and suffixes stripped.
Yields:
A list of symbol names.
"""
# TODO(lizeb,pasko): Move this method to symbol_extractor.py
for section in GetSectionsFromOrderfile(filename):
for symbol in _SectionNameToSymbols(RemoveSuffixes(section),
section_to_symbols_map):
yield symbol
def _SymbolsWithSameOffset(profiled_symbol, name_to_symbol_info,
offset_to_symbol_info):
"""Expands a symbol to include all symbols with the same offset.
Args:
profiled_symbol: the string symbol name to be expanded.
name_to_symbol_info: {name: [symbol_info1], ...}, as returned by
GetSymbolInfosFromBinary
offset_to_symbol_info: {offset: [symbol_info1, ...], ...}
Returns:
A list of symbol names, or an empty list if profiled_symbol was not in
name_to_symbol_info.
"""
if profiled_symbol not in name_to_symbol_info:
return []
symbol_infos = name_to_symbol_info[profiled_symbol]
expanded = []
for symbol_info in symbol_infos:
expanded += (s.name for s in offset_to_symbol_info[symbol_info.offset])
return expanded
@_UniqueGenerator
def _SectionMatchingRules(section_name, name_to_symbol_infos,
offset_to_symbol_infos, section_to_symbols_map,
symbol_to_sections_map, suffixed_sections):
"""Gets the set of section matching rules for section_name.
These rules will include section_name, but also any sections which may
contain the same code due to cloning, splitting, or identical code folding.
Args:
section_name: The section to expand.
name_to_symbol_infos: {name: [symbol_info1], ...}, as returned by
GetSymbolInfosFromBinary.
offset_to_symbol_infos: {offset: [symbol_info1, ...], ...}
section_to_symbols_map: The mapping from section to symbol name. Missing
section names are treated as per _SectionNameToSymbols.
symbol_to_sections_map: The mapping from symbol name to names of linker
sections containing the symbol. If a symbol isn't in the mapping, the
section names are generated from the set of _PREFIXES with the symbol
name.
suffixed_sections: A set of sections which can have suffixes.
Yields:
Section names including at least section_name.
"""
for name in _ExpandSection(section_name, name_to_symbol_infos,
offset_to_symbol_infos, section_to_symbols_map,
symbol_to_sections_map):
yield name
# Since only a subset of methods (mostly those compiled with O2) ever get
# suffixes, don't emit the wildcards for ones where it won't be helpful.
# Otherwise linking takes too long.
if name in suffixed_sections:
# TODO(lizeb,pasko): instead of just appending .*, append .suffix.* for
# _SUFFIXES. We can't do this right now because that many wildcards
# seems to kill the linker (linking libchrome takes 3 hours). This gets
# almost all the benefit at a much lower link-time cost, but could cause
# problems with unexpected suffixes.
yield name + '.*'
def _ExpandSection(section_name, name_to_symbol_infos, offset_to_symbol_infos,
section_to_symbols_map, symbol_to_sections_map):
"""Yields the set of section names for section_name.
This set will include section_name, but also any sections which may contain
the same code due to identical code folding.
Args:
section_name: The section to expand.
name_to_symbol_infos: {name: [symbol_info1], ...}, as returned by
GetSymbolInfosFromBinary.
offset_to_symbol_infos: {offset: [symbol_info1, ...], ...}
section_to_symbols_map: The mapping from section to symbol name. Missing
section names are treated as per _SectionNameToSymbols.
symbol_to_sections_map: The mapping from symbol name to names of linker
sections containing the symbol. If a symbol isn't in the mapping, the
section names are generated from the set of _PREFIXES with the symbol
name.
Yields:
Section names including at least section_name.
"""
yield section_name
for first_sym in _SectionNameToSymbols(section_name,
section_to_symbols_map):
for symbol in _SymbolsWithSameOffset(first_sym, name_to_symbol_infos,
offset_to_symbol_infos):
if symbol in symbol_to_sections_map:
for section in symbol_to_sections_map[symbol]:
yield section
for prefix in _PREFIXES:
yield prefix + symbol
@_UniqueGenerator
def _ExpandSections(section_names, name_to_symbol_infos,
offset_to_symbol_infos, section_to_symbols_map,
symbol_to_sections_map, suffixed_sections):
"""Gets an ordered set of section matching rules for a list of sections.
Rules will not be repeated.
Args:
section_names: The sections to expand.
name_to_symbol_infos: {name: [symbol_info1], ...}, as returned by
_GroupSymbolInfosFromBinary.
offset_to_symbol_infos: {offset: [symbol_info1, ...], ...}
section_to_symbols_map: The mapping from section to symbol names.
symbol_to_sections_map: The mapping from symbol name to names of linker
sections containing the symbol.
suffixed_sections: A set of sections which can have suffixes.
Yields:
Section matching rules including at least section_names.
"""
for profiled_section in section_names:
for section in _SectionMatchingRules(
profiled_section, name_to_symbol_infos, offset_to_symbol_infos,
section_to_symbols_map, symbol_to_sections_map, suffixed_sections):
yield section
def _CombineSectionListsByPrimaryName(symbol_to_sections_map):
"""Combines values of the symbol_to_sections_map by stripping suffixes.
Example:
{foo: [.text.foo, .text.bar.part.1],
foo.constprop.4: [.text.baz.constprop.3]} ->
{foo: [.text.foo, .text.bar, .text.baz]}
Args:
symbol_to_sections_map: Mapping from symbol name to list of section names
Returns:
The same mapping, but with symbol and section names suffix-stripped.
"""
simplified = {}
for suffixed_symbol, suffixed_sections in symbol_to_sections_map.iteritems():
symbol = RemoveSuffixes(suffixed_symbol)
sections = [RemoveSuffixes(section) for section in suffixed_sections]
simplified.setdefault(symbol, []).extend(sections)
return simplified
def _SectionsWithSuffixes(symbol_to_sections_map):
"""Finds sections which have suffixes applied.
Args:
symbol_to_sections_map: a map where the values are lists of section names.
Returns:
A set containing all section names which were seen with suffixes applied.
"""
sections_with_suffixes = set()
for suffixed_sections in symbol_to_sections_map.itervalues():
for suffixed_section in suffixed_sections:
section = RemoveSuffixes(suffixed_section)
if section != suffixed_section:
sections_with_suffixes.add(section)
return sections_with_suffixes
def _StripSuffixes(section_list):
"""Remove all suffixes on items in a list of sections or symbols."""
return [RemoveSuffixes(section) for section in section_list]
def main(argv):
parser = optparse.OptionParser(usage=
'usage: %prog [options] <unpatched_orderfile> <library>')
parser.add_option('--target-arch', action='store', dest='arch',
choices=['arm', 'arm64', 'x86', 'x86_64', 'x64', 'mips'],
help='The target architecture for the library.')
options, argv = parser.parse_args(argv)
if not options.arch:
options.arch = cygprofile_utils.DetectArchitecture()
if len(argv) != 3:
parser.print_help()
return 1
orderfile_filename = argv[1]
binary_filename = argv[2]
symbol_extractor.SetArchitecture(options.arch)
(offset_to_symbol_infos, name_to_symbol_infos) = _GroupSymbolInfosFromBinary(
binary_filename)
obj_dir = cygprofile_utils.GetObjDir(binary_filename)
raw_symbol_map = cyglog_to_orderfile.GetSymbolToSectionsMapFromObjectFiles(
obj_dir)
suffixed = _SectionsWithSuffixes(raw_symbol_map)
symbol_to_sections_map = _CombineSectionListsByPrimaryName(raw_symbol_map)
section_to_symbols_map = cygprofile_utils.InvertMapping(
symbol_to_sections_map)
profiled_sections = _StripSuffixes(
GetSectionsFromOrderfile(orderfile_filename))
expanded_sections = _ExpandSections(
profiled_sections, name_to_symbol_infos, offset_to_symbol_infos,
section_to_symbols_map, symbol_to_sections_map, suffixed)
for section in expanded_sections:
print section
# The following is needed otherwise Gold only applies a partial sort.
print '.text' # gets methods not in a section, such as assembly
for prefix in _PREFIXES:
print prefix + '*' # gets everything else
return 0
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
sys.exit(main(sys.argv))
|
ioflo/base/completing.py | BradyHammond/ioflo | 128 | 11072029 | <reponame>BradyHammond/ioflo
"""completing.py done action module
"""
#print("module {0}".format(__name__))
import time
import struct
from collections import deque
import inspect
from ..aid.sixing import *
from ..aid import odict, oset
from ..aid import aiding
from . import excepting
from . import registering
from . import storing
from . import acting
from . import tasking
from . import framing
from ..aid.consoling import getConsole
console = getConsole()
class Complete(acting.Actor):
"""Complete Class for indicating tasker done state
"""
Registry = odict()
def _resolve(self, taskers, **kwa):
"""Resolves value (taskers) list of link names that is passed in as parm
resolved links are passed back to ._act to store in parms
"""
parms = super(Complete, self)._resolve( **kwa)
links = oset() # preserve ordering
for tasker in taskers:
if tasker == 'me':
tasker = self._act.frame.framer
links.add(tasker)
else:
tasker = tasking.resolveTasker(tasker,
who=self.name,
desc='tasker',
contexts=[AUX, SLAVE],
human=self._act.human,
count=self._act.count)
links.add(tasker)
parms['taskers'] = links #replace with valid list/oset ordered
return parms
class CompleteDone(Complete):
"""CompleteDone Complete
"""
def action(self, taskers, **kw):
"""set done state to True for aux or slave framer
"""
for tasker in taskers:
tasker.done = True
console.profuse(" Done {0}\n".format(tasker.name))
return None
|
src/greplin/scales/meter.py | frenzymadness/scales | 273 | 11072044 | # Copyright 2011 The scales Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for metering values"""
try:
from UserDict import UserDict
except ImportError:
from collections import UserDict
from greplin.scales import Stat
from greplin.scales.timer import RepeatTimer
from greplin.scales.util import EWMA
TICKERS = []
TICKER_THREAD = RepeatTimer(5, lambda: [t() for t in TICKERS])
class MeterStatDict(UserDict):
"""Stores the meters for MeterStat. Expects to be ticked every 5 seconds."""
def __init__(self):
UserDict.__init__(self)
self._m1 = EWMA.oneMinute()
self._m5 = EWMA.fiveMinute()
self._m15 = EWMA.fifteenMinute()
self._meters = (self._m1, self._m5, self._m15)
TICKERS.append(self.tick)
self['unit'] = 'per second'
self['count'] = 0
def __getitem__(self, item):
if item in self:
return UserDict.__getitem__(self, item)
else:
return 0.0
def tick(self):
"""Updates meters"""
for m in self._meters:
m.tick()
self['m1'] = self._m1.rate
self['m5'] = self._m5.rate
self['m15'] = self._m15.rate
def mark(self, value=1):
"""Updates the dictionary."""
self['count'] += value
for m in self._meters:
m.update(value)
class MeterStat(Stat):
"""A stat that stores m1, m5, m15. Updated every 5 seconds via TICKER_THREAD."""
def __init__(self, name, _=None):
Stat.__init__(self, name, None)
def _getDefault(self, _):
"""Returns a default MeterStatDict"""
return MeterStatDict()
def __set__(self, instance, value):
self.__get__(instance, None).mark(value)
class MeterDict(UserDict):
"""Dictionary of meters."""
def __init__(self, parent, instance):
UserDict.__init__(self)
self.parent = parent
self.instance = instance
def __getitem__(self, item):
if item in self:
return UserDict.__getitem__(self, item)
else:
meter = MeterStatDict()
self[item] = meter
return meter
class MeterDictStat(Stat):
"""Dictionary stat value class."""
def _getDefault(self, instance):
return MeterDict(self, instance)
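# Usage sketch (assumed from greplin.scales conventions; not part of this file):
#   class HandlerStats(object):
#       requests = MeterStat('requests')
#   stats = HandlerStats()
#   stats.requests = 1   # __set__ above marks the underlying MeterStatDict
#   # TICKER_THREAD then refreshes the m1/m5/m15 rates every 5 seconds.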
|
src/backend/common/models/webcast.py | ofekashery/the-blue-alliance | 266 | 11072052 | <gh_stars>100-1000
from typing import TypedDict
from backend.common.consts.webcast_type import WebcastType
class _WebcastRequired(TypedDict, total=True):
type: WebcastType
channel: str
class _WebcastOptional(TypedDict, total=False):
file: str
date: str
class Webcast(_WebcastRequired, _WebcastOptional):
pass
|
flow/scenarios/bottleneck.py | SHITIANYU-hue/flow | 805 | 11072057 | """Pending deprecation file.
To view the actual content, go to: flow/networks/bottleneck.py
"""
from flow.utils.flow_warnings import deprecated
from flow.networks.bottleneck import BottleneckNetwork
from flow.networks.bottleneck import ADDITIONAL_NET_PARAMS # noqa: F401
@deprecated('flow.scenarios.bottleneck',
'flow.networks.bottleneck.BottleneckNetwork')
class BottleneckScenario(BottleneckNetwork):
"""See parent class."""
pass
|
pygmt/tests/test_grdcontour.py | weiji14/gmt-python | 168 | 11072089 | <reponame>weiji14/gmt-python
"""
Test Figure.grdcontour.
"""
import os
import numpy as np
import pytest
from pygmt import Figure
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers.testing import load_static_earth_relief
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
TEST_CONTOUR_FILE = os.path.join(TEST_DATA_DIR, "contours.txt")
@pytest.fixture(scope="module", name="grid")
def fixture_grid():
"""
Load the grid data from the static_earth_relief file.
"""
return load_static_earth_relief()
@pytest.mark.mpl_image_compare
def test_grdcontour(grid):
"""
Plot a contour image using an xarray grid with fixed contour interval.
"""
fig = Figure()
fig.grdcontour(
grid=grid, interval=50, annotation=200, projection="M10c", frame=True
)
return fig
@pytest.mark.mpl_image_compare
def test_grdcontour_labels(grid):
"""
    Plot a contour image using an xarray grid with contour labels and alternate
colors.
"""
fig = Figure()
fig.grdcontour(
grid=grid,
interval=50,
annotation=200,
projection="M10c",
pen=["a1p,red", "c0.5p,black"],
label_placement="d6c",
frame=True,
)
return fig
@pytest.mark.mpl_image_compare
def test_grdcontour_slice(grid):
"""
    Plot a contour image using an xarray grid that has been sliced.
"""
grid_ = grid.sel(lat=slice(-20, -10))
fig = Figure()
fig.grdcontour(grid=grid_, interval=100, projection="M10c", frame=True)
return fig
@pytest.mark.mpl_image_compare
def test_grdcontour_interval_file_full_opts(grid):
"""
Plot based on external contour level file.
"""
fig = Figure()
comargs = {
"region": [-53, -49, -20, -17],
"interval": TEST_CONTOUR_FILE,
"grid": grid,
"resample": 100,
"projection": "M10c",
"cut": 10,
}
# Plot contours below 650 in blue
fig.grdcontour(
**comargs, limit=(0, 649), pen=["a1p,blue", "c0.5p,blue"], frame=True
)
# Plot contours above 650 in black
fig.grdcontour(**comargs, limit=(650, 1000), pen=["a1p,black", "c0.5p,black"])
return fig
def test_grdcontour_fails():
"""
Should fail for unrecognized input.
"""
fig = Figure()
with pytest.raises(GMTInvalidInput):
fig.grdcontour(np.arange(20).reshape((4, 5)))
|
hubspot3/tickets.py | benaduggan/hubspot3 | 137 | 11072099 | """
hubspot tickets api
"""
from hubspot3.base import BaseClient
from hubspot3.utils import get_log
from typing import Dict, List
TICKETS_API_VERSION = "1"
class TicketsClient(BaseClient):
"""
hubspot3 Tickets client
:see: https://developers.hubspot.com/docs/methods/tickets/tickets-overview
"""
def __init__(self, *args, **kwargs):
"""initialize a tickets client"""
super(TicketsClient, self).__init__(*args, **kwargs)
self.log = get_log("hubspot3.tickets")
def _get_path(self, subpath):
"""tickets subpath generator"""
return f"crm-objects/v{TICKETS_API_VERSION}/{subpath}"
def create(
self, pipeline: str, stage: str, properties: Dict = None, **options
) -> Dict:
"""
create a ticket.
pass in a pipeline and stage, then a key value pair of other properties
properties will be converted to the name=, value=, format automatically
:see: https://developers.hubspot.com/docs/methods/tickets/create-ticket
"""
if not pipeline or not stage:
raise Exception("pipeline and stage are required to create a ticket!")
if not properties:
properties = {}
ticket_data = [{"name": x, "value": y} for x, y in properties.items()]
ticket_data.append({"name": "hs_pipeline", "value": pipeline})
ticket_data.append({"name": "hs_pipeline_stage", "value": stage})
return self._call("objects/tickets", data=ticket_data, method="POST", **options)
def update(self, ticket_id: str, data: Dict, **options) -> Dict:
"""
update a ticket by its ticket id, with the given data
:see: https://developers.hubspot.com/docs/methods/tickets/update-ticket
"""
ticket_data = [{"name": x, "value": y} for x, y in data.items()]
return self._call(
f"objects/tickets/{ticket_id}", method="PUT", data=ticket_data, **options
)
def get(
self,
ticket_id: str,
properties: List[str] = None,
include_deleted: bool = False,
**options,
) -> Dict:
"""
get a ticket by its ticket_id
:see: https://developers.hubspot.com/docs/methods/tickets/get_ticket_by_id
"""
properties = properties or [
"subject",
"content",
"hs_pipeline",
"hs_pipeline_stage",
]
params = options.pop("params", {})
params.update({"includeDeletes": include_deleted})
options.update({"params": params})
return self._call(
f"objects/tickets/{ticket_id}",
method="GET",
properties=properties,
**options,
)
def get_all(self, properties: List[str] = None, limit: int = -1, **options) -> list:
"""
Get all tickets in hubspot
:see: https://developers.hubspot.com/docs/methods/tickets/get-all-tickets
"""
properties = properties or [
"subject",
"content",
"hs_pipeline",
"hs_pipeline_stage",
]
finished = False
output = [] # type: list
offset = 0
limited = limit > 0
while not finished:
batch = self._call(
"objects/tickets/paged",
method="GET",
params={"offset": offset},
properties=properties,
**options,
)
output.extend(batch["objects"])
finished = not batch["hasMore"]
offset = batch["offset"]
return output if not limited else output[:limit]
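# Usage sketch (credentials, IDs and constructor arguments are assumptions; the
# real arguments are handled by hubspot3.base.BaseClient):
#   client = TicketsClient(api_key="your-key")
#   ticket = client.create(pipeline="0", stage="1", properties={"subject": "Hi"})
#   client.update("1234567890", {"content": "updated body"})  # placeholder ticket id
#   client.get("1234567890")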
|
typed_python/lib/sorted_dict.py | APrioriInvestments/typed_python | 105 | 11072102 | from typed_python import (
TypeFunction, Class, Member, Final, Entrypoint, OneOf, Generator, Tuple,
Forward, ListOf
)
def less(x, y):
return x < y
@TypeFunction
def SortedDict(K, V, comparator=less):
Node = Forward("Node")
@Node.define
class Node(Class, Final):
key = Member(K)
value = Member(V)
left = Member(OneOf(None, Node), nonempty=True)
right = Member(OneOf(None, Node), nonempty=True)
count = Member(int, nonempty=True)
def __contains__(self, k: K) -> bool:
if comparator(k, self.key):
if self.left is not None:
return k in self.left
else:
return False
elif comparator(self.key, k):
if self.right is not None:
return k in self.right
else:
return False
else:
return True
def get(self, k: K) -> V:
if comparator(k, self.key):
if self.left is None:
raise KeyError(k)
return self.left.get(k)
elif comparator(self.key, k):
if self.right is None:
raise KeyError(k)
return self.right.get(k)
else:
return self.value
def set(self, k: K, v: V) -> bool:
if comparator(k, self.key):
if self.left is None:
self.left = Node(key=k, value=v, count=1)
self.count += 1
return True
else:
if self.left.set(k, v):
self.count += 1
self.rebalance()
return True
return False
elif comparator(self.key, k):
if self.right is None:
self.right = Node(key=k, value=v, count=1)
self.count += 1
return True
else:
if self.right.set(k, v):
self.count += 1
self.rebalance()
return True
return False
else:
self.value = v
return False
def first(self) -> K:
if self.left is not None:
return self.left.first()
return self.key
def last(self) -> K:
if self.right is not None:
return self.right.last()
return self.key
def become(self, otherNode: Node):
self.key = otherNode.key
self.value = otherNode.value
self.left = otherNode.left
self.right = otherNode.right
self.count = otherNode.count
def _checkInvariants(self):
assert self.count == (
1 + (0 if not self.left else self.left.count)
+ (0 if not self.right else self.right.count)
)
if self.left:
assert comparator(self.left.key, self.key)
self.left._checkInvariants()
if self.right:
assert comparator(self.key, self.right.key)
self.right._checkInvariants()
def remove(self, k: K) -> bool:
"""Remove 'k' and return True if we are now empty."""
if comparator(k, self.key):
if self.left is None:
raise KeyError(k)
if self.left.remove(k):
self.left = None
self.count -= 1
self.rebalance()
return False
elif comparator(self.key, k):
if self.right is None:
raise KeyError(k)
if self.right.remove(k):
self.right = None
self.count -= 1
self.rebalance()
return False
else:
if self.count == 1:
return True # just remove us
if self.left is not None and self.right is None:
# just become 'left'
self.become(self.left)
return False
if self.right is not None and self.left is None:
self.become(self.right)
return False
if self.right.count < self.left.count:
self.key = self.left.last()
self.value = self.left.get(self.key)
if self.left.remove(self.key):
self.left = None
self.count -= 1
self.rebalance()
return False
else:
self.key = self.right.first()
self.value = self.right.get(self.key)
if self.right.remove(self.key):
self.right = None
self.count -= 1
self.rebalance()
return False
def rebalance(self):
if self.left is None and self.right is None:
assert self.count == 1
return
if self.left is None and self.right is not None:
if self.right.count <= 2:
return
k = self.key
v = self.value
self.right.set(k, v)
self.become(self.right)
return
if self.right is None and self.left is not None:
if self.left.count <= 2:
return
k = self.key
v = self.value
self.left.set(k, v)
self.become(self.left)
return
if self.right is not None and self.left is not None:
# both are populated. we should have that the
# left count and right count imbalance is no greater than
# a factor of two
ll = 0 if self.left.left is None else self.left.left.count
lr = 0 if self.left.right is None else self.left.right.count
rl = 0 if self.right.left is None else self.right.left.count
rr = 0 if self.right.right is None else self.right.right.count
# if ll is much bigger than it should be, make 'll' the
# new left side
if ll > (3 + lr + rl + rr) * 2:
leftKey = self.left.key
leftVal = self.left.value
rootKey = self.key
rootValue = self.value
lNode = self.left
rNode = self.right
llNode = self.left.left
lrNode = self.left.right
self.left = llNode
self.key = leftKey
self.value = leftVal
self.right = Node(
key=rootKey,
value=rootValue,
left=lrNode,
right=rNode,
count=1 + lr + rNode.count
)
self.count = 1 + self.left.count + self.right.count
elif rr > (3 + rl + lr + ll) * 2:
rightKey = self.right.key
rightVal = self.right.value
rootKey = self.key
rootValue = self.value
lNode = self.left
rNode = self.right
rlNode = self.right.left
rrNode = self.right.right
self.right = rrNode
self.key = rightKey
self.value = rightVal
self.left = Node(
key=rootKey,
value=rootValue,
left=lNode,
right=rlNode,
count=1 + rl + lNode.count
)
self.count = 1 + self.left.count + self.right.count
def height(self):
return max(
0,
1 + (0 if self.left is None else self.left.height()),
1 + (0 if self.right is None else self.right.height())
)
class SortedDict_(Class, Final):
_root = Member(OneOf(None, Node), nonempty=True)
def __init__(self):
pass
def __init__(self, other): # noqa
for key in other:
self[key] = other[key]
def height(self):
if self._root is None:
return 0
return self._root.height()
@Entrypoint
def __getitem__(self, key) -> V:
if self._root is None:
raise KeyError(key)
return self._root.get(key)
@Entrypoint
def __contains__(self, key) -> bool:
if self._root is None:
return False
return key in self._root
@Entrypoint
def __setitem__(self, k: K, v: V) -> None:
if self._root is None:
self._root = Node(key=k, value=v, count=1)
else:
self._root.set(k, v)
@Entrypoint
def __delitem__(self, k: K) -> None:
if self._root is None:
raise KeyError(k)
if self._root.remove(k):
self._root = None
@Entrypoint
def pop(self, k: K) -> V:
if self._root is None:
raise KeyError(k)
res = self._root.get(k)
if self._root.remove(k):
self._root = None
return res
@Entrypoint
def pop(self, k: K, v: V) -> V: # noqa
if self._root is None or k not in self._root:
return v
res = self._root.get(k)
if self._root.remove(k):
self._root = None
return res
@Entrypoint
def first(self) -> OneOf(None, K):
if self._root is None:
return None
return self._root.first()
@Entrypoint
def last(self) -> OneOf(None, K):
if self._root is None:
return None
return self._root.last()
@Entrypoint
def get(self, k: K) -> V:
return self[k]
@Entrypoint
def get(self, k: K, v: V) -> V: # noqa
if k in self:
return self[k]
return v
@Entrypoint
def setdefault(self, k: K) -> V:
if k not in self:
self[k] = V()
return self[k]
@Entrypoint
def setdefault(self, k: K, v: V) -> V: # noqa
if k not in self:
self[k] = v
return self[k]
@Entrypoint
def __str__(self):
return '{' + ",".join(f'{k}: {v}' for k, v in self.items()) + '}'
@Entrypoint
def __repr__(self):
return '{' + ",".join(f'{k}: {v}' for k, v in self.items()) + '}'
def __len__(self):
return self._root.count if self._root is not None else 0
@Entrypoint
def _checkInvariants(self):
if not self._root:
return
self._root._checkInvariants()
@Entrypoint
def items(self) -> Generator(Tuple(K, V)):
stack = ListOf(Tuple(Node, bool))()
if self._root is None:
return
stack.append((self._root, True))
while stack:
node, wayDown = stack.pop()
if wayDown:
if node.left:
stack.append((node, False))
stack.append((node.left, True))
else:
yield (node.key, node.value)
if node.right:
stack.append((node.right, True))
else:
yield (node.key, node.value)
if node.right:
stack.append((node.right, True))
@Entrypoint
def __iter__(self) -> Generator(K):
stack = ListOf(Tuple(Node, bool))()
if self._root is None:
return
stack.append((self._root, True))
while stack:
node, wayDown = stack.pop()
if wayDown:
if node.left:
stack.append((node, False))
stack.append((node.left, True))
else:
yield node.key
if node.right:
stack.append((node.right, True))
else:
yield node.key
if node.right:
stack.append((node.right, True))
return SortedDict_
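# Illustrative usage sketch (not part of the original module). It assumes the
# enclosing factory that returns SortedDict_ is exposed to callers as
# SortedDict(K, V) -- that name is an assumption; the methods are the ones
# defined above.
#
#   SD = SortedDict(int, str)
#   d = SD()
#   d[3] = "three"
#   d[1] = "one"
#   list(d.items())        # [(1, 'one'), (3, 'three')] -- keys in sorted order
#   d.pop(3)               # returns "three"
#   d.first(), d.last()    # (1, 1) after the pop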
|
tests/numpy/sum.py | Fryguy/py2rb | 124 | 11072122 | # coding: utf-8
import numpy as np
x = np.array([1, 5])
w = np.array([2, 9])
b = 5
z = np.sum(w*x) + b
print(z)
z = (w * 3).sum()
print(z)
|
strawberry/printer.py | ddirkson/strawberry | 2,062 | 11072135 | from itertools import chain
from typing import Optional, cast
from graphql.type import is_object_type, is_specified_directive
from graphql.utilities.print_schema import (
is_defined_type,
print_args,
print_block,
print_deprecated,
print_description,
print_directive,
print_implemented_interfaces,
print_schema_definition,
print_type as original_print_type,
)
from strawberry.field import StrawberryField
from strawberry.types.types import TypeDefinition
from .schema import BaseSchema
def print_federation_field_directive(field: Optional[StrawberryField]) -> str:
if not field:
return ""
out = ""
if field.federation.provides:
out += f' @provides(fields: "{" ".join(field.federation.provides)}")'
if field.federation.requires:
out += f' @requires(fields: "{" ".join(field.federation.requires)}")'
if field.federation.external:
out += " @external"
return out
def print_fields(type_, schema: BaseSchema) -> str:
strawberry_type = cast(TypeDefinition, schema.get_type_by_name(type_.name))
fields = []
for i, (name, field) in enumerate(type_.fields.items()):
python_name = field.extensions and field.extensions.get("python_name")
strawberry_field = (
strawberry_type.get_field(python_name)
if strawberry_type and python_name
else None
)
fields.append(
print_description(field, " ", not i)
+ f" {name}"
+ print_args(field.args, " ")
+ f": {field.type}"
+ print_federation_field_directive(strawberry_field)
+ print_deprecated(field.deprecation_reason)
)
return print_block(fields)
def print_federation_key_directive(type_, schema: BaseSchema):
strawberry_type = cast(TypeDefinition, schema.get_type_by_name(type_.name))
if not strawberry_type:
return ""
keys = strawberry_type.federation.keys
parts = []
for key in keys:
parts.append(f'@key(fields: "{key}")')
if not parts:
return ""
return " " + " ".join(parts)
def print_extends(type_, schema: BaseSchema):
strawberry_type = cast(TypeDefinition, schema.get_type_by_name(type_.name))
if strawberry_type and strawberry_type.federation.extend:
return "extend "
return ""
def _print_object(type_, schema: BaseSchema) -> str:
return (
print_description(type_)
+ print_extends(type_, schema)
+ f"type {type_.name}"
+ print_implemented_interfaces(type_)
+ print_federation_key_directive(type_, schema)
+ print_fields(type_, schema)
)
def _print_type(field, schema: BaseSchema) -> str:
if is_object_type(field):
return _print_object(field, schema)
return original_print_type(field)
def print_schema(schema: BaseSchema) -> str:
graphql_core_schema = schema._schema # type: ignore
directives = filter(
lambda n: not is_specified_directive(n), graphql_core_schema.directives
)
type_map = graphql_core_schema.type_map
types = filter(is_defined_type, map(type_map.get, sorted(type_map))) # type: ignore
return "\n\n".join(
chain(
filter(None, [print_schema_definition(graphql_core_schema)]),
(print_directive(directive) for directive in directives),
(_print_type(type_, schema) for type_ in types), # type: ignore
)
)
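# Illustrative usage sketch (not part of the original module): given any
# strawberry Schema instance (a BaseSchema subclass) built elsewhere, the
# federation-aware SDL defined above can be produced with
#
#   sdl = print_schema(schema)
#   print(sdl)
#
# Types and fields that carry federation metadata get the extra @key,
# @external, @provides and @requires directives in the emitted SDL.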
|
tests/test_as_channel_first.py | dylanbuchi/MONAI | 2,971 | 11072148 | <reponame>dylanbuchi/MONAI<filename>tests/test_as_channel_first.py<gh_stars>1000+
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.transforms import AsChannelFirst
from tests.utils import TEST_NDARRAYS, assert_allclose
TESTS = []
for p in TEST_NDARRAYS:
TESTS.append([p, {"channel_dim": -1}, (4, 1, 2, 3)])
TESTS.append([p, {"channel_dim": 3}, (4, 1, 2, 3)])
TESTS.append([p, {"channel_dim": 2}, (3, 1, 2, 4)])
class TestAsChannelFirst(unittest.TestCase):
@parameterized.expand(TESTS)
def test_value(self, in_type, input_param, expected_shape):
test_data = in_type(np.random.randint(0, 2, size=[1, 2, 3, 4]))
result = AsChannelFirst(**input_param)(test_data)
self.assertTupleEqual(result.shape, expected_shape)
if isinstance(test_data, torch.Tensor):
test_data = test_data.cpu().numpy()
expected = np.moveaxis(test_data, input_param["channel_dim"], 0)
assert_allclose(result, expected, type_test=False)
if __name__ == "__main__":
unittest.main()
|
pyatv/protocols/mrp/protobuf/SetVolumeMessage_pb2.py | Jacobs4/pyatv | 532 | 11072155 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/protocols/mrp/protobuf/SetVolumeMessage.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pyatv.protocols.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pyatv/protocols/mrp/protobuf/SetVolumeMessage.proto',
package='',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n3pyatv/protocols/mrp/protobuf/SetVolumeMessage.proto\x1a\x32pyatv/protocols/mrp/protobuf/ProtocolMessage.proto\";\n\x10SetVolumeMessage\x12\x0e\n\x06volume\x18\x01 \x01(\x02\x12\x17\n\x0foutputDeviceUID\x18\x02 \x01(\t:=\n\x10setVolumeMessage\x12\x10.ProtocolMessage\x18\x37 \x01(\x0b\x32\x11.SetVolumeMessage'
,
dependencies=[pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.DESCRIPTOR,])
SETVOLUMEMESSAGE_FIELD_NUMBER = 55
setVolumeMessage = _descriptor.FieldDescriptor(
name='setVolumeMessage', full_name='setVolumeMessage', index=0,
number=55, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
_SETVOLUMEMESSAGE = _descriptor.Descriptor(
name='SetVolumeMessage',
full_name='SetVolumeMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='volume', full_name='SetVolumeMessage.volume', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='outputDeviceUID', full_name='SetVolumeMessage.outputDeviceUID', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=107,
serialized_end=166,
)
DESCRIPTOR.message_types_by_name['SetVolumeMessage'] = _SETVOLUMEMESSAGE
DESCRIPTOR.extensions_by_name['setVolumeMessage'] = setVolumeMessage
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SetVolumeMessage = _reflection.GeneratedProtocolMessageType('SetVolumeMessage', (_message.Message,), {
'DESCRIPTOR' : _SETVOLUMEMESSAGE,
'__module__' : 'pyatv.protocols.mrp.protobuf.SetVolumeMessage_pb2'
# @@protoc_insertion_point(class_scope:SetVolumeMessage)
})
_sym_db.RegisterMessage(SetVolumeMessage)
setVolumeMessage.message_type = _SETVOLUMEMESSAGE
pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(setVolumeMessage)
# @@protoc_insertion_point(module_scope)
|
env/Lib/site-packages/_plotly_utils/colors/carto.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 11,750 | 11072171 | <gh_stars>1000+
"""
Color sequences and scales from CARTO's CartoColors
Learn more at https://github.com/CartoDB/CartoColor
CARTOColors are made available under a Creative Commons Attribution license: https://creativecommons.org/licenses/by/3.0/us/
"""
from ._swatches import _swatches
def swatches(template=None):
return _swatches(__name__, globals(), template)
swatches.__doc__ = _swatches.__doc__
Burg = [
"rgb(255, 198, 196)",
"rgb(244, 163, 168)",
"rgb(227, 129, 145)",
"rgb(204, 96, 125)",
"rgb(173, 70, 108)",
"rgb(139, 48, 88)",
"rgb(103, 32, 68)",
]
Burgyl = [
"rgb(251, 230, 197)",
"rgb(245, 186, 152)",
"rgb(238, 138, 130)",
"rgb(220, 113, 118)",
"rgb(200, 88, 108)",
"rgb(156, 63, 93)",
"rgb(112, 40, 74)",
]
Redor = [
"rgb(246, 210, 169)",
"rgb(245, 183, 142)",
"rgb(241, 156, 124)",
"rgb(234, 129, 113)",
"rgb(221, 104, 108)",
"rgb(202, 82, 104)",
"rgb(177, 63, 100)",
]
Oryel = [
"rgb(236, 218, 154)",
"rgb(239, 196, 126)",
"rgb(243, 173, 106)",
"rgb(247, 148, 93)",
"rgb(249, 123, 87)",
"rgb(246, 99, 86)",
"rgb(238, 77, 90)",
]
Peach = [
"rgb(253, 224, 197)",
"rgb(250, 203, 166)",
"rgb(248, 181, 139)",
"rgb(245, 158, 114)",
"rgb(242, 133, 93)",
"rgb(239, 106, 76)",
"rgb(235, 74, 64)",
]
Pinkyl = [
"rgb(254, 246, 181)",
"rgb(255, 221, 154)",
"rgb(255, 194, 133)",
"rgb(255, 166, 121)",
"rgb(250, 138, 118)",
"rgb(241, 109, 122)",
"rgb(225, 83, 131)",
]
Mint = [
"rgb(228, 241, 225)",
"rgb(180, 217, 204)",
"rgb(137, 192, 182)",
"rgb(99, 166, 160)",
"rgb(68, 140, 138)",
"rgb(40, 114, 116)",
"rgb(13, 88, 95)",
]
Blugrn = [
"rgb(196, 230, 195)",
"rgb(150, 210, 164)",
"rgb(109, 188, 144)",
"rgb(77, 162, 132)",
"rgb(54, 135, 122)",
"rgb(38, 107, 110)",
"rgb(29, 79, 96)",
]
Darkmint = [
"rgb(210, 251, 212)",
"rgb(165, 219, 194)",
"rgb(123, 188, 176)",
"rgb(85, 156, 158)",
"rgb(58, 124, 137)",
"rgb(35, 93, 114)",
"rgb(18, 63, 90)",
]
Emrld = [
"rgb(211, 242, 163)",
"rgb(151, 225, 150)",
"rgb(108, 192, 139)",
"rgb(76, 155, 130)",
"rgb(33, 122, 121)",
"rgb(16, 89, 101)",
"rgb(7, 64, 80)",
]
Aggrnyl = [
"rgb(36, 86, 104)",
"rgb(15, 114, 121)",
"rgb(13, 143, 129)",
"rgb(57, 171, 126)",
"rgb(110, 197, 116)",
"rgb(169, 220, 103)",
"rgb(237, 239, 93)",
]
Bluyl = [
"rgb(247, 254, 174)",
"rgb(183, 230, 165)",
"rgb(124, 203, 162)",
"rgb(70, 174, 160)",
"rgb(8, 144, 153)",
"rgb(0, 113, 139)",
"rgb(4, 82, 117)",
]
Teal = [
"rgb(209, 238, 234)",
"rgb(168, 219, 217)",
"rgb(133, 196, 201)",
"rgb(104, 171, 184)",
"rgb(79, 144, 166)",
"rgb(59, 115, 143)",
"rgb(42, 86, 116)",
]
Tealgrn = [
"rgb(176, 242, 188)",
"rgb(137, 232, 172)",
"rgb(103, 219, 165)",
"rgb(76, 200, 163)",
"rgb(56, 178, 163)",
"rgb(44, 152, 160)",
"rgb(37, 125, 152)",
]
Purp = [
"rgb(243, 224, 247)",
"rgb(228, 199, 241)",
"rgb(209, 175, 232)",
"rgb(185, 152, 221)",
"rgb(159, 130, 206)",
"rgb(130, 109, 186)",
"rgb(99, 88, 159)",
]
Purpor = [
"rgb(249, 221, 218)",
"rgb(242, 185, 196)",
"rgb(229, 151, 185)",
"rgb(206, 120, 179)",
"rgb(173, 95, 173)",
"rgb(131, 75, 160)",
"rgb(87, 59, 136)",
]
Sunset = [
"rgb(243, 231, 155)",
"rgb(250, 196, 132)",
"rgb(248, 160, 126)",
"rgb(235, 127, 134)",
"rgb(206, 102, 147)",
"rgb(160, 89, 160)",
"rgb(92, 83, 165)",
]
Magenta = [
"rgb(243, 203, 211)",
"rgb(234, 169, 189)",
"rgb(221, 136, 172)",
"rgb(202, 105, 157)",
"rgb(177, 77, 142)",
"rgb(145, 53, 125)",
"rgb(108, 33, 103)",
]
Sunsetdark = [
"rgb(252, 222, 156)",
"rgb(250, 164, 118)",
"rgb(240, 116, 110)",
"rgb(227, 79, 111)",
"rgb(220, 57, 119)",
"rgb(185, 37, 122)",
"rgb(124, 29, 111)",
]
Agsunset = [
"rgb(75, 41, 145)",
"rgb(135, 44, 162)",
"rgb(192, 54, 157)",
"rgb(234, 79, 136)",
"rgb(250, 120, 118)",
"rgb(246, 169, 122)",
"rgb(237, 217, 163)",
]
Brwnyl = [
"rgb(237, 229, 207)",
"rgb(224, 194, 162)",
"rgb(211, 156, 131)",
"rgb(193, 118, 111)",
"rgb(166, 84, 97)",
"rgb(129, 55, 83)",
"rgb(84, 31, 63)",
]
# Diverging schemes
Armyrose = [
"rgb(121, 130, 52)",
"rgb(163, 173, 98)",
"rgb(208, 211, 162)",
"rgb(253, 251, 228)",
"rgb(240, 198, 195)",
"rgb(223, 145, 163)",
"rgb(212, 103, 128)",
]
Fall = [
"rgb(61, 89, 65)",
"rgb(119, 136, 104)",
"rgb(181, 185, 145)",
"rgb(246, 237, 189)",
"rgb(237, 187, 138)",
"rgb(222, 138, 90)",
"rgb(202, 86, 44)",
]
Geyser = [
"rgb(0, 128, 128)",
"rgb(112, 164, 148)",
"rgb(180, 200, 168)",
"rgb(246, 237, 189)",
"rgb(237, 187, 138)",
"rgb(222, 138, 90)",
"rgb(202, 86, 44)",
]
Temps = [
"rgb(0, 147, 146)",
"rgb(57, 177, 133)",
"rgb(156, 203, 134)",
"rgb(233, 226, 156)",
"rgb(238, 180, 121)",
"rgb(232, 132, 113)",
"rgb(207, 89, 126)",
]
Tealrose = [
"rgb(0, 147, 146)",
"rgb(114, 170, 161)",
"rgb(177, 199, 179)",
"rgb(241, 234, 200)",
"rgb(229, 185, 173)",
"rgb(217, 137, 148)",
"rgb(208, 88, 126)",
]
Tropic = [
"rgb(0, 155, 158)",
"rgb(66, 183, 185)",
"rgb(167, 211, 212)",
"rgb(241, 241, 241)",
"rgb(228, 193, 217)",
"rgb(214, 145, 193)",
"rgb(199, 93, 171)",
]
Earth = [
"rgb(161, 105, 40)",
"rgb(189, 146, 90)",
"rgb(214, 189, 141)",
"rgb(237, 234, 194)",
"rgb(181, 200, 184)",
"rgb(121, 167, 172)",
"rgb(40, 135, 161)",
]
# Qualitative palettes
Antique = [
"rgb(133, 92, 117)",
"rgb(217, 175, 107)",
"rgb(175, 100, 88)",
"rgb(115, 111, 76)",
"rgb(82, 106, 131)",
"rgb(98, 83, 119)",
"rgb(104, 133, 92)",
"rgb(156, 156, 94)",
"rgb(160, 97, 119)",
"rgb(140, 120, 93)",
"rgb(124, 124, 124)",
]
Bold = [
"rgb(127, 60, 141)",
"rgb(17, 165, 121)",
"rgb(57, 105, 172)",
"rgb(242, 183, 1)",
"rgb(231, 63, 116)",
"rgb(128, 186, 90)",
"rgb(230, 131, 16)",
"rgb(0, 134, 149)",
"rgb(207, 28, 144)",
"rgb(249, 123, 114)",
"rgb(165, 170, 153)",
]
Pastel = [
"rgb(102, 197, 204)",
"rgb(246, 207, 113)",
"rgb(248, 156, 116)",
"rgb(220, 176, 242)",
"rgb(135, 197, 95)",
"rgb(158, 185, 243)",
"rgb(254, 136, 177)",
"rgb(201, 219, 116)",
"rgb(139, 224, 164)",
"rgb(180, 151, 231)",
"rgb(179, 179, 179)",
]
Prism = [
"rgb(95, 70, 144)",
"rgb(29, 105, 150)",
"rgb(56, 166, 165)",
"rgb(15, 133, 84)",
"rgb(115, 175, 72)",
"rgb(237, 173, 8)",
"rgb(225, 124, 5)",
"rgb(204, 80, 62)",
"rgb(148, 52, 110)",
"rgb(111, 64, 112)",
"rgb(102, 102, 102)",
]
Safe = [
"rgb(136, 204, 238)",
"rgb(204, 102, 119)",
"rgb(221, 204, 119)",
"rgb(17, 119, 51)",
"rgb(51, 34, 136)",
"rgb(170, 68, 153)",
"rgb(68, 170, 153)",
"rgb(153, 153, 51)",
"rgb(136, 34, 85)",
"rgb(102, 17, 0)",
"rgb(136, 136, 136)",
]
Vivid = [
"rgb(229, 134, 6)",
"rgb(93, 105, 177)",
"rgb(82, 188, 163)",
"rgb(153, 201, 69)",
"rgb(204, 97, 176)",
"rgb(36, 121, 108)",
"rgb(218, 165, 27)",
"rgb(47, 138, 196)",
"rgb(118, 78, 159)",
"rgb(237, 100, 90)",
"rgb(165, 170, 153)",
]
# Prefix variable names with _ so that they will not be added to the swatches
_contents = dict(globals())
for _k, _cols in _contents.items():
if _k.startswith("_") or _k.startswith("swatches") or _k.endswith("_r"):
continue
globals()[_k + "_r"] = _cols[::-1]
|
bfxapi/websockets/order_manager.py | kirinchen/bitfinex-api-py | 162 | 11072195 | <filename>bfxapi/websockets/order_manager.py
"""
Module used to house all of the functions/classes used to handle orders
"""
import time
import asyncio
from ..utils.custom_logger import CustomLogger
from ..models import Order
from ..utils.auth import calculate_order_flags, gen_unique_cid
class OrderManager:
"""
    Handles all of the functionality for opening, updating and closing orders.
Also contains state such as all of your open orders and orders that have
closed.
"""
def __init__(self, bfxapi, logLevel='INFO'):
self.bfxapi = bfxapi
self.pending_orders = {}
self.closed_orders = {}
self.open_orders = {}
self.pending_order_close_callbacks = {}
self.pending_order_confirm_callbacks = {}
self.pending_update_confirm_callbacks = {}
self.pending_cancel_confirm_callbacks = {}
self.logger = CustomLogger('BfxOrderManager', logLevel=logLevel)
def get_open_orders(self):
return list(self.open_orders.values())
def get_closed_orders(self):
return list(self.closed_orders.values())
def get_pending_orders(self):
return list(self.pending_orders.values())
async def confirm_order_closed(self, raw_ws_data):
order = Order.from_raw_order(raw_ws_data[2])
order.set_open_state(False)
if order.id in self.open_orders:
del self.open_orders[order.id]
if order.cid in self.pending_orders:
del self.pending_orders[order.cid]
self.closed_orders[order.id] = order
if not order.is_confirmed():
order.set_confirmed()
self.bfxapi._emit('order_confirmed', order)
await self._execute_callback(order, self.pending_order_confirm_callbacks)
await self._execute_callback(order, self.pending_cancel_confirm_callbacks)
await self._execute_callback(order, self.pending_update_confirm_callbacks)
await self._execute_callback(order, self.pending_order_close_callbacks)
self.logger.info("Order closed: {} {}".format(
order.symbol, order.status))
self.bfxapi._emit('order_closed', order)
async def build_from_order_snapshot(self, raw_ws_data):
"""
Rebuild the user orderbook based on an incoming snapshot
"""
osData = raw_ws_data[2]
self.open_orders = {}
for raw_order in osData:
order = Order.from_raw_order(raw_order)
order.set_open_state(True)
self.open_orders[order.id] = order
self.bfxapi._emit('order_snapshot', self.get_open_orders())
async def confirm_order_update(self, raw_ws_data):
order = Order.from_raw_order(raw_ws_data[2])
order.set_open_state(True)
self.open_orders[order.id] = order
await self._execute_callback(order, self.pending_update_confirm_callbacks)
self.logger.info("Order update: {}".format(order))
self.bfxapi._emit('order_update', order)
async def confirm_order_new(self, raw_ws_data):
order = Order.from_raw_order(raw_ws_data[2])
order.set_open_state(True)
if order.cid in self.pending_orders:
del self.pending_orders[order.cid]
self.open_orders[order.id] = order
order.set_confirmed()
self.bfxapi._emit('order_confirmed', order)
await self._execute_callback(order, self.pending_order_confirm_callbacks)
self.logger.info("Order new: {}".format(order))
self.bfxapi._emit('order_new', order)
async def confirm_order_error(self, raw_ws_data):
cid = raw_ws_data[2][4][2]
if cid in self.pending_orders:
del self.pending_orders[cid]
self.logger.info("Deleted Order CID {} from pending orders".format(cid))
async def submit_order(self, symbol, price, amount, market_type=Order.Type.LIMIT,
hidden=False, price_trailing=None, price_aux_limit=None,
oco_stop_price=None, close=False, reduce_only=False,
post_only=False, oco=False, aff_code=None, time_in_force=None,
leverage=None, onConfirm=None, onClose=None, gid=None, *args, **kwargs):
"""
Submit a new order
@param gid: assign the order to a group identifier
        @param symbol: the name of the symbol i.e. 'tBTCUSD'
@param price: the price you want to buy/sell at (must be positive)
@param amount: order size: how much you want to buy/sell,
a negative amount indicates a sell order and positive a buy order
@param market_type Order.Type: please see Order.Type enum
@param hidden: if True, order should be hidden from orderbooks
@param price_trailing: decimal trailing price
@param price_aux_limit: decimal auxiliary Limit price (only for STOP LIMIT)
@param oco_stop_price: set the oco stop price (requires oco = True)
@param close: if True, close position if position present
@param reduce_only: if True, ensures that the executed order does not flip the opened position
@param post_only: if True, ensures the limit order will be added to the order book and not
match with a pre-existing order
@param oco: cancels other order option allows you to place a pair of orders stipulating
that if one order is executed fully or partially, then the other is automatically canceled
@param aff_code: bitfinex affiliate code
@param time_in_force: datetime for automatic order cancellation ie. 2020-01-01 10:45:23
@param leverage: the amount of leverage to apply to the order as an integer
@param onConfirm: function called when the bitfinex websocket receives signal that the order
was confirmed
@param onClose: function called when the bitfinex websocket receives signal that the order
was closed due to being filled or cancelled
"""
cid = self._gen_unique_cid()
# create base payload with required data
payload = {
"cid": cid,
"type": str(market_type),
"symbol": symbol,
"amount": str(amount),
"price": str(price),
"meta": {}
}
# calculate and add flags
flags = calculate_order_flags(hidden, close, reduce_only, post_only, oco)
payload['flags'] = flags
# add extra parameters
if price_trailing != None:
payload['price_trailing'] = price_trailing
if price_aux_limit != None:
payload['price_aux_limit'] = price_aux_limit
if oco_stop_price != None:
payload['price_oco_stop'] = str(oco_stop_price)
if time_in_force != None:
payload['tif'] = time_in_force
if gid != None:
payload['gid'] = gid
if leverage != None:
payload['lev'] = str(leverage)
if aff_code != None:
payload['meta']['aff_code'] = str(aff_code)
# submit the order
self.pending_orders[cid] = payload
self._create_callback(cid, onConfirm, self.pending_order_confirm_callbacks)
self._create_callback(cid, onClose, self.pending_order_close_callbacks)
await self.bfxapi._send_auth_command('on', payload)
self.logger.info("Order cid={} ({} {} @ {}) dispatched".format(
cid, symbol, amount, price))
async def update_order(self, orderId, price=None, amount=None, delta=None, price_aux_limit=None,
price_trailing=None, hidden=False, close=False, reduce_only=False,
post_only=False, time_in_force=None, leverage=None, onConfirm=None):
"""
Update an existing order
@param orderId: the id of the order that you want to update
@param price: the price you want to buy/sell at (must be positive)
@param amount: order size: how much you want to buy/sell,
a negative amount indicates a sell order and positive a buy order
@param delta: change of amount
@param price_trailing: decimal trailing price
@param price_aux_limit: decimal auxiliary Limit price (only for STOP LIMIT)
@param hidden: if True, order should be hidden from orderbooks
@param close: if True, close position if position present
@param reduce_only: if True, ensures that the executed order does not flip the opened position
@param post_only: if True, ensures the limit order will be added to the order book and not
match with a pre-existing order
@param time_in_force: datetime for automatic order cancellation ie. 2020-01-01 10:45:23
@param leverage: the amount of leverage to apply to the order as an integer
@param onConfirm: function called when the bitfinex websocket receives signal that the order
was confirmed
@param onClose: function called when the bitfinex websocket receives signal that the order
was closed due to being filled or cancelled
"""
self._create_callback(orderId, onConfirm, self.pending_update_confirm_callbacks)
payload = {"id": orderId}
if price != None:
payload['price'] = str(price)
if amount != None:
payload['amount'] = str(amount)
if delta != None:
payload['delta'] = str(delta)
if price_aux_limit != None:
payload['price_aux_limit'] = str(price_aux_limit)
if price_trailing != None:
payload['price_trailing'] = str(price_trailing)
if time_in_force != None:
payload['tif'] = str(time_in_force)
if leverage != None:
payload['lev'] = str(leverage)
flags = calculate_order_flags(
hidden, close, reduce_only, post_only, False)
payload['flags'] = flags
await self.bfxapi._send_auth_command('ou', payload)
self.logger.info("Update Order order_id={} dispatched".format(orderId))
async def cancel_order(self, orderId, onConfirm=None):
"""
Cancel an existing open order
@param orderId: the id of the order that you want to update
@param onConfirm: function called when the bitfinex websocket receives signal that the
order was confirmed
"""
self._create_callback(orderId, onConfirm, self.pending_cancel_confirm_callbacks)
await self.bfxapi._send_auth_command('oc', {'id': orderId})
self.logger.info("Order cancel order_id={} dispatched".format(orderId))
async def cancel_all_orders(self):
"""
Cancel all existing open orders
This function closes all open orders.
"""
await self.bfxapi._send_auth_command('oc_multi', { 'all': 1 })
async def cancel_order_group(self, gid, onConfirm=None):
"""
Cancel a set of orders using a single group id.
"""
self._create_callback(gid, onConfirm, self.pending_cancel_confirm_callbacks)
await self.bfxapi._send_auth_command('oc_multi', { 'gid': [gid] })
async def cancel_order_multi(self, ids=None, gids=None):
"""
Cancel existing open orders as a batch
@param ids: an array of order ids
@param gids: an array of group ids
"""
payload = {}
if ids:
payload['id'] = ids
if gids:
payload['gid'] = gids
await self.bfxapi._send_auth_command('oc_multi', payload)
def _create_callback(self, identifier, func, callback_storage):
if not func:
return
if identifier in callback_storage:
callback_storage[identifier] += [func]
else:
callback_storage[identifier] = [func]
async def _execute_callback(self, order, callback_storage):
idents = [order.id, order.cid, order.gid]
tasks = []
key = None
for k in callback_storage.keys():
if k in idents:
key = k
# call all callbacks associated with identifier
for callback in callback_storage[k]:
tasks += [callback(order)]
break
# remove from callbacks
if key:
del callback_storage[key]
await asyncio.gather(*tasks)
def _gen_unique_cid(self):
return gen_unique_cid()
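# Illustrative usage sketch (not part of the original module). It assumes an
# already-authenticated bfx websocket client object is available as `bfx`;
# everything else uses the methods defined above.
#
#   async def trade(bfx):
#       om = OrderManager(bfx, logLevel='INFO')
#       await om.submit_order(
#           'tBTCUSD', price=10000, amount=0.001,
#           market_type=Order.Type.LIMIT,
#           onConfirm=lambda order: print('confirmed', order.id),
#           onClose=lambda order: print('closed', order.status),
#       )
#       # later: cancel everything that is still open
#       await om.cancel_all_orders()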
|
boto3_type_annotations_with_docs/boto3_type_annotations/directconnect/paginator.py | cowboygneox/boto3_type_annotations | 119 | 11072201 | <reponame>cowboygneox/boto3_type_annotations
from typing import Dict
from botocore.paginate import Paginator
class DescribeDirectConnectGatewayAssociations(Paginator):
def paginate(self, associationId: str = None, associatedGatewayId: str = None, directConnectGatewayId: str = None, virtualGatewayId: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`DirectConnect.Client.describe_direct_connect_gateway_associations`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeDirectConnectGatewayAssociations>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
associationId='string',
associatedGatewayId='string',
directConnectGatewayId='string',
virtualGatewayId='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'directConnectGatewayAssociations': [
{
'directConnectGatewayId': 'string',
'directConnectGatewayOwnerAccount': 'string',
'associationState': 'associating'|'associated'|'disassociating'|'disassociated'|'updating',
'stateChangeError': 'string',
'associatedGateway': {
'id': 'string',
'type': 'virtualPrivateGateway'|'transitGateway',
'ownerAccount': 'string',
'region': 'string'
},
'associationId': 'string',
'allowedPrefixesToDirectConnectGateway': [
{
'cidr': 'string'
},
],
'virtualGatewayId': 'string',
'virtualGatewayRegion': 'string',
'virtualGatewayOwnerAccount': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **directConnectGatewayAssociations** *(list) --*
Information about the associations.
- *(dict) --*
Information about an association between a Direct Connect gateway and a virtual private gateway or transit gateway.
- **directConnectGatewayId** *(string) --*
The ID of the Direct Connect gateway.
- **directConnectGatewayOwnerAccount** *(string) --*
The ID of the AWS account that owns the associated gateway.
- **associationState** *(string) --*
The state of the association. The following are the possible values:
* ``associating`` : The initial state after calling CreateDirectConnectGatewayAssociation .
* ``associated`` : The Direct Connect gateway and virtual private gateway or transit gateway are successfully associated and ready to pass traffic.
* ``disassociating`` : The initial state after calling DeleteDirectConnectGatewayAssociation .
* ``disassociated`` : The virtual private gateway or transit gateway is disassociated from the Direct Connect gateway. Traffic flow between the Direct Connect gateway and virtual private gateway or transit gateway is stopped.
- **stateChangeError** *(string) --*
The error message if the state of an object failed to advance.
- **associatedGateway** *(dict) --*
Information about the associated gateway.
- **id** *(string) --*
The ID of the associated gateway.
- **type** *(string) --*
The type of associated gateway.
- **ownerAccount** *(string) --*
The ID of the AWS account that owns the associated virtual private gateway or transit gateway.
- **region** *(string) --*
The Region where the associated gateway is located.
- **associationId** *(string) --*
The ID of the Direct Connect gateway association.
- **allowedPrefixesToDirectConnectGateway** *(list) --*
The Amazon VPC prefixes to advertise to the Direct Connect gateway.
- *(dict) --*
Information about a route filter prefix that a customer can advertise through Border Gateway Protocol (BGP) over a public virtual interface.
- **cidr** *(string) --*
The CIDR block for the advertised route. Separate multiple routes using commas. An IPv6 CIDR must use /64 or shorter.
- **virtualGatewayId** *(string) --*
The ID of the virtual private gateway. Applies only to private virtual interfaces.
- **virtualGatewayRegion** *(string) --*
The AWS Region where the virtual private gateway is located.
- **virtualGatewayOwnerAccount** *(string) --*
The ID of the AWS account that owns the virtual private gateway.
- **NextToken** *(string) --*
A token to resume pagination.
:type associationId: string
:param associationId:
The ID of the Direct Connect gateway association.
:type associatedGatewayId: string
:param associatedGatewayId:
The ID of the associated gateway.
:type directConnectGatewayId: string
:param directConnectGatewayId:
The ID of the Direct Connect gateway.
:type virtualGatewayId: string
:param virtualGatewayId:
The ID of the virtual private gateway.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeDirectConnectGatewayAttachments(Paginator):
def paginate(self, directConnectGatewayId: str = None, virtualInterfaceId: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`DirectConnect.Client.describe_direct_connect_gateway_attachments`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeDirectConnectGatewayAttachments>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
directConnectGatewayId='string',
virtualInterfaceId='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'directConnectGatewayAttachments': [
{
'directConnectGatewayId': 'string',
'virtualInterfaceId': 'string',
'virtualInterfaceRegion': 'string',
'virtualInterfaceOwnerAccount': 'string',
'attachmentState': 'attaching'|'attached'|'detaching'|'detached',
'attachmentType': 'TransitVirtualInterface'|'PrivateVirtualInterface',
'stateChangeError': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **directConnectGatewayAttachments** *(list) --*
The attachments.
- *(dict) --*
Information about an attachment between a Direct Connect gateway and a virtual interface.
- **directConnectGatewayId** *(string) --*
The ID of the Direct Connect gateway.
- **virtualInterfaceId** *(string) --*
The ID of the virtual interface.
- **virtualInterfaceRegion** *(string) --*
The AWS Region where the virtual interface is located.
- **virtualInterfaceOwnerAccount** *(string) --*
The ID of the AWS account that owns the virtual interface.
- **attachmentState** *(string) --*
The state of the attachment. The following are the possible values:
* ``attaching`` : The initial state after a virtual interface is created using the Direct Connect gateway.
* ``attached`` : The Direct Connect gateway and virtual interface are attached and ready to pass traffic.
* ``detaching`` : The initial state after calling DeleteVirtualInterface .
* ``detached`` : The virtual interface is detached from the Direct Connect gateway. Traffic flow between the Direct Connect gateway and virtual interface is stopped.
- **attachmentType** *(string) --*
The type of attachment.
- **stateChangeError** *(string) --*
The error message if the state of an object failed to advance.
- **NextToken** *(string) --*
A token to resume pagination.
:type directConnectGatewayId: string
:param directConnectGatewayId:
The ID of the Direct Connect gateway.
:type virtualInterfaceId: string
:param virtualInterfaceId:
The ID of the virtual interface.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeDirectConnectGateways(Paginator):
def paginate(self, directConnectGatewayId: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`DirectConnect.Client.describe_direct_connect_gateways`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeDirectConnectGateways>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
directConnectGatewayId='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'directConnectGateways': [
{
'directConnectGatewayId': 'string',
'directConnectGatewayName': 'string',
'amazonSideAsn': 123,
'ownerAccount': 'string',
'directConnectGatewayState': 'pending'|'available'|'deleting'|'deleted',
'stateChangeError': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **directConnectGateways** *(list) --*
The Direct Connect gateways.
- *(dict) --*
Information about a Direct Connect gateway, which enables you to connect virtual interfaces and virtual private gateway or transit gateways.
- **directConnectGatewayId** *(string) --*
The ID of the Direct Connect gateway.
- **directConnectGatewayName** *(string) --*
The name of the Direct Connect gateway.
- **amazonSideAsn** *(integer) --*
The autonomous system number (ASN) for the Amazon side of the connection.
- **ownerAccount** *(string) --*
The ID of the AWS account that owns the Direct Connect gateway.
- **directConnectGatewayState** *(string) --*
The state of the Direct Connect gateway. The following are the possible values:
* ``pending`` : The initial state after calling CreateDirectConnectGateway .
* ``available`` : The Direct Connect gateway is ready for use.
* ``deleting`` : The initial state after calling DeleteDirectConnectGateway .
* ``deleted`` : The Direct Connect gateway is deleted and cannot pass traffic.
- **stateChangeError** *(string) --*
The error message if the state of an object failed to advance.
- **NextToken** *(string) --*
A token to resume pagination.
:type directConnectGatewayId: string
:param directConnectGatewayId:
The ID of the Direct Connect gateway.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
|
cacreader/swig-4.0.2/Examples/test-suite/python/template_default_arg_runme.py | kyletanyag/LL-Smartcard | 1,031 | 11072202 | <filename>cacreader/swig-4.0.2/Examples/test-suite/python/template_default_arg_runme.py
import template_default_arg
helloInt = template_default_arg.Hello_int()
helloInt.foo(template_default_arg.Hello_int.hi)
x = template_default_arg.X_int()
if (x.meth(20.0, 200) != 200):
raise RuntimeError, ("X_int test 1 failed")
if (x.meth(20) != 20):
raise RuntimeError, ("X_int test 2 failed")
if (x.meth() != 0):
raise RuntimeError, ("X_int test 3 failed")
y = template_default_arg.Y_unsigned()
if (y.meth(20.0, 200) != 200):
raise RuntimeError, ("Y_unsigned test 1 failed")
if (y.meth(20) != 20):
raise RuntimeError, ("Y_unsigned test 2 failed")
if (y.meth() != 0):
raise RuntimeError, ("Y_unsigned test 3 failed")
x = template_default_arg.X_longlong()
x = template_default_arg.X_longlong(20.0)
x = template_default_arg.X_longlong(20.0, 200L)
x = template_default_arg.X_int()
x = template_default_arg.X_int(20.0)
x = template_default_arg.X_int(20.0, 200)
x = template_default_arg.X_hello_unsigned()
x = template_default_arg.X_hello_unsigned(20.0)
x = template_default_arg.X_hello_unsigned(
20.0, template_default_arg.Hello_int())
y = template_default_arg.Y_hello_unsigned()
y.meth(20.0, template_default_arg.Hello_int())
y.meth(template_default_arg.Hello_int())
y.meth()
fz = template_default_arg.Foo_Z_8()
x = template_default_arg.X_Foo_Z_8()
fzc = x.meth(fz)
# Templated functions
# plain function: int ott(Foo<int>)
if (template_default_arg.ott(template_default_arg.Foo_int()) != 30):
raise RuntimeError, ("ott test 1 failed")
# %template(ott) ott<int, int>
if (template_default_arg.ott() != 10):
raise RuntimeError, ("ott test 2 failed")
if (template_default_arg.ott(1) != 10):
raise RuntimeError, ("ott test 3 failed")
if (template_default_arg.ott(1, 1) != 10):
raise RuntimeError, ("ott test 4 failed")
if (template_default_arg.ott("hi") != 20):
raise RuntimeError, ("ott test 5 failed")
if (template_default_arg.ott("hi", 1) != 20):
raise RuntimeError, ("ott test 6 failed")
if (template_default_arg.ott("hi", 1, 1) != 20):
raise RuntimeError, ("ott test 7 failed")
# %template(ott) ott<const char *>
if (template_default_arg.ottstring(template_default_arg.Hello_int(), "hi") != 40):
raise RuntimeError, ("ott test 8 failed")
if (template_default_arg.ottstring(template_default_arg.Hello_int()) != 40):
raise RuntimeError, ("ott test 9 failed")
# %template(ott) ott<int>
if (template_default_arg.ottint(template_default_arg.Hello_int(), 1) != 50):
raise RuntimeError, ("ott test 10 failed")
if (template_default_arg.ottint(template_default_arg.Hello_int()) != 50):
raise RuntimeError, ("ott test 11 failed")
# %template(ott) ott<double>
if (template_default_arg.ott(template_default_arg.Hello_int(), 1.0) != 60):
raise RuntimeError, ("ott test 12 failed")
if (template_default_arg.ott(template_default_arg.Hello_int()) != 60):
raise RuntimeError, ("ott test 13 failed")
|
bustag/app/local.py | skypjsfly/bustag | 4,197 | 11072219 | <filename>bustag/app/local.py<gh_stars>1000+
'''
handle local file related functions
'''
import re
from peewee import SqliteDatabase, DatabaseError
from bustag.spider.db import Item, LocalItem, ItemRate, RATE_TYPE, RATE_VALUE, db, DBError
from bustag.util import logger, get_data_path
def add_local_fanhao(fanhao, tag_like):
'''
Args:
fanhao:str - ',' separeted (fanhao, path)
'''
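    # Expected input format (values are illustrative): one entry per line,
    # optionally followed by a comma and a local file path, e.g.
    #   ABP-123,/data/av/ABP-123.mp4
    #   ipx456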
rows = fanhao.splitlines()
items = []
missed_fanhaos = []
local_file_added = 0
tag_file_added = 0
pattern = r'([A-Z]+)-?([0-9]+)'
for row in rows:
if ',' in row:
fanhao, path = row.split(',')
else:
fanhao = row
path = None
fanhao = fanhao.strip().upper()
match = re.search(pattern, fanhao)
if match and len(match.groups()) == 2:
series, num = match.groups()
matched_fanhao = f'{series}-{num}'
path = path.strip() if path else None
logger.debug(f'matched fanhao {matched_fanhao}')
items.append((matched_fanhao, path))
with db.atomic():
for item in items:
fanhao, path = item
# if path is not None, add to local item
if path:
local_item = LocalItem.saveit(fanhao, path)
if local_item:
local_file_added += 1
# if tag_like is True, add it to item_rate table
if tag_like:
item_rate = ItemRate.saveit(
RATE_TYPE.USER_RATE, RATE_VALUE.LIKE, fanhao)
if item_rate:
tag_file_added += 1
if not Item.get_by_fanhao(fanhao):
# add to get from spider
missed_fanhaos.append(fanhao)
logger.debug(f'missed_fanhaos:{missed_fanhaos}')
logger.debug(f'tag_file_added:{tag_file_added}')
logger.debug(f'local_file_added:{local_file_added}')
return missed_fanhaos, local_file_added, tag_file_added
def load_tags_db():
'''
    load user tag data from the uploaded db file stored at get_data_path('uploaded.db')
    and merge its user rate records into the current database
'''
db_name = get_data_path('uploaded.db')
try:
db_upload = SqliteDatabase(db_name)
db_upload.get_tables()
except DatabaseError:
raise DBError()
db_is_old = False
tag_data = []
missed_fanhaos = []
tag_file_added = 0
sql_old = '''select item_rate.rate_value, item.fanhao
from item_rate inner
join item on item_rate.item_id = item.id
where item_rate.rate_type=1 '''
sql_new = '''select item_rate.rate_value, item.fanhao
from item_rate inner
join item on item_rate.item_id = item.fanhao
where item_rate.rate_type=1 '''
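    # Probe with the old-style query first: older databases joined
    # item_rate.item_id against item.id, while newer ones key items by fanhao.
    # A row coming back from the old query means the uploaded db is old-style.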
cursor = db_upload.execute_sql(sql_old)
res = cursor.fetchone()
if res:
db_is_old = True
if db_is_old:
cursor = db_upload.execute_sql(sql_old)
else:
cursor = db_upload.execute_sql(sql_new)
for row in cursor.fetchall():
tag_data.append(row)
with db_upload.atomic():
for rate_value, fanhao in tag_data:
item_rate = ItemRate.saveit(
RATE_TYPE.USER_RATE, rate_value, fanhao)
if item_rate:
tag_file_added += 1
if not Item.get_by_fanhao(fanhao):
# add to get from spider
missed_fanhaos.append(fanhao)
logger.debug(tag_data)
logger.info(f'added user tag rate: {tag_file_added}')
logger.info(f'added fanhao to download: {len(missed_fanhaos)}')
return tag_file_added, missed_fanhaos
|
test/nn/test_sequential.py | Kenneth-Schroeder/pytorch_geometric | 12,651 | 11072227 | import torch
import torch.fx
from torch.nn import Linear, ReLU, Dropout
from torch_sparse import SparseTensor
from torch_geometric.nn import Sequential, MessagePassing
from torch_geometric.nn import GCNConv, JumpingKnowledge, global_mean_pool
def test_sequential():
x = torch.randn(4, 16)
edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
batch = torch.zeros(4, dtype=torch.long)
model = Sequential('x, edge_index', [
(GCNConv(16, 64), 'x, edge_index -> x'),
ReLU(inplace=True),
(GCNConv(64, 64), 'x, edge_index -> x'),
ReLU(inplace=True),
Linear(64, 7),
])
model.reset_parameters()
assert str(model) == (
'Sequential(\n'
' (0): GCNConv(16, 64)\n'
' (1): ReLU(inplace=True)\n'
' (2): GCNConv(64, 64)\n'
' (3): ReLU(inplace=True)\n'
' (4): Linear(in_features=64, out_features=7, bias=True)\n'
')')
out = model(x, edge_index)
assert out.size() == (4, 7)
model = Sequential('x, edge_index, batch', [
(Dropout(p=0.5), 'x -> x'),
(GCNConv(16, 64), 'x, edge_index -> x1'),
ReLU(inplace=True),
(GCNConv(64, 64), 'x1, edge_index -> x2'),
ReLU(inplace=True),
(lambda x1, x2: [x1, x2], 'x1, x2 -> xs'),
(JumpingKnowledge('cat', 64, num_layers=2), 'xs -> x'),
(global_mean_pool, 'x, batch -> x'),
Linear(2 * 64, 7),
])
model.reset_parameters()
out = model(x, edge_index, batch)
assert out.size() == (1, 7)
def test_sequential_jittable():
x = torch.randn(4, 16)
edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
adj_t = SparseTensor(row=edge_index[0], col=edge_index[1]).t()
model = Sequential('x: Tensor, edge_index: Tensor', [
(GCNConv(16, 64).jittable(), 'x, edge_index -> x'),
ReLU(inplace=True),
(GCNConv(64, 64).jittable(), 'x, edge_index -> x'),
ReLU(inplace=True),
Linear(64, 7),
])
torch.jit.script(model)(x, edge_index)
model = Sequential('x: Tensor, edge_index: SparseTensor', [
(GCNConv(16, 64).jittable(), 'x, edge_index -> x'),
ReLU(inplace=True),
(GCNConv(64, 64).jittable(), 'x, edge_index -> x'),
ReLU(inplace=True),
Linear(64, 7),
])
torch.jit.script(model)(x, adj_t)
def symbolic_trace(module):
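    # Treat MessagePassing modules as leaf modules so torch.fx does not try to
    # trace through their message/aggregate internals.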
class Tracer(torch.fx.Tracer):
def is_leaf_module(self, module, *args, **kwargs) -> bool:
return (isinstance(module, MessagePassing)
or super().is_leaf_module(module, *args, **kwargs))
return torch.fx.GraphModule(module, Tracer().trace(module))
def test_sequential_tracable():
model = Sequential('x, edge_index', [
(GCNConv(16, 64), 'x, edge_index -> x1'),
ReLU(inplace=True),
(GCNConv(64, 64), 'x1, edge_index -> x2'),
ReLU(inplace=True),
(lambda x1, x2: x1 + x2, 'x1, x2 -> x'),
Linear(64, 7),
])
symbolic_trace(model)
|
irc/tests/test_client.py | larsks/irc | 362 | 11072249 | <reponame>larsks/irc<filename>irc/tests/test_client.py
from unittest import mock
import pytest
import irc.client
def test_version():
assert isinstance(irc._get_version(), str)
@mock.patch('irc.connection.socket')
def test_privmsg_sends_msg(socket_mod):
server = irc.client.Reactor().server()
server.connect('foo', 6667, 'bestnick')
# make sure the mock object doesn't have a write method or it will treat
# it as an SSL connection and never call .send.
del server.socket.write
server.privmsg('#best-channel', 'You are great')
server.socket.send.assert_called_with(b'PRIVMSG #best-channel :You are great\r\n')
@mock.patch('irc.connection.socket')
def test_privmsg_fails_on_embedded_carriage_returns(socket_mod):
server = irc.client.Reactor().server()
server.connect('foo', 6667, 'bestnick')
with pytest.raises(ValueError):
server.privmsg('#best-channel', 'You are great\nSo are you')
class TestHandlers:
def test_handlers_same_priority(self):
"""
Two handlers of the same priority should still compare.
"""
handler1 = irc.client.PrioritizedHandler(1, lambda: None)
handler2 = irc.client.PrioritizedHandler(1, lambda: 'other')
assert not handler1 < handler2
assert not handler2 < handler1
@mock.patch('irc.connection.socket')
    def test_command_without_arguments(self, socket_mod):
"A command without arguments should not crash"
server = irc.client.Reactor().server()
server.connect('foo', 6667, 'bestnick')
server._process_line('GLOBALUSERSTATE')
|
src/tests/test_cases/capitalization.py | sanders41/pydocstyle | 776 | 11072278 | <filename>src/tests/test_cases/capitalization.py<gh_stars>100-1000
"""A valid module docstring."""
from .expected import Expectation
expectation = Expectation()
expect = expectation.expect
@expect("D403: First word of the first line should be properly capitalized "
"('Do', not 'do')")
def not_capitalized():
"""do something."""
# Make sure empty docstrings don't generate capitalization errors.
@expect("D103: Missing docstring in public function")
def empty_docstring():
""""""
def all_caps():
"""GET the request."""
def non_letter_characters():
"""Create/Edit the doodli-do."""
def more_non_letter_characters():
"""(Un)register the user."""
def even_more_non_letter():
"""'laser' the planet."""
def dash():
"""git-push it."""
def digit_in_word():
"""sha1 the string."""
@expect("D403: First word of the first line should be properly capitalized "
"(\"Don't\", not \"Don'T\")")
def partial_caps():
"""Don'T do that."""
@expect("D403: First word of the first line should be properly capitalized "
"('Return', not 'ReTurn')")
def more_partial_caps():
"""ReTurn the field."""
@expect("D403: First word of the first line should be properly capitalized "
"('Generate', not 'generate')")
def just_one_more_example():
"""generate a function."""
|
tests/io/test_core.py | daniel-falk/kedro | 2,047 | 11072287 | from decimal import Decimal
from fractions import Fraction
from pathlib import PurePosixPath
from typing import Any, List
import pytest
from kedro.io.core import AbstractDataSet, _parse_filepath, get_filepath_str
# List sourced from https://docs.python.org/3/library/stdtypes.html#truth-value-testing.
# Excludes None, as None values are not shown in the str representation.
FALSE_BUILTINS: List[Any] = [
False,
0,
0.0,
0j,
Decimal(0),
Fraction(0, 1),
"",
(),
[],
{},
set(),
range(0),
]
class MyDataSet(AbstractDataSet):
def __init__(self, var=None):
self.var = var
def _describe(self):
return dict(var=self.var)
def _load(self):
pass # pragma: no cover
def _save(self, data):
pass # pragma: no cover
class TestCoreFunctions:
@pytest.mark.parametrize("var", [1, True] + FALSE_BUILTINS)
def test_str_representation(self, var):
assert str(MyDataSet(var)) == f"MyDataSet(var={var})"
def test_str_representation_none(self):
assert str(MyDataSet()) == "MyDataSet()"
def test_get_filepath_str(self):
path = get_filepath_str(PurePosixPath("example.com/test.csv"), "http")
assert isinstance(path, str)
assert path == "http://example.com/test.csv"
@pytest.mark.parametrize(
"filepath,expected_result",
[
("s3://bucket/file.txt", {"protocol": "s3", "path": "bucket/file.txt"}),
(
"s3://user@BUCKET/file.txt",
{"protocol": "s3", "path": "BUCKET/file.txt"},
),
("gcs://bucket/file.txt", {"protocol": "gcs", "path": "bucket/file.txt"}),
("gs://bucket/file.txt", {"protocol": "gs", "path": "bucket/file.txt"}),
("adl://bucket/file.txt", {"protocol": "adl", "path": "bucket/file.txt"}),
("abfs://bucket/file.txt", {"protocol": "abfs", "path": "bucket/file.txt"}),
(
"abfss://bucket/file.txt",
{"protocol": "abfss", "path": "bucket/file.txt"},
),
(
"hdfs://namenode:8020/file.txt",
{"protocol": "hdfs", "path": "/file.txt"},
),
("file:///tmp/file.txt", {"protocol": "file", "path": "/tmp/file.txt"}),
("/tmp/file.txt", {"protocol": "file", "path": "/tmp/file.txt"}),
(
"C:\\Projects\\file.txt",
{"protocol": "file", "path": "C:\\Projects\\file.txt"},
),
(
"file:///C:\\Projects\\file.txt",
{"protocol": "file", "path": "C:\\Projects\\file.txt"},
),
(
"https://example.com/file.txt",
{"protocol": "https", "path": "https://example.com/file.txt"},
),
(
"http://example.com/file.txt",
{"protocol": "http", "path": "http://example.com/file.txt"},
),
],
)
def test_parse_filepath(self, filepath, expected_result):
assert _parse_filepath(filepath) == expected_result
|
workers/repomirrorworker/models_interface.py | giuseppe/quay | 2,027 | 11072288 | from abc import ABCMeta, abstractmethod
from collections import namedtuple
from six import add_metaclass
class RepoMirrorToken(namedtuple("NextRepoMirrorToken", ["min_id"])):
"""
RepoMirrorToken represents an opaque token that can be passed between runs of the repository
    mirror worker to continue mirroring wherever the previous run left off.
Note that the data of the token is *opaque* to the repository mirror worker, and the worker
should *not* pull any data out or modify the token in any way.
"""
@add_metaclass(ABCMeta)
class RepoMirrorWorkerDataInterface(object):
@abstractmethod
def repositories_to_mirror(self, target_time, start_token=None):
"""
        Returns a tuple consisting of an iterator of all the candidates to mirror and a RepoMirrorToken.
The iterator returns a tuple for each iteration consisting of the candidate Repository, the
abort signal, and the number of remaining candidates. If the iterator returned is None,
there are no candidates to process.
"""
|
speech/melgan/model/res_stack.py | OthmaneJ/deep-tts | 213 | 11072298 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
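# ResStack: the MelGAN-style residual stack -- three residual blocks of dilated
# 1D convolutions (dilations 1, 3, 9); each block's output is added back onto
# its input, and remove_weight_norm() strips weight normalization for inference.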
class ResStack(nn.Module):
def __init__(self, channel):
super(ResStack, self).__init__()
self.layers = nn.ModuleList([
nn.Sequential(
nn.LeakyReLU(),
nn.utils.weight_norm(nn.Conv1d(channel, channel, kernel_size=3, dilation=3**i, padding=3**i)),
nn.LeakyReLU(),
nn.utils.weight_norm(nn.Conv1d(channel, channel, kernel_size=3, dilation=1, padding=1)),
)
for i in range(3)
])
def forward(self, x):
for layer in self.layers:
x = x + layer(x)
return x
def remove_weight_norm(self):
for layer in self.layers:
nn.utils.remove_weight_norm(layer[1])
nn.utils.remove_weight_norm(layer[3])
|
New paper writing/train.py | EagleW/PaperRobot-Incremental-Draft-Generation-of-Scientific-Ideas | 453 | 11072301 | import gc
import os
import sys
import time
import torch
import pickle
import argparse
import torch.nn as nn
from collections import OrderedDict
from eval import Evaluate
from loader.logger import Tee
from loader.loader import load_file_with_terms
from loader.preprocessing import prepare_mapping, AssembleMem
from utils.optim import get_optimizer
from memory_generator.seq2seq import Seq2seq
from memory_generator.Encoder import EncoderRNN
from memory_generator.Encoder import TermEncoder
from memory_generator.predictor import Predictor
from memory_generator.Decoder import DecoderRNN
# Read parameters from command line
parser = argparse.ArgumentParser()
parser.add_argument(
"--lower", default='0',
type=int, help="Lowercase words (this will not affect character inputs)"
)
parser.add_argument(
"--word_dim", default="128",
type=int, help="Token embedding dimension"
)
parser.add_argument(
"--h", default="8",
type=int, help="No of attention heads"
)
parser.add_argument(
"--hop", default="3",
type=int, help="No of Memory layers"
)
parser.add_argument(
"--dropout", default="0.2",
type=float, help="Dropout on the embeddings (0 = no dropout)"
)
parser.add_argument(
"--layer_dropout", default="0.2",
type=float, help="Dropout on the layer (0 = no dropout)"
)
parser.add_argument(
"--lr_method", default="adam",
help="Learning method (SGD, Adadelta, Adam..)"
)
parser.add_argument(
"--lr_rate", default="0.001",
type=float, help="Learning method (SGD, Adadelta, Adam..)"
)
parser.add_argument(
"--model_dp", default="models/",
help="model directory path"
)
parser.add_argument(
"--pre_emb", default="",
help="Location of pretrained embeddings"
)
parser.add_argument(
"--gpu", default="1",
type=int, help="default is 1. set 0 to disable use gpu."
)
parser.add_argument(
"--num_epochs", default="100",
type=int, help="Number of training epochs"
)
parser.add_argument(
"--batch_size", default="2",
type=int, help="Batch size."
)
parser.add_argument(
"--max_len", default="150",
type=int, help="Max length."
)
parser.add_argument(
"--freq", default="5",
type=int, help="Min freq."
)
parser.add_argument(
"--cont", action='store_true', help="Continue training."
)
parser.add_argument(
"--model", default="models/memory/best_dev_model.pth.tar",
help="Model location"
)
parser.add_argument(
"--load", action='store_true', help="Load dataset."
)
parser.add_argument(
"--data_path", default="data",
help="data directory path"
)
args = parser.parse_args()
# Parse parameters
parameters = OrderedDict()
parameters['lower'] = args.lower == 1
parameters['freq'] = args.freq
parameters['word_dim'] = args.word_dim
parameters['h'] = args.h
parameters['hop'] = args.hop
parameters['pre_emb'] = args.pre_emb
parameters['input_dropout_p'] = args.dropout
parameters['layer_dropout'] = args.layer_dropout
parameters['gpu'] = args.gpu == 1
parameters['batch_size'] = args.batch_size
parameters['max_len'] = args.max_len
parameters['lr_method'] = args.lr_method
parameters['lr_rate'] = args.lr_rate
parameters['data_path'] = args.data_path
# Check parameters validity
assert os.path.isdir(args.data_path)
assert 0. <= parameters['input_dropout_p'] < 1.0
assert 0. <= parameters['layer_dropout'] < 1.0
assert not parameters['pre_emb'] or parameters['word_dim'] > 0
assert not parameters['pre_emb'] or os.path.isfile(parameters['pre_emb'])
model_dir = args.model_dp
model_name = ['memory']
for k, v in parameters.items():
if v == "":
continue
if k == 'pre_emb':
v = os.path.basename(v)
model_name.append('='.join((k, str(v))))
model_dir = os.path.join(model_dir, ','.join(model_name[:-1]))
os.makedirs(model_dir, exist_ok=True)
# register logger to save print messages to both stdout and disk
training_log_path = os.path.join(model_dir, 'training_log.txt')
if os.path.exists(training_log_path):
os.remove(training_log_path)
f = open(training_log_path, 'w')
sys.stdout = Tee(sys.stdout, f)
# print model parameters
print("Model location: %s" % model_dir)
print('Model parameters:')
for k, v in parameters.items():
print('%s=%s' % (k, v))
# Data parameters
lower = parameters['lower']
# load previously saved data
if args.load:
state = pickle.load(open(args.data_path + '/dataset.pth', 'rb'))
words = state['words']
r_dataset = state['r_dataset']
v_dataset = state['v_dataset']
t_dataset = state['t_dataset']
else:
words = []
r_words, r_dataset = load_file_with_terms(args.data_path + '/train.txt')
words.extend(r_words)
v_words, v_dataset = load_file_with_terms(args.data_path + '/valid.txt')
t_words, t_dataset = load_file_with_terms(args.data_path + '/test.txt')
state = {
'words': words,
'r_dataset': r_dataset,
'v_dataset': v_dataset,
't_dataset': t_dataset
}
pickle.dump(state, open(args.data_path + '/dataset.pth', "wb"))
mappings, words_freq = prepare_mapping(words, lower, args.freq)
parameters['unk_id'] = mappings['word2id']['<unk>']
parameters['sos_id'] = mappings['word2id']['<sos>']
parameters['eos_id'] = mappings['word2id']['<eos>']
# Index data
r_dataset = AssembleMem(r_dataset, mappings['word2id'], lower, args.batch_size, args.max_len, parameters['gpu'])
v_dataset = AssembleMem(v_dataset, mappings['word2id'], lower, args.batch_size, args.max_len, parameters['gpu'])
print("%i / %i pairs in train / dev." % (r_dataset.len, v_dataset.len))
word2id = mappings['word2id']
id2word = mappings['id2word']
vocab_size = len(mappings['id2word'])
device = torch.device("cuda:0" if torch.cuda.is_available() and parameters['gpu'] else "cpu")
# model initialization
embedding = nn.Embedding(r_dataset.vocab_size, args.word_dim, padding_idx=0)
ref_encoder = EncoderRNN(vocab_size, embedding, parameters['word_dim'], parameters['input_dropout_p'])
term_encoder = TermEncoder(embedding, parameters['input_dropout_p'])
decoder = DecoderRNN(vocab_size, embedding, **parameters)
model = Seq2seq(ref_encoder, term_encoder, decoder)
model = model.to(device)
optimizer = get_optimizer(model, parameters['lr_method'], parameters['lr_rate'])
#
# training starts
#
since = time.time()
best_dev = 0.0
num_epochs = args.num_epochs
epoch_examples_total = r_dataset.len
train_loader = r_dataset.corpus
len_batch_t = len(train_loader)
print('train batches', len_batch_t)
start_epoch = 0
# continue training
if args.cont:
print('loading model from:', args.model)
if args.gpu:
state = torch.load(args.model)
else:
state = torch.load(args.model, map_location=lambda storage, loc: storage)
state_dict = state['state_dict']
model.load_state_dict(state_dict)
state_dict = state['optimizer']
optimizer.load_state_dict(state_dict)
start_epoch = state['epoch']
best_dev = state['best_prec1']
for epoch in range(num_epochs):
print('-' * 10)
print('Epoch {}/{}'.format(epoch + start_epoch, num_epochs - 1))
# epoch start time
time_epoch_start = time.time()
# train
model.train(True)
torch.set_grad_enabled(True)
epoch_loss = 0
for batch_idx in range(len_batch_t):
batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, batch_t, \
batch_o_t = r_dataset.get_batch(batch_idx)
losses = model(batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, batch_t,
batch_o_t, teacher_forcing_ratio=1)
batch_loss = losses.mean()
# print(losses)
model.zero_grad()
batch_loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
optimizer.step()
num_examples = batch_s.size(0)
loss = batch_loss.item()
epoch_loss += num_examples * loss
sys.stdout.write(
'%d batches processed. current batch loss: %f\r' %
(batch_idx, loss)
)
sys.stdout.flush()
del batch_s, batch_o_s, batch_t, batch_o_t, source_len, batch_term, batch_o_term
gc.collect()
# torch.cuda.empty_cache()
epoch_loss_avg = epoch_loss / float(epoch_examples_total)
log_msg = "Finished epoch %d: Train %s: %.4f" % (epoch + start_epoch, "Avg NLLLoss", epoch_loss_avg)
print()
print(log_msg)
predictor = Predictor(model, id2word, vocab_size)
eval_f = Evaluate()
print("Start Evaluating")
cand, ref, titles, terms = predictor.preeval_batch(v_dataset)
final_scores = eval_f.evaluate(live=True, cand=cand, ref=ref)
final_scores['Bleu_4'] *= 10.0
epoch_score = 2*final_scores['ROUGE_L']*final_scores['Bleu_4']/(final_scores['Bleu_4']+ final_scores['ROUGE_L'])
if epoch_score > best_dev:
best_dev = epoch_score
print('new best score on dev: %.4f' % best_dev)
print('saving the current model to disk...')
state = {
'epoch': epoch + 1,
'parameters': parameters,
'state_dict': model.state_dict(),
'best_prec1': best_dev,
'optimizer': optimizer.state_dict(),
'mappings': mappings
}
torch.save(state, os.path.join(model_dir, 'best_dev_model.pth.tar'))
print("Examples")
print("Output:", cand[1])
print("Refer:", ref[1])
# epoch end time
time_epoch_end = time.time()
# torch.cuda.empty_cache()
    print('epoch training time: %.2f seconds' % (time_epoch_end - time_epoch_start))
print('best dev: ', best_dev)
|
tests/test_tutorial/test_code_structure/test_tutorial002.py | spawn08/sqlmodel | 5,490 | 11072308 | <reponame>spawn08/sqlmodel
from unittest.mock import patch
from sqlmodel import create_engine
from ...conftest import get_testing_print_function
expected_calls = [
[
"Created hero:",
{
"id": 1,
"name": "Deadpond",
"age": None,
"secret_name": "<NAME>",
"team_id": 1,
},
],
[
"Hero's team:",
{"name": "Z-Force", "headquarters": "<NAME>", "id": 1},
],
]
def test_tutorial(clear_sqlmodel):
from docs_src.tutorial.code_structure.tutorial002 import app, database
database.sqlite_url = "sqlite://"
database.engine = create_engine(database.sqlite_url)
app.engine = database.engine
calls = []
new_print = get_testing_print_function(calls)
with patch("builtins.print", new=new_print):
app.main()
assert calls == expected_calls
|
components/autofill/core/browser/pattern_provider/transpile_default_regex_patterns.py | zealoussnow/chromium | 14,668 | 11072327 | #!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import io
import json
import sys
def to_string_literal(json_string_literal):
return json.dumps(json_string_literal)
def build_cpp_map_population(input):
lines = []
def output(line):
lines.append(line)
output('JsonPattern patterns[] = {')
for key1 in input:
for key2 in input[key1]:
for pattern in input[key1][key2]:
name = to_string_literal(key1)
language = to_string_literal(key2)
positive_pattern = pattern['positive_pattern']
negative_pattern = pattern['negative_pattern']
positive_score = pattern['positive_score']
match_field_attributes = pattern['match_field_attributes']
match_field_input_types = pattern['match_field_input_types']
positive_pattern = 'u' + to_string_literal(positive_pattern)
if negative_pattern is None:
                    negative_pattern = 'nullptr'
else:
negative_pattern = 'u' + to_string_literal(negative_pattern)
# Shift to the right to match the MatchFieldTypes enum, which
# temporarily starts at 1<<2 instead of 1<<0.
match_field_input_types = '{} << 2'.format(match_field_input_types)
output('{')
output('.name = {},'.format(name))
output('.language = {},'.format(language))
output('.positive_pattern = {},'.format(positive_pattern))
output('.negative_pattern = {},'.format(negative_pattern))
output('.positive_score = {},'.format(positive_score))
output('.match_field_attributes = {},'.format(match_field_attributes))
output('.match_field_input_types = {},'.format(match_field_input_types))
output('},')
output('};')
return lines
def build_cpp_function(cpp, output_handle):
def output(s):
# unicode() exists and is necessary only in Python 2, not in Python 3.
if sys.version_info[0] < 3:
s = unicode(s, 'utf-8')
output_handle.write(s)
output('// Copyright 2020 The Chromium Authors. All rights reserved.\n')
output('// Use of this source code is governed by a BSD-style license ')
output('that can be\n')
output('// found in the LICENSE file.\n')
output('\n')
output('#include "components/autofill/core/browser/pattern_provider/'\
'default_regex_patterns.h"\n')
output('#include "components/autofill/core/common/language_code.h"\n')
output('\n')
output('namespace autofill {\n')
output('\n')
output('PatternProvider::Map CreateDefaultRegexPatterns() {\n')
output(' struct JsonPattern {\n')
output(' const char* name;\n')
output(' const char* language;\n')
output(' const char16_t* positive_pattern;\n')
output(' const char16_t* negative_pattern;\n')
output(' float positive_score;\n')
output(' uint8_t match_field_attributes;\n')
output(' uint16_t match_field_input_types;\n')
output(' };\n')
output('\n')
for line in build_cpp_map_population(cpp):
output(line)
output('\n')
output(' PatternProvider::Map map;\n')
output(' size_t len = sizeof(patterns) / sizeof(patterns[0]);\n')
output(' for (size_t i = 0; i < len; ++i) {\n')
output(' const JsonPattern& p = patterns[i];\n')
output(' MatchingPattern mp;\n')
output(' mp.language = LanguageCode(p.language);\n')
output(' mp.positive_pattern = p.positive_pattern;\n')
output(' mp.negative_pattern = '
'p.negative_pattern ? p.negative_pattern : u"";\n')
output(' mp.positive_score = p.positive_score;\n')
output(' mp.match_field_input_types = p.match_field_input_types;\n')
output(' mp.match_field_attributes = p.match_field_attributes;\n')
output(' map[p.name][LanguageCode(p.language)].push_back(mp);\n')
output(' }\n')
output(' return map;\n')
output('}\n')
output('\n')
output('}')
if __name__ == '__main__':
input_file = sys.argv[1]
output_file = sys.argv[2]
with io.open(input_file, 'r', encoding='utf-8') as input_handle:
input_json = json.load(input_handle)
with io.open(output_file, 'w', encoding='utf-8') as output_handle:
build_cpp_function(input_json, output_handle)
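# Hedged usage note (added for illustration; the file names below are placeholders, not
# the paths used by the Chromium build): the script takes an input JSON file of regex
# pattern definitions and an output path for the generated C++ source, e.g.
#
#   python transpile_default_regex_patterns.py regex_patterns.json default_regex_patterns.cc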
|
hs_composite_resource/urls.py | hydroshare/hydroshare | 178 | 11072328 | <filename>hs_composite_resource/urls.py<gh_stars>100-1000
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^_internal/(?P<resource_id>[0-9a-f]+)/(?P<coverage_type>[A-z]+)/update-coverage/$',
views.update_resource_coverage, name="update_resource_coverage"),
url(r'^_internal/(?P<resource_id>[0-9a-f]+)/(?P<coverage_type>[A-z]+)/delete-coverage/$',
views.delete_resource_coverage, name="delete_resource_coverage")
]
|
src/graphql/validation/rules/unique_directives_per_location.py | KingDarBoja/graphql-core | 590 | 11072344 | <reponame>KingDarBoja/graphql-core
from collections import defaultdict
from typing import Any, Dict, List, Union, cast
from ...error import GraphQLError
from ...language import (
DirectiveDefinitionNode,
DirectiveNode,
Node,
SchemaDefinitionNode,
SchemaExtensionNode,
TypeDefinitionNode,
TypeExtensionNode,
is_type_definition_node,
is_type_extension_node,
)
from ...type import specified_directives
from . import ASTValidationRule, SDLValidationContext, ValidationContext
__all__ = ["UniqueDirectivesPerLocationRule"]
class UniqueDirectivesPerLocationRule(ASTValidationRule):
"""Unique directive names per location
A GraphQL document is only valid if all non-repeatable directives at a given
location are uniquely named.
"""
context: Union[ValidationContext, SDLValidationContext]
def __init__(self, context: Union[ValidationContext, SDLValidationContext]):
super().__init__(context)
unique_directive_map: Dict[str, bool] = {}
schema = context.schema
defined_directives = (
schema.directives if schema else cast(List, specified_directives)
)
for directive in defined_directives:
unique_directive_map[directive.name] = not directive.is_repeatable
ast_definitions = context.document.definitions
for def_ in ast_definitions:
if isinstance(def_, DirectiveDefinitionNode):
unique_directive_map[def_.name.value] = not def_.repeatable
self.unique_directive_map = unique_directive_map
self.schema_directives: Dict[str, DirectiveNode] = {}
self.type_directives_map: Dict[str, Dict[str, DirectiveNode]] = defaultdict(
dict
)
# Many different AST nodes may contain directives. Rather than listing them all,
# just listen for entering any node, and check to see if it defines any directives.
def enter(self, node: Node, *_args: Any) -> None:
directives: List[DirectiveNode] = getattr(node, "directives", None)
if not directives:
return
if isinstance(node, (SchemaDefinitionNode, SchemaExtensionNode)):
seen_directives = self.schema_directives
elif is_type_definition_node(node) or is_type_extension_node(node):
node = cast(Union[TypeDefinitionNode, TypeExtensionNode], node)
type_name = node.name.value
seen_directives = self.type_directives_map[type_name]
else:
seen_directives = {}
for directive in directives:
directive_name = directive.name.value
if self.unique_directive_map.get(directive_name):
if directive_name in seen_directives:
self.report_error(
GraphQLError(
f"The directive '@{directive_name}'"
" can only be used once at this location.",
[seen_directives[directive_name], directive],
)
)
else:
seen_directives[directive_name] = directive
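# Hedged usage sketch (added; not part of the original module): running only this rule
# against a parsed document. The schema and query strings are illustrative placeholders.
#
#   from graphql import build_schema, parse, validate
#
#   schema = build_schema("type Query { hello: String }")
#   document = parse("{ hello @skip(if: true) @skip(if: false) }")
#   errors = validate(schema, document, rules=[UniqueDirectivesPerLocationRule])
#   # -> one error: "The directive '@skip' can only be used once at this location."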
|
python/ql/test/experimental/query-tests/Security/CWE-287/auth_good_3.py | madhurimamandal/codeql | 4,036 | 11072358 | from ldap3 import Server, Connection, ALL
from flask import request, Flask
from ldap3.utils.dn import escape_rdn
from ldap3.utils.conv import escape_filter_chars
app = Flask(__name__)
@app.route("/passwordFromEnv")
def passwordFromEnv():
"""
The bind's password argument is a non-empty string
"""
dn = "dc={}".format(escape_rdn(request.args['dc']))
search_filter = "(user={})".format(escape_filter_chars(request.args['search']))
srv = Server('servername', get_info=ALL)
conn = Connection(srv, user='user_dn',
password="<PASSWORD>$$!")
status, result, response, _ = conn.search(dn, search_filter)
# if __name__ == "__main__":
# app.run(debug=True)
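# Added illustrative note (not part of the original CodeQL test case): escape_rdn and
# escape_filter_chars are what make the request parameters above safe to interpolate.
# For example, escape_filter_chars("a)(uid=*)") encodes the '(', ')' and '*' filter
# metacharacters so a crafted search string cannot break out of the filter expression.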
|
linear_nets.py | sanketvmehta/continual-learning | 891 | 11072370 | from torch import nn
import numpy as np
import utils
import excitability_modules as em
class fc_layer(nn.Module):
'''Fully connected layer, with possibility of returning "pre-activations".
Input: [batch_size] x ... x [in_size] tensor
Output: [batch_size] x ... x [out_size] tensor'''
def __init__(self, in_size, out_size, nl=nn.ReLU(),
drop=0., bias=True, excitability=False, excit_buffer=False, batch_norm=False, gated=False):
super().__init__()
if drop>0:
self.dropout = nn.Dropout(drop)
self.linear = em.LinearExcitability(in_size, out_size, bias=False if batch_norm else bias,
excitability=excitability, excit_buffer=excit_buffer)
if batch_norm:
self.bn = nn.BatchNorm1d(out_size)
if gated:
self.gate = nn.Linear(in_size, out_size)
self.sigmoid = nn.Sigmoid()
if isinstance(nl, nn.Module):
self.nl = nl
elif not nl=="none":
self.nl = nn.ReLU() if nl == "relu" else (nn.LeakyReLU() if nl == "leakyrelu" else utils.Identity())
def forward(self, x, return_pa=False):
input = self.dropout(x) if hasattr(self, 'dropout') else x
pre_activ = self.bn(self.linear(input)) if hasattr(self, 'bn') else self.linear(input)
gate = self.sigmoid(self.gate(x)) if hasattr(self, 'gate') else None
gated_pre_activ = gate * pre_activ if hasattr(self, 'gate') else pre_activ
output = self.nl(gated_pre_activ) if hasattr(self, 'nl') else gated_pre_activ
return (output, gated_pre_activ) if return_pa else output
def list_init_layers(self):
'''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
return [self.linear, self.gate] if hasattr(self, 'gate') else [self.linear]
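# Hedged usage sketch (added for illustration; the sizes are arbitrary examples, not
# values used by the original project):
#
#   import torch
#   layer = fc_layer(in_size=32, out_size=16, drop=0.1, batch_norm=True, gated=True)
#   y, pre_activation = layer(torch.randn(8, 32), return_pa=True)   # both: [8, 16]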
class fc_layer_split(nn.Module):
'''Fully connected layer outputting [mean] and [logvar] for each unit.
Input: [batch_size] x ... x [in_size] tensor
Output: tuple with two [batch_size] x ... x [out_size] tensors'''
def __init__(self, in_size, out_size, nl_mean=nn.Sigmoid(), nl_logvar=nn.Hardtanh(min_val=-4.5, max_val=0.),
drop=0., bias=True, excitability=False, excit_buffer=False, batch_norm=False, gated=False):
super().__init__()
self.mean = fc_layer(in_size, out_size, drop=drop, bias=bias, excitability=excitability,
excit_buffer=excit_buffer, batch_norm=batch_norm, gated=gated, nl=nl_mean)
self.logvar = fc_layer(in_size, out_size, drop=drop, bias=False, excitability=excitability,
excit_buffer=excit_buffer, batch_norm=batch_norm, gated=gated, nl=nl_logvar)
def forward(self, x):
return (self.mean(x), self.logvar(x))
def list_init_layers(self):
'''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
list = []
list += self.mean.list_init_layers()
list += self.logvar.list_init_layers()
return list
#-----------------------------------------------------------------------------------------------------------#
class MLP(nn.Module):
'''Module for a multi-layer perceptron (MLP).
Input: [batch_size] x ... x [size_per_layer[0]] tensor
Output: (tuple of) [batch_size] x ... x [size_per_layer[-1]] tensor'''
def __init__(self, input_size=1000, output_size=10, layers=2, hid_size=1000, hid_smooth=None, size_per_layer=None,
drop=0, batch_norm=True, nl="relu", bias=True, excitability=False, excit_buffer=False, gated=False,
output='normal'):
'''sizes: 0th=[input], 1st=[hid_size], ..., 1st-to-last=[hid_smooth], last=[output].
[input_size] # of inputs
[output_size] # of units in final layer
[layers] # of layers
[hid_size] # of units in each hidden layer
[hid_smooth] if None, all hidden layers have [hid_size] units, else # of units linearly in-/decreases s.t.
final hidden layer has [hid_smooth] units (if only 1 hidden layer, it has [hid_size] units)
[size_per_layer] None or <list> with for each layer number of units (1st element = number of inputs)
--> overwrites [input_size], [output_size], [layers], [hid_size] and [hid_smooth]
[drop] % of each layer's inputs that is randomly set to zero during training
[batch_norm] <bool>; if True, batch-normalization is applied to each layer
[nl] <str>; type of non-linearity to be used (options: "relu", "leakyrelu", "none")
[gated] <bool>; if True, each linear layer has an additional learnable gate
        [output]         <str>; if - "normal", final layer is same as all others
                                  - "logistic" / "gaussian", final layer is split into mean & logvar units
                                  - any other value (e.g. "BCE"), final layer has sigmoid non-linearity'''
super().__init__()
self.output = output
# get sizes of all layers
if size_per_layer is None:
hidden_sizes = []
if layers > 1:
if (hid_smooth is not None):
hidden_sizes = [int(x) for x in np.linspace(hid_size, hid_smooth, num=layers-1)]
else:
hidden_sizes = [int(x) for x in np.repeat(hid_size, layers - 1)]
size_per_layer = [input_size] + hidden_sizes + [output_size]
self.layers = len(size_per_layer)-1
# set label for this module
# -determine "non-default options"-label
nd_label = "{drop}{bias}{exc}{bn}{nl}{gate}{out}".format(
drop="" if drop==0 else "-drop{}".format(drop),
bias="" if bias else "-noBias", exc="-exc" if excitability else "", bn="-bn" if batch_norm else "",
nl="-lr" if nl=="leakyrelu" else "", gate="-gated" if gated else "",
out="" if output=="normal" else "-{}".format(output),
)
# -set label
self.label = "MLP({}{})".format(size_per_layer, nd_label) if self.layers>0 else ""
# set layers
for lay_id in range(1, self.layers+1):
# number of units of this layer's input and output
in_size = size_per_layer[lay_id-1]
out_size = size_per_layer[lay_id]
# define and set the fully connected layer
if lay_id==self.layers and output in ("logistic", "gaussian"):
layer = fc_layer_split(
in_size, out_size, bias=bias, excitability=excitability, excit_buffer=excit_buffer, drop=drop,
batch_norm=False, gated=gated,
nl_mean=nn.Sigmoid() if output=="logistic" else utils.Identity(),
nl_logvar=nn.Hardtanh(min_val=-4.5, max_val=0.) if output=="logistic" else utils.Identity(),
)
else:
layer = fc_layer(
in_size, out_size, bias=bias, excitability=excitability, excit_buffer=excit_buffer, drop=drop,
batch_norm=False if (lay_id==self.layers and not output=="normal") else batch_norm, gated=gated,
nl=nn.Sigmoid() if (lay_id==self.layers and not output=="normal") else nl,
)
setattr(self, 'fcLayer{}'.format(lay_id), layer)
# if no layers, add "identity"-module to indicate in this module's representation nothing happens
if self.layers<1:
self.noLayers = utils.Identity()
def forward(self, x):
for lay_id in range(1, self.layers+1):
x = getattr(self, 'fcLayer{}'.format(lay_id))(x)
return x
@property
def name(self):
return self.label
def list_init_layers(self):
'''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
list = []
for layer_id in range(1, self.layers+1):
list += getattr(self, 'fcLayer{}'.format(layer_id)).list_init_layers()
return list |
xbox/webapi/api/provider/usersearch/__init__.py | Landcross/xbox-webapi-python | 122 | 11072373 | """
Usersearch - Search for gamertags / userprofiles
"""
from xbox.webapi.api.provider.baseprovider import BaseProvider
from xbox.webapi.api.provider.usersearch.models import UserSearchResponse
class UserSearchProvider(BaseProvider):
USERSEARCH_URL = "https://usersearch.xboxlive.com"
HEADERS_USER_SEARCH = {"x-xbl-contract-version": "1"}
async def get_live_search(self, query: str, **kwargs) -> UserSearchResponse:
"""
Get userprofiles for search query
Args:
query: Search query
Returns:
:class:`UserSearchResponse`: User Search Response
"""
url = self.USERSEARCH_URL + "/suggest"
params = {"q": query}
resp = await self.client.session.get(
url, params=params, headers=self.HEADERS_USER_SEARCH, **kwargs
)
resp.raise_for_status()
return UserSearchResponse.parse_raw(await resp.text())
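# Hedged usage sketch (added; not part of the original provider). It assumes an already
# authenticated client object that exposes this provider as `client.usersearch`; the
# client construction itself is outside this module:
#
#   response = await client.usersearch.get_live_search("Major Nelson")
#   # `response` is a parsed UserSearchResponse model (see .models)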
|
robosuite/utils/robot_utils.py | spatric5/robosuite | 397 | 11072391 | <filename>robosuite/utils/robot_utils.py
# Utility functions for working with robots
from robosuite.robots import BIMANUAL_ROBOTS
def check_bimanual(robot_name):
"""
Utility function that returns whether the inputted robot_name is a bimanual robot or not
Args:
robot_name (str): Name of the robot to check
Returns:
bool: True if the inputted robot is a bimanual robot
"""
return robot_name.lower() in BIMANUAL_ROBOTS
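# Illustrative example (added; assumes "Baxter" is registered in BIMANUAL_ROBOTS and
# "Panda" is a single-arm robot, as in robosuite's default robot registry):
#
#   check_bimanual("Baxter")   # -> True
#   check_bimanual("Panda")    # -> False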
|
model/third_party/HMNet/Models/Trainers/BaseTrainer.py | NickSchoelkopf/SummerTime | 178 | 11072394 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
class BaseTrainer:
def __init__(self, opt):
self.opt = opt
if self.opt["cuda"] == True:
self.use_cuda = True
print("Using Cuda\n")
else:
self.use_cuda = False
print("Using CPU\n")
self.is_official = "OFFICIAL" in self.opt
self.opt["logFile"] = "log.txt"
self.saveFolder = None
self.logFileHandle = None
self.tb_writer = None
def log(self, s):
# In official case, the program does not output logs
if self.is_official:
return
try:
if self.logFileHandle is None:
self.logFileHandle = open(
os.path.join(self.saveFolder, self.opt["logFile"]), "a"
)
self.logFileHandle.write(s + "\n")
except Exception as e:
print("ERROR while writing log file:", e)
print(s)
def getSaveFolder(self):
runid = 1
while True:
saveFolder = os.path.join(
self.opt["datadir"],
self.opt["basename"] + "_conf~",
"run_" + str(runid),
)
if not os.path.exists(saveFolder):
self.saveFolder = saveFolder
os.makedirs(self.saveFolder)
print("Saving logs, model and evaluation in " + self.saveFolder)
return
runid = runid + 1
# save copy of conf file
def saveConf(self):
# with open(self.opt['confFile'], encoding='utf-8') as f:
# with open(os.path.join(self.saveFolder, 'conf_copy.tsv'), 'w', encoding='utf-8') as fw:
# for line in f:
# fw.write(line)
with open(
os.path.join(self.saveFolder, "conf_copy.tsv"), "w", encoding="utf-8"
) as fw:
for k in self.opt:
fw.write("{0}\t{1}\n".format(k, self.opt[k]))
def train(self):
pass
def load(self):
pass
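# Hedged sketch (added for illustration; not part of the original trainer): a concrete
# trainer is expected to subclass BaseTrainer, set up the save folder once, and use
# log() during training, e.g.
#
#   class MyTrainer(BaseTrainer):
#       def train(self):
#           self.getSaveFolder()
#           self.saveConf()
#           self.log("training started")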
|
third_party/com_codota.bzl | wix/exodus | 186 | 11072406 | load("//:import_external.bzl", import_external = "safe_wix_scala_maven_import_external")
def dependencies():
import_external(
name = "com_codota_codota_sdk_java",
artifact = "com.codota:codota-sdk-java:1.0.11",
artifact_sha256 = "134bd08c3763c41aedd9f6a162c1d39d97a3cd5accaf86182b00d0a502856f94",
srcjar_sha256 = "fa2be20b305238f6973da2beffac63aa2bb3027b33469d50918d3ef6f03cbf4c",
deps = [
"@com_google_code_gson_gson",
"@org_apache_httpcomponents_httpclient",
"@org_jetbrains_annotations"
],
)
|
caserec/recommenders/rating_prediction/user_attribute_knn.py | khalillakhdhar/recommander_python | 407 | 11072416 | <gh_stars>100-1000
# coding=utf-8
""""
User Based Collaborative Filtering Recommender with Attributes (User Attribute KNN)
[Rating Prediction]
User-Attribute-kNN predicts a user’s rating according to how similar users rated the same item. The algorithm
matches similar users based on the similarity of their attributes scores. However, instead of traditional UserKNN,
this approach uses a pre-computed similarity matrix based on metadata.
"""
# © 2019. Case Recommender (MIT License)
import numpy as np
from caserec.recommenders.rating_prediction.userknn import UserKNN
from caserec.utils.process_data import ReadFile
__author__ = '<NAME> <<EMAIL>>'
class UserAttributeKNN(UserKNN):
def __init__(self, train_file=None, test_file=None, output_file=None, metadata_file=None, similarity_file=None,
k_neighbors=30, as_similar_first=True, metadata_as_binary=False,
metadata_similarity_sep='\t', similarity_metric="cosine", sep='\t', output_sep='\t'):
"""
User Attribute KNN for Rating Prediction
This algorithm predicts a rating for each pair (user, item) based on the similar items that his neighbors
(similar users) consumed, using a metadata or similarity pre-computed file
Usage::
>> UserAttributeKNN(train, test, similarity_file=sim_matrix, as_similar_first=True).compute()
>> UserAttributeKNN(train, test, metadata_file=metadata, as_similar_first=True).compute()
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param output_file: File with dir to write the final predictions
:type output_file: str, default None
:param metadata_file: File which contains the metadata set. This file needs to have at least 2 columns
(user metadata).
:type metadata_file: str, default None
:param similarity_file: File which contains the similarity set. This file needs to have at least 3 columns
(user user similarity).
:type similarity_file: str, default None
:param k_neighbors: Number of neighbors to use. If None, k_neighbor = int(sqrt(n_users))
:type k_neighbors: int, default None
:param as_similar_first: If True, for each unknown item, which will be predicted, we first look for its k
                                     most similar users and then take the intersection with the users that have
                                     seen that item.
:type as_similar_first: bool, default True
        :param metadata_as_binary: If True, the explicit value will be transformed to binary
:type metadata_as_binary: bool, default False
:param metadata_similarity_sep: Delimiter for similarity or metadata file
:type metadata_similarity_sep: str, default '\t'
:param similarity_metric:
:type similarity_metric: str, default cosine
:param sep: Delimiter for input files file
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
"""
super(UserAttributeKNN, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file,
k_neighbors=k_neighbors, as_similar_first=as_similar_first,
similarity_metric=similarity_metric, sep=sep, output_sep=output_sep)
self.recommender_name = 'User Attribute KNN Algorithm'
self.metadata_file = metadata_file
self.similarity_file = similarity_file
self.metadata_as_binary = metadata_as_binary
self.metadata_similarity_sep = metadata_similarity_sep
def init_model(self):
"""
        Method to fit the model. Builds the user-user similarity matrix from the metadata file, or loads a
        pre-computed similarity matrix
"""
self.users_id_viewed_item = {}
# Set the value for k
if self.k_neighbors is None:
self.k_neighbors = int(np.sqrt(len(self.users)))
for item in self.items:
for user in self.train_set['users_viewed_item'].get(item, []):
self.users_id_viewed_item.setdefault(item, []).append(self.user_to_user_id[user])
if self.metadata_file is not None:
metadata = ReadFile(self.metadata_file, sep=self.metadata_similarity_sep, as_binary=self.metadata_as_binary
).read_metadata_or_similarity()
self.matrix = np.zeros((len(self.users), len(metadata['col_2'])))
meta_to_meta_id = {}
for m, data in enumerate(metadata['col_2']):
meta_to_meta_id[data] = m
for user_m in metadata['col_1']:
for m1 in metadata['dict'][user_m]:
try:
self.matrix[self.user_to_user_id[user_m], meta_to_meta_id[m1]] = metadata['dict'][user_m][m1]
except KeyError:
pass
# create header info for metadata
sparsity = (1 - (metadata['number_interactions'] / (len(metadata['col_1']) * len(metadata['col_2'])))) * 100
self.extra_info_header = ">> metadata:: %d users and %d metadata (%d interactions) | sparsity:: %.2f%%" % \
(len(metadata['col_1']), len(metadata['col_2']), metadata['number_interactions'],
sparsity)
# Create similarity matrix based on metadata or similarity file
self.su_matrix = self.compute_similarity(transpose=False)
elif self.similarity_file is not None:
similarity = ReadFile(self.similarity_file, sep=self.metadata_similarity_sep, as_binary=False
).read_metadata_or_similarity()
self.su_matrix = np.zeros((len(self.users), len(self.users)))
# Fill similarity matrix
for u in similarity['col_1']:
for u_j in similarity['dict'][u]:
self.su_matrix[self.user_to_user_id[u], self.user_to_user_id[int(u_j)]] = similarity['dict'][u][u_j]
# Remove NaNs
self.su_matrix[np.isnan(self.su_matrix)] = 0.0
else:
raise ValueError("This algorithm needs a similarity matrix or a metadata file!")
# Create original matrix user x item for prediction process
self.create_matrix()
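# Hedged end-to-end sketch (added; file paths are placeholders, mirroring the Usage
# examples in the class docstring):
#
#   UserAttributeKNN('train.dat', 'test.dat', output_file='predictions.dat',
#                    metadata_file='user_attributes.dat', k_neighbors=20).compute()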
|
psqlextra/backend/migrations/operations/add_default_partition.py | adamchainz/django-postgres-extra | 529 | 11072433 | <gh_stars>100-1000
from psqlextra.backend.migrations.state import PostgresPartitionState
from .partition import PostgresPartitionOperation
class PostgresAddDefaultPartition(PostgresPartitionOperation):
"""Adds a new default partition to a :see:PartitionedPostgresModel."""
def state_forwards(self, app_label, state):
model_state = state.models[(app_label, self.model_name_lower)]
model_state.add_partition(
PostgresPartitionState(
app_label=app_label, model_name=self.model_name, name=self.name
)
)
state.reload_model(app_label, self.model_name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.add_default_partition(model, self.name)
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_partition(model, self.name)
def describe(self) -> str:
return "Creates default partition '%s' on %s" % (
self.name,
self.model_name,
)
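# Hedged example (added; not part of the original module): how this operation is
# typically listed in a hand-written Django migration. The app and model names are
# placeholders, and the usual `django.db.migrations` imports are implied:
#
#   class Migration(migrations.Migration):
#       dependencies = [("myapp", "0001_initial")]
#       operations = [
#           PostgresAddDefaultPartition(model_name="mypartitionedmodel", name="default"),
#       ]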
|
setup.py | kayuksel/AdaBound | 3,197 | 11072436 | <filename>setup.py<gh_stars>1000+
from setuptools import setup
__VERSION__ = '0.0.5'
setup(name='adabound',
version=__VERSION__,
description='AdaBound optimization algorithm, build on PyTorch.',
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
keywords=['machine learning', 'deep learning'],
classifiers=[
'Intended Audience :: Science/Research',
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
url='https://github.com/Luolc/AdaBound',
author='<NAME>',
author_email='<EMAIL>',
license='Apache',
packages=['adabound'],
install_requires=[
'torch>=0.4.0',
],
zip_safe=False,
python_requires='>=3.6.0')
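# Hedged usage note (added for illustration): once installed, the optimizer is used as a
# drop-in replacement for torch.optim optimizers; `model` and the hyper-parameter values
# below are placeholders:
#
#   import adabound
#   optimizer = adabound.AdaBound(model.parameters(), lr=1e-3, final_lr=0.1)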
|
tests/serializers/test_base64_serializer.py | next-franciscoalgaba/python-benedict | 365 | 11072438 | <reponame>next-franciscoalgaba/python-benedict<gh_stars>100-1000
# -*- coding: utf-8 -*-
from benedict.serializers import Base64Serializer
import unittest
class base64_serializer_test_case(unittest.TestCase):
def test_decode_base64(self):
# TODO
pass
def test_encode_base64(self):
# TODO
pass
|
corehq/apps/accounting/management/commands/make_domain_enterprise_level.py | dimagilg/commcare-hq | 471 | 11072442 | from django.core.management import BaseCommand
from corehq.apps.domain.forms import DimagiOnlyEnterpriseForm
from corehq.apps.domain.models import Domain
from corehq.util.decorators import require_debug_true
class Command(BaseCommand):
help = ('Create a billing account and an enterprise level subscription '
'for the given domain')
def add_arguments(self, parser):
parser.add_argument('domain')
@require_debug_true()
def handle(self, domain, **kwargs):
assert Domain.get_by_name(domain) is not None
DimagiOnlyEnterpriseForm(domain, '<EMAIL>').process_subscription_management()
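# Hedged usage note (added for illustration): as a Django management command this is
# invoked by its module name with a domain argument, and it only runs when DEBUG is
# enabled because of the @require_debug_true decorator, e.g.
#
#   python manage.py make_domain_enterprise_level <domain-name>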
|
controllers/exploits.py | noraj/Kvasir | 194 | 11072447 | <filename>controllers/exploits.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
##--------------------------------------#
## Kvasir
##
## (c) 2010-2014 Cisco Systems, Inc.
## (c) 2015 <NAME>
##
## Exploits controller
##
## Author: <NAME> <<EMAIL>>
##--------------------------------------#
from skaldship.hosts import host_title_maker, get_host_record, create_hostfilter_query
from NexposeAPI import NexposeAPI
import logging
logger = logging.getLogger("web2py.app.kvasir")
crud.settings.formstyle = formstyle_bootstrap_kvasir
@auth.requires_login()
def index():
return dict()
##-------------------------------------------------------------------------
## exploits
##-------------------------------------------------------------------------
@auth.requires_login()
def add():
if request.extension in ['load', 'json']:
form=SQLFORM(db.t_exploits, buttons=[], _action=URL('add', extension=request.extension), _id="exploit_add_form")
else:
form=SQLFORM(db.t_exploits, _action=URL('add', extension=request.extension), _id="exploit_add_form")
if form.accepts(request.vars, session):
response.flash = 'Exploit added'
response.headers['web2py-component-command'] = "exploitstable.fnReloadAjax();"
return ""
elif form.errors:
response.flash = "Error in form submission"
return TABLE(*[TR(k, v) for k, v in form.errors.items()])
response.title = "%s :: Add Exploit" % (settings.title)
return dict(form=form)
@auth.requires_login()
def detail():
record = db.t_exploits(request.args(0)) or redirect(URL('default', 'error', vars={'msg': T('Exploit record not found')}))
form=crud.read(db.t_exploits,record)
response.title = "%s :: Exploit Detail" % (settings.title)
return dict(form=form)
@auth.requires_login()
def edit():
record = db.t_exploits(request.args(0)) or redirect(URL('default', 'error', vars={'msg': T('Exploit record not found')}))
form=crud.update(db.t_exploits,record,next='detail/[id]',
ondelete=lambda form: redirect(URL('list')),
onaccept=crud.archive)
response.title = "%s :: Update Exploit" % (settings.title)
return dict(form=form)
@auth.requires_signature()
@auth.requires_login()
def delete():
count = 0
for r in request.vars.ids.split('|'):
if r is not None:
db(db.t_exploits.id == r).delete()
count += 1
db.commit()
response.flash = "%s Exploit(s) deleted" % (count)
response.headers['web2py-component-command'] = "exploitstable.fnReloadAjax(); jQuery('.datatable tr.DTTT_selected').removeClass('DTTT_selected');"
@auth.requires_login()
def list():
aaData = []
record = None
if request.extension == "json":
rows = db(db.t_exploits.id > 0).select()
for r in rows:
aTxt = {}
aaData.append({
'0': A('edit', _target="exploits_%s" % (r.id), _href=URL('edit.html', args=r.id)).xml(),
'1': r.f_name,
'2': r.f_title,
'3': r.f_description,
'4': r.f_source,
'5': r.f_rank,
'6': r.f_level,
'7': r.f_vulnid,
'8': r.f_cve,
'DT_RowId': r.id
})
result = { 'sEcho': request.vars.sEcho,
'iTotalRecords': len(aaData),
'aaData': aaData,
}
return result
response.title = "%s :: Exploits" % (settings.title)
return dict()
@auth.requires_login()
def by_vulnid():
"""
Returns a list of exploits for a vulnerability id
"""
rows = db(db.t_exploits.f_vulnid.contains(request.args(0))).select()
response.title = "%s :: Exploits by Vuln ID" % (settings.title)
return rows
##-------------------------------------------------------------------------
## exploit list support (upload xml, match)
##-------------------------------------------------------------------------
@auth.requires_login()
def connect_exploits():
"""
Call the connect_exploits() function which links known vulnerabilities to
exploits based on f_vulnid or f_cve
"""
form = SQLFORM.factory(
Field('f_taskit', type='boolean', default=auth.user.f_scheduler_tasks, label=T('Run in background task')),
)
from skaldship.exploits import connect_exploits
if form.accepts(request.vars, session):
if form.vars.f_taskit:
task = scheduler.queue_task(
connect_exploits,
group_name=settings.scheduler_group_name,
sync_output=5,
timeout=settings.scheduler_timeout,
)
if task.id:
redirect(URL('tasks', 'status', args=task.id))
else:
response.flash = "Error submitting job: %s" % (task.errors)
else:
connect_exploits()
response.flash = "Exploits and vulnerabilities connected"
redirect(URL('list'))
response.title = "%s :: Connect Exploits" % (settings.title)
return dict(form=form)
@auth.requires_login()
def import_canvas_xml():
"""
Process ImmunitySec's Exploit.xml which can be genrated from the URL
http://exploitlist.immunityinc.com/ or by running ./canvasengine.py -e
from your CANVAS directory
http://exploitlist.immunityinc.com/home/serve/live
"""
import os
kvasir_path = os.path.join(request.folder, 'static/etc')
form = SQLFORM.factory(
Field('f_filename', 'upload', uploadfolder=os.path.join(request.folder, 'data/misc'), label=T('XML File')),
Field('f_use_kvasir_local', 'boolean', label=T('Use Kvasir static path')),
Field('f_use_local', 'boolean', label=T('Use local file path')),
Field('f_pathname', 'string', default=kvasir_path, label=T('Local path')),
Field('f_download', 'boolean', label=T('Download')),
Field('f_taskit', type='boolean', default=auth.user.f_scheduler_tasks, label=T('Run in background task')),
col3 = {
'f_use_kvasir_local': 'static/etc/canvas_exploits.xml',
'f_use_local': 'Directory where canvas_exploits.xml is located',
'f_download': 'Download from ImmunitySec website',
}
)
if form.errors:
response.flash = 'Error in form'
elif form.accepts(request.vars, session):
if form.vars.f_use_local:
filename = os.path.join(form.vars.f_pathname, 'canvas_exploits.xml')
elif form.vars.f_use_kvasir_local:
filename = os.path.join(request.folder,'static','etc','canvas_exploits.xml')
elif form.vars.f_download:
filename = None
else:
filename = os.path.join(request.folder,'data','misc',form.vars.f_filename)
if form.vars.f_taskit:
task = scheduler.queue_task(
canvas_exploit_xml,
pargs=[filename],
group_name=settings.scheduler_group_name,
sync_output=5,
timeout=settings.scheduler_timeout,
)
if task.id:
redirect(URL('tasks', 'status', args=task.id))
else:
response.flash = "Error submitting job: %s" % (task.errors)
else:
from skaldship.canvas import process_exploits
from skaldship.exploits import connect_exploits
process_exploits(filename)
connect_exploits()
response.flash = "Canvas Exploit data uploaded"
redirect(URL('list'))
response.title = "%s :: Import ImmunitySec CANVAS Exploits XML" % (settings.title)
return dict(form=form)
@auth.requires_login()
def import_nexpose_xml():
"""
Insert/Update exploit references from Nexpose exploits.xml file
File is located in /opt/rapid7/nexpose/plugins/conf
"""
import os
response.title = "%s :: Import Nexpose Exploits XML" % (settings.title)
form = SQLFORM.factory(
Field('f_filename', 'upload', uploadfolder=os.path.join(request.folder, 'data', 'misc'), label=T('XML File')),
Field('f_use_kvasir_local', 'boolean', label=T('Use Kvasir static path')),
Field('f_use_local', 'boolean', label=T('Use local file path')),
Field('f_pathname', 'string', default="/opt/rapid7/nexpose/plugins/conf", label=T('Local pathname')),
Field('f_taskit', type='boolean', default=auth.user.f_scheduler_tasks, label=T('Run in background task')),
col3 = {
'f_use_kvasir_local': 'static/etc/nexpose_exploits.xml',
'f_use_local': 'Directory where exploits.xml is located',
'f_pathname': 'Requires Nexpose and possibly root access'
}
)
if form.errors:
response.flash = 'Error in form'
elif form.accepts(request.vars, session):
# process nexpose exploits.xml file
if form.vars.f_use_local:
filename = os.path.join(form.vars.f_pathname, 'exploits.xml')
elif form.vars.f_use_kvasir_local:
filename = os.path.join(request.folder,'static','etc','nexpose_exploits.xml')
else:
filename = os.path.join(request.folder,'data', 'misc', form.vars.f_filename)
if form.vars.f_taskit:
task = scheduler.queue_task(
nexpose_exploit_xml,
pargs=[filename],
group_name=settings.scheduler_group_name,
sync_output=5,
timeout=settings.scheduler_timeout,
)
if task.id:
redirect(URL('tasks', 'status', args=task.id))
else:
response.flash = "Error submitting job: %s" % (task.errors)
else:
from skaldship.nexpose import process_exploits
from skaldship.exploits import connect_exploits
process_exploits(filename)
connect_exploits()
redirect(URL('list'))
return dict(form=form)
|
treeio/identities/forms.py | Andrea-MariaDB-2/treeio | 242 | 11072486 | <filename>treeio/identities/forms.py
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Identities module forms
"""
from django import forms
from django.core.files.storage import default_storage
from django.template import defaultfilters
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from treeio.core.conf import settings
from treeio.core.models import AccessEntity, Object, ModuleSetting
from treeio.core.decorators import preprocess_form
from treeio.identities.models import Contact, ContactValue, ContactType, ContactField
from unidecode import unidecode
from PIL import Image
import re
preprocess_form()
class MassActionForm(forms.Form):
""" Mass action form for Reports """
delete = forms.ChoiceField(label=_("With selected"), choices=(('', '-----'), ('delete', _('Delete Completely')),
('trash', _('Move to Trash'))), required=False)
instance = None
def __init__(self, user, *args, **kwargs):
if 'instance' in kwargs:
self.instance = kwargs['instance']
del kwargs['instance']
super(MassActionForm, self).__init__(*args, **kwargs)
self.fields['delete'] = forms.ChoiceField(label=_("With selected"),
choices=(('', '-----'), ('delete', _('Delete Completely')),
('trash', _('Move to Trash'))), required=False)
def save(self, *args, **kwargs):
"Process form"
if self.instance:
if self.is_valid():
if self.cleaned_data['delete']:
if self.cleaned_data['delete'] == 'delete':
self.instance.delete()
if self.cleaned_data['delete'] == 'trash':
self.instance.trash = True
self.instance.save()
class ContactFieldForm(forms.ModelForm):
"Contact Field Form"
def clean_name(self):
"Ensure the name of the field only contains alphanumeric"
name = self.cleaned_data['name']
if not re.match(r'^[a-zA-Z0-9-_]+$', name):
raise forms.ValidationError(
_("Sorry, field names can only contain letters, numbers, hyphens (-) and underscores (_)"))
return name
def __init__(self, *args, **kwargs):
super(ContactFieldForm, self).__init__(*args, **kwargs)
self.fields['name'].label = _("Name")
self.fields['label'].label = _("Label")
self.fields['field_type'].label = _("Field type")
self.fields['required'].label = _("Required")
self.fields['details'].label = _("Details")
class Meta:
"Fields Form"
model = ContactField
fields = ('name', 'label', 'field_type', 'required', 'details')
class ContactTypeForm(forms.ModelForm):
"Contact Type Form"
def __init__(self, user, *args, **kwargs):
super(ContactTypeForm, self).__init__(*args, **kwargs)
self.fields['name'].label = _("Name")
self.fields['fields'].queryset = Object.filter_permitted(
user, ContactField.objects.all())
self.fields['fields'].help_text = ''
self.fields['fields'].label = _("Fields")
self.fields['details'].label = _("Details")
def clean_name(self):
"Ensures a contact with the same name doesn't already exists"
instance = getattr(self, 'instance', None)
name = self.cleaned_data['name']
if instance and not instance.id:
slug = unicode(name).replace(" ", "-")
slug = defaultfilters.slugify(unidecode(slug))
if ContactType.objects.filter(slug=slug).exists():
raise forms.ValidationError(
_("Contact Type with such name already exists."))
return name
class Meta:
"Contact Type Form"
model = ContactType
fields = ('name', 'fields', 'details')
class ContactForm(forms.Form):
""" ContactForm """
name = forms.CharField(
max_length=256, widget=forms.TextInput(attrs={'size': '50'}))
instance = None
files = {}
def _get_form_field(self, field, value=None):
"Generate a Django-friendly field from Hardtree spec in DB"
form_field = None
if field.field_type == 'text':
form_field = forms.CharField(label=field.label, max_length=512,
widget=forms.TextInput(attrs={'size': '30'}))
elif field.field_type == 'textarea':
form_field = forms.CharField(label=field.label,
widget=forms.Textarea(attrs={'class': 'no-editor'}))
elif field.field_type == 'details':
form_field = forms.CharField(
label=field.label, widget=forms.Textarea())
elif field.field_type == 'email':
form_field = forms.EmailField(
label=field.label, widget=forms.TextInput(attrs={'size': '30'}))
elif field.field_type == 'url':
form_field = forms.URLField(
label=field.label, widget=forms.TextInput(attrs={'size': '50'}))
elif field.field_type == 'phone':
form_field = forms.CharField(label=field.label, max_length=256,
widget=forms.TextInput(attrs={'size': '30'}))
elif field.field_type == 'picture':
form_field = forms.ImageField(
label=field.label, widget=forms.FileInput)
elif field.field_type == 'date':
form_field = forms.DateTimeField(label=field.label)
form_field.widget.attrs.update({'class': 'datetimepicker'})
form_field.required = field.required
if value:
if isinstance(form_field, forms.FileField) and value.value:
form_field = forms.ChoiceField(
label=field.label, widget=forms.RadioSelect())
filename = full_filename = value.value
match = re.match('.*[a-z0-9]{32}__(?P<filename>.+)$', filename)
if match:
filename = match.group('filename')
form_field.choices = ((full_filename, _("Keep existing: ") + unicode(filename)),
('delete', _("Delete ")))
form_field.initial = full_filename
form_field.required = False
else:
form_field.initial = value.value
return form_field
def _get_free_field_name(self, field):
"Generate an available name for a field"
num = 0
field_name = unicode(field.name) + u"___" + unicode(num)
while field_name in self.fields:
num += 1
field_name = unicode(field.name) + u"___" + unicode(num)
return field_name
def _get_upload_name(self, filename):
"Returns an upload_to path to a new file"
import hashlib
import random
while True:
hasher = hashlib.md5()
hasher.update(str(random.random()))
filepath = u"identities/" + hasher.hexdigest() + u"__" + filename
fullpath = settings.MEDIA_ROOT + filepath
if not default_storage.exists(fullpath):
return filepath
def _handle_uploaded_file(self, field_name):
"Process an uploaded file"
try:
file = self.files[field_name]
filepath = self._get_upload_name(file.name)
except KeyError:
return ''
destination = open(settings.MEDIA_ROOT + filepath, 'wb+')
for chunk in file.chunks():
destination.write(chunk)
destination.close()
return settings.MEDIA_URL + filepath
def _image_resize(self, filepath):
"Resizes Image if it's over the maximum dimension"
filepath = filepath.replace(settings.MEDIA_URL, '')
filepath = settings.MEDIA_ROOT + filepath
try:
img = Image.open(filepath)
expected_size = getattr(
settings, 'HARDTREE_IMAGE_MAX_SIZE', [400, 300])
if img.size[0] > expected_size[0] or img.size[1] > expected_size[1]:
filter_name = getattr(
settings, 'HARDTREE_IMAGE_RESIZE_FILTER', 'ANTIALIAS')
filter = getattr(Image, filter_name, Image.ANTIALIAS)
aspect = img.size[0] / float(img.size[1])
newsize = list(expected_size)
if img.size[0] > expected_size[0]:
newsize[0] = expected_size[0]
newsize[1] = round(newsize[0] / aspect)
if newsize[1] > expected_size[1]:
newsize[1] = expected_size[1]
newsize[0] = round(newsize[1] * aspect)
img = img.resize(newsize, filter)
img.save(filepath)
except Exception:
pass
def __init__(self, user=None, contact_type=None, *args, **kwargs):
"Populates form with fields from given ContactType"
if 'instance' in kwargs:
self.instance = kwargs['instance']
values = self.instance.contactvalue_set.all()
del kwargs['instance']
super(ContactForm, self).__init__(*args, **kwargs)
self.fields['parent'] = forms.ModelChoiceField(
label='Parent', queryset=[], required=False)
self.fields['parent'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['parent'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('identities_ajax_contact_lookup')})
self.fields['parent'].widget.attrs.update(
{'popuplink': reverse('identities_contact_add')})
self.fields['parent'].label = _('Parent')
self.fields['name'].label = _('Name')
if 'files' in kwargs:
self.files = kwargs['files']
for field in contact_type.fields.all():
if self.instance:
initial_field_name = self._get_free_field_name(field)
self.fields[initial_field_name] = self._get_form_field(field)
for value in values:
if value.field == field:
field_name = self._get_free_field_name(field)
self.fields[field_name] = self._get_form_field(
field, value)
if initial_field_name in self.fields:
del self.fields[initial_field_name]
else:
field_name = self._get_free_field_name(field)
self.fields[field_name] = self._get_form_field(field)
if user.is_admin('treeio.identities'):
self.fields['related_user'] = forms.ModelChoiceField(label=_('Attach to User'),
queryset=[], required=False)
self.fields['related_user'].queryset = AccessEntity.objects.all()
self.fields['related_user'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('identities_ajax_access_lookup')})
self.fields['related_user'].label = _('Related user')
if self.instance:
self.fields['name'].initial = self.instance.name
self.fields['parent'].initial = self.instance.parent_id
if 'related_user' in self.fields:
self.fields[
'related_user'].initial = self.instance.related_user_id
def save(self, request, contact_type=None):
"Process form and create DB objects as required"
if self.instance:
contact = self.instance
else:
contact = Contact()
contact.contact_type = contact_type
contact.name = unicode(self.cleaned_data['name'])
if 'parent' in self.cleaned_data:
contact.parent = self.cleaned_data['parent']
if 'related_user' in self.cleaned_data:
contact.related_user = self.cleaned_data['related_user']
contact.save()
if self.instance:
contact.contactvalue_set.all().delete()
for field in contact.contact_type.fields.all():
for form_name in self.cleaned_data:
if re.match(str("^" + field.name + "___\d+$"), form_name):
if isinstance(self.fields[form_name], forms.FileField):
value = ContactValue(field=field, contact=contact,
value=self._handle_uploaded_file(form_name))
if isinstance(self.fields[form_name], forms.ImageField):
self._image_resize(value.value)
else:
if field.field_type == 'picture' and isinstance(self.fields[form_name],
forms.ChoiceField):
if self.cleaned_data[form_name] != 'delete':
value = ContactValue(field=field, contact=contact,
value=self.cleaned_data[form_name])
else:
value = ContactValue(field=field, contact=contact,
value=self.cleaned_data[form_name])
value.save()
return contact
class FilterForm(forms.ModelForm):
""" Filter form definition """
def __init__(self, user, skip=None, *args, **kwargs):
if skip is None:
skip = []
super(FilterForm, self).__init__(*args, **kwargs)
if 'name' in skip:
del self.fields['name']
else:
self.fields['name'].required = False
self.fields['name'].label = _("Name")
if 'contact_type' in skip:
del self.fields['contact_type']
else:
self.fields['contact_type'].queryset = Object.filter_permitted(
user, ContactType.objects)
self.fields['contact_type'].required = True
self.fields['contact_type'].label = _("Contact type")
class Meta:
"Filter"
model = Contact
fields = ('name', 'contact_type')
class SettingsForm(forms.Form):
""" Administration settings form """
default_contact_type = forms.ModelChoiceField(
label=_('Default Contact Type'), queryset=[])
def __init__(self, user, *args, **kwargs):
"Sets choices and initial value"
super(SettingsForm, self).__init__(*args, **kwargs)
self.fields['default_contact_type'].queryset = Object.filter_permitted(
user, ContactType.objects)
try:
conf = ModuleSetting.get_for_module(
'treeio.identities', 'default_contact_type')[0]
default_task_status = ContactType.objects.get(pk=long(conf.value))
self.fields[
'default_contact_type'].initial = default_task_status.id
except Exception:
pass
def save(self):
"Form processor"
try:
ModuleSetting.set_for_module('default_contact_type',
self.cleaned_data[
'default_contact_type'].id,
'treeio.identities')
except Exception:
return False
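# Hedged usage sketch (added; not part of the original module): how ContactForm is
# typically driven from a view. `request`, `contact_type`, `existing_contact` and the
# POST/FILES data are assumed to come from the surrounding Django view:
#
#   form = ContactForm(request.user, contact_type, request.POST, files=request.FILES,
#                      instance=existing_contact)
#   if form.is_valid():
#       contact = form.save(request, contact_type)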
|
tests/testapp/test_nexus_module.py | YPlan/gargoyle | 138 | 11072490 | from __future__ import absolute_import, division, print_function, unicode_literals
import json
from django.contrib.auth.models import User
from django.test import TestCase
from gargoyle import gargoyle
from gargoyle.models import DISABLED, GLOBAL, Switch
class NexusModuleTestCase(TestCase):
def setUp(self):
self.user = User(username='user', is_staff=True)
self.user.set_password('password')
self.user.save()
self.client.login(username='user', password='password')
def test_index(self):
resp = self.client.get('/nexus/gargoyle/')
assert resp.status_code == 200
assert "Gargoyle" in resp.content.decode('utf-8')
def test_add(self):
resp = self.client.post('/nexus/gargoyle/add/', {'key': 'key1'})
assert resp.status_code == 200
body = json.loads(resp.content.decode('utf-8'))
assert body['success'] is True
assert body['data']['key'] == 'key1'
switch = Switch.objects.get()
assert switch.key == 'key1'
def test_update(self):
Switch.objects.create(key='key1')
resp = self.client.post('/nexus/gargoyle/update/', {'curkey': 'key1', 'key': 'key2'})
assert resp.status_code == 200
body = json.loads(resp.content.decode('utf-8'))
assert body['success'] is True
assert body['data']['key'] == 'key2'
switch = Switch.objects.get()
assert switch.key == 'key2'
def test_status(self):
Switch.objects.create(key='key1', status=DISABLED)
resp = self.client.post('/nexus/gargoyle/status/', {'key': 'key1', 'status': str(GLOBAL)})
assert resp.status_code == 200
body = json.loads(resp.content.decode('utf-8'))
assert body['success'] is True
assert body['data']['status'] == GLOBAL
switch = Switch.objects.get()
assert switch.status == GLOBAL
def test_delete(self):
Switch.objects.create(key='key1')
resp = self.client.post('/nexus/gargoyle/delete/', {'key': 'key1'})
assert resp.status_code == 200
body = json.loads(resp.content.decode('utf-8'))
assert body['success'] is True
assert body['data'] == {}
assert Switch.objects.count() == 0
def test_add_condition(self):
switch = Switch.objects.create(key='key1')
conditions = list(switch.get_active_conditions(gargoyle))
assert len(conditions) == 0
resp = self.client.post(
'/nexus/gargoyle/conditions/add/',
{
'key': 'key1',
'id': 'gargoyle.builtins.IPAddressConditionSet',
'field': 'ip_address',
'ip_address': '1.1.1.1',
},
)
assert resp.status_code == 200
body = json.loads(resp.content.decode('utf-8'))
assert body['success'] is True
assert body['data']['key'] == 'key1'
assert len(body['data']['conditions']) == 1
name, value, field_value, exclude, condition_type = body['data']['conditions'][0]['conditions'][0]
assert name == 'ip_address'
assert value == '1.1.1.1'
assert field_value == '1.1.1.1'
assert exclude is False
assert condition_type == 'f'
def test_add_ab_test_condition(self):
switch = Switch.objects.create(key='key1')
conditions = list(switch.get_active_conditions(gargoyle))
assert len(conditions) == 0
resp = self.client.post(
'/nexus/gargoyle/conditions/add/',
{
'key': 'key1',
'id': 'gargoyle.builtins.IPAddressConditionSet',
'field': 'ip_address',
'ip_address': '1.1.1.1',
'is_ab_test': '1',
},
)
assert resp.status_code == 200
body = json.loads(resp.content.decode('utf-8'))
assert body['success'] is True
assert body['data']['key'] == 'key1'
assert len(body['data']['conditions']) == 1
name, value, field_value, exclude, condition_type = body['data']['conditions'][0]['conditions'][0]
assert name == 'ip_address'
assert value == '1.1.1.1'
assert field_value == '1.1.1.1'
assert exclude is False
assert condition_type == 't'
def test_remove_condition(self):
switch = Switch.objects.create(key='key1')
switch.add_condition(gargoyle, 'gargoyle.builtins.IPAddressConditionSet', 'ip_address', '1.1.1.1')
conditions = list(switch.get_active_conditions(gargoyle))
assert len(conditions) == 1
resp = self.client.post(
'/nexus/gargoyle/conditions/remove/',
{
'key': 'key1',
'id': 'gargoyle.builtins.IPAddressConditionSet',
'field': 'ip_address',
'value': '1.1.1.1',
},
)
assert resp.status_code == 200
body = json.loads(resp.content.decode('utf-8'))
assert body['success'] is True
assert body['data']['key'] == 'key1'
assert len(body['data']['conditions']) == 0
|
contrib/remote-dev-tool/remote-dev-tool.py | luxius-luminus/pai | 1,417 | 11072496 | <reponame>luxius-luminus/pai
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
import re
import os
import sys
import time
import random
import string
import configparser
import subprocess
import logging
import getpass
import socket
import requests
USERNAME = ""
TOKEN = ""
SERVER_IP = ""
USER_DIR = ""
SSH_INFO = ""
LOGGER = ""
SSH_INFO = {}
IS_SHELL = ""
JOB_NAME = ""
def read_env():
global USERNAME, TOKEN, SERVER_IP
LOGGER.debug("read env")
cf = configparser.ConfigParser()
cf.read(".env")
USERNAME = cf.get("PAI_ENV", "username")
TOKEN = cf.get("PAI_ENV", "token")
SERVER_IP = cf.get("PAI_ENV", "serverip")
def check_platform():
LOGGER.debug("check platform")
global IS_SHELL, USER_DIR
if sys.platform.find("win") != -1:
IS_SHELL = False
USER_DIR = os.environ.get('UserProfile')
elif sys.platform.find("linux") != -1:
IS_SHELL = True
USER_DIR = os.environ["HOME"]
else:
LOGGER.debug("unsupported platform")
exit(-1)
def generate_conf():
LOGGER.debug("generate conf files")
pai_dir = os.path.join(USER_DIR, ".openpai/")
if not os.path.exists(pai_dir):
os.makedirs(pai_dir)
conf_files = ['clusters', 'exports']
for conf_file in conf_files:
f1 = open("conf/{}.template".format(conf_file), 'r+')
f2 = open("{}{}.yaml".format(pai_dir, conf_file), 'w+')
for line in f1.readlines():
line = re.sub("\$username", USERNAME, line)
line = re.sub("\$token", TOKEN, line)
line = re.sub("\$serverip", SERVER_IP, line)
f2.write(line)
f1.close()
f2.close()
def init():
global LOGGER
logging.basicConfig(level=logging.INFO, format='%(asctime)s-%(name)s-%(levelname)s-%(message)s')
LOGGER = logging.getLogger(__name__)
script_folder = os.path.dirname(os.path.realpath(__file__))
os.chdir(script_folder)
read_env()
check_platform()
generate_conf()
def read_input(message):
while True:
answer = input("{} (y/n)?".format(message))
if answer in {"Y", "y", "yes", "Yes", "YES"}:
return True
elif answer in {"N", "n", "no", "No", "NO"}:
return False
def run_subprocess(command):
LOGGER.debug("run subprocess {}, shell={}".format(command, IS_SHELL))
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=IS_SHELL)
p_output, p_error = p.communicate()
LOGGER.debug(p_output)
if p.returncode != 0:
LOGGER.debug(p_error)
return p_output.decode("utf-8", "ignore")
def get_args():
parser = argparse.ArgumentParser(description="Remote Development Tool")
parser.add_argument("-g", "--getssh", metavar="job_name", help="get ssh info", dest="job_name")
parser.add_argument("-c", "--config", metavar='job_path', help="submit local job", dest="job_path")
parser.add_argument("-s", "--share", metavar="share_path", help="share local folder", dest="share_path")
parser.add_argument("-v", "--verbose", action="store_true", help="verbose mode", dest='need_verbose')
args = parser.parse_args()
if len(sys.argv) < 2 or (len(sys.argv) == 2 and args.need_verbose is True):
parser.print_help()
parser.exit(-1)
if args.job_name is not None and args.job_path is not None:
LOGGER.info("cannot use --submit and --getssh at the same time")
parser.print_help()
parser.exit(-1)
if args.job_name is None and args.job_path is None:
LOGGER.info("cannot use without both --submit and --getssh")
parser.print_help()
parser.exit(-1)
return args
def get_ssh_info(job_name):
global LOGGER, SSH_INFO, USER_DIR, JOB_NAME
LOGGER.debug("get SSH info")
JOB_NAME = job_name
pai_dir = os.path.join(USER_DIR, ".openpai/")
while True:
LOGGER.info("wait for the ssh to start")
output = run_subprocess("opai job status -a remote_dev_bed {} ssh".format(job_name))
ssh_port = re.findall(r"sshPort: '(.+?)'", output)
ssh_ip = re.findall(r"sshIp: (.+?)\n", output)
ssh_link = re.findall(r"privateKeyDirectDownloadLink: (.+?)\n", output)
if len(ssh_ip) != 0 and len(ssh_port) != 0 and len(ssh_link) != 0:
break
time.sleep(10)
ssh_ip = re.sub("\r", "", ssh_ip[0])
ssh_port = ssh_port[0]
ssh_link = re.sub("\r", "", ssh_link[0])
LOGGER.debug("download SSH key")
req = requests.get(ssh_link)
ssh_key = os.path.abspath("{}{}.key".format(pai_dir, job_name))
with open(ssh_key, "w+") as f:
f.write(req.text)
f.close()
if IS_SHELL is True:
run_subprocess("chmod 600 {}".format(ssh_key))
ssh_cmd = "ssh -i {} -p {} -o StrictHostKeyChecking=no root@{}".format(ssh_key, ssh_port, ssh_ip)
LOGGER.info("SSH IP: {}".format(ssh_ip))
LOGGER.info("SSH Port: {}".format(ssh_port))
LOGGER.info("SSH Key: {}".format(ssh_key))
LOGGER.info("SSH CMD: {}".format(ssh_cmd))
SSH_INFO['ip'] = ssh_ip
SSH_INFO['port'] = ssh_port
SSH_INFO['key'] = ssh_key
SSH_INFO['cmd'] = ssh_cmd
configure_vscode()
def share_local_path(local_path):
global SSH_INFO
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("www.microsoft.com", 80))
host_ip = s.getsockname()[0]
s.close()
if read_input("Is your host ip {}".format(host_ip)) is False:
host_ip = input("Please input your host IP: ")
if IS_SHELL is False:
ms_username = getpass.getuser() + "@microsoft.com"
if read_input("Is your user name {}".format(ms_username)) is False:
ms_username = input("Please input your username: ")
ms_password = getpass.getpass("Please input your password: ")
run_subprocess(r'net share dev="{}" /GRANT:{},FULL'.format(local_path, ms_username))
SSH_INFO['mount'] = "mount -t cifs //{}/dev /mnt/local -o vers=3.0,username={},password={}".format(
host_ip,
ms_username,
ms_password)
else:
run_subprocess(r"docker stop remote_dev_nfs &> /dev/null || true")
run_subprocess(r"docker rm remote_dev_nfs &> /dev/null || true")
run_subprocess(r"docker run -itd --privileged --cap-add SYS_ADMIN --cap-add SYS_MODULE \
-v /lib/modules:/lib/modules:ro \
-v {}:/workspace \
-v ~/.openpai/exports.yaml:/etc/exports:ro\
-p 2049:2049 --name remote_dev_nfs \
erichough/nfs-server &> /dev/null".format(local_path))
SSH_INFO['mount'] = "mount -t nfs4 {}:/ /mnt/local".format(host_ip)
def submit_job(job_path):
global JOB_NAME
LOGGER.debug("submit job")
job_name = "".join(random.sample(string.ascii_letters + string.digits, 10))
job_name = "remote_dev_{}".format(job_name)
JOB_NAME = job_name
run_subprocess(
"opai job submit -a remote_dev_bed --update name={} {}".format(job_name, job_path))
while True:
LOGGER.info("wait for the job to run")
output = run_subprocess("opai job status -a remote_dev_bed {}".format(job_name))
if output.find("RUNNING") != -1:
break
time.sleep(10)
LOGGER.info("job name: {}".format(job_name))
LOGGER.info("job started")
time.sleep(10)
get_ssh_info(job_name)
def configure_vscode():
LOGGER.debug("configure vscode")
vscode_dir = os.path.join(USER_DIR, ".ssh", "config")
with open(vscode_dir, 'a+') as f:
f.write("\nHost {}\n".format(JOB_NAME))
f.write(" Hostname {}\n".format(SSH_INFO["ip"]))
f.write(" Port {}\n".format(SSH_INFO['port']))
f.write(" User root\n")
f.write(" IdentityFile {}".format(SSH_INFO['key']))
f.close()
def start_ssh(share_path):
if share_path is not None:
run_subprocess(r'{} "apt update && apt install -y nfs-common cifs-utils"'.format(SSH_INFO['cmd']))
run_subprocess(r'{} "mkdir -p /mnt/local"'.format(SSH_INFO['cmd']))
run_subprocess(r'{} "{}"'.format(SSH_INFO['cmd'], SSH_INFO['mount']))
subprocess.run(SSH_INFO['cmd'], shell=IS_SHELL)
run_subprocess("net share dev /delete || cd .")
def main():
init()
args = get_args()
if args.need_verbose is True:
LOGGER.setLevel(logging.DEBUG)
if args.job_name is not None:
if args.job_name.find("~") != -1:
            split_str = args.job_name.split("~")
if len(split_str) == 2:
get_ssh_info(split_str[1])
else:
LOGGER.error("Wrong job name")
else:
get_ssh_info(args.job_name)
if args.job_path is not None:
submit_job(args.job_path)
if args.share_path is not None:
share_local_path(args.share_path)
time.sleep(5)
start_ssh(args.share_path)
if __name__ == "__main__":
main()
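# Example invocations (a sketch; the flag names come from get_args() above, but the
# job config path and job name below are placeholders for your own values):
#   python remote-dev-tool.py -c my_job.yaml -s /path/to/local/code   # submit a job and share a folder
#   python remote-dev-tool.py -g my_job_name -v                       # fetch SSH info for an existing job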
|
venv/Lib/site-packages/pygments/lexers/_vbscript_builtins.py | EkremBayar/bayar | 6,989 | 11072508 | <filename>venv/Lib/site-packages/pygments/lexers/_vbscript_builtins.py
"""
pygments.lexers._vbscript_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These are manually translated lists from
http://www.indusoft.com/pdf/VBScript%20Reference.pdf.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
KEYWORDS = [
'ByRef',
'ByVal',
# dim: special rule
'call',
'case',
'class',
# const: special rule
'do',
'each',
'else',
'elseif',
'end',
'erase',
'execute',
'function',
'exit',
'for',
'function',
'GetRef',
'global',
'if',
'let',
'loop',
'next',
'new',
# option: special rule
'private',
'public',
'redim',
'select',
'set',
'sub',
'then',
'wend',
'while',
'with',
]
BUILTIN_FUNCTIONS = [
'Abs',
'Array',
'Asc',
'Atn',
'CBool',
'CByte',
'CCur',
'CDate',
'CDbl',
'Chr',
'CInt',
'CLng',
'Cos',
'CreateObject',
'CSng',
'CStr',
'Date',
'DateAdd',
'DateDiff',
'DatePart',
'DateSerial',
'DateValue',
'Day',
'Eval',
'Exp',
'Filter',
'Fix',
'FormatCurrency',
'FormatDateTime',
'FormatNumber',
'FormatPercent',
'GetObject',
'GetLocale',
'Hex',
'Hour',
'InStr',
'inStrRev',
'Int',
'IsArray',
'IsDate',
'IsEmpty',
'IsNull',
'IsNumeric',
'IsObject',
'Join',
'LBound',
'LCase',
'Left',
'Len',
'LoadPicture',
'Log',
'LTrim',
'Mid',
'Minute',
'Month',
'MonthName',
'MsgBox',
'Now',
'Oct',
'Randomize',
'RegExp',
'Replace',
'RGB',
'Right',
'Rnd',
'Round',
'RTrim',
'ScriptEngine',
'ScriptEngineBuildVersion',
'ScriptEngineMajorVersion',
'ScriptEngineMinorVersion',
'Second',
'SetLocale',
'Sgn',
'Space',
'Split',
'Sqr',
'StrComp',
'String',
'StrReverse',
'Tan',
'Time',
'Timer',
'TimeSerial',
'TimeValue',
'Trim',
'TypeName',
'UBound',
'UCase',
'VarType',
'Weekday',
'WeekdayName',
'Year',
]
BUILTIN_VARIABLES = [
'Debug',
'Dictionary',
'Drive',
'Drives',
'Err',
'File',
'Files',
'FileSystemObject',
'Folder',
'Folders',
'Match',
'Matches',
'RegExp',
'Submatches',
'TextStream',
]
OPERATORS = [
'+',
'-',
'*',
'/',
'\\',
'^',
'|',
'<',
'<=',
'>',
'>=',
'=',
'<>',
'&',
'$',
]
OPERATOR_WORDS = [
'mod',
'and',
'or',
'xor',
'eqv',
'imp',
'is',
'not',
]
BUILTIN_CONSTANTS = [
'False',
'True',
'vbAbort',
'vbAbortRetryIgnore',
'vbApplicationModal',
'vbArray',
'vbBinaryCompare',
'vbBlack',
'vbBlue',
    'vbBoolean',
'vbByte',
'vbCancel',
'vbCr',
'vbCritical',
'vbCrLf',
'vbCurrency',
'vbCyan',
'vbDataObject',
'vbDate',
'vbDefaultButton1',
'vbDefaultButton2',
'vbDefaultButton3',
'vbDefaultButton4',
'vbDouble',
'vbEmpty',
'vbError',
'vbExclamation',
'vbFalse',
'vbFirstFullWeek',
'vbFirstJan1',
'vbFormFeed',
'vbFriday',
'vbGeneralDate',
'vbGreen',
'vbIgnore',
'vbInformation',
'vbInteger',
'vbLf',
'vbLong',
'vbLongDate',
'vbLongTime',
'vbMagenta',
'vbMonday',
'vbMsgBoxHelpButton',
'vbMsgBoxRight',
'vbMsgBoxRtlReading',
'vbMsgBoxSetForeground',
'vbNewLine',
'vbNo',
'vbNull',
'vbNullChar',
'vbNullString',
'vbObject',
'vbObjectError',
'vbOK',
'vbOKCancel',
'vbOKOnly',
'vbQuestion',
'vbRed',
'vbRetry',
'vbRetryCancel',
'vbSaturday',
'vbShortDate',
'vbShortTime',
'vbSingle',
'vbString',
'vbSunday',
'vbSystemModal',
'vbTab',
'vbTextCompare',
'vbThursday',
'vbTrue',
'vbTuesday',
'vbUseDefault',
'vbUseSystem',
    'vbUseSystemDayOfWeek',
'vbVariant',
'vbVerticalTab',
'vbWednesday',
'vbWhite',
'vbYellow',
'vbYes',
'vbYesNo',
'vbYesNoCancel',
]
|
Algo and DSA/LeetCode-Solutions-master/Python/least-number-of-unique-integers-after-k-removals.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 11072537 | <reponame>Sourav692/FAANG-Interview-Preparation<gh_stars>1000+
# Time: O(n)
# Space: O(n)
import collections
class Solution(object):
def findLeastNumOfUniqueInts(self, arr, k):
"""
:type arr: List[int]
:type k: int
:rtype: int
"""
count = collections.Counter(arr)
result, count_count = len(count), collections.Counter(count.itervalues())
for c in xrange(1, len(arr)+1):
if k < c*count_count[c]:
result -= k//c
break
k -= c*count_count[c]
result -= count_count[c]
return result
|
detectron2/modeling/anchor_generator.py | Nour-7/detectron2 | 171 | 11072538 | <gh_stars>100-1000
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
from typing import List
import torch
from torch import nn
from detectron2.config import configurable
from detectron2.layers import ShapeSpec
from detectron2.structures import Boxes, RotatedBoxes
from detectron2.utils.registry import Registry
ANCHOR_GENERATOR_REGISTRY = Registry("ANCHOR_GENERATOR")
ANCHOR_GENERATOR_REGISTRY.__doc__ = """
Registry for modules that creates object detection anchors for feature maps.
The registered object will be called with `obj(cfg, input_shape)`.
"""
class BufferList(nn.Module):
"""
Similar to nn.ParameterList, but for buffers
"""
def __init__(self, buffers):
super(BufferList, self).__init__()
for i, buffer in enumerate(buffers):
self.register_buffer(str(i), buffer)
def __len__(self):
return len(self._buffers)
def __iter__(self):
return iter(self._buffers.values())
def _create_grid_offsets(size: List[int], stride: int, offset: float, device: torch.device):
grid_height, grid_width = size
shifts_x = torch.arange(
offset * stride, grid_width * stride, step=stride, dtype=torch.float32, device=device
)
shifts_y = torch.arange(
offset * stride, grid_height * stride, step=stride, dtype=torch.float32, device=device
)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
return shift_x, shift_y
def _broadcast_params(params, num_features, name):
"""
If one size (or aspect ratio) is specified and there are multiple feature
maps, we "broadcast" anchors of that single size (or aspect ratio)
over all feature maps.
If params is list[float], or list[list[float]] with len(params) == 1, repeat
it num_features time.
Returns:
list[list[float]]: param for each feature
"""
assert isinstance(
params, (list, tuple)
), f"{name} in anchor generator has to be a list! Got {params}."
assert len(params), f"{name} in anchor generator cannot be empty!"
if not isinstance(params[0], (list, tuple)): # list[float]
return [params] * num_features
if len(params) == 1:
return list(params) * num_features
assert len(params) == num_features, (
f"Got {name} of length {len(params)} in anchor generator, "
f"but the number of input features is {num_features}!"
)
return params
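# For example, _broadcast_params([32, 64], 3, "sizes") returns [[32, 64]] * 3, while a
# fully specified list like [[32], [64], [128]] is validated and returned unchanged.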
@ANCHOR_GENERATOR_REGISTRY.register()
class DefaultAnchorGenerator(nn.Module):
"""
Compute anchors in the standard ways described in
"Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks".
"""
box_dim: int = 4
"""
the dimension of each anchor box.
"""
@configurable
def __init__(self, *, sizes, aspect_ratios, strides, offset=0.5):
"""
This interface is experimental.
Args:
sizes (list[list[float]] or list[float]):
If sizes is list[list[float]], sizes[i] is the list of anchor sizes
(i.e. sqrt of anchor area) to use for the i-th feature map.
If sizes is list[float], the sizes are used for all feature maps.
Anchor sizes are given in absolute lengths in units of
the input image; they do not dynamically scale if the input image size changes.
aspect_ratios (list[list[float]] or list[float]): list of aspect ratios
(i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies.
strides (list[int]): stride of each input feature.
offset (float): Relative offset between the center of the first anchor and the top-left
corner of the image. Value has to be in [0, 1).
Recommend to use 0.5, which means half stride.
"""
super().__init__()
self.strides = strides
self.num_features = len(self.strides)
sizes = _broadcast_params(sizes, self.num_features, "sizes")
aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, "aspect_ratios")
self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios)
self.offset = offset
assert 0.0 <= self.offset < 1.0, self.offset
@classmethod
def from_config(cls, cfg, input_shape: List[ShapeSpec]):
return {
"sizes": cfg.MODEL.ANCHOR_GENERATOR.SIZES,
"aspect_ratios": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS,
"strides": [x.stride for x in input_shape],
"offset": cfg.MODEL.ANCHOR_GENERATOR.OFFSET,
}
def _calculate_anchors(self, sizes, aspect_ratios):
cell_anchors = [
self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios)
]
return BufferList(cell_anchors)
@property
def num_cell_anchors(self):
"""
Alias of `num_anchors`.
"""
return self.num_anchors
@property
def num_anchors(self):
"""
Returns:
list[int]: Each int is the number of anchors at every pixel
location, on that feature map.
For example, if at every pixel we use anchors of 3 aspect
ratios and 5 sizes, the number of anchors is 15.
(See also ANCHOR_GENERATOR.SIZES and ANCHOR_GENERATOR.ASPECT_RATIOS in config)
In standard RPN models, `num_anchors` on every feature map is the same.
"""
return [len(cell_anchors) for cell_anchors in self.cell_anchors]
def _grid_anchors(self, grid_sizes: List[List[int]]):
"""
Returns:
list[Tensor]: #featuremap tensors, each is (#locations x #cell_anchors) x 4
"""
anchors = []
# buffers() not supported by torchscript. use named_buffers() instead
buffers: List[torch.Tensor] = [x[1] for x in self.cell_anchors.named_buffers()]
for size, stride, base_anchors in zip(grid_sizes, self.strides, buffers):
shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device)
shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4))
return anchors
def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)):
"""
Generate a tensor storing canonical anchor boxes, which are all anchor
boxes of different sizes and aspect_ratios centered at (0, 0).
We can later build the set of anchors for a full feature map by
shifting and tiling these tensors (see `meth:_grid_anchors`).
Args:
sizes (tuple[float]):
aspect_ratios (tuple[float]]):
Returns:
Tensor of shape (len(sizes) * len(aspect_ratios), 4) storing anchor boxes
in XYXY format.
"""
# This is different from the anchor generator defined in the original Faster R-CNN
# code or Detectron. They yield the same AP, however the old version defines cell
# anchors in a less natural way with a shift relative to the feature grid and
# quantization that results in slightly different sizes for different aspect ratios.
# See also https://github.com/facebookresearch/Detectron/issues/227
anchors = []
for size in sizes:
area = size ** 2.0
for aspect_ratio in aspect_ratios:
# s * s = w * h
# a = h / w
# ... some algebra ...
# w = sqrt(s * s / a)
# h = a * w
w = math.sqrt(area / aspect_ratio)
h = aspect_ratio * w
x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0
anchors.append([x0, y0, x1, y1])
return torch.tensor(anchors)
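    # With the default 5 sizes and 3 aspect ratios above, the returned tensor has
    # shape (15, 4): one XYXY box per (size, aspect_ratio) pair centered at (0, 0).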
def forward(self, features: List[torch.Tensor]):
"""
Args:
features (list[Tensor]): list of backbone feature maps on which to generate anchors.
Returns:
list[Boxes]: a list of Boxes containing all the anchors for each feature map
(i.e. the cell anchors repeated over all locations in the feature map).
The number of anchors of each feature map is Hi x Wi x num_cell_anchors,
where Hi, Wi are resolution of the feature map divided by anchor stride.
"""
grid_sizes = [feature_map.shape[-2:] for feature_map in features]
anchors_over_all_feature_maps = self._grid_anchors(grid_sizes)
return [Boxes(x) for x in anchors_over_all_feature_maps]
@ANCHOR_GENERATOR_REGISTRY.register()
class RotatedAnchorGenerator(nn.Module):
"""
Compute rotated anchors used by Rotated RPN (RRPN), described in
"Arbitrary-Oriented Scene Text Detection via Rotation Proposals".
"""
box_dim: int = 5
"""
the dimension of each anchor box.
"""
@configurable
def __init__(self, *, sizes, aspect_ratios, strides, angles, offset=0.5):
"""
This interface is experimental.
Args:
sizes (list[list[float]] or list[float]):
If sizes is list[list[float]], sizes[i] is the list of anchor sizes
(i.e. sqrt of anchor area) to use for the i-th feature map.
If sizes is list[float], the sizes are used for all feature maps.
Anchor sizes are given in absolute lengths in units of
the input image; they do not dynamically scale if the input image size changes.
aspect_ratios (list[list[float]] or list[float]): list of aspect ratios
(i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies.
strides (list[int]): stride of each input feature.
angles (list[list[float]] or list[float]): list of angles (in degrees CCW)
to use for anchors. Same "broadcast" rule for `sizes` applies.
offset (float): Relative offset between the center of the first anchor and the top-left
corner of the image. Value has to be in [0, 1).
Recommend to use 0.5, which means half stride.
"""
super().__init__()
self.strides = strides
self.num_features = len(self.strides)
sizes = _broadcast_params(sizes, self.num_features, "sizes")
aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, "aspect_ratios")
angles = _broadcast_params(angles, self.num_features, "angles")
self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios, angles)
self.offset = offset
assert 0.0 <= self.offset < 1.0, self.offset
@classmethod
def from_config(cls, cfg, input_shape: List[ShapeSpec]):
return {
"sizes": cfg.MODEL.ANCHOR_GENERATOR.SIZES,
"aspect_ratios": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS,
"strides": [x.stride for x in input_shape],
"offset": cfg.MODEL.ANCHOR_GENERATOR.OFFSET,
"angles": cfg.MODEL.ANCHOR_GENERATOR.ANGLES,
}
def _calculate_anchors(self, sizes, aspect_ratios, angles):
cell_anchors = [
self.generate_cell_anchors(size, aspect_ratio, angle).float()
for size, aspect_ratio, angle in zip(sizes, aspect_ratios, angles)
]
return BufferList(cell_anchors)
@property
def num_cell_anchors(self):
"""
Alias of `num_anchors`.
"""
return self.num_anchors
@property
def num_anchors(self):
"""
Returns:
list[int]: Each int is the number of anchors at every pixel
location, on that feature map.
For example, if at every pixel we use anchors of 3 aspect
ratios, 2 sizes and 5 angles, the number of anchors is 30.
(See also ANCHOR_GENERATOR.SIZES, ANCHOR_GENERATOR.ASPECT_RATIOS
and ANCHOR_GENERATOR.ANGLES in config)
In standard RRPN models, `num_anchors` on every feature map is the same.
"""
return [len(cell_anchors) for cell_anchors in self.cell_anchors]
def _grid_anchors(self, grid_sizes):
anchors = []
for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors):
shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device)
zeros = torch.zeros_like(shift_x)
shifts = torch.stack((shift_x, shift_y, zeros, zeros, zeros), dim=1)
anchors.append((shifts.view(-1, 1, 5) + base_anchors.view(1, -1, 5)).reshape(-1, 5))
return anchors
def generate_cell_anchors(
self,
sizes=(32, 64, 128, 256, 512),
aspect_ratios=(0.5, 1, 2),
angles=(-90, -60, -30, 0, 30, 60, 90),
):
"""
Generate a tensor storing canonical anchor boxes, which are all anchor
boxes of different sizes, aspect_ratios, angles centered at (0, 0).
We can later build the set of anchors for a full feature map by
shifting and tiling these tensors (see `meth:_grid_anchors`).
Args:
sizes (tuple[float]):
aspect_ratios (tuple[float]]):
angles (tuple[float]]):
Returns:
Tensor of shape (len(sizes) * len(aspect_ratios) * len(angles), 5)
storing anchor boxes in (x_ctr, y_ctr, w, h, angle) format.
"""
anchors = []
for size in sizes:
area = size ** 2.0
for aspect_ratio in aspect_ratios:
# s * s = w * h
# a = h / w
# ... some algebra ...
# w = sqrt(s * s / a)
# h = a * w
w = math.sqrt(area / aspect_ratio)
h = aspect_ratio * w
anchors.extend([0, 0, w, h, a] for a in angles)
return torch.tensor(anchors)
def forward(self, features):
"""
Args:
features (list[Tensor]): list of backbone feature maps on which to generate anchors.
Returns:
list[RotatedBoxes]: a list of Boxes containing all the anchors for each feature map
(i.e. the cell anchors repeated over all locations in the feature map).
The number of anchors of each feature map is Hi x Wi x num_cell_anchors,
where Hi, Wi are resolution of the feature map divided by anchor stride.
"""
grid_sizes = [feature_map.shape[-2:] for feature_map in features]
anchors_over_all_feature_maps = self._grid_anchors(grid_sizes)
return [RotatedBoxes(x) for x in anchors_over_all_feature_maps]
def build_anchor_generator(cfg, input_shape):
"""
Built an anchor generator from `cfg.MODEL.ANCHOR_GENERATOR.NAME`.
"""
anchor_generator = cfg.MODEL.ANCHOR_GENERATOR.NAME
return ANCHOR_GENERATOR_REGISTRY.get(anchor_generator)(cfg, input_shape)
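# A minimal usage sketch (assumes a detectron2 config and backbone output shapes;
# adjust the channel counts and strides to your own model):
#   from detectron2.config import get_cfg
#   cfg = get_cfg()
#   input_shape = [ShapeSpec(channels=256, stride=s) for s in (4, 8, 16, 32, 64)]
#   anchor_gen = build_anchor_generator(cfg, input_shape)
#   anchors = anchor_gen(features)  # `features` = list of per-level feature tensors; returns list[Boxes]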
|
segmentation/utils/test_utils.py | wudongyuan/PanopticCenter | 506 | 11072539 | <filename>segmentation/utils/test_utils.py
# ------------------------------------------------------------------------------
# Utility functions for multi-scale testing.
# Written by Pingjun (https://github.com/bowenc0221/panoptic-deeplab/issues/25)
# Modified by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
import cv2
from collections import OrderedDict
import numpy as np
import torch
import torch.nn.functional as F
import segmentation.data.transforms.transforms as T
def flip_tensor(x, dim):
"""
Flip Tensor along a dimension
"""
dim = x.dim() + dim if dim < 0 else dim
return x[tuple(slice(None, None) if i != dim
else torch.arange(x.size(i) - 1, -1, -1).long()
for i in range(x.dim()))]
def upsample_predictions(pred, input_shape, scale):
# Override upsample method to correctly handle `offset`
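    # Offsets are pixel displacements measured on the scaled input, so after
    # interpolating the maps back to the padded original resolution they are
    # multiplied by 1/scale to keep their magnitudes consistent with that resolution.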
result = OrderedDict()
for key in pred.keys():
out = F.interpolate(pred[key], size=input_shape, mode='bilinear', align_corners=True)
if 'offset' in key: #The order of second dim is (offset_y, offset_x)
out *= 1.0 / scale
result[key] = out
return result
def multi_scale_inference(config, model, raw_image, device):
scales = config.TEST.SCALE_LIST
flip = config.TEST.FLIP_TEST
output_stride = 2 ** (5 - sum(config.MODEL.BACKBONE.DILATION))
train_crop_h, train_crop_w = config.TEST.CROP_SIZE
scale = 1. / output_stride
pool_h = int((float(train_crop_h) - 1.0) * scale + 1.0)
pool_w = int((float(train_crop_w) - 1.0) * scale + 1.0)
# transforms
transforms = T.Compose(
[
T.ToTensor(),
T.Normalize(config.DATASET.MEAN, config.DATASET.STD)
]
)
if flip:
flip_range = 2
else:
flip_range = 1
h,w,_ = raw_image.shape
org_h_pad = (h + 31) // 32 * 32 + 1
org_w_pad = (w + 31) // 32 * 32 + 1
sum_semantic_with_flip = 0
sum_center_with_flip = 0
sum_offset_with_flip = 0
for i in range(len(scales)):
image = raw_image
scale = scales[i]
raw_h = int(h * scale)
raw_w = int(w * scale)
image = cv2.resize(image, (raw_w, raw_h), interpolation=cv2.INTER_LINEAR).astype(np.int32)
# pad image
new_h = (raw_h + 31) // 32 * 32 + 1
new_w = (raw_w + 31) // 32 * 32 + 1
input_image = np.zeros((new_h, new_w, 3), dtype=np.uint8)
input_image[:, :] = config.DATASET.MEAN
input_image[:raw_h, :raw_w, :] = image
image, _ = transforms(input_image, None)
image = image.unsqueeze(0).to(device)
if new_h < train_crop_h or new_w < train_crop_w:
model.set_image_pooling(None)
else:
model.set_image_pooling((pool_h, pool_w))
model = model.to(device)
for flip in range(flip_range):
if flip:
image = flip_tensor(image, 3)
out_dict = model(image)
for key in out_dict.keys(): # return to raw_input shape
out_dict[key] = out_dict[key][:, :, : raw_h, : raw_w]
if raw_h != org_h_pad or raw_w != org_w_pad:
out_dict = upsample_predictions(out_dict, (org_h_pad, org_w_pad), scale)
# average softmax or logit?
semantic_pred = F.softmax(out_dict['semantic'],dim=1)
center_pred = out_dict['center']
offset_pred = out_dict['offset']
if flip:
semantic_pred = flip_tensor(semantic_pred,3)
center_pred = flip_tensor(center_pred,3)
offset_pred = flip_tensor(offset_pred,3)
offset_pred[:, 1, :, :] *= (-1)
sum_semantic_with_flip += semantic_pred
sum_center_with_flip += center_pred
sum_offset_with_flip += offset_pred
semantic_mean = sum_semantic_with_flip / (flip_range * len(scales))
center_mean = sum_center_with_flip / (flip_range * len(scales))
offset_mean = sum_offset_with_flip / (flip_range * len(scales))
out_dict['semantic'] = semantic_mean
out_dict['center'] = center_mean
out_dict['offset'] = offset_mean
return out_dict |
pypower/uopf.py | Bengt/PYPOWER | 221 | 11072571 | # Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Solves combined unit decommitment / optimal power flow.
"""
from time import time
from copy import deepcopy
from numpy import flatnonzero as find
from pypower.opf_args import opf_args2
from pypower.ppoption import ppoption
from pypower.isload import isload
from pypower.totcost import totcost
from pypower.fairmax import fairmax
from pypower.opf import opf
from pypower.idx_bus import PD
from pypower.idx_gen import GEN_STATUS, PG, QG, PMIN, MU_PMIN
def uopf(*args):
"""Solves combined unit decommitment / optimal power flow.
Solves a combined unit decommitment and optimal power flow for a single
time period. Uses an algorithm similar to dynamic programming. It proceeds
through a sequence of stages, where stage C{N} has C{N} generators shut
down, starting with C{N=0}. In each stage, it forms a list of candidates
(gens at their C{Pmin} limits) and computes the cost with each one of them
shut down. It selects the least cost case as the starting point for the
next stage, continuing until there are no more candidates to be shut down
or no more improvement can be gained by shutting something down.
If C{verbose} in ppopt (see L{ppoption} is C{true}, it prints progress
info, if it is > 1 it prints the output of each individual opf.
@see: L{opf}, L{runuopf}
@author: <NAME> (PSERC Cornell)
"""
##----- initialization -----
t0 = time() ## start timer
## process input arguments
ppc, ppopt = opf_args2(*args)
## options
verbose = ppopt["VERBOSE"]
if verbose: ## turn down verbosity one level for calls to opf
ppopt = ppoption(ppopt, VERBOSE=verbose - 1)
##----- do combined unit commitment/optimal power flow -----
## check for sum(Pmin) > total load, decommit as necessary
on = find( (ppc["gen"][:, GEN_STATUS] > 0) & ~isload(ppc["gen"]) ) ## gens in service
onld = find( (ppc["gen"][:, GEN_STATUS] > 0) & isload(ppc["gen"]) ) ## disp loads in serv
load_capacity = sum(ppc["bus"][:, PD]) - sum(ppc["gen"][onld, PMIN]) ## total load capacity
Pmin = ppc["gen"][on, PMIN]
while sum(Pmin) > load_capacity:
## shut down most expensive unit
avgPmincost = totcost(ppc["gencost"][on, :], Pmin) / Pmin
_, i = fairmax(avgPmincost) ## pick one with max avg cost at Pmin
i = on[i] ## convert to generator index
if verbose:
print('Shutting down generator %d so all Pmin limits can be satisfied.\n' % i)
## set generation to zero
ppc["gen"][i, [PG, QG, GEN_STATUS]] = 0
## update minimum gen capacity
on = find( (ppc["gen"][:, GEN_STATUS] > 0) & ~isload(ppc["gen"]) ) ## gens in service
Pmin = ppc["gen"][on, PMIN]
## run initial opf
results = opf(ppc, ppopt)
## best case so far
results1 = deepcopy(results)
## best case for this stage (ie. with n gens shut down, n=0,1,2 ...)
results0 = deepcopy(results1)
ppc["bus"] = results0["bus"].copy() ## use these V as starting point for OPF
while True:
## get candidates for shutdown
candidates = find((results0["gen"][:, MU_PMIN] > 0) & (results0["gen"][:, PMIN] > 0))
if len(candidates) == 0:
break
## do not check for further decommitment unless we
## see something better during this stage
done = True
for k in candidates:
## start with best for this stage
ppc["gen"] = results0["gen"].copy()
## shut down gen k
ppc["gen"][k, [PG, QG, GEN_STATUS]] = 0
## run opf
results = opf(ppc, ppopt)
## something better?
if results['success'] and (results["f"] < results1["f"]):
results1 = deepcopy(results)
k1 = k
done = False ## make sure we check for further decommitment
if done:
## decommits at this stage did not help, so let's quit
break
else:
## shutting something else down helps, so let's keep going
if verbose:
print('Shutting down generator %d.\n' % k1)
results0 = deepcopy(results1)
ppc["bus"] = results0["bus"].copy() ## use these V as starting point for OPF
## compute elapsed time
et = time() - t0
## finish preparing output
results0['et'] = et
return results0
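# A minimal usage sketch (assumes one of the bundled PYPOWER cases; any case dict
# accepted by opf() works the same way):
#   from pypower.case9 import case9
#   from pypower.ppoption import ppoption
#   results = uopf(case9(), ppoption(VERBOSE=1))
#   print(results['success'], results['f'])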
|
flex/loading/common/single_header/collection_format.py | maroux/flex | 160 | 11072621 | from flex.constants import (
STRING,
CSV,
COLLECTION_FORMATS,
)
from flex.validation.common import (
generate_object_validator,
)
collection_format_schema = {
'type': STRING,
'default': CSV,
'enum': COLLECTION_FORMATS,
}
collection_format_validator = generate_object_validator(
schema=collection_format_schema,
)
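# Rough behaviour (assuming COLLECTION_FORMATS enumerates the Swagger 2.0 values such
# as 'csv', 'ssv', 'tsv', 'pipes' and 'multi'): collection_format_validator('csv')
# passes, while an unrecognised string raises a flex validation error.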
|
env/lib/python3.8/site-packages/numpy/testing/print_coercion_tables.py | acrucetta/Chicago_COVI_WebApp | 1,738 | 11072625 | <filename>env/lib/python3.8/site-packages/numpy/testing/print_coercion_tables.py
#!/usr/bin/env python
"""Prints type-coercion tables for the built-in NumPy types
"""
from __future__ import division, absolute_import, print_function
import numpy as np
# Generic object that can be added, but doesn't do anything else
class GenericObject(object):
def __init__(self, v):
self.v = v
def __add__(self, other):
return self
def __radd__(self, other):
return self
dtype = np.dtype('O')
def print_cancast_table(ntypes):
print('X', end=' ')
for char in ntypes:
print(char, end=' ')
print()
for row in ntypes:
print(row, end=' ')
for col in ntypes:
print(int(np.can_cast(row, col)), end=' ')
print()
def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False):
print('+', end=' ')
for char in ntypes:
print(char, end=' ')
print()
for row in ntypes:
if row == 'O':
rowtype = GenericObject
else:
rowtype = np.obj2sctype(row)
print(row, end=' ')
for col in ntypes:
if col == 'O':
coltype = GenericObject
else:
coltype = np.obj2sctype(col)
try:
if firstarray:
rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype)
else:
rowvalue = rowtype(inputfirstvalue)
colvalue = coltype(inputsecondvalue)
if use_promote_types:
char = np.promote_types(rowvalue.dtype, colvalue.dtype).char
else:
value = np.add(rowvalue, colvalue)
if isinstance(value, np.ndarray):
char = value.dtype.char
else:
char = np.dtype(type(value)).char
except ValueError:
char = '!'
except OverflowError:
char = '@'
except TypeError:
char = '#'
print(char, end=' ')
print()
if __name__ == '__main__':
print("can cast")
print_cancast_table(np.typecodes['All'])
print()
print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
print()
print("scalar + scalar")
print_coercion_table(np.typecodes['All'], 0, 0, False)
print()
print("scalar + neg scalar")
print_coercion_table(np.typecodes['All'], 0, -1, False)
print()
print("array + scalar")
print_coercion_table(np.typecodes['All'], 0, 0, True)
print()
print("array + neg scalar")
print_coercion_table(np.typecodes['All'], 0, -1, True)
print()
print("promote_types")
print_coercion_table(np.typecodes['All'], 0, 0, False, True)
|
agate/table/line_chart.py | andriyor/agate | 663 | 11072647 | <reponame>andriyor/agate
#!/usr/bin/env python
# pylint: disable=W0212
import leather
def line_chart(self, x=0, y=1, path=None, width=None, height=None):
"""
Render a line chart using :class:`leather.Chart`.
:param x:
The name or index of a column to plot as the x-axis. Defaults to the
first column in the table.
:param y:
The name or index of a column to plot as the y-axis. Defaults to the
second column in the table.
:param path:
If specified, the resulting SVG will be saved to this location. If
:code:`None` and running in IPython, then the SVG will be rendered
inline. Otherwise, the SVG data will be returned as a string.
:param width:
The width of the output SVG.
:param height:
The height of the output SVG.
"""
if type(x) is int:
x_name = self.column_names[x]
else:
x_name = x
if type(y) is int:
y_name = self.column_names[y]
else:
y_name = y
chart = leather.Chart()
chart.add_x_axis(name=x_name)
chart.add_y_axis(name=y_name)
chart.add_line(self, x=x, y=y)
return chart.to_svg(path=path, width=width, height=height)
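# Typical usage (a sketch; in agate this function is attached to Table as a method,
# so the call below assumes an existing `table` with the named columns):
#   table.line_chart('date', 'value', path='line.svg')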
|
test/integration/samples_in/simple_percent_comment.py | Inveracity/flynt | 487 | 11072648 | <filename>test/integration/samples_in/simple_percent_comment.py
var = 5
a = "Test: %s" % var # cool formatting!
|
v2.9.7/libxml2-2.9.7/python/tests/xpath.py | yapingxin/libxml2 | 11,356 | 11072685 | <filename>v2.9.7/libxml2-2.9.7/python/tests/xpath.py
#!/usr/bin/python -u
#
# this test exercise the XPath basic engine, parser, etc, and
# allows to detect memory leaks
#
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
doc = libxml2.parseFile("tst.xml")
if doc.name != "tst.xml":
print("doc.name error")
    sys.exit(1)
ctxt = doc.xpathNewContext()
res = ctxt.xpathEval("//*")
if len(res) != 2:
print("xpath query: wrong node set size")
sys.exit(1)
if res[0].name != "doc" or res[1].name != "foo":
print("xpath query: wrong node set value")
sys.exit(1)
ctxt.setContextNode(res[0])
res = ctxt.xpathEval("foo")
if len(res) != 1:
print("xpath query: wrong node set size")
sys.exit(1)
if res[0].name != "foo":
print("xpath query: wrong node set value")
sys.exit(1)
doc.freeDoc()
ctxt.xpathFreeContext()
i = 1000
while i > 0:
doc = libxml2.parseFile("tst.xml")
ctxt = doc.xpathNewContext()
res = ctxt.xpathEval("//*")
doc.freeDoc()
ctxt.xpathFreeContext()
i = i -1
del ctxt
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print("OK")
else:
print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
libxml2.dumpMemory()
|
benchmark/python/sparse/updater.py | Vikas-kum/incubator-mxnet | 228 | 11072698 | <filename>benchmark/python/sparse/updater.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import mxnet as mx
from mxnet.ndarray.sparse import adam_update
import numpy as np
import argparse
mx.random.seed(0)
np.random.seed(0)
parser = argparse.ArgumentParser(description='Benchmark adam updater')
parser.add_argument('--dim-in', type=int, default=240000, help='weight.shape[0]')
parser.add_argument('--dim-out', type=int, default=512, help='weight.shape[1]')
parser.add_argument('--nnr', type=int, default=5000, help='grad.indices.shape[0]')
parser.add_argument('--repeat', type=int, default=1000, help='num repeat')
parser.add_argument('--dense-grad', action='store_true',
help='if set to true, both gradient and weight are dense.')
parser.add_argument('--dense-state', action='store_true',
help='if set to true, states are dense, indicating standard update')
parser.add_argument('--cpu', action='store_true')
args = parser.parse_args()
dim_in = args.dim_in
dim_out = args.dim_out
nnr = args.nnr
ctx = mx.cpu() if args.cpu else mx.gpu()
ones = mx.nd.ones((dim_in, dim_out), ctx=ctx)
if not args.dense_grad:
weight = ones.tostype('row_sparse')
indices = np.arange(dim_in)
np.random.shuffle(indices)
indices = np.unique(indices[:nnr])
indices = mx.nd.array(indices, ctx=ctx)
grad = mx.nd.sparse.retain(weight, indices)
else:
weight = ones.copy()
grad = ones.copy()
if args.dense_state:
mean = ones.copy()
else:
mean = ones.tostype('row_sparse')
var = mean.copy()
# warmup
for i in range(10):
adam_update(weight, grad, mean, var, out=weight, lr=1, wd=0, beta1=0.9,
beta2=0.99, rescale_grad=0.5, epsilon=1e-8)
weight.wait_to_read()
# measure speed
a = time.time()
for i in range(args.repeat):
adam_update(weight, grad, mean, var, out=weight, lr=1, wd=0, beta1=0.9,
beta2=0.99, rescale_grad=0.5, epsilon=1e-8)
weight.wait_to_read()
b = time.time()
print(b - a)
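# The printed value is the total wall-clock time in seconds for args.repeat updates;
# divide by args.repeat to get the average cost of a single adam_update call.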
|
examples/shadertoy.py | szabolcsdombi/zengl | 116 | 11072712 | import struct
import zengl
from window import Window
window = Window(1280, 720)
ctx = zengl.context()
image = ctx.image(window.size, 'rgba8unorm')
uniform_buffer = ctx.buffer(size=64)
# Tested with:
# Happy Jumping - https://www.shadertoy.com/view/3lsSzf
# Raymarching - Primitives - https://www.shadertoy.com/view/Xds3zN
# GLSL ray tracing test - https://www.shadertoy.com/view/3sc3z4
# Ray Marching: Part 6 - https://www.shadertoy.com/view/4tcGDr
# Seascape - https://www.shadertoy.com/view/Ms2SD1
# Mandelbulb - https://www.shadertoy.com/view/MdXSWn
# Paste your code below
shadertoy = '''
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
// Normalized pixel coordinates (from 0 to 1)
vec2 uv = fragCoord/iResolution.xy;
// Time varying pixel color
vec3 col = 0.5 + 0.5*cos(iTime+uv.xyx+vec3(0,2,4));
// Output to screen
fragColor = vec4(col,1.0);
}
'''
ctx.includes['shadertoy'] = shadertoy
ctx.includes['uniforms'] = '''
layout (std140) uniform Uniforms {
vec3 iResolution;
float iTime;
float iTimeDelta;
int iFrame;
vec4 iMouse;
vec4 iDate;
};
'''
canvas = ctx.pipeline(
vertex_shader='''
#version 330
vec2 positions[3] = vec2[](
vec2(-1.0, -1.0),
vec2(3.0, -1.0),
vec2(-1.0, 3.0)
);
void main() {
gl_Position = vec4(positions[gl_VertexID], 0.0, 1.0);
}
''',
fragment_shader='''
#version 330
#include "uniforms"
#include "shadertoy"
layout (location = 0) out vec4 shader_color_output;
void main() {
mainImage(shader_color_output, gl_FragCoord.xy);
}
''',
layout=[
{
'name': 'Uniforms',
'binding': 0,
},
],
resources=[
{
'type': 'uniform_buffer',
'binding': 0,
'buffer': uniform_buffer,
},
],
framebuffer=[image],
topology='triangles',
vertex_count=3,
)
ubo = struct.Struct('=3f1f1f1i8x4f4f')
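# The struct format mirrors the std140 "Uniforms" block above:
#   3f -> vec3 iResolution, 1f -> float iTime, 1f -> float iTimeDelta, 1i -> int iFrame,
#   8x -> padding so vec4 iMouse starts on a 16-byte boundary, then 4f iMouse and 4f iDate,
#   giving 64 bytes total, matching the uniform_buffer size.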
last_time = window.time
frame = 0
while window.update():
image.clear()
uniform_buffer.write(ubo.pack(
window.width, window.height, 0.0,
window.time,
window.time - last_time,
frame,
window.mouse[0], window.mouse[1], 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
))
canvas.render()
image.blit()
    # advance last_time each frame so iTimeDelta is the per-frame delta, not total elapsed time
    last_time = window.time
    frame += 1
|
Sketchbots/sw/labqueue/support/handlers.py | rlugojr/ChromeWebLab | 306 | 11072785 | # Copyright 2013 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpful classes for Webapp2 handlers
"""
from lask.server.rpc import *
import webapp2
from google.appengine.ext import db
from lask.core.model import *
from lask.core.exception import *
try: import simplejson as json
except ImportError: import json
from support.modeling import SimplejsonModelRegistry
from lask.server import rpc
import inspect
import re
import datetime
import logging
import config
import sys, traceback
import jinja2
import os
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(config.HELP_TEMPLATES_PATH))
class JSONResponseHandler(webapp2.RequestHandler):
"""A webapp2-style request handler which can represent responses as JSON-encoded strings
in the response body.
"""
__init_time = None
cors_allow_patterns = None
_is_authorized_request = False
def __init__(self, *args, **kwargs):
super(JSONResponseHandler, self).__init__(*args, **kwargs)
self.__init_time = datetime.datetime.now()
def add_cors_response_headers(self):
""" Adds appropriate CORS response headers, if needed by the request.
"""
# check if the request came from a CORS-allowed origin
if self.cors_allow_patterns is not None:
if ('*' in self.cors_allow_patterns) or (r"*" in self.cors_allow_patterns) or (u'*' in self.cors_allow_patterns):
self.response.headers.add_header('Access-Control-Allow-Origin', '*')
else:
# # per http://www.w3.org/TR/cors/#access-control-allow-origin-response-header
# # check if the request Origin header matches anything in cors_allow_patterns
# # and if so, echo it back to the client
# origin = self.request.headers.get('Origin')
# if origin is not None:
# for exp in self.cors_allow_patterns:
# if re.match(exp, origin):
# self.response.headers.add_header('Access-Control-Allow-Origin', origin)
# break
pass
self.response.headers.add_header('Access-Control-Allow-Headers', 'Accept, Origin, Content-Type, Depth, User-Agent, Cache-Control, X-Requested-With, X-Request, Authorization');
def respond(self, obj, handler_name=None):
"""Will put obj in a suitable wrapper and then put the JSON-encoded string
representation of that in the response. The response will be of type text/plain.
"""
self.add_cors_response_headers()
if isinstance(obj, Exception):
# if the exception is HTTP-compatible it may specify a status code
if hasattr(obj, 'HTTP_status_code') and obj.HTTP_status_code:
status_int = obj.HTTP_status_code
else:
status_int = 500
# if the exception is HTTP-compatible it may specify some response headers (e.g. Allow for 405 errors)
if hasattr(obj, 'HTTP_response_headers') and obj.HTTP_response_headers:
for h in obj.HTTP_response_headers:
self.response.headers.add_header(h, obj.HTTP_response_headers[h])
if hasattr(obj, 'HTTP_message') and obj.HTTP_message:
status_message = obj.HTTP_message
else:
status_message = '(%s) %s %s' % (obj.__class__.__name__, HTTPException.get_default_message_for_status(status_int), obj)
status_type = 'error'
status_is_error = True
# attempt to generate a human-readable traceback, but don't raise
# if there is a problem doing so
# payload = 'exception'
payload = {'exception': {
'traceback': None,
}}
try:
exc_type, exc_value, exc_traceback = sys.exc_info()
# exctype, tb = sys.exc_info()[:2]
tb = traceback.format_exception(exc_type, exc_value, exc_traceback)
payload['exception']['traceback'] = tb
except:
pass
self.error(status_int)
extra_status_info = {
'request': {
'url': self.request.url,
'remote_addr': self.request.remote_addr,
# 'body': self.request.body, # do not include body; if the user uploaded a binary file, including that here will cause an error
'arguments': {},
'headers': {},
'query_string': self.request.query_string,
}
}
for h in self.request.headers:
# print h
extra_status_info['request']['headers'][h] = self.request.headers[h]
args = self.request.params
# logging.info(self.request.params)
for k in args:
try:
extra_status_info['request']['arguments'][k] = str(args[k])
except UnicodeEncodeError:
extra_status_info['request']['arguments'][k] = '(value contains binary)'
# log this exception as well
if status_int == 404 or isinstance(obj, WrongTaskStateError): # and str(self.request.path).startswith('/api/tags/'):
# treat tag 404's as warnings
logging.warn(status_message)
else:
if 'GUID' in args:
logging.info('worker_guid=%s (GUID)' % args['GUID'])
elif 'worker_guid' in args:
logging.info('worker_guid=%s (worker_guid)' % args['worker_guid'])
elif 'created_by_guid' in args:
logging.info('worker_guid=%s (created_by_guid)' % args['created_by_guid'])
elif 'assignee_guid' in args:
logging.info('worker_guid=%s (assignee_guid)' % args['assignee_guid'])
logging.exception(status_message)
else:
status_int = 200
status_message = 'OK'
status_type = 'ok'
status_is_error = False
payload = obj
extra_status_info = None
self.response.status_int = status_int
self.response.status_message = status_message
self.response.status = '%i %s' % (status_int, status_message)
# self.response.headers['Content-Type'] = 'text/plain'
if handler_name is None:
handler_name = self.__class__.__name__
# set the content type appropriately
if (self.request.get('_rtype', None) == 'text') or ('accept' in self.request.headers and ('*/*' not in self.request.headers['accept']) and ('application/json' not in self.request.headers['accept'])):
self.response.content_type = 'text/plain'
human_readable = True
else:
self.response.content_type = 'application/json'
human_readable = False
# this is the general form of the object which wraps the result:
response_obj = {
'status': {
'code': status_int,
'message': status_message,
'type': status_type,
'is_error': status_is_error,
'handler': handler_name,
'extra_info': extra_status_info,
},
'result': payload
}
response_obj['status']['pre_output_proc_time_sec'] = (datetime.datetime.now() - self.__init_time).total_seconds()
if not self._is_authorized_request:
if config.ALLOW_UNAUTHENTICATED_USE_WITH_WARNING:
response_obj['status']['auth_warning'] = '****** WARNING: This request would be DENIED by the production server due to an invalid or missing signature. The request must include a valid signature in the Authorization header or _auth request parameter. ******'
logging.warn('****** This request would be DENIED by the production server due to an invalid or missing signature. The request must include a valid signature in the Authorization header or _auth request parameter. ******')
# else:
# response_obj['status']['auth_warning'] = '****** WARNING: This request would normally be DENIED due to an invalid or missing signature. It is being allowed because you are an authorized developer. The request must include a valid signature in the Authorization header or _auth request parameter. ******'
# logging.warn('****** This request would normally be DENIED due to an invalid or missing signature. It is being allowed because you are an authorized developer. The request must include a valid signature in the Authorization header or _auth request parameter. ******')
j = json.dumps(response_obj,
default=SimplejsonModelRegistry.default,
indent=human_readable) # setting human_readable to True will cause output to be indented with line breaks
self.response.out.write(j)
# and finally, if the object is an Exception raise it so that
# we can actually fix the problem when debugging
# if isinstance(obj, Exception):
# raise obj
class JSONResponseRPCHandler(JSONResponseHandler):
_raise_exceptions = False
_enable_help = False
_v_cahced_target = None
def get_allowed_methods_list(self, *args, **kwargs):
L = []
L.append('OPTIONS')
if (self.get_GET_method(*args, **kwargs) is not None):
L.append('GET')
if (self.get_POST_method(*args, **kwargs) is not None):
L.append('POST')
return L
def target(self):
raise Exception('The %s handler must override the target() method!' % (self.__class__.__name__))
def cached_target(self, *args, **kwargs):
if self._v_cahced_target is None:
self._v_cahced_target = self.target(*args, **kwargs)
return self._v_cahced_target
def auth_check(self):
""" Do an authorization check on the current request.
Will set self._is_authorized_request to True and then
return True if the request can proceed, or False if not.
If config.ALLOW_UNAUTHENTICATED_USE_WITH_WARNING is enabled then this
method should always return True. If it would have returned
False under normal cirumcstances then it will return True and
set self._is_authorized_request to False.
"""
if self._is_authorized_request:
return True
if self.is_cron_handler():
# always let cron handlers run
self._is_authorized_request = True
return True
# cron handlers pass if the current user is an admin
# user = users.get_current_user()
# if user and users.is_current_user_admin():
# self._is_authorized_request = True
# return True
auth, key_used, expected_signature, candidate_signature = AppCredentials.authenticate_request(self.request, UserCredentials.get_current())
if not auth:
self._is_authorized_request = False
# cannot authenticate the request
user_cred = UserCredentials.get_current()
if user_cred is not None and user_cred.authorized_user and user_cred.app_key is not None:
return True
elif not config.ALLOW_UNAUTHENTICATED_USE_WITH_WARNING:
try:
msg = 'Unauthorized (%s, %s, %s)' % (key_used, expected_signature, candidate_signature)
except:
msg = 'Unauthorized (could not determine key)'
self.respond(HTTPException(401, msg), self.__class__.__name__)
else:
self._is_authorized_request = True
return config.ALLOW_UNAUTHENTICATED_USE_WITH_WARNING or self._is_authorized_request
def options(self, *args, **kwargs):
""" ENTRY POINT for OPTIONS requests.
"""
logging.info('JSONResponseRPCHandler.options()')
self.add_cors_response_headers()
# add headers for the HTTP methods supported
self.response.headers.add_header('Access-Control-Allow-Methods', ', '.join(self.get_allowed_methods_list()))
self.response.headers.add_header('Allow', ', '.join(self.get_allowed_methods_list(*args, **kwargs)))
self.response.status_int = 200
# self.response.out.write('OK')
def get(self, *args, **kwargs):
""" ENTRY POINT for GET requests.
"""
self.add_cors_response_headers()
# check authorization!
if not self.auth_check():
return
# if help is enabled and needed, provide it and stop
method_name = self.get_GET_method(*args,**kwargs)
target_exception = None
t = None
try:
t = self.cached_target(*args,**kwargs)
except Exception as ex:
target_exception = ex # deal with the exception later
#
# Help is handled on GET requests (even if the help is about the POST method)
#
if self._enable_help and self.provided_help(t, method_name, t, self.get_POST_method(*args,**kwargs), target_exception):
return
if target_exception is not None:
return self.respond(target_exception, self.__class__.__name__)
elif hasattr(self, 'special_get'):
return self.special_get(*args, **kwargs)
elif method_name is not None:
# perform the request
return self.respond_basic_rpc(t, method_name)
else:
# no python method mapped to this HTTP method
return self.respond(HTTPException(405, HTTP_response_headers={ 'Allow': ', '.join(self.get_allowed_methods_list(*args, **kwargs)) }), self.__class__.__name__)
def post(self, *args, **kwargs):
""" ENTRY POINT for POST requests.
"""
self.add_cors_response_headers()
# check authorization!
if not self.auth_check():
return
method_name = self.get_POST_method(*args, **kwargs)
target_exception = None
t = None
try:
t = self.cached_target(*args,**kwargs)
except Exception as ex:
target_exception = ex # deal with the exception later
if target_exception is not None:
return self.respond(target_exception, self.__class__.__name__)
elif hasattr(self, 'special_post'):
return self.special_post(*args, **kwargs)
elif method_name is not None:
# perform the request
return self.respond_basic_rpc(t, method_name)
else:
# no python method mapped to this HTTP method
return self.respond(HTTPException(405, HTTP_response_headers={ 'Allow': ', '.join(self.get_allowed_methods_list(*args, **kwargs)) }), self.__class__.__name__)
def is_cron_handler(self):
if hasattr(self, '_is_cron_handler') and self._is_cron_handler:
return True
else:
return False
def get_GET_method(self, *args, **kwargs):
return self._GET_method
def get_POST_method(self, *args, **kwargs):
return self._POST_method
def respond_basic_rpc(self, target, m):
"""Perform the simplest bype of HTTP to RPC mapping
"""
if target is None:
return self.respond(HTTPException(404), self.__class__.__name__)
# return self.quick_error(404) # respond with HTTP-style error
# return self.respond(None, self.__class__.__name__)
# see if the request was sent with JSON-encoding on paramter values
# if self.request.get('_input_style', 'JSON') == 'JSON':
# j = True
# else:
# j = False
# try the RPC method and then display a nice error message in the
# response if there was a problem
try:
self.respond(
rpc.LaskRPC.request(
target,
m,
self.request.params,
[m],
JSONin=True,
m_name_for_messages=self.request.method,
target_name_for_messages=self.request.path,
),
self.__class__.__name__)
except Exception as ex:
self.respond(ex, self.__class__.__name__)
if self._raise_exceptions:
raise
def provided_help(self, get_target, get_method, post_target, post_method, target_exception):
"""If needed, provides help to the caller. Returns True if help was provided, otherwise False.
"""
method_name = None
target = None
h = self.request.params.get('HELP')
if h is None:
return False # no help needed
elif h == 'GET':
method_name = get_method
target = get_target
elif h == 'POST':
method_name = post_method
target = post_target
else:
method_name = None
url = self.request.path
if target is None or method_name is None:
args = []
method_docs = ''
else:
c = rpc.Console(target, method_name)
args = enumerate(c.get_mehod_args())
method_docs = c.get_html_docs()
test_app_key = None
test_app_secret = None
user_cred = UserCredentials.get_current()
if user_cred is not None:
test_app_key = user_cred.app_key
app_cred = AppCredentials.get_for_app_key(test_app_key)
if app_cred is not None:
test_app_secret = app_cred.secret
template = jinja_environment.get_template('object_method.html')
self.response.out.write(template.render({
'app_key': test_app_key,
'app_secret': test_app_secret,
'target': target,
'target_exception': target_exception,
'target_url':url,
'method_docs': method_docs,
'method_name': h,
'method_args_enum': args
}))
return True
class HTTPException(Exception):
"""A simple class used to represent various basic HTTP error responses
"""
HTTP_status_code = 500
HTTP_message = None
HTTP_response_headers = None
@classmethod
def get_default_message_for_status(cls, HTTP_status_code):
HTTP_message = None
if HTTP_status_code == 404:
HTTP_message = 'The requested object was not found.'
elif HTTP_status_code == 405:
HTTP_message = 'The requested object exists, but does not respond to the requested method. See the Allow header in this response for a list of allowed methods.'
elif HTTP_status_code == 400:
HTTP_message = 'There was an error in your request, such as a missing or invalid parameter.'
elif HTTP_status_code == 500:
HTTP_message = 'There was an error trying to fulfil your request.'
return HTTP_message
def __init__(self, HTTP_status_code, HTTP_message=None, HTTP_response_headers=None):
self.HTTP_status_code = HTTP_status_code
if HTTP_message is None:
HTTP_message = HTTPException.get_default_message_for_status(HTTP_status_code)
self.HTTP_message = HTTP_message
self.HTTP_response_headers = HTTP_response_headers
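# Example (mirrors how the GET/POST entry points above use this class):
#   self.respond(HTTPException(405, HTTP_response_headers={'Allow': 'GET, POST'}), self.__class__.__name__)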
|
Projects/robosat-master/robosat/tools/train.py | DanielMabadeje/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 3,266 | 11072793 | <reponame>DanielMabadeje/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials
import os
import sys
import argparse
import collections
from PIL import Image
import torch
import torch.backends.cudnn
from torch.nn import DataParallel
from torch.optim import SGD
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Resize, CenterCrop, Normalize
from tqdm import tqdm
from robosat.transforms import MaskToTensor, ConvertImageMode, ImageToTensor
from robosat.datasets import SlippyMapTilesConcatenation
from robosat.metrics import MeanIoU
from robosat.losses import CrossEntropyLoss2d
from robosat.unet import UNet
from robosat.utils import plot
from robosat.config import load_config
def add_parser(subparser):
parser = subparser.add_parser('train', help='trains model on dataset',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model', type=str, required=True, help='path to model configuration file')
parser.add_argument('--dataset', type=str, required=True, help='path to dataset configuration file')
parser.add_argument('--resume', type=str, required=False, help='checkpoint to resume training from')
parser.set_defaults(func=main)
def main(args):
model = load_config(args.model)
dataset = load_config(args.dataset)
device = torch.device('cuda' if model['common']['cuda'] else 'cpu')
if model['common']['cuda'] and not torch.cuda.is_available():
sys.exit('Error: CUDA requested but not available')
# if args.batch_size < 2:
# sys.exit('Error: PSPNet requires more than one image for BatchNorm in Pyramid Pooling')
os.makedirs(model['common']['checkpoint'], exist_ok=True)
num_classes = len(dataset['common']['classes'])
net = UNet(num_classes).to(device)
if args.resume:
path = os.path.join(model['common']['checkpoint'], args.resume)
cuda = model['common']['cuda']
def map_location(storage, _):
return storage.cuda() if cuda else storage.cpu()
chkpt = torch.load(path, map_location=map_location)
net.load_state_dict(chkpt)
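# checkpoints are written below as 'checkpoint-{:05d}-of-{:05d}.pth', so characters 11:16 of the filename hold the zero-padded epoch number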
resume_at_epoch = int(args.resume[11:16])
else:
resume_at_epoch = 0
if model['common']['cuda']:
torch.backends.cudnn.benchmark = True
net = DataParallel(net)
optimizer = SGD(net.parameters(), lr=model['opt']['lr'], momentum=model['opt']['momentum'])
scheduler = MultiStepLR(optimizer, milestones=model['opt']['milestones'], gamma=model['opt']['gamma'])
weight = torch.Tensor(dataset['weights']['values'])
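# fast-forward the LR scheduler so a resumed run continues with the learning rate it would have had at this epoch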
for i in range(resume_at_epoch):
scheduler.step()
criterion = CrossEntropyLoss2d(weight=weight).to(device)
# criterion = FocalLoss2d(weight=weight).to(device)
train_loader, val_loader = get_dataset_loaders(model, dataset)
num_epochs = model['opt']['epochs']
history = collections.defaultdict(list)
for epoch in range(resume_at_epoch, num_epochs):
print('Epoch: {}/{}'.format(epoch + 1, num_epochs))
train_hist = train(train_loader, num_classes, device, net, optimizer, scheduler, criterion)
print('Train loss: {:.4f}, mean IoU: {:.4f}'.format(train_hist['loss'], train_hist['iou']))
for k, v in train_hist.items():
history['train ' + k].append(v)
val_hist = validate(val_loader, num_classes, device, net, criterion)
print('Validate loss: {:.4f}, mean IoU: {:.4f}'.format(val_hist['loss'], val_hist['iou']))
for k, v in val_hist.items():
history['val ' + k].append(v)
visual = 'history-{:05d}-of-{:05d}.png'.format(epoch + 1, num_epochs)
plot(os.path.join(model['common']['checkpoint'], visual), history)
checkpoint = 'checkpoint-{:05d}-of-{:05d}.pth'.format(epoch + 1, num_epochs)
torch.save(net.state_dict(), os.path.join(model['common']['checkpoint'], checkpoint))
def train(loader, num_classes, device, net, optimizer, scheduler, criterion):
num_samples = 0
running_loss = 0
iou = MeanIoU(range(num_classes))
net.train()
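# step the LR scheduler once per epoch, at the start of the epoch (this codebase's convention; newer PyTorch expects the step after the optimizer updates)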
scheduler.step()
for images, masks, tiles in tqdm(loader, desc='Train', unit='batch', ascii=True):
images = images.to(device)
masks = masks.to(device)
assert images.size()[2:] == masks.size()[1:], 'resolutions for images and masks are in sync'
num_samples += int(images.size(0))
optimizer.zero_grad()
outputs = net(images)
assert outputs.size()[2:] == masks.size()[1:], 'resolutions for predictions and masks are in sync'
assert outputs.size()[1] == num_classes, 'classes for predictions and dataset are in sync'
loss = criterion(outputs, masks)
loss.backward()
optimizer.step()
running_loss += loss.item()
for mask, output in zip(masks, outputs):
mask = mask.data.cpu().numpy()
prediction = output.data.max(0)[1].cpu().numpy()
iou.add(mask.ravel(), prediction.ravel())
assert num_samples > 0, 'dataset contains training images and labels'
return {'loss': running_loss / num_samples, 'iou': iou.get()}
def validate(loader, num_classes, device, net, criterion):
num_samples = 0
running_loss = 0
iou = MeanIoU(range(num_classes))
net.eval()
for images, masks, tiles in tqdm(loader, desc='Validate', unit='batch', ascii=True):
images = images.to(device)
masks = masks.to(device)
assert images.size()[2:] == masks.size()[1:], 'resolutions for images and masks are in sync'
num_samples += int(images.size(0))
outputs = net(images)
assert outputs.size()[2:] == masks.size()[1:], 'resolutions for predictions and masks are in sync'
assert outputs.size()[1] == num_classes, 'classes for predictions and dataset are in sync'
loss = criterion(outputs, masks)
running_loss += loss.item()
for mask, output in zip(masks, outputs):
mask = mask.data.cpu().numpy()
prediction = output.data.max(0)[1].cpu().numpy()
iou.add(mask.ravel(), prediction.ravel())
assert num_samples > 0, 'dataset contains validation images and labels'
return {'loss': running_loss / num_samples, 'iou': iou.get()}
def get_dataset_loaders(model, dataset):
target_size = (model['common']['image_size'], ) * 2
batch_size = model['common']['batch_size']
path = dataset['common']['dataset']
mean, std = dataset['stats']['mean'], dataset['stats']['std']
image_transform = Compose([
ConvertImageMode('RGB'),
Resize(target_size, Image.BILINEAR),
CenterCrop(target_size),
ImageToTensor(),
Normalize(mean=mean, std=std)])
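# masks are resized with NEAREST resampling so class indices are never interpolated into non-existent labels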
target_transform = Compose([
ConvertImageMode('P'),
Resize(target_size, Image.NEAREST),
CenterCrop(target_size),
MaskToTensor()])
train_dataset = SlippyMapTilesConcatenation(
[os.path.join(path, 'training', 'images')],
[image_transform],
os.path.join(path, 'training', 'labels'),
target_transform)
val_dataset = SlippyMapTilesConcatenation(
[os.path.join(path, 'validation', 'images')],
[image_transform],
os.path.join(path, 'validation', 'labels'),
target_transform)
train_loader = DataLoader(train_dataset, batch_size=batch_size, drop_last=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, drop_last=True)
return train_loader, val_loader
|
Cogs/Hw.py | RehanPlayz/CorpBot.py | 368 | 11072822 | <filename>Cogs/Hw.py
import discord, time
from discord.ext import commands
from Cogs import Utils, PCPP, DisplayName, Message, PickList
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(Hw(bot, settings))
# This is the Hw module. It lets users store, edit, and share their hardware builds
class Hw(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.hwactive = {}
self.charset = "0123456789"
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
def gen_id(self):
# Just use the current time as that shouldn't ever be the same (unless a user
# manages to do this twice in < 1 second)
return str(time.time())
@commands.command(pass_context=True)
async def cancelhw(self, ctx):
"""Cancels a current hardware session."""
if str(ctx.author.id) in self.hwactive:
self._stop_hw(ctx.author)
await ctx.send("You've left your current hardware session!".format(ctx.prefix))
return
await ctx.send("You're not in a current hardware session.")
def _stop_hw(self, author):
if str(author.id) in self.hwactive:
del self.hwactive[str(author.id)]
@commands.command(pass_context=True)
async def sethwchannel(self, ctx, *, channel: discord.TextChannel = None):
"""Sets the channel for hardware (admin only)."""
if not await Utils.is_admin_reply(ctx): return
if channel == None:
self.settings.setServerStat(ctx.guild, "HardwareChannel", "")
msg = 'Hardware works *only* in pm now.'
return await ctx.send(msg)
# If we made it this far - then we can add it
self.settings.setServerStat(ctx.guild, "HardwareChannel", channel.id)
msg = 'Hardware channel set to **{}**.'.format(channel.name)
await ctx.send(Utils.suppressed(ctx,msg))
@sethwchannel.error
async def sethwchannel_error(self, error, ctx):
# do stuff
msg = 'sethwchannel Error: {}'.format(error)
await ctx.send(msg)
@commands.command(pass_context=True)
async def pcpp(self, ctx, url = None, style = None, escape = None):
"""Convert a pcpartpicker.com link into markdown parts. Available styles: normal, md, mdblock, bold, and bolditalic."""
usage = "Usage: `{}pcpp [url] [style=normal, md, mdblock, bold, bolditalic] [escape=yes/no (optional)]`".format(ctx.prefix)
if not style:
style = 'normal'
if not url:
return await ctx.send(usage)
if escape == None:
escape = 'no'
escape = escape.lower() in ["yes","true","on","enable","enabled"]
output = await PCPP.getMarkdown(url, style, escape)
if not output:
msg = 'Something went wrong! Make sure you use a valid pcpartpicker link.'
return await ctx.send(msg)
if len(output) > 2000:
msg = "That's an *impressive* list of parts - but the max length allowed for messages in Discord is 2000 characters, and you're at *{}*.".format(len(output))
msg += '\nMaybe see if you can prune up that list a bit and try again?'
return await ctx.send(msg)
await ctx.send(Utils.suppressed(ctx,output))
@commands.command(pass_context=True)
async def mainhw(self, ctx, *, build = None):
"""Sets a new main build from your build list."""
if not build:
return await ctx.send("Usage: `{}mainhw [build name or number]`".format(ctx.prefix))
buildList = self.settings.getGlobalUserStat(ctx.author, "Hardware")
if buildList == None:
buildList = []
buildList = sorted(buildList, key=lambda x:x['Name'].lower())
mainBuild = None
# Get build by name first - then by number
for b in buildList:
if b['Name'].lower() == build.lower():
# Found it
mainBuild = b
break
if mainBuild:
# Found it!
for b in buildList:
if b is mainBuild:
b['Main'] = True
else:
b['Main'] = False
self.settings.setGlobalUserStat(ctx.author, "Hardware", buildList)
msg = "{} set as main!".format(mainBuild['Name'])
return await ctx.send(Utils.suppressed(ctx,msg))
try:
build = int(build)-1
if build >= 0 and build < len(buildList):
mainBuild = buildList[build]
except:
pass
if mainBuild:
# Found it!
for b in buildList:
if b is mainBuild:
b['Main'] = True
else:
b['Main'] = False
self.settings.setGlobalUserStat(ctx.author, "Hardware", buildList)
msg = "{} set as main!".format(mainBuild['Name'])
return await ctx.send(Utils.suppressed(ctx,msg))
msg = "I couldn't find that build or number."
await ctx.send(msg)
@commands.command(pass_context=True)
async def delhw(self, ctx, *, build = None):
"""Removes a build from your build list."""
if not build:
return await ctx.send("Usage: `{}delhw [build name or number]`".format(ctx.prefix))
buildList = self.settings.getGlobalUserStat(ctx.author, "Hardware")
if buildList == None:
buildList = []
buildList = sorted(buildList, key=lambda x:x['Name'].lower())
# Get build by name first - then by number
for b in buildList:
if b['Name'].lower() == build.lower():
# Found it
buildList.remove(b)
if b['Main'] and len(buildList):
buildList[0]['Main'] = True
self.settings.setGlobalUserStat(ctx.author, "Hardware", buildList)
msg = "{} removed!".format(b['Name'])
return await ctx.send(Utils.suppressed(ctx,msg))
try:
build = int(build)-1
if build >= 0 and build < len(buildList):
b = buildList.pop(build)
if b['Main'] and len(buildList):
buildList[0]['Main'] = True
self.settings.setGlobalUserStat(ctx.author, "Hardware", buildList)
msg = "{} removed!".format(b['Name'])
return await ctx.send(Utils.suppressed(ctx,msg))
except:
pass
msg = "I couldn't find that build or number."
await ctx.send(msg)
@commands.command(pass_context=True)
async def edithw(self, ctx, *, build = None):
"""Edits a build from your build list."""
hwChannel = None
if ctx.guild:
# Not a pm
hwChannel = self.settings.getServerStat(ctx.guild, "HardwareChannel")
if not (not hwChannel or hwChannel == ""):
# We need the channel id
if not str(hwChannel) == str(ctx.channel.id):
msg = 'This isn\'t the channel for that...'
for chan in ctx.guild.channels:
if str(chan.id) == str(hwChannel):
msg = 'This isn\'t the channel for that. Take the hardware talk to the **{}** channel.'.format(chan.name)
break
return await ctx.send(Utils.suppressed(ctx,msg))
else:
hwChannel = self.bot.get_channel(hwChannel)
if not hwChannel:
# Nothing set - pm
hwChannel = ctx.author
# Make sure we're not already in a parts transaction
if str(ctx.author.id) in self.hwactive:
return await ctx.send("You're already in a hardware session! You can leave with `{}cancelhw`".format(ctx.prefix))
buildList = self.settings.getGlobalUserStat(ctx.author, "Hardware")
if buildList == None:
buildList = []
if not len(buildList):
# No parts!
msg = 'You have no builds on file! You can add some with the `{}newhw` command.'.format(ctx.prefix)
return await ctx.send(msg)
buildList = sorted(buildList, key=lambda x:x['Name'].lower())
mainBuild = None
# Get build by name first - then by number
if build is not None:
for b in buildList:
if b['Name'].lower() == build.lower():
# Found it
mainBuild = b
break
if not mainBuild:
try:
build = int(build)-1
if build >= 0 and build < len(buildList):
mainBuild = buildList[build]
except:
pass
else:
# No build passed - get the main if it exists
for b in buildList:
if b['Main']:
mainBuild = b
break
if not mainBuild:
msg = "I couldn't find that build or number."
return await ctx.send(msg)
# Set our HWActive flag
hw_id = self.gen_id()
self.hwactive[str(ctx.author.id)] = hw_id
# Here, we have a build
bname = Utils.suppressed(ctx,mainBuild['Name'])
bparts = Utils.suppressed(ctx,mainBuild['Hardware'])
msg = '"{}"\'s current parts:'.format(bname)
try:
await hwChannel.send(msg)
except:
# Can't send to the destination
self._stop_hw(ctx.author)
if hwChannel == ctx.author:
# Must not accept pms
await ctx.send("It looks like you don't accept pms. Please enable them and try again.")
return
if hwChannel == ctx.author and ctx.channel != ctx.author.dm_channel:
await ctx.message.add_reaction("📬")
await hwChannel.send(bparts)
msg = 'Alright, *{}*, what parts does "{}" have now? (Please include *all* parts for this build - you can add new lines with *shift + enter*)\n'.format(DisplayName.name(ctx.author), bname)
msg += 'You can also pass pcpartpicker links to have them formatted automagically - I can also format them using different styles.\n'
msg += 'For example: '
msg += '```https://pcpartpicker.com/list/123456 mdblock``` would format with the markdown block style.\n'
msg += 'Markdown styles available are *normal, md, mdblock, bold, bolditalic*'
while True:
parts = await self.prompt(hw_id, ctx, msg, hwChannel, DisplayName.name(ctx.author))
if not parts:
self._stop_hw(ctx.author)
return
if 'pcpartpicker.com' in parts.content.lower():
# Possibly a pc partpicker link?
msg = 'It looks like you sent a pc part picker link - did you want me to try and format that? (y/n/stop)'
test = await self.confirm(hw_id, ctx, parts, hwChannel, msg)
if test == None:
self._stop_hw(ctx.author)
return
elif test == True:
partList = parts.content.split()
if len(partList) == 1:
partList.append(None)
output = None
try:
output = await PCPP.getMarkdown(partList[0], partList[1], False)
except:
pass
if not output:
msg = 'Something went wrong! Make sure you use a valid pcpartpicker link.'
await hwChannel.send(msg)
self._stop_hw(ctx.author)
return
if len(output) > 2000:
msg = "That's an *impressive* list of parts - but the max length allowed for messages in Discord is 2000 characters, and you're at *{}*.".format(len(output))
msg += '\nMaybe see if you can prune up that list a bit and try again?'
await hwChannel.send(msg)
self._stop_hw(ctx.author)
return
# Make sure
conf = await self.confirm(hw_id, ctx, output, hwChannel, None, ctx.author)
if conf == None:
# Timed out
self._stop_hw(ctx.author)
return
elif conf == False:
# Didn't get our answer
msg = 'Alright, *{}*, what parts does "{}" have now? (Please include *all* parts for this build - you can add new lines with *shift + enter*)'.format(DisplayName.name(ctx.author), bname)
continue
m = '{} set to:\n{}'.format(bname, output)
await hwChannel.send(m)
mainBuild['Hardware'] = output
self.settings.setGlobalUserStat(ctx.author, "Hardware", buildList)
break
mainBuild['Hardware'] = parts.content
self.settings.setGlobalUserStat(ctx.author, "Hardware", buildList)
break
msg = '*{}*, {} was edited successfully!'.format(DisplayName.name(ctx.author), bname)
self._stop_hw(ctx.author)
await hwChannel.send(msg)
@commands.command(pass_context=True)
async def renhw(self, ctx, *, build = None):
"""Renames a build from your build list."""
hwChannel = None
if ctx.guild:
# Not a pm
hwChannel = self.settings.getServerStat(ctx.guild, "HardwareChannel")
if not (not hwChannel or hwChannel == ""):
# We need the channel id
if not str(hwChannel) == str(ctx.channel.id):
msg = 'This isn\'t the channel for that...'
for chan in ctx.guild.channels:
if str(chan.id) == str(hwChannel):
msg = 'This isn\'t the channel for that. Take the hardware talk to the **{}** channel.'.format(chan.name)
await ctx.send(msg)
return
else:
hwChannel = self.bot.get_channel(hwChannel)
if not hwChannel:
# Nothing set - pm
hwChannel = ctx.author
# Make sure we're not already in a parts transaction
if str(ctx.author.id) in self.hwactive:
await ctx.send("You're already in a hardware session! You can leave with `{}cancelhw`".format(ctx.prefix))
return
buildList = self.settings.getGlobalUserStat(ctx.author, "Hardware")
if buildList == None:
buildList = []
if not len(buildList):
# No parts!
msg = 'You have no builds on file! You can add some with the `{}newhw` command.'.format(ctx.prefix)
await ctx.send(msg)
return
buildList = sorted(buildList, key=lambda x:x['Name'].lower())
mainBuild = None
# Get build by name first - then by number
if build is not None:
for b in buildList:
if b['Name'].lower() == build.lower():
# Found it
mainBuild = b
break
if not mainBuild:
try:
build = int(build)-1
if build >= 0 and build < len(buildList):
mainBuild = buildList[build]
except:
pass
else:
# No build passed - get the main if it exists
for b in buildList:
if b['Main']:
mainBuild = b
break
if not mainBuild:
msg = "I couldn't find that build or number."
await ctx.send(msg)
return
# Set our HWActive flag
hw_id = self.gen_id()
self.hwactive[str(ctx.author.id)] = hw_id
# Post the dm reaction
if hwChannel == ctx.author and ctx.channel != ctx.author.dm_channel:
await ctx.message.add_reaction("📬")
# Here, we have a build
bname = Utils.suppressed(ctx,mainBuild['Name'])
msg = 'Alright, *{}*, what do you want to rename "{}" to?'.format(DisplayName.name(ctx.author), bname)
while True:
try:
buildName = await self.prompt(hw_id, ctx, msg, hwChannel, DisplayName.name(ctx.author))
except:
# Can't send to the destination
self._stop_hw(ctx.author)
if hwChannel == ctx.author:
# Must not accept pms
await ctx.send("It looks like you don't accept pms. Please enable them and try again.")
return
if not buildName:
self._stop_hw(ctx.author)
return
buildExists = False
for build in buildList:
if build['Name'].lower() == buildName.content.lower():
mesg = 'It looks like you already have a build by that name, *{}*. Try again.'.format(DisplayName.name(ctx.author))
await hwChannel.send(mesg)
buildExists = True
break
if not buildExists:
mainBuild['Name'] = buildName.content
# Flush settings to all servers
self.settings.setGlobalUserStat(ctx.author, "Hardware", buildList)
break
bname2 = Utils.suppressed(ctx,buildName.content)
msg = '*{}*, {} was renamed to {} successfully!'.format(DisplayName.name(ctx.author), bname, bname2)
self._stop_hw(ctx.author)
await hwChannel.send(msg)
@commands.command(pass_context=True)
async def gethw(self, ctx, *, user = None, search = None):
"""Searches the user's hardware for a specific search term."""
if not user:
usage = "Usage: `{}gethw [user] [search term]`".format(ctx.prefix)
return await ctx.send(usage)
# Let's check for username and search term
parts = user.split()
memFromName = None
entries = []
for j in range(len(parts)):
# Reverse search direction
i = len(parts)-1-j
memFromName = None
# Name = 0 up to i joined by space
nameStr = ' '.join(parts[0:i])
buildStr = ' '.join(parts[i:])
memFromName = DisplayName.memberForName(nameStr, ctx.guild)
if memFromName:
# Got a member - let's check the remainder length, and search!
if len(buildStr) < 3:
usage = "Search term must be at least 3 characters."
return await ctx.send(usage)
buildList = self.settings.getGlobalUserStat(memFromName, "Hardware", [])
buildList = sorted(buildList, key=lambda x:x['Name'].lower())
for build in buildList:
bParts = build['Hardware']
for line in bParts.splitlines():
if buildStr.lower() in line.lower():
entries.append({"name":"{}. {}".format(len(entries)+1,build["Name"]),"value":line})
if len(entries):
# We're in business
return await PickList.PagePicker(title="Search results for \"{}\" ({:,} total)".format(buildStr, len(entries)),list=entries,ctx=ctx).pick()
# If we're here - then we didn't find a member - set it to the author, and run another quick search
buildStr = user
if len(buildStr) < 3:
usage = "Search term must be at least 3 characters."
return await ctx.send(usage)
buildList = self.settings.getGlobalUserStat(ctx.author, "Hardware", [])
buildList = sorted(buildList, key=lambda x:x['Name'].lower())
for build in buildList:
bParts = build['Hardware']
for line in bParts.splitlines():
if buildStr.lower() in line.lower():
entries.append({"name":"{}. {}".format(len(entries)+1,build["Name"]),"value":line})
if len(entries):
# We're in business
return await PickList.PagePicker(title="Search results for \"{}\" ({:,} total)".format(buildStr, len(entries)),list=entries,ctx=ctx).pick()
return await Message.EmbedText(title="Nothing found for that search.",color=ctx.author).send(ctx)
@commands.command(pass_context=True)
async def hw(self, ctx, *, user : str = None, build = None):
"""Lists the hardware for either the user's default build - or the passed build."""
if not user:
user = "{}".format(ctx.author.mention)
# Let's check for username and build name
parts = user.split()
memFromName = None
buildParts = None
for j in range(len(parts)):
# Reverse search direction
i = len(parts)-1-j
# Name = 0 up to i joined by space
nameStr = ' '.join(parts[0:i])
buildStr = ' '.join(parts[i:])
memFromName = DisplayName.memberForName(nameStr, ctx.guild)
if memFromName:
buildList = self.settings.getGlobalUserStat(memFromName, "Hardware")
if buildList == None:
buildList = []
for build in buildList:
if build['Name'].lower() == buildStr.lower():
# Ha! Found it!
buildParts = build
break
if buildParts:
# We're in business
break
else:
memFromName = None
if not memFromName:
# Try again with numbers
for j in range(len(parts)):
# Reverse search direction
i = len(parts)-1-j
# Name = 0 up to i joined by space
nameStr = ' '.join(parts[0:i])
buildStr = ' '.join(parts[i:])
memFromName = DisplayName.memberForName(nameStr, ctx.guild)
if memFromName:
buildList = self.settings.getGlobalUserStat(memFromName, "Hardware")
if buildList == None:
buildList = []
buildList = sorted(buildList, key=lambda x:x['Name'].lower())
try:
buildStr = int(buildStr)-1
if buildStr >= 0 and buildStr < len(buildList):
buildParts = buildList[buildStr]
except Exception:
memFromName = None
buildParts = None
if buildParts:
# We're in business
break
else:
memFromName = None
if not memFromName:
# One last shot - check if it's a build for us
buildList = self.settings.getGlobalUserStat(ctx.author, "Hardware")
if buildList == None:
buildList = []
buildList = sorted(buildList, key=lambda x:x['Name'].lower())
for build in buildList:
if build['Name'].lower() == user.lower():
memFromName = ctx.author
buildParts = build
break
if not memFromName:
# Okay - *this* time is the last - check for number
try:
user_as_build = int(user)-1
if user_as_build >= 0 and user_as_build < len(buildList):
buildParts = buildList[user_as_build]
memFromName = ctx.author
except Exception:
pass
if not memFromName:
# Last check for a user passed as the only param
memFromName = DisplayName.memberForName(user, ctx.guild)
if not memFromName:
# We couldn't find them :(
msg = "I couldn't find that user/build combo..."
return await ctx.send(msg)
if buildParts == None:
# Check if that user has no builds
buildList = self.settings.getGlobalUserStat(memFromName, "Hardware")
if buildList == None:
buildList = []
if not len(buildList):
# No parts!
msg = '*{}* has no builds on file! They can add some with the `{}newhw` command.'.format(DisplayName.name(memFromName), ctx.prefix)
return await ctx.send(msg)
# Must be the default build
for build in buildList:
if build['Main']:
buildParts = build
break
if not buildParts:
# Well... uh... no defaults
msg = "I couldn't find that user/build combo..."
return await ctx.send(msg)
# At this point - we *should* have a user and a build
msg_head = "__**{}'s {}:**__\n\n".format(DisplayName.name(memFromName), buildParts['Name'])
msg = msg_head + buildParts['Hardware']
if len(msg) > 2000: # is there somewhere the discord char count is defined, to avoid hardcoding?
msg = buildParts['Hardware'] # if the header pushes us over the limit, omit it and send just the string
await ctx.send(Utils.suppressed(ctx,msg))
@commands.command(pass_context=True)
async def rawhw(self, ctx, *, user : str = None, build = None):
"""Lists the raw markdown for either the user's default build - or the passed build."""
if not user:
user = "{}#{}".format(ctx.author.name, ctx.author.discriminator)
# Let's check for username and build name
parts = user.split()
memFromName = None
buildParts = None
for j in range(len(parts)):
# Reverse search direction
i = len(parts)-1-j
# Name = 0 up to i joined by space
nameStr = ' '.join(parts[0:i])
buildStr = ' '.join(parts[i:])
memFromName = DisplayName.memberForName(nameStr, ctx.guild)
if memFromName:
buildList = self.settings.getGlobalUserStat(memFromName, "Hardware")
if buildList == None:
buildList = []
for build in buildList:
if build['Name'].lower() == buildStr.lower():
# Ha! Found it!
buildParts = build
break
if buildParts:
# We're in business
break
else:
memFromName = None
if not memFromName:
# Try again with numbers
for j in range(len(parts)):
# Reverse search direction
i = len(parts)-1-j
# Name = 0 up to i joined by space
nameStr = ' '.join(parts[0:i])
buildStr = ' '.join(parts[i:])
memFromName = DisplayName.memberForName(nameStr, ctx.guild)
if memFromName:
buildList = self.settings.getGlobalUserStat(memFromName, "Hardware")
if buildList == None:
buildList = []
buildList = sorted(buildList, key=lambda x:x['Name'].lower())
try:
buildStr = int(buildStr)-1
if buildStr >= 0 and buildStr < len(buildList):
buildParts = buildList[buildStr]
except Exception:
memFromName = None
buildParts = None
if buildParts:
# We're in business
break
else:
memFromName = None
if not memFromName:
# One last shot - check if it's a build for us
buildList = self.settings.getGlobalUserStat(ctx.author, "Hardware")
if buildList == None:
buildList = []
buildList = sorted(buildList, key=lambda x:x['Name'].lower())
for build in buildList:
if build['Name'].lower() == user.lower():
memFromName = ctx.author
buildParts = build
break
if not memFromName:
# Okay - *this* time is the last - check for number
try:
user_as_build = int(user)-1
if user_as_build >= 0 and user_as_build < len(buildList):
buildParts = buildList[user_as_build]
memFromName = ctx.author
except Exception:
pass
if not memFromName:
# Last check for a user passed as the only param
memFromName = DisplayName.memberForName(user, ctx.guild)
if not memFromName:
# We couldn't find them :(
msg = "I couldn't find that user/build combo..."
return await ctx.send(msg)
if buildParts == None:
# Check if that user has no builds
buildList = self.settings.getGlobalUserStat(memFromName, "Hardware")
if buildList == None:
buildList = []
if not len(buildList):
# No parts!
msg = '*{}* has no builds on file! They can add some with the `{}newhw` command.'.format(DisplayName.name(memFromName), ctx.prefix)
return await ctx.send(msg)
# Must be the default build
for build in buildList:
if build['Main']:
buildParts = build
break
if not buildParts:
# Well... uh... no defaults
msg = "I couldn't find that user/build combo..."
return await ctx.send(msg)
# At this point - we *should* have a user and a build
p = discord.utils.escape_markdown(buildParts['Hardware'])
msg = "__**{}'s {} (Raw Markdown):**__\n\n{}".format(DisplayName.name(memFromName), buildParts['Name'], p)
await ctx.send(Utils.suppressed(ctx,msg))
@commands.command(pass_context=True)
async def listhw(self, ctx, *, user = None):
"""Lists the builds for the specified user - or yourself if no user passed."""
usage = 'Usage: `{}listhw [user]`'.format(ctx.prefix)
if not user:
user = "{}#{}".format(ctx.author.name, ctx.author.discriminator)
member = DisplayName.memberForName(user, ctx.guild)
if not member:
return await ctx.send(usage)
buildList = self.settings.getGlobalUserStat(member, "Hardware")
if buildList == None:
buildList = []
buildList = sorted(buildList, key=lambda x:x['Name'].lower())
if not len(buildList):
msg = '*{}* has no builds on file! They can add some with the `{}newhw` command.'.format(DisplayName.name(member), ctx.prefix)
return await ctx.send(msg)
items = [{"name":"{}. {}".format(i,x["Name"]+(" (Main Build)" if x["Main"] else "")),"value":Utils.truncate_string(x["Hardware"])} for i,x in enumerate(buildList,start=1)]
return await PickList.PagePicker(title="{}'s Builds ({:,} total)".format(DisplayName.name(member),len(buildList)),list=items,ctx=ctx).pick()
@commands.command()
async def lhw(self, ctx, *, user = None):
"""Lists only the titles of the builds for the specified user - or yourself if no user passed."""
usage = 'Usage: `{}lhw [user]`'.format(ctx.prefix)
if not user: user = ctx.author.id
member = DisplayName.memberForName(user, ctx.guild)
if not member: return await ctx.send(usage)
buildList = self.settings.getGlobalUserStat(member, "Hardware", [])
buildList = sorted(buildList, key=lambda x:x['Name'].lower())
if not len(buildList):
msg = '*{}* has no builds on file! They can add some with the `{}newhw` command.'.format(DisplayName.name(member), ctx.prefix)
return await ctx.send(msg)
desc = "\n".join([Utils.truncate_string("{}. {}".format(i,x["Name"]+(" (Main Build)" if x["Main"] else ""))) for i,x in enumerate(buildList,start=1)])
return await PickList.PagePicker(
title="{}'s Builds ({:,} total)".format(DisplayName.name(member),len(buildList)),
description=desc,
ctx=ctx
).pick()
@commands.command(pass_context=True)
async def newhw(self, ctx):
"""Initiate a new-hardware conversation with the bot. The hardware added will also be set as the Main Build."""
buildList = self.settings.getGlobalUserStat(ctx.author, "Hardware")
if buildList == None:
buildList = []
hwChannel = None
if ctx.guild:
# Not a pm
hwChannel = self.settings.getServerStat(ctx.guild, "HardwareChannel")
if not (not hwChannel or hwChannel == ""):
# We need the channel id
if not str(hwChannel) == str(ctx.channel.id):
msg = 'This isn\'t the channel for that...'
for chan in ctx.guild.channels:
if str(chan.id) == str(hwChannel):
msg = 'This isn\'t the channel for that. Take the hardware talk to the **{}** channel.'.format(chan.name)
return await ctx.send(msg)
else:
hwChannel = self.bot.get_channel(hwChannel)
if not hwChannel:
# Nothing set - pm
hwChannel = ctx.author
# Make sure we're not already in a parts transaction
if str(ctx.author.id) in self.hwactive:
return await ctx.send("You're already in a hardware session! You can leave with `{}cancelhw`".format(ctx.prefix))
# Set our HWActive flag
hw_id = self.gen_id()
self.hwactive[str(ctx.author.id)] = hw_id
msg = 'Alright, *{}*, let\'s add a new build.\n\n'.format(DisplayName.name(ctx.author))
if len(buildList) == 1:
msg += 'You currently have *1 build* on file.\n\n'
else:
msg += 'You currently have *{} builds* on file.\n\nLet\'s get started!'.format(len(buildList))
try:
await hwChannel.send(msg)
except:
# Can't send to the destination
self._stop_hw(ctx.author)
if hwChannel == ctx.author:
# Must not accept pms
await ctx.send("It looks like you don't accept pms. Please enable them and try again.")
return
if hwChannel == ctx.author and ctx.channel != ctx.author.dm_channel:
await ctx.message.add_reaction("📬")
msg = '*{}*, tell me what you\'d like to call this build (type stop to cancel):'.format(DisplayName.name(ctx.author))
# Get the build name
newBuild = { 'Main': True }
while True:
buildName = await self.prompt(hw_id, ctx, msg, hwChannel, DisplayName.name(ctx.author))
if not buildName:
self._stop_hw(ctx.author)
return
buildExists = False
for build in buildList:
if build['Name'].lower() == buildName.content.lower():
mesg = 'It looks like you already have a build by that name, *{}*. Try again.'.format(DisplayName.name(ctx.author))
await hwChannel.send(mesg)
buildExists = True
break
if not buildExists:
newBuild['Name'] = buildName.content
break
bname = Utils.suppressed(ctx,buildName.content)
msg = 'Alright, *{}*, what parts does "{}" have? (Please include *all* parts for this build - you can add new lines with *shift + enter*)\n'.format(DisplayName.name(ctx.author), bname)
msg += 'You can also pass pcpartpicker links to have them formatted automagically - I can also format them using different styles.\n'
msg += 'For example: '
msg += '```https://pcpartpicker.com/list/123456 mdblock``` would format with the markdown block style.\n'
msg += 'Markdown styles available are *normal, md, mdblock, bold, bolditalic*'
while True:
parts = await self.prompt(hw_id, ctx, msg, hwChannel, DisplayName.name(ctx.author))
if not parts:
self._stop_hw(ctx.author)
return
if 'pcpartpicker.com' in parts.content.lower():
# Possibly a pc partpicker link?
msg = 'It looks like you sent a pc part picker link - did you want me to try and format that? (y/n/stop)'
test = await self.confirm(hw_id, ctx, parts, hwChannel, msg)
if test == None:
self._stop_hw(ctx.author)
return
elif test == True:
partList = parts.content.split()
if len(partList) == 1:
partList.append(None)
output = None
try:
output = await PCPP.getMarkdown(partList[0], partList[1], False)
except:
pass
#output = PCPP.getMarkdown(parts.content)
if not output:
msg = 'Something went wrong! Make sure you use a valid pcpartpicker link.'
await hwChannel.send(msg)
self._stop_hw(ctx.author)
return
if len(output) > 2000:
msg = "That's an *impressive* list of parts - but the max length allowed for messages in Discord is 2000 characters, and you're at *{}*.".format(len(output))
msg += '\nMaybe see if you can prune up that list a bit and try again?'
await hwChannel.send(msg)
self._stop_hw(ctx.author)
return
# Make sure
conf = await self.confirm(hw_id, ctx, output, hwChannel, None, ctx.author)
if conf == None:
# Timed out
self._stop_hw(ctx.author)
return
elif conf == False:
# Didn't get our answer
msg = 'Alright, *{}*, what parts does "{}" have? (Please include *all* parts for this build - you can add new lines with *shift + enter*)'.format(DisplayName.name(ctx.author), bname)
continue
m = '{} set to:\n{}'.format(bname, output)
await hwChannel.send(m)
newBuild['Hardware'] = output
break
newBuild['Hardware'] = parts.content
break
# Check if we already have a main build and clear it
for build in buildList:
if build['Main']:
build['Main'] = False
buildList.append(newBuild)
self.settings.setGlobalUserStat(ctx.author, "Hardware", buildList)
msg = '*{}*, {} was created successfully! It has been set as your main build. To select a different main, you can use `{}mainhw`'.format(DisplayName.name(ctx.author), bname, ctx.prefix)
self._stop_hw(ctx.author)
await hwChannel.send(msg)
# New HW helper methods
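# Decides whether a message belongs to the caller's hardware session: anything passes when no
# session is active; otherwise it must come from the expected destination (or the configured
# hardware channel / a DM when no destination was given).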
def channelCheck(self, msg, dest = None):
if self.stillHardwaring(msg.author) == False:
# any message is a valid check if we're not editing
return True
if dest:
# We have a target channel
if type(dest) is discord.User or type(dest) is discord.Member:
dest = dest.dm_channel.id
elif type(dest) is discord.TextChannel:
dest = dest.id
elif type(dest) is discord.Guild:
dest = dest.get_channel(dest.id).id
if not dest == msg.channel.id:
return False
else:
# Just make sure it's in pm or the hw channel
if type(msg.channel) is discord.TextChannel:
# Let's check our server stuff
hwChannel = self.settings.getServerStat(msg.guild, "HardwareChannel")
if not (not hwChannel or hwChannel == ""):
# We need the channel id
if not str(hwChannel) == str(msg.channel.id):
return False
else:
# Nothing set - pm
if not type(msg.channel) == discord.DMChannel:
return False
return True
# Makes sure we're still editing - if this gets set to False,
# that means the user stopped editing/newhw
def stillHardwaring(self, author):
return str(author.id) in self.hwactive
def confirmCheck(self, msg, dest = None):
if not self.channelCheck(msg, dest):
return False
msgStr = msg.content.lower()
if msgStr.startswith('y'):
return True
if msgStr.startswith('n'):
return True
elif msgStr.startswith('stop'):
return True
return False
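# Shows the captured value (or a custom prompt) and waits for a y/n/stop reply.
# Returns True when confirmed, False to ask again, or None on stop/timeout/cancelled session.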
async def confirm(self, hw_id, ctx, message, dest = None, m = None, author = None):
# Get author name
authorName = None
if author:
if type(author) is str:
authorName = author
else:
try:
authorName = DisplayName.name(author)
except Exception:
pass
else:
if message:
try:
author = message.author
except Exception:
pass
try:
authorName = DisplayName.name(message.author)
except Exception:
pass
if not dest:
dest = message.channel
if not m:
if authorName:
msg = '*{}*, I got:'.format(Utils.suppressed(ctx,authorName))
else:
msg = "I got:"
if type(message) is str:
msg2 = Utils.suppressed(ctx,message)
else:
msg2 = '{}'.format(Utils.suppressed(ctx,message.content))
msg3 = 'Is that correct? (y/n/stop)'
await dest.send(msg)
await dest.send(msg2)
await dest.send(msg3)
else:
msg = m
await dest.send(Utils.suppressed(ctx,msg))
while True:
def littleCheck(m):
return ctx.author.id == m.author.id and self.confirmCheck(m, dest) and len(m.content)
try:
talk = await self.bot.wait_for('message', check=littleCheck, timeout=300)
except Exception:
talk = None
# See if we're still in the right context
if not hw_id == self.hwactive.get(str(ctx.author.id),None):
return None
# Hardware ended
if not self.stillHardwaring(ctx.author):
return None
if not talk:
if authorName:
msg = "*{}*, I'm out of time...".format(authorName)
else:
msg = "I'm out of time..."
await dest.send(msg)
return None
else:
# We got something
if talk.content.lower().startswith('y'):
return True
elif talk.content.lower().startswith('stop'):
if authorName:
msg = "No problem, *{}!* See you later!".format(authorName)
else:
msg = "No problem! See you later!"
await dest.send(msg)
return None
else:
return False
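# Sends a question to dest and waits up to 5 minutes for a reply from the same user/session.
# Returns the confirmed reply Message, or None if the user stops, times out, or cancels.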
async def prompt(self, hw_id, ctx, message, dest = None, author = None):
# Get author name
authorName = None
if author:
if type(author) is str:
authorName = author
else:
try:
authorName = DisplayName.name(author)
except Exception:
pass
else:
if message:
try:
author = message.author
except Exception:
pass
try:
authorName = DisplayName.name(message.author)
except Exception:
pass
if not dest:
dest = ctx.channel
await dest.send(Utils.suppressed(ctx,message))
while True:
def littleCheck(m):
return ctx.author.id == m.author.id and self.channelCheck(m, dest) and len(m.content)
try:
talk = await self.bot.wait_for('message', check=littleCheck, timeout=300)
except Exception:
talk = None
# See if we're still in the right context
if not hw_id == self.hwactive.get(str(ctx.author.id),None):
return None
# Hardware ended
if not self.stillHardwaring(ctx.author):
return None
if not talk:
msg = "*{}*, I'm out of time...".format(authorName)
await dest.send(msg)
return None
else:
# Check for a stop
if talk.content.lower() == 'stop':
msg = "No problem, *{}!* See you later!".format(authorName, ctx.prefix)
await dest.send(msg)
return None
# Make sure
conf = await self.confirm(hw_id, ctx, talk, dest, "", author)
if conf == True:
# We're sure - return the value
return talk
elif conf == False:
# Not sure - ask again
return await self.prompt(hw_id, ctx, message, dest, author)
else:
# Timed out
return None
|
pypy/module/_multibytecodec/app_multibytecodec.py | nanjekyejoannah/pypy | 381 | 11072858 | # NOT_RPYTHON
#
# The interface here may be a little bit on the lightweight side.
from _multibytecodec import MultibyteIncrementalDecoder
from _multibytecodec import MultibyteIncrementalEncoder
class MultibyteStreamReader(MultibyteIncrementalDecoder):
def __new__(cls, stream, errors=None):
self = MultibyteIncrementalDecoder.__new__(cls, errors)
self.stream = stream
return self
def __read(self, read, size):
if size is None or size < 0:
return MultibyteIncrementalDecoder.decode(self, read(), True)
while True:
data = read(size)
final = not data
output = MultibyteIncrementalDecoder.decode(self, data, final)
if output or final:
return output
size = 1 # read 1 more byte and retry
def read(self, size=None):
return self.__read(self.stream.read, size)
def readline(self, size=None):
return self.__read(self.stream.readline, size)
def readlines(self, sizehint=None):
return self.__read(self.stream.read, sizehint).splitlines(True)
class MultibyteStreamWriter(MultibyteIncrementalEncoder):
def __new__(cls, stream, errors=None):
self = MultibyteIncrementalEncoder.__new__(cls, errors)
self.stream = stream
return self
def write(self, data):
self.stream.write(MultibyteIncrementalEncoder.encode(
self, data))
def reset(self):
data = MultibyteIncrementalEncoder.encode(
self, '', final=True)
if len(data) > 0:
self.stream.write(data)
MultibyteIncrementalEncoder.reset(self)
def writelines(self, lines):
for data in lines:
self.write(data)
|
alipay/aop/api/response/KoubeiCateringDishCommruleSyncResponse.py | antopen/alipay-sdk-python-all | 213 | 11072870 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.KbdishCommRuleInfo import KbdishCommRuleInfo
class KoubeiCateringDishCommruleSyncResponse(AlipayResponse):
def __init__(self):
super(KoubeiCateringDishCommruleSyncResponse, self).__init__()
self._kbdish_comm_rule_info_list = None
@property
def kbdish_comm_rule_info_list(self):
return self._kbdish_comm_rule_info_list
@kbdish_comm_rule_info_list.setter
def kbdish_comm_rule_info_list(self, value):
if isinstance(value, list):
self._kbdish_comm_rule_info_list = list()
for i in value:
if isinstance(i, KbdishCommRuleInfo):
self._kbdish_comm_rule_info_list.append(i)
else:
self._kbdish_comm_rule_info_list.append(KbdishCommRuleInfo.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(KoubeiCateringDishCommruleSyncResponse, self).parse_response_content(response_content)
if 'kbdish_comm_rule_info_list' in response:
self.kbdish_comm_rule_info_list = response['kbdish_comm_rule_info_list']
|
lib/jinja2/testsuite/utils.py | Slashbunny/maraschino | 790 | 11072911 | <reponame>Slashbunny/maraschino
# -*- coding: utf-8 -*-
"""
jinja2.testsuite.utils
~~~~~~~~~~~~~~~~~~~~~~
Tests utilities jinja uses.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import gc
import unittest
import pickle
from jinja2.testsuite import JinjaTestCase
from jinja2.utils import LRUCache, escape, object_type_repr
class LRUCacheTestCase(JinjaTestCase):
def test_simple(self):
d = LRUCache(3)
d["a"] = 1
d["b"] = 2
d["c"] = 3
d["a"]
d["d"] = 4
assert len(d) == 3
assert 'a' in d and 'c' in d and 'd' in d and 'b' not in d
def test_pickleable(self):
cache = LRUCache(2)
cache["foo"] = 42
cache["bar"] = 23
cache["foo"]
for protocol in range(3):
copy = pickle.loads(pickle.dumps(cache, protocol))
assert copy.capacity == cache.capacity
assert copy._mapping == cache._mapping
assert copy._queue == cache._queue
class HelpersTestCase(JinjaTestCase):
def test_object_type_repr(self):
class X(object):
pass
self.assert_equal(object_type_repr(42), 'int object')
self.assert_equal(object_type_repr([]), 'list object')
self.assert_equal(object_type_repr(X()),
'jinja2.testsuite.utils.X object')
self.assert_equal(object_type_repr(None), 'None')
self.assert_equal(object_type_repr(Ellipsis), 'Ellipsis')
class MarkupLeakTestCase(JinjaTestCase):
def test_markup_leaks(self):
counts = set()
for count in xrange(20):
for item in xrange(1000):
escape("foo")
escape("<foo>")
escape(u"foo")
escape(u"<foo>")
counts.add(len(gc.get_objects()))
assert len(counts) == 1, 'ouch, c extension seems to leak objects'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(LRUCacheTestCase))
suite.addTest(unittest.makeSuite(HelpersTestCase))
# this test only tests the c extension
if not hasattr(escape, 'func_code'):
suite.addTest(unittest.makeSuite(MarkupLeakTestCase))
return suite
|
venv/venv.py | MaxTurchin/pycopy-lib | 126 | 11072914 | <gh_stars>100-1000
import sys
import os
if len(sys.argv) != 2:
print("usage: venv ENV_DIR")
sys.exit(1)
env = sys.argv[1]
os.makedirs(env + "/bin")
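# write a minimal activate script that points PYCOPYPATH at this environment's lib directory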
with open(env + "/bin/activate", "w") as f:
f.write("""\
export PYCOPYPATH=%s
""" % os.path.abspath(env + "/lib"))
|
tests/wipy/pin_irq.py | sebastien-riou/micropython | 13,648 | 11072979 | <reponame>sebastien-riou/micropython<filename>tests/wipy/pin_irq.py
"""
Pin IRQ test for the CC3200 based boards.
"""
from machine import Pin
import machine
import os
import time
mch = os.uname().machine
if "LaunchPad" in mch:
pins = ["GP16", "GP13"]
elif "WiPy" in mch:
pins = ["GP16", "GP13"]
else:
raise Exception("Board not supported!")
pin0 = Pin(pins[0], mode=Pin.OUT, value=1)
pin1 = Pin(pins[1], mode=Pin.IN, pull=Pin.PULL_UP)
def pin_handler(pin_o):
global pin_irq_count_trigger
global pin_irq_count_total
global _trigger
if _trigger & pin1_irq.flags():
pin_irq_count_trigger += 1
pin_irq_count_total += 1
pin_irq_count_trigger = 0
pin_irq_count_total = 0
_trigger = Pin.IRQ_FALLING
pin1_irq = pin1.irq(trigger=_trigger, handler=pin_handler)
for i in range(0, 10):
pin0.toggle()
time.sleep_ms(5)
print(pin_irq_count_trigger == 5)
print(pin_irq_count_total == 5)
pin_irq_count_trigger = 0
pin_irq_count_total = 0
_trigger = Pin.IRQ_RISING
pin1_irq = pin1.irq(trigger=_trigger, handler=pin_handler)
for i in range(0, 200):
pin0.toggle()
time.sleep_ms(5)
print(pin_irq_count_trigger == 100)
print(pin_irq_count_total == 100)
pin1_irq.disable()
pin0(1)
pin_irq_count_trigger = 0
pin_irq_count_total = 0
_trigger = Pin.IRQ_FALLING
pin1_irq.init(trigger=_trigger, handler=pin_handler)
pin0(0)
time.sleep_us(50)
print(pin_irq_count_trigger == 1)
print(pin_irq_count_total == 1)
pin0(1)
time.sleep_us(50)
print(pin_irq_count_trigger == 1)
print(pin_irq_count_total == 1)
# check the call method
pin1_irq()
print(pin_irq_count_trigger == 1) # no flags since the irq was manually triggered
print(pin_irq_count_total == 2)
pin1_irq.disable()
pin_irq_count_trigger = 0
pin_irq_count_total = 0
for i in range(0, 10):
pin0.toggle()
time.sleep_ms(5)
print(pin_irq_count_trigger == 0)
print(pin_irq_count_total == 0)
# test waking up from suspended mode on low level
pin0(0)
t0 = time.ticks_ms()
pin1_irq.init(trigger=Pin.IRQ_LOW_LEVEL, wake=machine.SLEEP)
machine.sleep()
print(time.ticks_ms() - t0 < 10)
print("Awake")
# test waking up from suspended mode on high level
pin0(1)
t0 = time.ticks_ms()
pin1_irq.init(trigger=Pin.IRQ_HIGH_LEVEL, wake=machine.SLEEP)
machine.sleep()
print(time.ticks_ms() - t0 < 10)
print("Awake")
# check for memory leaks
for i in range(0, 1000):
pin0_irq = pin0.irq(trigger=_trigger, handler=pin_handler)
pin1_irq = pin1.irq(trigger=_trigger, handler=pin_handler)
# next ones must raise
try:
pin1_irq.init(trigger=123456, handler=pin_handler)
except:
print("Exception")
try:
pin1_irq.init(trigger=Pin.IRQ_LOW_LEVEL, wake=1789456)
except:
print("Exception")
try:
pin0_irq = pin0.irq(
trigger=Pin.IRQ_RISING, wake=machine.SLEEP
) # GP16 can't wake up from DEEPSLEEP
except:
print("Exception")
pin0_irq.disable()
pin1_irq.disable()
|
compilers/src/make.py | ksuarez95/coinhive-block | 105 | 11072981 | domains = open("../../domains", "r").read().split('\n')
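# build a hosts file that sinkholes every Coinhive domain to 0.0.0.0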
output = open('../../hostfile', "w")
output.write("##COINHIVE DOMAINS\n")
for domain in domains:
output.write("0.0.0.0 " + domain + "\n")
output.write("##END COINHIVE\n")
|
tests/import/import1b.py | learnforpractice/micropython-cpp | 692 | 11073015 | <gh_stars>100-1000
var = 123
def throw():
raise ValueError
|
python-midonetclient/src/midonetclient/port.py | obino/midonet | 221 | 11073030 | <filename>python-midonetclient/src/midonetclient/port.py
# Copyright 2013 Midokura PTE LTD.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from midonetclient import admin_state_up_mixin
from midonetclient import mac_ip
from midonetclient import port_group_port
from midonetclient.port_type import VXLAN
from midonetclient import resource_base
from midonetclient import vendor_media_type
class Port(resource_base.ResourceBase,
admin_state_up_mixin.AdminStateUpMixin):
media_type = vendor_media_type.APPLICATION_PORT_JSON
def __init__(self, uri, dto, auth):
super(Port, self).__init__(uri, dto, auth)
def get_id(self):
return self.dto['id']
def get_type(self):
return self.dto['type']
def get_active(self):
return self.dto['active']
def get_device_id(self):
return self.dto['deviceId']
def get_inbound_filter_id(self):
return self.dto['inboundFilterId']
def get_outbound_filter_id(self):
return self.dto['outboundFilterId']
def get_vif_id(self):
return self.dto['vifId']
def get_host_id(self):
return self.dto['hostId']
def get_interface_name(self):
return self.dto['interfaceName']
def get_vlan_id(self):
if self.dto['type'] == VXLAN:
return None
return self.dto['vlanId']
def get_peer_id(self):
return self.dto['peerId']
def get_network_address(self):
return self.dto['networkAddress']
def get_network_length(self):
return self.dto['networkLength']
def get_port_address(self):
return self.dto['portAddress']
def get_port_mac(self):
return self.dto['portMac']
def get_rtr_port_vni(self):
return self.dto['rtrPortVni']
def can_off_ramp_vxlan(self):
return self.dto['offRampVxlan']
def get_vtep(self):
return self.dto['vtepId']
def get_vni(self):
return self.dto['vni']
def get_bgp_status(self):
return self.dto['bgpStatus']
def get_service_container_id(self):
return self.dto['serviceContainerId']
def service_container_id(self, id_):
self.dto['serviceContainerId'] = id_
return self
def id(self, id):
self.dto['id'] = id
return self
def inbound_filter_id(self, id_):
self.dto['inboundFilterId'] = id_
return self
def outbound_filter_id(self, id_):
self.dto['outboundFilterId'] = id_
return self
def vif_id(self, id_):
self.dto['vifId'] = id_
return self
def vlan_id(self, id_):
self.dto['vlanId'] = id_
return self
def port_address(self, port_address):
self.dto['portAddress'] = port_address
return self
def network_address(self, network_address):
self.dto['networkAddress'] = network_address
return self
def network_length(self, network_length):
self.dto['networkLength'] = network_length
return self
def port_mac(self, port_mac):
self.dto['portMac'] = port_mac
return self
def rtr_port_vni(self, vni):
self.dto['rtrPortVni'] = vni
return self
def off_ramp_vxlan(self, ramp):
self.dto['offRampVxlan'] = ramp
return self
def type(self, type_):
self.dto['type'] = type_
return self
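# POST the link DTO so this port is peered with peer_uuid, then refresh the local DTO; unlink() below reverses it.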
def link(self, peer_uuid):
self.dto['peerId'] = peer_uuid
headers = {'Content-Type':
vendor_media_type.APPLICATION_PORT_LINK_JSON}
self.auth.do_request(self.dto['link'], 'POST', self.dto,
headers=headers)
self.get()
return self
def unlink(self):
self.auth.do_request(self.dto['link'], 'DELETE')
self.get()
return self
def get_port_groups(self, query=None):
headers = {'Accept':
vendor_media_type.APPLICATION_PORTGROUP_PORT_COLLECTION_JSON}
return self.get_children(self.dto['portGroups'], query, headers,
port_group_port.PortGroupPort)
def get_inbound_mirrors(self, query=None):
return self.dto['inboundMirrorIds']
def inbound_mirrors(self, inMirrors):
self.dto['inboundMirrorIds'] = inMirrors
return self
def get_outbound_mirrors(self, query=None):
return self.dto['outboundMirrorIds']
def outbound_mirrors(self, outMirrors):
self.dto['outboundMirrorIds'] = outMirrors
return self
def get_post_in_filter_mirrors(self, query=None):
return self.dto['postInFilterMirrorIds']
def post_in_filter_mirrors(self, inMirrors):
self.dto['postInFilterMirrorIds'] = inMirrors
return self
def get_pre_out_filter_mirrors(self, query=None):
return self.dto['preOutFilterMirrorIds']
def pre_out_filter_mirrors(self, outMirrors):
self.dto['preOutFilterMirrorIds'] = outMirrors
return self
def add_remote_peer(self):
return mac_ip.MacIp(self.dto['peeringTable'], {}, self.auth)
def get_remote_peers(self, query=None):
headers = {'Accept':
vendor_media_type.APPLICATION_MAC_IP_COLLECTION_JSON}
return self.get_children(self.dto['peeringTable'], query, headers,
mac_ip.MacIp)
def get_qos_policy_id(self):
return self.dto['qosPolicyId']
def qos_policy_id(self, id_):
self.dto['qosPolicyId'] = id_
return self
def set_addresses(self, addresses):
self.dto['portSubnet'] = addresses
return self
def get_addresses(self):
return self.dto['portSubnet']
|