ext | sha | content
---|---|---|
py | 1a4f5bb27d7a2500eaf47581ae6016cf259b635e | # -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import base64
import hmac
import os
import json
import ecdsa
import pyaes
from .util import bfh, bh2u, to_string
from . import version
from .util import print_error, InvalidPassword, assert_bytes, to_bytes, inv_dict
from . import segwit_addr
def read_json_dict(filename):
path = os.path.join(os.path.dirname(__file__), filename)
try:
with open(path, 'r') as f:
r = json.loads(f.read())
except:
r = {}
return r
# Version numbers for BIP32 extended keys
# standard: xprv, xpub
# segwit in p2sh: yprv, ypub
# native segwit: zprv, zpub
XPRV_HEADERS = {
'standard': 0x0488ade4,
'p2wpkh-p2sh': 0x049d7878,
'p2wsh-p2sh': 0x295b005,
'p2wpkh': 0x4b2430c,
'p2wsh': 0x2aa7a99
}
XPUB_HEADERS = {
'standard': 0x0488b21e,
'p2wpkh-p2sh': 0x049d7cb2,
'p2wsh-p2sh': 0x295b43f,
'p2wpkh': 0x4b24746,
'p2wsh': 0x2aa7ed3
}
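# When wrapped in Base58Check these version bytes produce the familiar human-readable
# prefixes (per the SLIP-132 convention): 0x0488ade4/0x0488b21e -> xprv/xpub,
# 0x049d7878/0x049d7cb2 -> yprv/ypub (p2wpkh-p2sh), 0x04b2430c/0x04b24746 -> zprv/zpub (p2wpkh).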
class NetworkConstants:
@classmethod
def set_mainnet(cls):
cls.TESTNET = False
cls.WIF_PREFIX = 0x80
cls.ADDRTYPE_P2PKH = 0
cls.ADDRTYPE_P2SH = 5
cls.SEGWIT_HRP = "bc"
cls.HEADERS_URL = "https://headers.electrum.org/blockchain_headers"
cls.GENESIS = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"
cls.DEFAULT_PORTS = {'t': '50001', 's': '50002'}
cls.DEFAULT_SERVERS = read_json_dict('servers.json')
@classmethod
def set_testnet(cls):
cls.TESTNET = True
cls.WIF_PREFIX = 0xef
cls.ADDRTYPE_P2PKH = 111
cls.ADDRTYPE_P2SH = 196
cls.SEGWIT_HRP = "tb"
cls.HEADERS_URL = "https://headers.electrum.org/testnet_headers"
cls.GENESIS = "000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943"
cls.DEFAULT_PORTS = {'t':'51001', 's':'51002'}
cls.DEFAULT_SERVERS = read_json_dict('servers_testnet.json')
NetworkConstants.set_mainnet()
################################## transactions
FEE_STEP = 10000
MAX_FEE_RATE = 300000
FEE_TARGETS = [25, 10, 5, 2]
COINBASE_MATURITY = 100
COIN = 100000000
# supported types of transaction outputs
TYPE_ADDRESS = 0
TYPE_PUBKEY = 1
TYPE_SCRIPT = 2
# AES encryption
try:
from Cryptodome.Cipher import AES
except:
AES = None
class InvalidPadding(Exception):
pass
def append_PKCS7_padding(data):
assert_bytes(data)
padlen = 16 - (len(data) % 16)
return data + bytes([padlen]) * padlen
def strip_PKCS7_padding(data):
assert_bytes(data)
if len(data) % 16 != 0 or len(data) == 0:
raise InvalidPadding("invalid length")
padlen = data[-1]
if padlen > 16:
raise InvalidPadding("invalid padding byte (large)")
if padlen == 0:
raise InvalidPadding("invalid padding byte (zero)")
for i in data[-padlen:]:
if i != padlen:
raise InvalidPadding("invalid padding byte (inconsistent)")
return data[0:-padlen]
def aes_encrypt_with_iv(key, iv, data):
assert_bytes(key, iv, data)
data = append_PKCS7_padding(data)
if AES:
e = AES.new(key, AES.MODE_CBC, iv).encrypt(data)
else:
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Encrypter(aes_cbc, padding=pyaes.PADDING_NONE)
e = aes.feed(data) + aes.feed() # empty aes.feed() flushes buffer
return e
def aes_decrypt_with_iv(key, iv, data):
assert_bytes(key, iv, data)
if AES:
cipher = AES.new(key, AES.MODE_CBC, iv)
data = cipher.decrypt(data)
else:
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Decrypter(aes_cbc, padding=pyaes.PADDING_NONE)
data = aes.feed(data) + aes.feed() # empty aes.feed() flushes buffer
try:
return strip_PKCS7_padding(data)
except InvalidPadding:
raise InvalidPassword()
def EncodeAES(secret, s):
assert_bytes(s)
iv = bytes(os.urandom(16))
ct = aes_encrypt_with_iv(secret, iv, s)
e = iv + ct
return base64.b64encode(e)
def DecodeAES(secret, e):
e = bytes(base64.b64decode(e))
iv, e = e[:16], e[16:]
s = aes_decrypt_with_iv(secret, iv, e)
return s
def pw_encode(s, password):
if password:
secret = Hash(password)
return EncodeAES(secret, to_bytes(s, "utf8")).decode('utf8')
else:
return s
def pw_decode(s, password):
if password is not None:
secret = Hash(password)
try:
d = to_string(DecodeAES(secret, s), "utf8")
except Exception:
raise InvalidPassword()
return d
else:
return s
def rev_hex(s):
return bh2u(bfh(s)[::-1])
def int_to_hex(i, length=1):
assert isinstance(i, int)
s = hex(i)[2:].rstrip('L')
s = "0"*(2*length - len(s)) + s
return rev_hex(s)
def var_int(i):
# https://en.bitcoin.it/wiki/Protocol_specification#Variable_length_integer
if i<0xfd:
return int_to_hex(i)
elif i<=0xffff:
return "fd"+int_to_hex(i,2)
elif i<=0xffffffff:
return "fe"+int_to_hex(i,4)
else:
return "ff"+int_to_hex(i,8)
def op_push(i):
if i<0x4c:
return int_to_hex(i)
elif i<0xff:
return '4c' + int_to_hex(i)
elif i<0xffff:
return '4d' + int_to_hex(i,2)
else:
return '4e' + int_to_hex(i,4)
def push_script(x):
return op_push(len(x)//2) + x
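# Example (sketch): for a 20-byte (40 hex char) pubkey hash h, push_script(h) == '14' + h,
# i.e. an OP_PUSH of 20 bytes; longer items get OP_PUSHDATA1/2/4 prefixes via op_push().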
def sha256(x):
x = to_bytes(x, 'utf8')
return bytes(hashlib.sha256(x).digest())
def Hash(x):
x = to_bytes(x, 'utf8')
out = bytes(sha256(sha256(x)))
return out
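# Hash() is Bitcoin's double-SHA256; below it is used for Base58Check checksums,
# password-derived AES keys (pw_encode) and signed-message digests.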
hash_encode = lambda x: bh2u(x[::-1])
hash_decode = lambda x: bfh(x)[::-1]
hmac_sha_512 = lambda x, y: hmac.new(x, y, hashlib.sha512).digest()
def is_new_seed(x, prefix=version.SEED_PREFIX):
from . import mnemonic
x = mnemonic.normalize_text(x)
s = bh2u(hmac_sha_512(b"Seed version", x.encode('utf8')))
return s.startswith(prefix)
def is_old_seed(seed):
from . import old_mnemonic, mnemonic
seed = mnemonic.normalize_text(seed)
words = seed.split()
try:
# checks here are deliberately left weak for legacy reasons, see #3149
old_mnemonic.mn_decode(words)
uses_electrum_words = True
except Exception:
uses_electrum_words = False
try:
seed = bfh(seed)
is_hex = (len(seed) == 16 or len(seed) == 32)
except Exception:
is_hex = False
return is_hex or (uses_electrum_words and (len(words) == 12 or len(words) == 24))
def seed_type(x):
if is_old_seed(x):
return 'old'
elif is_new_seed(x):
return 'standard'
elif is_new_seed(x, version.SEED_PREFIX_SW):
return 'segwit'
elif is_new_seed(x, version.SEED_PREFIX_2FA):
return '2fa'
return ''
is_seed = lambda x: bool(seed_type(x))
# pywallet openssl private key implementation
def i2o_ECPublicKey(pubkey, compressed=False):
# public keys are 65 bytes long (520 bits)
# 0x04 + 32-byte X-coordinate + 32-byte Y-coordinate
# 0x00 = point at infinity, 0x02 and 0x03 = compressed, 0x04 = uncompressed
# compressed keys: <sign> <x> where <sign> is 0x02 if y is even and 0x03 if y is odd
if compressed:
if pubkey.point.y() & 1:
key = '03' + '%064x' % pubkey.point.x()
else:
key = '02' + '%064x' % pubkey.point.x()
else:
key = '04' + \
'%064x' % pubkey.point.x() + \
'%064x' % pubkey.point.y()
return bfh(key)
# end pywallet openssl private key implementation
############ functions from pywallet #####################
def hash_160(public_key):
try:
md = hashlib.new('ripemd160')
md.update(sha256(public_key))
return md.digest()
except BaseException:
from . import ripemd
md = ripemd.new(sha256(public_key))
return md.digest()
def hash160_to_b58_address(h160, addrtype, witness_program_version=1):
s = bytes([addrtype])
s += h160
return base_encode(s+Hash(s)[0:4], base=58)
def b58_address_to_hash160(addr):
addr = to_bytes(addr, 'ascii')
_bytes = base_decode(addr, 25, base=58)
return _bytes[0], _bytes[1:21]
def hash160_to_p2pkh(h160):
return hash160_to_b58_address(h160, NetworkConstants.ADDRTYPE_P2PKH)
def hash160_to_p2sh(h160):
return hash160_to_b58_address(h160, NetworkConstants.ADDRTYPE_P2SH)
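# With the mainnet version bytes above (0 and 5), the resulting Base58Check strings are the
# familiar addresses starting with '1' (P2PKH) and '3' (P2SH); testnet uses 111 and 196 instead.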
def public_key_to_p2pkh(public_key):
return hash160_to_p2pkh(hash_160(public_key))
def hash_to_segwit_addr(h):
return segwit_addr.encode(NetworkConstants.SEGWIT_HRP, 0, h)
def public_key_to_p2wpkh(public_key):
return hash_to_segwit_addr(hash_160(public_key))
def script_to_p2wsh(script):
return hash_to_segwit_addr(sha256(bfh(script)))
def p2wpkh_nested_script(pubkey):
pkh = bh2u(hash_160(bfh(pubkey)))
return '00' + push_script(pkh)
def p2wsh_nested_script(witness_script):
wsh = bh2u(sha256(bfh(witness_script)))
return '00' + push_script(wsh)
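# Sketch of the nested-segwit redeem scripts built above: p2wpkh-p2sh uses '0014' + 20-byte
# pubkey hash and p2wsh-p2sh uses '0020' + 32-byte script hash (witness version 0 plus a push).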
def pubkey_to_address(txin_type, pubkey):
if txin_type == 'p2pkh':
return public_key_to_p2pkh(bfh(pubkey))
elif txin_type == 'p2wpkh':
return hash_to_segwit_addr(hash_160(bfh(pubkey)))
elif txin_type == 'p2wpkh-p2sh':
scriptSig = p2wpkh_nested_script(pubkey)
return hash160_to_p2sh(hash_160(bfh(scriptSig)))
else:
raise NotImplementedError(txin_type)
def redeem_script_to_address(txin_type, redeem_script):
if txin_type == 'p2sh':
return hash160_to_p2sh(hash_160(bfh(redeem_script)))
elif txin_type == 'p2wsh':
return script_to_p2wsh(redeem_script)
elif txin_type == 'p2wsh-p2sh':
scriptSig = p2wsh_nested_script(redeem_script)
return hash160_to_p2sh(hash_160(bfh(scriptSig)))
else:
raise NotImplementedError(txin_type)
def script_to_address(script):
from .transaction import get_address_from_output_script
t, addr = get_address_from_output_script(bfh(script))
assert t == TYPE_ADDRESS
return addr
def address_to_script(addr):
witver, witprog = segwit_addr.decode(NetworkConstants.SEGWIT_HRP, addr)
if witprog is not None:
assert (0 <= witver <= 16)
OP_n = witver + 0x50 if witver > 0 else 0
script = bh2u(bytes([OP_n]))
script += push_script(bh2u(bytes(witprog)))
return script
addrtype, hash_160 = b58_address_to_hash160(addr)
if addrtype == NetworkConstants.ADDRTYPE_P2PKH:
script = '76a9' # op_dup, op_hash_160
script += push_script(bh2u(hash_160))
script += '88ac' # op_equalverify, op_checksig
elif addrtype == NetworkConstants.ADDRTYPE_P2SH:
script = 'a9' # op_hash_160
script += push_script(bh2u(hash_160))
script += '87' # op_equal
else:
raise BaseException('unknown address type')
return script
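# Resulting scriptPubKey forms (hex): P2PKH '76a914<h160>88ac', P2SH 'a914<h160>87',
# and for bech32 addresses the witness version opcode followed by a push of the witness program.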
def address_to_scripthash(addr):
script = address_to_script(addr)
return script_to_scripthash(script)
def script_to_scripthash(script):
h = sha256(bytes.fromhex(script))[0:32]
return bh2u(bytes(reversed(h)))
def public_key_to_p2pk_script(pubkey):
script = push_script(pubkey)
script += 'ac' # op_checksig
return script
__b58chars = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(__b58chars) == 58
__b43chars = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'
assert len(__b43chars) == 43
def base_encode(v, base):
""" encode v, which is a string of bytes, to base58."""
assert_bytes(v)
assert base in (58, 43)
chars = __b58chars
if base == 43:
chars = __b43chars
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * c
result = bytearray()
while long_value >= base:
div, mod = divmod(long_value, base)
result.append(chars[mod])
long_value = div
result.append(chars[long_value])
# Bitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == 0x00:
nPad += 1
else:
break
result.extend([chars[0]] * nPad)
result.reverse()
return result.decode('ascii')
def base_decode(v, length, base):
""" decode v into a string of len bytes."""
# assert_bytes(v)
v = to_bytes(v, 'ascii')
assert base in (58, 43)
chars = __b58chars
if base == 43:
chars = __b43chars
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += chars.find(bytes([c])) * (base**i)
result = bytearray()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result.append(mod)
long_value = div
result.append(long_value)
nPad = 0
for c in v:
if c == chars[0]:
nPad += 1
else:
break
result.extend(b'\x00' * nPad)
if length is not None and len(result) != length:
return None
result.reverse()
return bytes(result)
def EncodeBase58Check(vchIn):
hash = Hash(vchIn)
return base_encode(vchIn + hash[0:4], base=58)
def DecodeBase58Check(psz):
vchRet = base_decode(psz, None, base=58)
key = vchRet[0:-4]
csum = vchRet[-4:]
hash = Hash(key)
cs32 = hash[0:4]
if cs32 != csum:
return None
else:
return key
# extended key export format for segwit
SCRIPT_TYPES = {
'p2pkh':0,
'p2wpkh':1,
'p2wpkh-p2sh':2,
'p2sh':5,
'p2wsh':6,
'p2wsh-p2sh':7
}
def serialize_privkey(secret, compressed, txin_type):
prefix = bytes([(SCRIPT_TYPES[txin_type]+NetworkConstants.WIF_PREFIX)&255])
suffix = b'\01' if compressed else b''
vchIn = prefix + secret + suffix
return EncodeBase58Check(vchIn)
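# Sketch: for txin_type 'p2pkh' on mainnet this is standard WIF (0x80 prefix, optional 0x01
# compression suffix); other txin types shift the prefix byte, giving Electrum-specific WIF
# variants that deserialize_privkey() below maps back to a script type.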
def deserialize_privkey(key):
# whether the pubkey is compressed should be visible from the keystore
vch = DecodeBase58Check(key)
if is_minikey(key):
return 'p2pkh', minikey_to_private_key(key), True
elif vch:
txin_type = inv_dict(SCRIPT_TYPES)[vch[0] - NetworkConstants.WIF_PREFIX]
assert len(vch) in [33, 34]
compressed = len(vch) == 34
return txin_type, vch[1:33], compressed
else:
raise BaseException("cannot deserialize", key)
def regenerate_key(pk):
assert len(pk) == 32
return EC_KEY(pk)
def GetPubKey(pubkey, compressed=False):
return i2o_ECPublicKey(pubkey, compressed)
def GetSecret(pkey):
return bfh('%064x' % pkey.secret)
def is_compressed(sec):
return deserialize_privkey(sec)[2]
def public_key_from_private_key(pk, compressed):
pkey = regenerate_key(pk)
public_key = GetPubKey(pkey.pubkey, compressed)
return bh2u(public_key)
def address_from_private_key(sec):
txin_type, privkey, compressed = deserialize_privkey(sec)
public_key = public_key_from_private_key(privkey, compressed)
return pubkey_to_address(txin_type, public_key)
def is_segwit_address(addr):
try:
witver, witprog = segwit_addr.decode(NetworkConstants.SEGWIT_HRP, addr)
except Exception as e:
return False
return witprog is not None
def is_b58_address(addr):
try:
addrtype, h = b58_address_to_hash160(addr)
except Exception as e:
return False
if addrtype not in [NetworkConstants.ADDRTYPE_P2PKH, NetworkConstants.ADDRTYPE_P2SH]:
return False
return addr == hash160_to_b58_address(h, addrtype)
def is_address(addr):
return is_segwit_address(addr) or is_b58_address(addr)
def is_private_key(key):
try:
k = deserialize_privkey(key)
return k is not False
except:
return False
########### end pywallet functions #######################
def is_minikey(text):
# Minikeys are typically 22 or 30 characters, but this routine
# permits any length of 20 or more provided the minikey is valid.
# A valid minikey must begin with an 'S', be in base58, and when
# suffixed with '?' have its SHA256 hash begin with a zero byte.
# They are widely used in Casascius physical bitcoins.
return (len(text) >= 20 and text[0] == 'S'
and all(ord(c) in __b58chars for c in text)
and sha256(text + '?')[0] == 0x00)
def minikey_to_private_key(text):
return sha256(text)
from ecdsa.ecdsa import curve_secp256k1, generator_secp256k1
from ecdsa.curves import SECP256k1
from ecdsa.ellipticcurve import Point
from ecdsa.util import string_to_number, number_to_string
def msg_magic(message):
length = bfh(var_int(len(message)))
return b"\x18Bitcoin Signed Message:\n" + length + message
def verify_message(address, sig, message):
assert_bytes(sig, message)
try:
h = Hash(msg_magic(message))
public_key, compressed = pubkey_from_signature(sig, h)
# check public key using the address
pubkey = point_to_ser(public_key.pubkey.point, compressed)
for txin_type in ['p2pkh','p2wpkh','p2wpkh-p2sh']:
addr = pubkey_to_address(txin_type, bh2u(pubkey))
if address == addr:
break
else:
raise Exception("Bad signature")
# check message
public_key.verify_digest(sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
return True
except Exception as e:
print_error("Verification error: {0}".format(e))
return False
def encrypt_message(message, pubkey):
return EC_KEY.encrypt_message(message, bfh(pubkey))
def chunks(l, n):
return [l[i:i+n] for i in range(0, len(l), n)]
def ECC_YfromX(x,curved=curve_secp256k1, odd=True):
_p = curved.p()
_a = curved.a()
_b = curved.b()
for offset in range(128):
Mx = x + offset
My2 = pow(Mx, 3, _p) + _a * pow(Mx, 2, _p) + _b % _p
My = pow(My2, (_p+1)//4, _p )
if curved.contains_point(Mx,My):
if odd == bool(My&1):
return [My,offset]
return [_p-My,offset]
raise Exception('ECC_YfromX: No Y found')
def negative_point(P):
return Point( P.curve(), P.x(), -P.y(), P.order() )
def point_to_ser(P, comp=True ):
if comp:
return bfh( ('%02x'%(2+(P.y()&1)))+('%064x'%P.x()) )
return bfh( '04'+('%064x'%P.x())+('%064x'%P.y()) )
def ser_to_point(Aser):
curve = curve_secp256k1
generator = generator_secp256k1
_r = generator.order()
assert Aser[0] in [0x02, 0x03, 0x04]
if Aser[0] == 0x04:
return Point( curve, string_to_number(Aser[1:33]), string_to_number(Aser[33:]), _r )
Mx = string_to_number(Aser[1:])
return Point( curve, Mx, ECC_YfromX(Mx, curve, Aser[0] == 0x03)[0], _r )
class MyVerifyingKey(ecdsa.VerifyingKey):
@classmethod
def from_signature(klass, sig, recid, h, curve):
""" See http://www.secg.org/download/aid-780/sec1-v2.pdf, chapter 4.1.6 """
from ecdsa import util, numbertheory
from . import msqr
curveFp = curve.curve
G = curve.generator
order = G.order()
# extract r,s from signature
r, s = util.sigdecode_string(sig, order)
# 1.1
x = r + (recid//2) * order
# 1.3
alpha = ( x * x * x + curveFp.a() * x + curveFp.b() ) % curveFp.p()
beta = msqr.modular_sqrt(alpha, curveFp.p())
y = beta if (beta - recid) % 2 == 0 else curveFp.p() - beta
# 1.4 the constructor checks that nR is at infinity
R = Point(curveFp, x, y, order)
# 1.5 compute e from message:
e = string_to_number(h)
minus_e = -e % order
# 1.6 compute Q = r^-1 (sR - eG)
inv_r = numbertheory.inverse_mod(r,order)
Q = inv_r * ( s * R + minus_e * G )
return klass.from_public_point( Q, curve )
def pubkey_from_signature(sig, h):
if len(sig) != 65:
raise Exception("Wrong encoding")
nV = sig[0]
if nV < 27 or nV >= 35:
raise Exception("Bad encoding")
if nV >= 31:
compressed = True
nV -= 4
else:
compressed = False
recid = nV - 27
return MyVerifyingKey.from_signature(sig[1:], recid, h, curve = SECP256k1), compressed
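# Header byte layout (sketch): 27-30 -> uncompressed pubkey, 31-34 -> compressed pubkey,
# and (header - 27) % 4 is the recovery id used to pick among the candidate curve points.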
class MySigningKey(ecdsa.SigningKey):
"""Enforce low S values in signatures"""
def sign_number(self, number, entropy=None, k=None):
curve = SECP256k1
G = curve.generator
order = G.order()
r, s = ecdsa.SigningKey.sign_number(self, number, entropy, k)
if s > order//2:
s = order - s
return r, s
class EC_KEY(object):
def __init__( self, k ):
secret = string_to_number(k)
self.pubkey = ecdsa.ecdsa.Public_key( generator_secp256k1, generator_secp256k1 * secret )
self.privkey = ecdsa.ecdsa.Private_key( self.pubkey, secret )
self.secret = secret
def get_public_key(self, compressed=True):
return bh2u(point_to_ser(self.pubkey.point, compressed))
def sign(self, msg_hash):
private_key = MySigningKey.from_secret_exponent(self.secret, curve = SECP256k1)
public_key = private_key.get_verifying_key()
signature = private_key.sign_digest_deterministic(msg_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_string)
assert public_key.verify_digest(signature, msg_hash, sigdecode = ecdsa.util.sigdecode_string)
return signature
def sign_message(self, message, is_compressed):
message = to_bytes(message, 'utf8')
signature = self.sign(Hash(msg_magic(message)))
for i in range(4):
sig = bytes([27 + i + (4 if is_compressed else 0)]) + signature
try:
self.verify_message(sig, message)
return sig
except Exception as e:
continue
else:
raise Exception("error: cannot sign message")
def verify_message(self, sig, message):
assert_bytes(message)
h = Hash(msg_magic(message))
public_key, compressed = pubkey_from_signature(sig, h)
# check public key
if point_to_ser(public_key.pubkey.point, compressed) != point_to_ser(self.pubkey.point, compressed):
raise Exception("Bad signature")
# check message
public_key.verify_digest(sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
# ECIES encryption/decryption methods; AES-128-CBC with PKCS7 is used as the cipher; hmac-sha256 is used as the mac
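# Payload layout as implemented below: base64( b'BIE1' + 33-byte compressed ephemeral pubkey
# + AES-128-CBC ciphertext + 32-byte HMAC-SHA256 computed over everything before the MAC ).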
@classmethod
def encrypt_message(self, message, pubkey):
assert_bytes(message)
pk = ser_to_point(pubkey)
if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, pk.x(), pk.y()):
raise Exception('invalid pubkey')
ephemeral_exponent = number_to_string(ecdsa.util.randrange(pow(2,256)), generator_secp256k1.order())
ephemeral = EC_KEY(ephemeral_exponent)
ecdh_key = point_to_ser(pk * ephemeral.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
ciphertext = aes_encrypt_with_iv(key_e, iv, message)
ephemeral_pubkey = bfh(ephemeral.get_public_key(compressed=True))
encrypted = b'BIE1' + ephemeral_pubkey + ciphertext
mac = hmac.new(key_m, encrypted, hashlib.sha256).digest()
return base64.b64encode(encrypted + mac)
def decrypt_message(self, encrypted):
encrypted = base64.b64decode(encrypted)
if len(encrypted) < 85:
raise Exception('invalid ciphertext: length')
magic = encrypted[:4]
ephemeral_pubkey = encrypted[4:37]
ciphertext = encrypted[37:-32]
mac = encrypted[-32:]
if magic != b'BIE1':
raise Exception('invalid ciphertext: invalid magic bytes')
try:
ephemeral_pubkey = ser_to_point(ephemeral_pubkey)
except AssertionError as e:
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, ephemeral_pubkey.x(), ephemeral_pubkey.y()):
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
ecdh_key = point_to_ser(ephemeral_pubkey * self.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
if mac != hmac.new(key_m, encrypted[:-32], hashlib.sha256).digest():
raise InvalidPassword()
return aes_decrypt_with_iv(key_e, iv, ciphertext)
###################################### BIP32 ##############################
random_seed = lambda n: "%032x"%ecdsa.util.randrange( pow(2,n) )
BIP32_PRIME = 0x80000000
def get_pubkeys_from_secret(secret):
# public key
private_key = ecdsa.SigningKey.from_string( secret, curve = SECP256k1 )
public_key = private_key.get_verifying_key()
K = public_key.to_string()
K_compressed = GetPubKey(public_key.pubkey,True)
return K, K_compressed
# Child private key derivation function (from master private key)
# k = master private key (32 bytes)
# c = master chain code (extra entropy for key derivation) (32 bytes)
# n = the index of the key we want to derive. (only 32 bits will be used)
# If n is negative (i.e. the 32nd bit is set), the resulting private key's
# corresponding public key can NOT be determined without the master private key.
# However, if n is positive, the resulting private key's corresponding
# public key can be determined without the master private key.
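# Example (sketch): deriving the path m/0'/1 from a root (k, c) is
# CKD_priv(*CKD_priv(k, c, 0 | BIP32_PRIME), 1); the hardened step sets the top bit of n.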
def CKD_priv(k, c, n):
is_prime = n & BIP32_PRIME
return _CKD_priv(k, c, bfh(rev_hex(int_to_hex(n,4))), is_prime)
def _CKD_priv(k, c, s, is_prime):
order = generator_secp256k1.order()
keypair = EC_KEY(k)
cK = GetPubKey(keypair.pubkey,True)
data = bytes([0]) + k + s if is_prime else cK + s
I = hmac.new(c, data, hashlib.sha512).digest()
k_n = number_to_string( (string_to_number(I[0:32]) + string_to_number(k)) % order , order )
c_n = I[32:]
return k_n, c_n
# Child public key derivation function (from public key only)
# K = master public key
# c = master chain code
# n = index of key we want to derive
# This function allows us to find the nth public key, as long as n is
# non-negative. If n is negative, we need the master private key to find it.
def CKD_pub(cK, c, n):
if n & BIP32_PRIME: raise
return _CKD_pub(cK, c, bfh(rev_hex(int_to_hex(n,4))))
# helper function, callable with arbitrary string
def _CKD_pub(cK, c, s):
order = generator_secp256k1.order()
I = hmac.new(c, cK + s, hashlib.sha512).digest()
curve = SECP256k1
pubkey_point = string_to_number(I[0:32])*curve.generator + ser_to_point(cK)
public_key = ecdsa.VerifyingKey.from_public_point( pubkey_point, curve = SECP256k1 )
c_n = I[32:]
cK_n = GetPubKey(public_key.pubkey,True)
return cK_n, c_n
def xprv_header(xtype):
return bfh("%08x" % XPRV_HEADERS[xtype])
def xpub_header(xtype):
return bfh("%08x" % XPUB_HEADERS[xtype])
def serialize_xprv(xtype, c, k, depth=0, fingerprint=b'\x00'*4, child_number=b'\x00'*4):
xprv = xprv_header(xtype) + bytes([depth]) + fingerprint + child_number + c + bytes([0]) + k
return EncodeBase58Check(xprv)
def serialize_xpub(xtype, c, cK, depth=0, fingerprint=b'\x00'*4, child_number=b'\x00'*4):
xpub = xpub_header(xtype) + bytes([depth]) + fingerprint + child_number + c + cK
return EncodeBase58Check(xpub)
def deserialize_xkey(xkey, prv):
xkey = DecodeBase58Check(xkey)
if len(xkey) != 78:
raise BaseException('Invalid length')
depth = xkey[4]
fingerprint = xkey[5:9]
child_number = xkey[9:13]
c = xkey[13:13+32]
header = int('0x' + bh2u(xkey[0:4]), 16)
headers = XPRV_HEADERS if prv else XPUB_HEADERS
if header not in headers.values():
raise BaseException('Invalid xpub format', hex(header))
xtype = list(headers.keys())[list(headers.values()).index(header)]
n = 33 if prv else 32
K_or_k = xkey[13+n:]
return xtype, depth, fingerprint, child_number, c, K_or_k
def deserialize_xpub(xkey):
return deserialize_xkey(xkey, False)
def deserialize_xprv(xkey):
return deserialize_xkey(xkey, True)
def xpub_type(x):
return deserialize_xpub(x)[0]
def is_xpub(text):
try:
deserialize_xpub(text)
return True
except:
return False
def is_xprv(text):
try:
deserialize_xprv(text)
return True
except:
return False
def xpub_from_xprv(xprv):
xtype, depth, fingerprint, child_number, c, k = deserialize_xprv(xprv)
K, cK = get_pubkeys_from_secret(k)
return serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
def bip32_root(seed, xtype):
I = hmac.new(b"Bitcoin seed", seed, hashlib.sha512).digest()
master_k = I[0:32]
master_c = I[32:]
K, cK = get_pubkeys_from_secret(master_k)
xprv = serialize_xprv(xtype, master_c, master_k)
xpub = serialize_xpub(xtype, master_c, cK)
return xprv, xpub
def xpub_from_pubkey(xtype, cK):
assert cK[0] in [0x02, 0x03]
return serialize_xpub(xtype, b'\x00'*32, cK)
def bip32_derivation(s):
assert s.startswith('m/')
s = s[2:]
for n in s.split('/'):
if n == '': continue
i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
yield i
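# Example (sketch): list(bip32_derivation("m/0'/1")) == [0x80000000, 1]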
def is_bip32_derivation(x):
try:
[ i for i in bip32_derivation(x)]
return True
except :
return False
def bip32_private_derivation(xprv, branch, sequence):
assert sequence.startswith(branch)
if branch == sequence:
return xprv, xpub_from_xprv(xprv)
xtype, depth, fingerprint, child_number, c, k = deserialize_xprv(xprv)
sequence = sequence[len(branch):]
for n in sequence.split('/'):
if n == '': continue
i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
parent_k = k
k, c = CKD_priv(k, c, i)
depth += 1
_, parent_cK = get_pubkeys_from_secret(parent_k)
fingerprint = hash_160(parent_cK)[0:4]
child_number = bfh("%08X"%i)
K, cK = get_pubkeys_from_secret(k)
xpub = serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
xprv = serialize_xprv(xtype, c, k, depth, fingerprint, child_number)
return xprv, xpub
def bip32_public_derivation(xpub, branch, sequence):
xtype, depth, fingerprint, child_number, c, cK = deserialize_xpub(xpub)
assert sequence.startswith(branch)
sequence = sequence[len(branch):]
for n in sequence.split('/'):
if n == '': continue
i = int(n)
parent_cK = cK
cK, c = CKD_pub(cK, c, i)
depth += 1
fingerprint = hash_160(parent_cK)[0:4]
child_number = bfh("%08X"%i)
return serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
def bip32_private_key(sequence, k, chain):
for i in sequence:
k, chain = CKD_priv(k, chain, i)
return k
|
py | 1a4f5c9c706d1ca32e36e1d94056ca336a610aa6 | # Copyright 2017 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
"""This module extends the TensorFlowOnSpark API to support Spark ML Pipelines.
It provides a TFEstimator class to fit a TFModel using TensorFlow. The TFEstimator will actually spawn a TensorFlowOnSpark cluster
to conduct distributed training, but due to architectural limitations, the TFModel will only run single-node TensorFlow instances
when inferencing on the executors. The executors will run in parallel, but the TensorFlow model must fit in the memory
of each executor.
There is also an option to provide a separate "export" function, which allows users to export a different graph for inferencing vs. training.
This is useful when the training graph uses InputMode.TENSORFLOW with queue_runners, but the inferencing graph needs placeholders.
And this is especially useful for exporting saved_models for TensorFlow Serving.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pyspark.context import SparkContext
from pyspark.ml.param.shared import Param, Params, TypeConverters
from pyspark.ml.pipeline import Estimator, Model
from pyspark.sql import Row, SparkSession
import tensorflow as tf
from tensorflow.contrib.saved_model.python.saved_model import reader, signature_def_utils
from tensorflow.python.saved_model import loader
from . import TFCluster, gpu_info, dfutil
import argparse
import copy
import logging
import os
import subprocess
import sys
##### TensorFlowOnSpark Params
class TFTypeConverters(object):
"""Custom DataFrame TypeConverter for dictionary types (since this is not provided by Spark core)."""
@staticmethod
def toDict(value):
if type(value) == dict:
return value
else:
raise TypeError("Could not convert %s to OrderedDict" % value)
class HasBatchSize(Params):
batch_size = Param(Params._dummy(), "batch_size", "Number of records per batch", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasBatchSize, self).__init__()
def setBatchSize(self, value):
return self._set(batch_size=value)
def getBatchSize(self):
return self.getOrDefault(self.batch_size)
class HasClusterSize(Params):
cluster_size = Param(Params._dummy(), "cluster_size", "Number of nodes in the cluster", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasClusterSize, self).__init__()
def setClusterSize(self, value):
return self._set(cluster_size=value)
def getClusterSize(self):
return self.getOrDefault(self.cluster_size)
class HasEpochs(Params):
epochs = Param(Params._dummy(), "epochs", "Number of epochs to train", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasEpochs, self).__init__()
def setEpochs(self, value):
return self._set(epochs=value)
def getEpochs(self):
return self.getOrDefault(self.epochs)
class HasInputMapping(Params):
input_mapping = Param(Params._dummy(), "input_mapping", "Mapping of input DataFrame column to input tensor", typeConverter=TFTypeConverters.toDict)
def __init__(self):
super(HasInputMapping, self).__init__()
def setInputMapping(self, value):
return self._set(input_mapping=value)
def getInputMapping(self):
return self.getOrDefault(self.input_mapping)
class HasInputMode(Params):
input_mode = Param(Params._dummy(), "input_mode", "Input data feeding mode (0=TENSORFLOW, 1=SPARK)", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasInputMode, self).__init__()
def setInputMode(self, value):
return self._set(input_mode=value)
def getInputMode(self):
return self.getOrDefault(self.input_mode)
class HasModelDir(Params):
model_dir = Param(Params._dummy(), "model_dir", "Path to save/load model checkpoints", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasModelDir, self).__init__()
def setModelDir(self, value):
return self._set(model_dir=value)
def getModelDir(self):
return self.getOrDefault(self.model_dir)
class HasNumPS(Params):
num_ps = Param(Params._dummy(), "num_ps", "Number of PS nodes in cluster", typeConverter=TypeConverters.toInt)
driver_ps_nodes = Param(Params._dummy(), "driver_ps_nodes", "Run PS nodes on driver locally", typeConverter=TypeConverters.toBoolean)
def __init__(self):
super(HasNumPS, self).__init__()
def setNumPS(self, value):
return self._set(num_ps=value)
def getNumPS(self):
return self.getOrDefault(self.num_ps)
def setDriverPSNodes(self, value):
return self._set(driver_ps_nodes=value)
def getDriverPSNodes(self):
return self.getOrDefault(self.driver_ps_nodes)
class HasOutputMapping(Params):
output_mapping = Param(Params._dummy(), "output_mapping", "Mapping of output tensor to output DataFrame column", typeConverter=TFTypeConverters.toDict)
def __init__(self):
super(HasOutputMapping, self).__init__()
def setOutputMapping(self, value):
return self._set(output_mapping=value)
def getOutputMapping(self):
return self.getOrDefault(self.output_mapping)
class HasProtocol(Params):
protocol = Param(Params._dummy(), "protocol", "Network protocol for Tensorflow (grpc|rdma)", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasProtocol, self).__init__()
def setProtocol(self, value):
return self._set(protocol=value)
def getProtocol(self):
return self.getOrDefault(self.protocol)
class HasReaders(Params):
readers = Param(Params._dummy(), "readers", "number of reader/enqueue threads", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasReaders, self).__init__()
def setReaders(self, value):
return self._set(readers=value)
def getReaders(self):
return self.getOrDefault(self.readers)
class HasSteps(Params):
steps = Param(Params._dummy(), "steps", "Maximum number of steps to train", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasSteps, self).__init__()
def setSteps(self, value):
return self._set(steps=value)
def getSteps(self):
return self.getOrDefault(self.steps)
class HasTensorboard(Params):
tensorboard = Param(Params._dummy(), "tensorboard", "Launch tensorboard process", typeConverter=TypeConverters.toBoolean)
def __init__(self):
super(HasTensorboard, self).__init__()
def setTensorboard(self, value):
return self._set(tensorboard=value)
def getTensorboard(self):
return self.getOrDefault(self.tensorboard)
class HasTFRecordDir(Params):
tfrecord_dir = Param(Params._dummy(), "tfrecord_dir", "Path to temporarily export a DataFrame as TFRecords (for InputMode.TENSORFLOW apps)", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasTFRecordDir, self).__init__()
def setTFRecordDir(self, value):
return self._set(tfrecord_dir=value)
def getTFRecordDir(self):
return self.getOrDefault(self.tfrecord_dir)
##### SavedModelBuilder Params
class HasExportDir(Params):
export_dir = Param(Params._dummy(), "export_dir", "Directory to export saved_model", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasExportDir, self).__init__()
def setExportDir(self, value):
return self._set(export_dir=value)
def getExportDir(self):
return self.getOrDefault(self.export_dir)
class HasSignatureDefKey(Params):
signature_def_key = Param(Params._dummy(), "signature_def_key", "Identifier for a specific saved_model signature", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasSignatureDefKey, self).__init__()
self._setDefault(signature_def_key=None)
def setSignatureDefKey(self, value):
return self._set(signature_def_key=value)
def getSignatureDefKey(self):
return self.getOrDefault(self.signature_def_key)
class HasTagSet(Params):
tag_set = Param(Params._dummy(), "tag_set", "Comma-delimited list of tags identifying a saved_model metagraph", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasTagSet, self).__init__()
def setTagSet(self, value):
return self._set(tag_set=value)
def getTagSet(self):
return self.getOrDefault(self.tag_set)
class Namespace(object):
"""
Utility class to convert dictionaries to Namespace-like objects.
Based on https://docs.python.org/dev/library/types.html#types.SimpleNamespace
"""
argv = None
def __init__(self, d):
if isinstance(d, list):
self.argv = d
elif isinstance(d, dict):
self.__dict__.update(d)
elif isinstance(d, argparse.Namespace):
self.__dict__.update(vars(d))
elif isinstance(d, Namespace):
self.__dict__.update(d.__dict__)
else:
raise Exception("Unsupported Namespace args: {}".format(d))
def __iter__(self):
if self.argv:
for item in self.argv:
yield item
else:
for key in self.__dict__.keys():
yield key
def __repr__(self):
if self.argv:
return "{}".format(self.argv)
else:
keys = sorted(self.__dict__)
items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys)
return "{}({})".format(type(self).__name__, ", ".join(items))
def __eq__(self, other):
if self.argv:
return self.argv == other
else:
return self.__dict__ == other.__dict__
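# Example (sketch): Namespace({'batch_size': 100}).batch_size == 100, while
# Namespace(['--epochs', '1']) keeps the raw argv list and iterates over it.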
class TFParams(Params):
"""Mix-in class to store namespace-style args and merge w/ SparkML-style params."""
args = None
def merge_args_params(self):
local_args = copy.copy(self.args) # make a local copy of args
args_dict = vars(local_args) # get dictionary view
for p in self.params:
args_dict[p.name] = self.getOrDefault(p.name) # update with params
return local_args
class TFEstimator(Estimator, TFParams, HasInputMapping,
HasClusterSize, HasNumPS, HasInputMode, HasProtocol, HasTensorboard, HasModelDir, HasExportDir, HasTFRecordDir,
HasBatchSize, HasEpochs, HasReaders, HasSteps):
"""Spark ML Estimator which launches a TensorFlowOnSpark cluster for distributed training.
The columns of the DataFrame passed to the ``fit()`` method will be mapped to TensorFlow tensors according to the ``setInputMapping()`` method.
If an ``export_fn`` was provided to the constructor, it will be run on a single executor immediately after the distributed training has completed.
This allows users to export a TensorFlow saved_model with a different execution graph for inferencing, e.g. replacing an input graph of
TFReaders and QueueRunners with Placeholders.
For InputMode.TENSORFLOW, the input DataFrame will be exported as TFRecords to a temporary location specified by the ``tfrecord_dir``.
The TensorFlow application will then be expected to read directly from this location during training. However, if the input DataFrame was
produced by the ``dfutil.loadTFRecords()`` method, i.e. originated from TFRecords on disk, then the `tfrecord_dir` will be set to the
original source location of the TFRecords without an additional export step.
Args:
:train_fn: TensorFlow "main" function for training.
:tf_args: Arguments specific to the TensorFlow "main" function.
:export_fn: TensorFlow function for exporting a saved_model.
"""
train_fn = None
export_fn = None
def __init__(self, train_fn, tf_args, export_fn=None):
super(TFEstimator, self).__init__()
self.train_fn = train_fn
self.export_fn = export_fn
self.args = Namespace(tf_args)
self._setDefault(input_mapping={},
cluster_size=1,
num_ps=0,
driver_ps_nodes=False,
input_mode=TFCluster.InputMode.SPARK,
protocol='grpc',
tensorboard=False,
model_dir=None,
export_dir=None,
tfrecord_dir=None,
batch_size=100,
epochs=1,
readers=1,
steps=1000)
def _fit(self, dataset):
"""Trains a TensorFlow model and returns a TFModel instance with the same args/params pointing to a checkpoint or saved_model on disk.
Args:
:dataset: A Spark DataFrame with columns that will be mapped to TensorFlow tensors.
Returns:
A TFModel representing the trained model, backed on disk by a TensorFlow checkpoint or saved_model.
"""
sc = SparkContext.getOrCreate()
logging.info("===== 1. train args: {0}".format(self.args))
logging.info("===== 2. train params: {0}".format(self._paramMap))
local_args = self.merge_args_params()
logging.info("===== 3. train args + params: {0}".format(local_args))
if local_args.input_mode == TFCluster.InputMode.TENSORFLOW:
if dfutil.isLoadedDF(dataset):
# if just a DataFrame loaded from tfrecords, just point to original source path
logging.info("Loaded DataFrame of TFRecord.")
local_args.tfrecord_dir = dfutil.loadedDF[dataset]
else:
# otherwise, save as tfrecords and point to save path
assert local_args.tfrecord_dir, "Please specify --tfrecord_dir to export DataFrame to TFRecord."
if self.getInputMapping():
# if input mapping provided, filter only required columns before exporting
dataset = dataset.select(self.getInputMapping().keys())
logging.info("Exporting DataFrame {} as TFRecord to: {}".format(dataset.dtypes, local_args.tfrecord_dir))
dfutil.saveAsTFRecords(dataset, local_args.tfrecord_dir)
logging.info("Done saving")
tf_args = self.args.argv if self.args.argv else local_args
cluster = TFCluster.run(sc, self.train_fn, tf_args, local_args.cluster_size, local_args.num_ps,
local_args.tensorboard, local_args.input_mode, driver_ps_nodes=local_args.driver_ps_nodes)
if local_args.input_mode == TFCluster.InputMode.SPARK:
# feed data, using a deterministic order for input columns (lexicographic by key)
input_cols = sorted(self.getInputMapping().keys())
cluster.train(dataset.select(input_cols).rdd, local_args.epochs)
cluster.shutdown()
# Run export function, if provided
if self.export_fn:
assert local_args.export_dir, "Export function requires --export_dir to be set"
logging.info("Exporting saved_model (via export_fn) to: {}".format(local_args.export_dir))
def _export(iterator, fn, args):
single_node_env(args)
fn(args)
# Run on a single executor
sc.parallelize([1], 1).foreachPartition(lambda it: _export(it, self.export_fn, tf_args))
return self._copyValues(TFModel(self.args))
class TFModel(Model, TFParams,
HasInputMapping, HasOutputMapping,
HasBatchSize,
HasModelDir, HasExportDir, HasSignatureDefKey, HasTagSet):
"""Spark ML Model backed by a TensorFlow model checkpoint/saved_model on disk.
During ``transform()``, each executor will run an independent, single-node instance of TensorFlow in parallel, so the model must fit in memory.
The model/session will be loaded/initialized just once for each Spark Python worker, and the session will be cached for
subsequent tasks/partitions to avoid re-loading the model for each partition.
Args:
:tf_args: Dictionary of arguments specific to TensorFlow "main" function.
"""
def __init__(self, tf_args):
super(TFModel, self).__init__()
self.args = Namespace(tf_args)
self._setDefault(input_mapping={},
output_mapping={},
batch_size=100,
model_dir=None,
export_dir=None,
signature_def_key=None,
tag_set=None)
def _transform(self, dataset):
"""Transforms the input DataFrame by applying the _run_model() mapPartitions function.
Args:
:dataset: A Spark DataFrame for TensorFlow inferencing.
"""
spark = SparkSession.builder.getOrCreate()
# set a deterministic order for input/output columns (lexicographic by key)
input_cols = [ col for col, tensor in sorted(self.getInputMapping().items()) ] # input col => input tensor
output_cols = [ col for tensor, col in sorted(self.getOutputMapping().items()) ] # output tensor => output col
# run single-node inferencing on each executor
logging.info("input_cols: {}".format(input_cols))
logging.info("output_cols: {}".format(output_cols))
# merge args + params
logging.info("===== 1. inference args: {0}".format(self.args))
logging.info("===== 2. inference params: {0}".format(self._paramMap))
local_args = self.merge_args_params()
logging.info("===== 3. inference args + params: {0}".format(local_args))
tf_args = self.args.argv if self.args.argv else local_args
rdd_out = dataset.select(input_cols).rdd.mapPartitions(lambda it: _run_model(it, local_args, tf_args))
# convert to a DataFrame-friendly format
rows_out = rdd_out.map(lambda x: Row(*x))
return spark.createDataFrame(rows_out, output_cols)
# global to each python worker process on the executors
global_sess = None # tf.Session cache
global_args = None # args provided to the _run_model() method. Any change will invalidate the global_sess cache.
def _run_model(iterator, args, tf_args):
"""mapPartitions function to run single-node inferencing from a checkpoint/saved_model, using the model's input/output mappings.
Args:
:iterator: input RDD partition iterator.
:args: arguments for TFModel, in argparse format
:tf_args: arguments for TensorFlow inferencing code, in argparse or ARGV format.
Returns:
An iterator of result data.
"""
single_node_env(tf_args)
logging.info("===== input_mapping: {}".format(args.input_mapping))
logging.info("===== output_mapping: {}".format(args.output_mapping))
input_tensor_names = [ tensor for col,tensor in sorted(args.input_mapping.items()) ]
output_tensor_names = [ tensor for tensor,col in sorted(args.output_mapping.items()) ]
# if using a signature_def_key, get input/output tensor info from the requested signature
if args.signature_def_key:
assert args.export_dir, "Inferencing with signature_def_key requires --export_dir argument"
logging.info("===== loading meta_graph_def for tag_set ({0}) from saved_model: {1}".format(args.tag_set, args.export_dir))
meta_graph_def = get_meta_graph_def(args.export_dir, args.tag_set)
signature = signature_def_utils.get_signature_def_by_key(meta_graph_def, args.signature_def_key)
logging.debug("signature: {}".format(signature))
inputs_tensor_info = signature.inputs
logging.debug("inputs_tensor_info: {0}".format(inputs_tensor_info))
outputs_tensor_info = signature.outputs
logging.debug("outputs_tensor_info: {0}".format(outputs_tensor_info))
result = []
global global_sess, global_args
if global_sess and global_args == args:
# if graph/session already loaded/started (and using same args), just reuse it
sess = global_sess
else:
# otherwise, create new session and load graph from disk
tf.reset_default_graph()
sess = tf.Session(graph=tf.get_default_graph())
if args.export_dir:
assert args.tag_set, "Inferencing from a saved_model requires --tag_set"
# load graph from a saved_model
logging.info("===== restoring from saved_model: {}".format(args.export_dir))
loader.load(sess, args.tag_set.split(','), args.export_dir)
elif args.model_dir:
# load graph from a checkpoint
ckpt = tf.train.latest_checkpoint(args.model_dir)
assert ckpt, "Invalid model checkpoint path: {}".format(args.model_dir)
logging.info("===== restoring from checkpoint: {}".format(ckpt + ".meta"))
saver = tf.train.import_meta_graph(ckpt + ".meta", clear_devices=True)
saver.restore(sess, ckpt)
else:
raise Exception("Inferencing requires either --model_dir or --export_dir argument")
global_sess = sess
global_args = args
# get list of input/output tensors (by name)
if args.signature_def_key:
input_tensors = [inputs_tensor_info[t].name for t in input_tensor_names]
output_tensors = [outputs_tensor_info[output_tensor_names[0]].name]
else:
input_tensors = [t + ':0' for t in input_tensor_names]
output_tensors = [t + ':0' for t in output_tensor_names]
logging.info("input_tensors: {0}".format(input_tensors))
logging.info("output_tensors: {0}".format(output_tensors))
# feed data in batches and return output tensors
for tensors in yield_batch(iterator, args.batch_size, len(input_tensor_names)):
inputs_feed_dict = {}
for i in range(len(input_tensors)):
inputs_feed_dict[input_tensors[i]] = tensors[i]
outputs = sess.run(output_tensors, feed_dict=inputs_feed_dict)
lengths = [ len(output) for output in outputs ]
input_size = len(tensors[0])
assert all([ l == input_size for l in lengths ]), "Output array sizes {} must match input size: {}".format(lengths, input_size)
python_outputs = [ output.tolist() for output in outputs ] # convert from numpy to standard python types
result.extend(zip(*python_outputs)) # convert to an array of tuples of "output columns"
return result
def single_node_env(args):
"""Sets up environment for a single-node TF session.
Args:
:args: command line arguments as either argparse args or argv list
"""
if isinstance(args, list):
sys.argv = args
elif args.argv:
sys.argv = args.argv
# ensure expanded CLASSPATH w/o glob characters (required for Spark 2.1 + JNI)
if 'HADOOP_PREFIX' in os.environ and 'TFOS_CLASSPATH_UPDATED' not in os.environ:
classpath = os.environ['CLASSPATH']
hadoop_path = os.path.join(os.environ['HADOOP_PREFIX'], 'bin', 'hadoop')
hadoop_classpath = subprocess.check_output([hadoop_path, 'classpath', '--glob']).decode()
logging.debug("CLASSPATH: {0}".format(hadoop_classpath))
os.environ['CLASSPATH'] = classpath + os.pathsep + hadoop_classpath
os.environ['TFOS_CLASSPATH_UPDATED'] = '1'
# reserve GPU, if requested
if tf.test.is_built_with_cuda():
# GPU
num_gpus = args.num_gpus if 'num_gpus' in args else 1
gpus_to_use = gpu_info.get_gpus(num_gpus)
logging.info("Using gpu(s): {0}".format(gpus_to_use))
os.environ['CUDA_VISIBLE_DEVICES'] = gpus_to_use
# Note: if there is a GPU conflict (CUDA_ERROR_INVALID_DEVICE), the entire task will fail and retry.
else:
# CPU
logging.info("Using CPU")
os.environ['CUDA_VISIBLE_DEVICES'] = ''
def get_meta_graph_def(saved_model_dir, tag_set):
"""Utility function to read a meta_graph_def from disk.
From `saved_model_cli.py <https://github.com/tensorflow/tensorflow/blob/8e0e8d41a3a8f2d4a6100c2ea1dc9d6c6c4ad382/tensorflow/python/tools/saved_model_cli.py#L186>`_
Args:
:saved_model_dir: path to saved_model.
:tag_set: list of string tags identifying the TensorFlow graph within the saved_model.
Returns:
A TensorFlow meta_graph_def, or raises an Exception otherwise.
"""
saved_model = reader.read_saved_model(saved_model_dir)
set_of_tags = set(tag_set.split(','))
for meta_graph_def in saved_model.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set_of_tags:
return meta_graph_def
raise RuntimeError("MetaGraphDef associated with tag-set {0} could not be found in SavedModel".format(tag_set))
def yield_batch(iterable, batch_size, num_tensors=1):
"""Generator that yields batches of a DataFrame iterator.
Args:
:iterable: Spark partition iterator.
:batch_size: number of items to retrieve per invocation.
:num_tensors: number of tensors (columns) expected in each item.
Returns:
An array of ``num_tensors`` arrays, each of length `batch_size`
"""
tensors = [ [] for i in range(num_tensors) ]
for item in iterable:
if item is None:
break
for i in range(num_tensors):
tmp = str(item[i]) if type(item[i]) is bytearray else item[i]
tensors[i].append(tmp)
if len(tensors[0]) >= batch_size:
yield tensors
tensors = [ [] for i in range(num_tensors) ]
if len(tensors[0]) > 0:
yield tensors
|
py | 1a4f5ca4b86a5a24b38d7ff276e8294b9db0f571 | #!/usr/bin/python3
import time
import json
import sys
import random
from neopixel import *
from dynamic_pattern_list_builder import *
# LED strip configuration:
LED_COUNT = 24 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
#LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
pulse_on = Color(255, 255, 255)
pulse_off = Color(0, 0, 0)
heartbeat_pulse = 3
heartbeat_gap = 0.07 # gap between beats
# Dictionary containing object positions
patterns = {
'elevation' : [1, 2, 3],
'distance' : [10, 15, 20, 25],
'direction' : [[0, 45, 90, 135, 180, 225, 270, 315],[315, 270, 225, 180, 135, 90, 45, 0],[0, 45, 90, 135, 180, 225, 270, 315]],
'pin_out' : [[0,1,2,3,4,5,6,7],[8,9,10,11,12,13,14,15],[16,17,18,19,20,21,22,23]]
}
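# Example (sketch): elevation 2 and direction 90 degrees map to patterns['pin_out'][1][90 // 45],
# i.e. LED index 10 in the second elevation row.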
# global list declarations and class initialization
dynamicPattern = Dynamic_pattern_list_builder() # class initialization
pat = dynamicPattern.pattern_builder() # dynamic_pattern declaration
randNumList = [] # list of random numbers
visitedPattern = [] # list of visited patterns
dList = [] # list of keys
# create list of dictionary keys
for i in pat:
dList.append(i)
# json handler to read in dictionary of dynamic patterns
#f = open('dynamic_pattern_list.json', 'r')
#fin = json.load(f)
#f.close()
#for i in fin:
# print(fin['dynamic patterns'])
# creates the heartbeat pulse
# handles 10, 15, 20 feet heartbeat patterns
# if 25 feet, creates sonar pulse
def heart_beat(strip, elevation, distance, direction):
pix = patterns.get('pin_out')[elevation-1][direction // 45]  # integer division: direction is a multiple of 45 degrees
beat = 0
if (distance == 10):
beat = 0.300
elif (distance == 15):
beat = 0.650
elif (distance == 20):
beat = 1.000
elif (distance == 25):
beat = 1.00
heart_gap = 0.5
# sonar pulse for 25 feet
for i in range(heartbeat_pulse):
strip.setPixelColor(pix,pulse_on)
strip.show()
time.sleep(heart_gap)
strip.setPixelColor(pix,pulse_off)
strip.show()
time.sleep(beat)
# Heartbeat pattern for 10 through 20 feet
for x in range(heartbeat_pulse):
strip.setPixelColor(pix,pulse_on)
strip.show()
time.sleep(heartbeat_gap)
strip.setPixelColor(pix,pulse_off)
strip.show()
time.sleep(heartbeat_gap)
strip.setPixelColor(pix,pulse_on)
strip.show()
time.sleep(heartbeat_gap)
strip.setPixelColor(pix,pulse_off)
strip.show()
time.sleep(beat)
# handler for dynamic patterns
# calls dynamic_pattern_list_builder.py
# randomly selects a dynamic pattern and calls all the beats to simulate that pattern
def dynamic_pattern_handler(strip):
while (len(randNumList) < 23):
rNum = random.randint(0, 22)
if rNum not in randNumList:
randNumList.append(rNum)
dBeat = dList[rNum]
visitedPattern.append(dBeat)
for dPat in visitedPattern:
print(dPat)
for beat in pat.get(dPat):
elevation = beat[0]
distance = beat[1]
direction = beat[2]
print ('elevation: ' + str(elevation) + ' ' + 'distance: ' + str(distance) + ' ' + 'direction: ' + str(direction))
heart_beat(strip, elevation, distance, direction)
if __name__ == '__main__':
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
# Initialize the library (must be called once before other functions).
strip.begin()
print ('Press Ctrl-C to quit.')
try:
dynamic_pattern_handler(strip)
except KeyboardInterrupt:
# colorWipe() is not defined in this module; blank the strip directly on exit
for i in range(strip.numPixels()):
strip.setPixelColor(i, Color(0, 0, 0))
strip.show()
print("Goodbye World")
|
py | 1a4f5d442aafc47133fed6002c94fbe731c31acc | #
# Copyright (c) 2021 Cisco Systems, Inc and its affiliates
# All rights reserved
#
from msxswagger import DocumentationConfig, Security, Sso
from config import Config
from helpers.consul_helper import ConsulHelper
class SwaggerHelper(object):
def __init__(self, config: Config, consul_helper: ConsulHelper):
self._config = config
self._consul_helper = consul_helper
def get_documentation_config(self):
sso_url = self._consul_helper.get_string(
key=f"{self._config.config_prefix}/defaultapplication/swagger.security.sso.baseUrl",
default=self._config.swagger.ssourl)
client_id = self._consul_helper.get_string(
key=f"{self._config.config_prefix}/helloworldservice/public.security.clientId",
default=self._config.swagger.clientid)
return DocumentationConfig(
root_path='/helloworld',
security=Security(
enabled=self._config.swagger.secure,
sso=Sso(base_url=sso_url, client_id=client_id)))
def get_swagger_resource(self):
return self._config.swagger.swaggerjsonpath
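# Typical wiring (sketch; caller-side names are assumed):
#   helper = SwaggerHelper(config, consul_helper)
#   doc_config = helper.get_documentation_config()   # DocumentationConfig for msxswagger
#   swagger_json = helper.get_swagger_resource()      # path to the swagger.json resource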
|
py | 1a4f5d592536783bffb870bef14f828e219e10bb | def main():
import sys
import signal
import argparse
import json
from edman import DB
from scripts.action import Action
# Handle Ctrl-C (SIGINT) so the script exits cleanly
signal.signal(signal.SIGINT, lambda sig, frame: sys.exit('\n'))
# Command-line argument handling
parser = argparse.ArgumentParser(description='Script to amend fields of a document.')
# parser.add_argument('-c', '--collection', help='collection name.')
parser.add_argument('objectid', help='objectid str.')
parser.add_argument('amend_file', type=open, help='JSON file.')
parser.add_argument('structure', help='Select ref or emb.')
parser.add_argument('-i', '--inifile', help='DB connect file path.')
# Show help and exit when no arguments are given
if len(sys.argv) == 1:
parser.parse_args(["-h"])
sys.exit(0)
args = parser.parse_args()
# The structure must be either ref or emb
if not (args.structure == 'ref' or args.structure == 'emb'):
parser.error("structure requires 'ref' or 'emb'.")
try:
# Read the ini (DB connection) file
con = Action.reading_config_file(args.inifile)
# Read the amendment JSON file
try:
amend_data = json.load(args.amend_file)
except json.JSONDecodeError:
sys.exit('File is not in JSON format.')
except IOError:
sys.exit('file read error.')
# Connect to the DB
db = DB(con)
# Automatically look up the collection that contains the target oid (avoid this lookup if it is too slow)
collection = db.find_collection_from_objectid(args.objectid)
# Run the update
if db.update(collection, args.objectid, amend_data, args.structure):
print('Update succeeded')
else:
print('Update failed')
except Exception as e:
tb = sys.exc_info()[2]
sys.stderr.write(f'{type(e).__name__}: {e.with_traceback(tb)}\n')
sys.exit(1)
if __name__ == "__main__":
main() |
py | 1a4f5d70a8577a0a9705df21ce4d77fd3aae1605 | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
The context of mindspore, used to configure the current execution environment,
includes the execution mode, execution backend and other feature switches.
"""
import json
import os
import time
import threading
from collections import namedtuple
from types import FunctionType
from mindspore import log as logger
from mindspore._c_expression import MSContext, ms_ctx_param
from mindspore._checkparam import args_type_check, Validator
from mindspore.parallel._auto_parallel_context import _set_auto_parallel_context, _get_auto_parallel_context, \
_reset_auto_parallel_context
from mindspore.parallel._ps_context import _set_ps_context, _get_ps_context, _reset_ps_context
from .default_config import __device_target__, __package_name__
__all__ = ['GRAPH_MODE', 'PYNATIVE_MODE', 'set_context', 'get_context', 'set_auto_parallel_context',
'get_auto_parallel_context', 'reset_auto_parallel_context', 'ParallelMode', 'set_ps_context',
'get_ps_context', 'reset_ps_context']
GRAPH_MODE = 0
PYNATIVE_MODE = 1
_DEVICE_APP_MEMORY_SIZE = 31 # The max memory size of graph plus variable.
_re_pattern = r'[1-9][0-9]*(\.)?[0-9]*GB|0\.[0-9]*GB'
_k_context = None
def _make_directory(path):
"""Make directory."""
real_path = None
if path is None or not isinstance(path, str) or path.strip() == "":
raise ValueError(f"Input path `{path}` is invalid type")
# convert the relative paths
path = os.path.realpath(path)
logger.debug("The absolute path is %r", path)
# check whether the path is already existed and has written permissions
if os.path.exists(path):
real_path = path
else:
# All exceptions need to be caught because create directory maybe have some limit(permissions)
logger.debug("The directory(%s) doesn't exist, will create it", path)
try:
os.makedirs(path)
real_path = path
except PermissionError as e:
logger.error(f"No write permission on the directory `{path}, error = {e}")
raise ValueError(f"No write permission on the directory `{path}`.")
return real_path
def _get_print_file_name(file_name):
"""Add timestamp suffix to file name. Rename the file name: file_name + "." + time(seconds)."""
time_second = str(int(time.time()))
file_name = file_name + "." + time_second
if os.path.exists(file_name):
ValueError("This file {} already exists.".format(file_name))
return file_name
class _ThreadLocalInfo(threading.local):
"""
Thread local Info used for store thread local attributes.
"""
def __init__(self):
super(_ThreadLocalInfo, self).__init__()
self._reserve_class_name_in_scope = True
@property
def reserve_class_name_in_scope(self):
"""Gets whether to save the network class name in the scope."""
return self._reserve_class_name_in_scope
@reserve_class_name_in_scope.setter
def reserve_class_name_in_scope(self, reserve_class_name_in_scope):
"""Sets whether to save the network class name in the scope."""
if not isinstance(reserve_class_name_in_scope, bool):
raise ValueError(
"Set reserve_class_name_in_scope value must be bool!")
self._reserve_class_name_in_scope = reserve_class_name_in_scope
_ContextRecord = namedtuple(
"_ContextRecord", ["is_pynative_mode", "switch_context_fn"])
class _ContextSwitchInfo(threading.local):
"""
Record of context switch information.
Args:
is_pynative (bool): Whether to adopt the PyNative mode.
"""
def __init__(self, is_pynative):
super(_ContextSwitchInfo, self).__init__()
self.context_stack = []
if is_pynative:
self.push(True, None)
def push(self, is_pynative, switch_context_fn):
"""
Push a context switch record onto the stack.
Args:
is_pynative (bool): Whether context switch to PyNative mode.
switch_context_fn (Function): A callable that executes the context switch.
"""
if isinstance(switch_context_fn, FunctionType):
switch_context_fn()
self.context_stack.append(
_ContextRecord(is_pynative, switch_context_fn))
def pop(self):
self.context_stack.pop()
class _Context:
"""
_Context is the environment in which operations are executed
Note:
        Creating a context by instantiating a Context object is not recommended.
        Use the _context() helper to get the context instead, since Context is a singleton.
"""
_instance = None
_instance_lock = threading.Lock()
def __init__(self):
self._thread_local_info = _ThreadLocalInfo()
self._context_switches = _ContextSwitchInfo(True)
self._context_handle = MSContext.get_instance()
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance_lock.acquire()
cls._instance = object.__new__(cls)
cls._instance_lock.release()
return cls._instance
def __getattribute__(self, attr):
value = object.__getattribute__(self, attr)
if attr == "_context_handle" and value is None:
raise ValueError("Context handle is none in context!!!")
return value
def get_param(self, param):
return self._context_handle.get_param(param)
def set_param(self, param, value):
self._context_handle.set_param(param, value)
def set_mode(self, mode):
"""
Switch between Graph mode and PyNative mode.
Args:
mode (int): GRAPH_MODE or PYNATIVE_MODE.
"""
if mode == PYNATIVE_MODE:
if self.enable_debug_runtime:
self.set_backend_policy("vm")
self._context_switches.push(True, None)
elif mode == GRAPH_MODE:
if self.enable_debug_runtime:
self.set_backend_policy("ge")
self._context_switches.push(False, None)
else:
raise ValueError(f'The execution mode {mode} is invalid!')
self.set_param(ms_ctx_param.mode, mode)
def set_backend_policy(self, policy):
success = self._context_handle.set_backend_policy(policy)
if not success:
raise RuntimeError("Backend policy must be one of ge, vm, ms.")
def set_save_graphs_path(self, save_graphs_path):
self.set_param(ms_ctx_param.save_graphs_path, _make_directory(save_graphs_path))
def set_device_target(self, target):
valid_targets = ["CPU", "GPU", "Ascend", "Davinci"]
        if target not in valid_targets:
raise ValueError(f"Target device name {target} is invalid! It must be one of {valid_targets}")
if target == "Davinci":
target = "Ascend"
self.set_param(ms_ctx_param.device_target, target)
if self.enable_debug_runtime and target == "CPU":
self.set_backend_policy("vm")
def set_auto_tune_mode(self, tune_mode):
candidate = ["NO_TUNE", "RL", "GA", "RL,GA", "GA,RL"]
if tune_mode in candidate:
self.set_param(ms_ctx_param.tune_mode, tune_mode)
else:
raise ValueError(f"Tune mode must be in ['NO_TUNE', 'RL', 'GA', 'RL,GA', 'GA,RL'], but got {tune_mode}")
def set_device_id(self, device_id):
if device_id < 0 or device_id > 4095:
raise ValueError(f"Device id must be in [0, 4095], but got {device_id}")
self.set_param(ms_ctx_param.device_id, device_id)
def set_max_call_depth(self, max_call_depth):
if max_call_depth <= 0:
raise ValueError(f"Max call depth must be greater than 0, but got {max_call_depth}")
self.set_param(ms_ctx_param.max_call_depth, max_call_depth)
def set_profiling_options(self, option):
if not isinstance(option, str):
raise TypeError("The parameter option must be str.")
self.set_param(ms_ctx_param.profiling_options, option)
def set_variable_memory_max_size(self, variable_memory_max_size):
"""set values of variable_memory_max_size and graph_memory_max_size"""
if not Validator.check_str_by_regular(variable_memory_max_size, _re_pattern):
raise ValueError("Context param variable_memory_max_size should be in correct format! Such as \"5GB\"")
if int(variable_memory_max_size[:-2]) > _DEVICE_APP_MEMORY_SIZE:
raise ValueError("Context param variable_memory_max_size should be not greater than 31GB.")
variable_memory_max_size_ = variable_memory_max_size[:-2] + " * 1024 * 1024 * 1024"
graph_memory_max_size = _DEVICE_APP_MEMORY_SIZE - int(variable_memory_max_size[:-2])
graph_memory_max_size_ = str(graph_memory_max_size) + " * 1024 * 1024 * 1024"
self.set_param(ms_ctx_param.variable_memory_max_size, variable_memory_max_size_)
# pylint: disable=protected-access
self.set_param(ms_ctx_param._graph_memory_max_size, graph_memory_max_size_)
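    # Worked example (illustrative only, not from the original source): with
    # variable_memory_max_size="5GB" the variable pool is set to 5 * 1024**3 bytes
    # and the graph pool gets the remainder of the 31GB device budget,
    # i.e. _DEVICE_APP_MEMORY_SIZE - 5 = 26GB.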
def set_max_device_memory(self, max_device_memory):
if not Validator.check_str_by_regular(max_device_memory, _re_pattern):
raise ValueError("Context param max_device_memory should be in correct format! Such as \"3.5GB\"")
max_device_memory_value = float(max_device_memory[:-2])
if max_device_memory_value == 0:
raise ValueError("Context param max_device_memory should be in correct format! Such as \"3.5GB\"")
self.set_param(ms_ctx_param.max_device_memory, max_device_memory_value)
def set_print_file_path(self, file_path):
"""Add timestamp suffix to file name. Sets print file path."""
print_file_path = os.path.realpath(file_path)
if os.path.isdir(print_file_path):
raise IOError("Print_file_path should be file path, but got {}.".format(file_path))
if os.path.exists(print_file_path):
_path, _file_name = os.path.split(print_file_path)
path = _make_directory(_path)
file_name = _get_print_file_name(_file_name)
full_file_name = os.path.join(path, file_name)
else:
full_file_name = print_file_path
self.set_param(ms_ctx_param.print_file_path, full_file_name)
def set_env_config_path(self, env_config_path):
"""Check and set env_config_path."""
if not self._context_handle.enable_dump_ir():
raise ValueError("The 'env_config_path' is not supported, please enable ENABLE_DUMP_IR "
"with '-D on' and recompile source.")
env_config_path = os.path.realpath(env_config_path)
if not os.path.isfile(env_config_path):
raise ValueError("The %r set by 'env_config_path' should be an existing json file." % env_config_path)
try:
with open(env_config_path, 'r') as f:
json.load(f)
except (TypeError, ValueError) as exo:
raise ValueError("The %r set by 'env_config_path' should be a json file. "
"Detail: %s." % (env_config_path, str(exo)))
self.set_param(ms_ctx_param.env_config_path, env_config_path)
setters = {
'mode': set_mode,
'save_graphs_path': set_save_graphs_path,
'device_target': set_device_target,
'device_id': set_device_id,
'auto_tune_mode': set_auto_tune_mode,
'max_call_depth': set_max_call_depth,
'profiling_options': set_profiling_options,
'variable_memory_max_size': set_variable_memory_max_size,
'max_device_memory': set_max_device_memory,
'print_file_path': set_print_file_path,
'env_config_path': set_env_config_path
}
@property
def reserve_class_name_in_scope(self):
"""Gets whether to save the network class name in the scope."""
return self._thread_local_info.reserve_class_name_in_scope
@reserve_class_name_in_scope.setter
def reserve_class_name_in_scope(self, reserve_class_name_in_scope):
"""Sets whether to save the network class name in the scope."""
self._thread_local_info.reserve_class_name_in_scope = reserve_class_name_in_scope
@property
def enable_ge(self):
return self._context_handle.get_backend_policy() == 'ge'
@property
def enable_debug_runtime(self):
return self._thread_local_info.debug_runtime
@enable_debug_runtime.setter
def enable_debug_runtime(self, enable):
thread_info = self._thread_local_info
thread_info.debug_runtime = enable
def _context():
"""
Get the global _context, if context is not created, create a new one.
Returns:
_Context, the global context in PyNative mode.
"""
global _k_context
if _k_context is None:
default_backend = 'debug'
try:
from mindspore import default_config
default_backend = default_config.__backend__
except ImportError:
logger.error("import default config fail")
_k_context = _Context()
_k_context.enable_debug_runtime = False
if default_backend == 'debug':
_k_context.enable_debug_runtime = True
default_backend = 'vm'
_k_context.set_backend_policy(default_backend)
return _k_context
@args_type_check(device_num=int, global_rank=int, gradients_mean=bool, gradient_fp32_sync=bool, parallel_mode=str,
auto_parallel_search_mode=str, parameter_broadcast=bool, strategy_ckpt_load_file=str,
strategy_ckpt_save_file=str, full_batch=bool, enable_parallel_optimizer=bool,
all_reduce_fusion_config=list, pipeline_stages=int)
def set_auto_parallel_context(**kwargs):
r"""
Set auto parallel context, which is valid only for Ascend and GPU target.
Auto parallel context should be configured before the initialization of your network.
Note:
Attribute name is required for setting attributes.
If a program has tasks with different parallel modes, then before setting new parallel mode for the
next task, interface mindspore.context.reset_auto_parallel_context() needs to be called to reset
the configuration.
Setting or changing parallel modes must be called before any creating Initializer, otherwise,
RuntimeError may be raised when compiling the network.
Some configurations are parallel mode specific, see the below table for details:
=========================== ===========================
Common AUTO_PARALLEL
=========================== ===========================
device_num gradient_fp32_sync
global_rank loss_repeated_mean
gradients_mean auto_parallel_search_mode
parallel_mode strategy_ckpt_load_file
all_reduce_fusion_config strategy_ckpt_save_file
enable_parallel_optimizer full_batch
\ pipeline_stages
=========================== ===========================
Args:
device_num (int): Available device number, the value must be in [1, 4096]. Default: 1.
global_rank (int): Global rank id, the value must be in [0, 4095]. Default: 0.
gradients_mean (bool): Whether to perform mean operator after allreduce of gradients.
"stand_alone" do not support gradients_mean. Default: False.
gradient_fp32_sync (bool): Run allreduce of gradients in fp32.
"stand_alone", "data_parallel" and "hybrid_parallel" do not support
gradient_fp32_sync. Default: True.
parallel_mode (str): There are five kinds of parallel modes, "stand_alone", "data_parallel",
"hybrid_parallel", "semi_auto_parallel" and "auto_parallel". Default: "stand_alone".
- stand_alone: Only one processor is working.
- data_parallel: Distributes the data across different processors.
- hybrid_parallel: Achieves data parallelism and model parallelism manually.
- semi_auto_parallel: Achieves data parallelism and model parallelism by
setting parallel strategies.
            - auto_parallel: Achieves parallelism automatically.
auto_parallel_search_mode (str): There are two kinds of shard strategy search modes, "recursive_programming"
and "dynamic_programming". Default: "dynamic_programming".
- recursive_programming: Recursive programming search mode.
- dynamic_programming: Dynamic programming search mode.
parameter_broadcast (bool): Whether to broadcast parameters before training. Before training, in order to have
the same network initialization parameter values for all devices, broadcast the parameters
            on device 0 to other devices. Parameter broadcasting behaves differently in different parallel
            modes: in data_parallel mode, all parameters are broadcast except the ones whose attribute
            layerwise_parallel is True; in hybrid_parallel, semi_auto_parallel and auto_parallel modes, the
            segmented parameters do not participate in broadcasting. Default: False.
strategy_ckpt_load_file (str): The path to load parallel strategy checkpoint. Default: ''
strategy_ckpt_save_file (str): The path to save parallel strategy checkpoint. Default: ''
full_batch (bool): If you load whole batch datasets in auto_parallel mode, this parameter
should be set with True. Default: False.
enable_parallel_optimizer (bool): This is a developing feature, which shards the weight update computation for
            data parallel training, saving time and memory. Currently, auto and semi auto
            parallel modes support all optimizers on both Ascend and GPU. Data parallel mode only supports
            `Lamb` and `AdamWeightDecay` on Ascend. Default: False.
all_reduce_fusion_config (list): Set allreduce fusion strategy by parameters indices. Only support ReduceOp.SUM
and HCCL_WORLD_GROUP/NCCL_WORLD_GROUP. No Default, if it is not set, the fusion is closed.
pipeline_stages (int): Set the stage information for pipeline parallel. This indicates how
            the devices are distributed along the pipeline. The total devices will be divided into
            'pipeline_stages' stages. This currently can only be used when
parallel mode semi_auto_parallel is enabled. Default: 1.
Raises:
ValueError: If input key is not attribute in auto parallel context.
Examples:
>>> context.set_auto_parallel_context(device_num=8)
>>> context.set_auto_parallel_context(global_rank=0)
>>> context.set_auto_parallel_context(gradients_mean=True)
>>> context.set_auto_parallel_context(gradient_fp32_sync=False)
>>> context.set_auto_parallel_context(parallel_mode="auto_parallel")
>>> context.set_auto_parallel_context(auto_parallel_search_mode="dynamic_programming")
>>> context.set_auto_parallel_context(parameter_broadcast=False)
>>> context.set_auto_parallel_context(strategy_ckpt_load_file="./strategy_stage1.ckpt")
>>> context.set_auto_parallel_context(strategy_ckpt_save_file="./strategy_stage1.ckpt")
>>> context.set_auto_parallel_context(full_batch=True)
>>> context.set_auto_parallel_context(enable_parallel_optimizer=False)
>>> context.set_auto_parallel_context(all_reduce_fusion_config=[8, 160])
>>> context.set_auto_parallel_context(pipeline_stages=2)
"""
_set_auto_parallel_context(**kwargs)
def get_auto_parallel_context(attr_key):
"""
Gets auto parallel context attribute value according to the key.
Args:
attr_key (str): The key of the attribute.
Returns:
Returns attribute value according to the key.
Raises:
ValueError: If input key is not attribute in auto parallel context.
"""
return _get_auto_parallel_context(attr_key)
def reset_auto_parallel_context():
"""
Reset auto parallel context attributes to the default values:
- device_num: 1.
- global_rank: 0.
- gradients_mean: False.
- gradient_fp32_sync: True.
- parallel_mode: 'stand_alone'.
- auto_parallel_search_mode: 'dynamic_programming'.
- parameter_broadcast: False.
- strategy_ckpt_load_file: ''.
- strategy_ckpt_save_file: ''.
- full_batch: False.
- enable_parallel_optimizer: False.
- pipeline_stages: 1.
"""
_reset_auto_parallel_context()
def _check_target_specific_cfgs(device, arg_key):
"""Checking whether a config is suitable for a specified device"""
device_cfgs = {
'enable_auto_mixed_precision': ['Ascend'],
'enable_dump': ['Ascend'],
'save_dump_path': ['Ascend'],
'enable_graph_kernel': ['Ascend', 'GPU'],
'enable_reduce_precision': ['Ascend'],
'enable_profiling': ['Ascend'],
'profiling_options': ['Ascend'],
'print_file_path': ['Ascend'],
'variable_memory_max_size': ['Ascend'],
'auto_tune_mode': ['Ascend'],
'max_device_memory': ['GPU']
}
# configs not in map device_cfgs are supposed to be suitable for all devices
    if arg_key not in device_cfgs:
return True
supported_devices = device_cfgs[arg_key]
if device in supported_devices:
return True
logger.warning(f"Config '{arg_key}' only supports devices in {supported_devices}, current device is '{device}'"
", ignore it.")
return False
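# Hedged illustration (not part of the original source): on a GPU target,
# _check_target_specific_cfgs("GPU", "enable_profiling") returns False and logs a
# warning because 'enable_profiling' is listed as Ascend-only, while a key that is
# absent from device_cfgs (e.g. "check_bprop") is accepted for every device.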
@args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=bool,
save_graphs_path=str, enable_dump=bool, auto_tune_mode=str,
save_dump_path=str, enable_reduce_precision=bool, variable_memory_max_size=str,
enable_profiling=bool, profiling_options=str, enable_auto_mixed_precision=bool,
enable_graph_kernel=bool, check_bprop=bool, max_device_memory=str, print_file_path=str,
enable_sparse=bool, max_call_depth=int, env_config_path=str)
def set_context(**kwargs):
"""
Sets context for running environment.
Context should be configured before running your program. If there is no configuration,
the "Ascend" device target will be used by default. GRAPH_MODE or
PYNATIVE_MODE can be set by `mode` attribute and both modes support all backends, default
mode is PYNATIVE_MODE.
When the `save_graphs` attribute is set to True, attribute of `save_graphs_path` is used to set the
intermediate compilation graph storage path. By default, the graphs are saved in the current directory.
For other configurations and arguments, please refer to the corresponding module
description, the configuration is optional and can be enabled when needed.
Note:
Attribute name is required for setting attributes.
        Changing the mode after the network has been initialized is not recommended because the
        implementations of some operations differ between graph mode and pynative mode. Default: PYNATIVE_MODE.
Some configurations are device specific, see the below table for details:
=========================== =========================== =================
Common(CPU/GPU/Ascend) Ascend GPU
=========================== =========================== =================
check_bprop print_file_path max_device_memory
device_id enable_dump enable_graph_kernel
device_target save_dump_path
enable_sparse enable_graph_kernel
max_call_depth enable_reduce_precision
mode enable_profiling
reserve_class_name_in_scope profiling_options
save_graphs variable_memory_max_size
save_graphs_path auto_tune_mode
env_config_path
grad_for_scalar
=========================== =========================== =================
Args:
mode (int): Running in GRAPH_MODE(0) or PYNATIVE_MODE(1). Default: PYNATIVE_MODE(1).
device_target (str): The target device to run, support "Ascend", "GPU", and "CPU". Default: "Ascend".
device_id (int): ID of the target device, the value must be in [0, device_num_per_host-1],
while device_num_per_host should be no more than 4096. Default: 0.
save_graphs (bool): Whether to save graphs. Default: False.
save_graphs_path (str): Path to save graphs. Default: ".".
If the program is executed in the parallel mode, `save_graphs_path` should consist of the path and the
current device id, to ensure that writing file conflicts won't happen when the different processes try to
create the files in the same directory. For example, the `device_id` can be generated by
`device_id = os.getenv("DEVICE_ID")` and the `save_graphs_path` can be set by
`context.set_context(save_graphs_path="path/to/ir/files"+device_id)`.
enable_graph_kernel (bool): Whether to enable composition of basic primitives. These primitives would be
compiled into a fused kernel automatically. Default: False.
reserve_class_name_in_scope (bool) : Whether to save the network class name in the scope. Default: True.
enable_reduce_precision (bool): Whether to enable precision reduction. Default: True.
enable_dump (bool): Whether to enable dump. Default: False.
save_dump_path (str): When the program is executed on Ascend, operators can dump data in this path.
The root dump path is configured in /home/HwHiAiUser/ide_daemon/ide_daemon.cfg.
So the real dump path is "{configured root dump path}/{`save_dump_path`}". Default: ".".
        variable_memory_max_size (str): Set the maximum size of the variable memory. Default: "0GB".
enable_profiling (bool): Whether to open profiling. Default: False.
profiling_options (str): Set profiling collection options, operators can profiling data here.
The values of profiling collection options are as follows, supporting the collection of multiple data.
            - output: the path for saving the profiling collection result file. The directory specified by this
            parameter needs to be created in advance on the training environment (container or host side), and the
            running user configured during installation must have read and write permissions. It supports the
            configuration of absolute or relative paths (relative to the current path when executing the command line).
            An absolute path configuration starts with '/', for example: /home/data/output.
            A relative path configuration starts directly with the directory name, for example: output.
- training_trace: collect iterative trajectory data, that is, the training task and software information of
the AI software stack, to achieve performance analysis of the training task, focusing on data
enhancement, forward and backward calculation, gradient aggregation update and other related data.
The value is on/off.
- task_trace: collect task trajectory data, that is, the hardware information of the HWTS/AICore of
the Ascend 910 processor, and analyze the information of beginning and ending of the task.
The value is on/off.
- aicpu: collect profiling data enhanced by aicpu data. The value is on/off.
- fp_point: specify the start position of the forward operator of the training network iteration trajectory,
            which is used to record the start timestamp of the forward calculation. The configuration value is the name
            of the first operator specified in the forward direction. When the value is empty, the system will
automatically obtain the forward operator name.
- bp_point: specify the end position of the iteration trajectory reversal operator of the training network,
record the end timestamp of the backward calculation. The configuration value is the name of the operator
            after the specified reverse. When the value is empty, the system will automatically obtain the backward
operator name.
- aic_metrics: the values are as follows:
ArithmeticUtilization: percentage statistics of various calculation indicators.
PipeUtilization: the time-consuming ratio of calculation unit and handling unit,this item is
the default value.
Memory: percentage of external memory read and write instructions.
MemoryL0: percentage of internal memory read and write instructions.
                ResourceConflictRatio: proportion of pipeline queue instructions.
            The profiling_options value is a JSON-like string such as '{"output":"/home/data/output","training_trace":"on"}'.
check_bprop (bool): Whether to check bprop. Default: False.
max_device_memory (str): Sets the maximum memory available for devices.
Currently, it is only supported on GPU. The format is "xxGB". Default: "1024GB".
print_file_path (str): The path of saving print data. If this parameter is set, print data is saved to
a file by default, and turns off printing to the screen. If the file already exists, add a timestamp
suffix to the file. Default: ''.
enable_sparse (bool): Whether to enable sparsity feature. Default: False.
max_call_depth (int): Specify the maximum depth of function call. Default: 1000.
env_config_path (str): Config path for DFX.
auto_tune_mode (str): The mode of auto tune when op building, get the best tiling performance,
default: NO_TUNE. The value must be in ['RL', 'GA', 'RL,GA'].
RL: rl_tune;
GA: ga_tune;
RL,GA: rl_tune/ga_tune(Automatic selection).
            - rl_tune: Reinforcement Learning tune.
- ga_tune: Genetic Algorithm tune.
grad_for_scalar (bool): Whether to get gradient for scalar. Default: False.
Raises:
ValueError: If input key is not an attribute in context.
Examples:
>>> context.set_context(mode=context.GRAPH_MODE)
>>> context.set_context(mode=context.PYNATIVE_MODE)
>>> context.set_context(device_target="Ascend")
>>> context.set_context(device_id=0)
>>> context.set_context(save_graphs=True, save_graphs_path="./model.ms")
>>> context.set_context(enable_reduce_precision=True)
>>> context.set_context(enable_dump=True, save_dump_path=".")
>>> context.set_context(reserve_class_name_in_scope=True)
>>> context.set_context(variable_memory_max_size="6GB")
>>> context.set_context(mode=context.GRAPH_MODE,
... device_target="Ascend",device_id=0, save_graphs=True,
... save_graphs_path="/mindspore")
>>> context.set_context(enable_profiling=True,
... profiling_options='{"output":"/home/data/output","training_trace":"on"}')
>>> context.set_context(max_device_memory="3.5GB")
>>> context.set_context(print_file_path="print.pb")
>>> context.set_context(max_call_depth=80)
>>> context.set_context(env_config_path="./env_config.json")
"""
ctx = _context()
# set device target first
if 'device_target' in kwargs:
ctx.set_device_target(kwargs['device_target'])
device = ctx.get_param(ms_ctx_param.device_target)
if not device.lower() in __device_target__:
raise ValueError(f"Error, package type {__package_name__} support device type {__device_target__}, "
f"but got device target {device}")
device = ctx.get_param(ms_ctx_param.device_target)
for key, value in kwargs.items():
if not _check_target_specific_cfgs(device, key):
continue
if hasattr(ctx, key):
setattr(ctx, key, value)
continue
if key in ctx.setters:
ctx.setters[key](ctx, value)
continue
# enum variables beginning with '_' are for internal use
if key in ms_ctx_param.__members__ and key[0] != '_':
ctx.set_param(ms_ctx_param.__members__[key], value)
continue
raise ValueError("Set context keyword %s is not recognized!" % key)
def get_context(attr_key):
"""
Gets context attribute value according to the input key.
Args:
attr_key (str): The key of the attribute.
Returns:
Object, The value of given attribute key.
Raises:
ValueError: If input key is not an attribute in context.
"""
ctx = _context()
device = ctx.get_param(ms_ctx_param.device_target)
_ = _check_target_specific_cfgs(device, attr_key)
if hasattr(ctx, attr_key):
return getattr(ctx, attr_key)
# enum variables beginning with '_' are for internal use
if attr_key in ms_ctx_param.__members__ and attr_key[0] != '_':
return ctx.get_param(ms_ctx_param.__members__[attr_key])
raise ValueError("Get context keyword %s is not recognized!" % attr_key)
class ParallelMode:
"""
Parallel mode options.
There are five kinds of parallel modes, "STAND_ALONE", "DATA_PARALLEL",
"HYBRID_PARALLEL", "SEMI_AUTO_PARALLEL" and "AUTO_PARALLEL". Default: "STAND_ALONE".
- STAND_ALONE: Only one processor is working.
- DATA_PARALLEL: Distributes the data across different processors.
- HYBRID_PARALLEL: Achieves data parallelism and model parallelism manually.
- SEMI_AUTO_PARALLEL: Achieves data parallelism and model parallelism by setting parallel strategies.
- AUTO_PARALLEL: Achieves parallelism automatically.
MODE_LIST: The list of all supported parallel modes.
"""
STAND_ALONE = "stand_alone"
DATA_PARALLEL = "data_parallel"
HYBRID_PARALLEL = "hybrid_parallel"
SEMI_AUTO_PARALLEL = "semi_auto_parallel"
AUTO_PARALLEL = "auto_parallel"
MODE_LIST = [STAND_ALONE, DATA_PARALLEL, HYBRID_PARALLEL, SEMI_AUTO_PARALLEL, AUTO_PARALLEL]
@args_type_check(enable_ps=bool)
def set_ps_context(**kwargs):
"""
Set parameter server training mode context.
Note:
Some other environment variables should also be set for parameter server training mode.
These environment variables are listed below:
MS_SERVER_NUM # Server number
MS_WORKER_NUM # Worker number
MS_SCHED_HOST # Scheduler IP address
MS_SCHED_PORT # Scheduler port
MS_ROLE # The role of this process:
MS_SCHED #represents the scheduler,
MS_WORKER #represents the worker,
MS_PSERVER #represents the Server
Args:
enable_ps (bool): Whether to enable parameter server training mode.
Only after enable_ps is set True, the environment variables will be effective.
Default: False.
Raises:
ValueError: If input key is not the attribute in parameter server training mode context.
Examples:
>>> context.set_ps_context(enable_ps=True)
"""
_set_ps_context(**kwargs)
def get_ps_context(attr_key):
"""
Get parameter server training mode context attribute value according to the key.
Args:
attr_key (str): The key of the attribute.
Returns:
Returns attribute value according to the key.
Raises:
ValueError: If input key is not attribute in auto parallel context.
"""
return _get_ps_context(attr_key)
def reset_ps_context():
"""
Reset parameter server training mode context attributes to the default values:
- enable_ps: False.
"""
_reset_ps_context()
|
py | 1a4f5f493a2c097db92b06ec5eb0ab3ade07811e | from dataclasses import dataclass
from space_game.domain_names import ObjectId
from space_game.events.Event import Event
@dataclass
class ObjectDeletedEvent(Event):
object_id: ObjectId
|
py | 1a4f5ffb999b7eb97786f15904bd815368af862d | import torchvision.transforms as transforms
import torch
from PIL import Image, ImageOps
import random
import utils.utils2.transforms as local_transforms
"""
As mentioned in http://pytorch.org/docs/master/torchvision/models.html
All pre-trained models expect input images normalized in the same way, i.e. mini-batches
of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least
224. The images have to be loaded in to a range of [0, 1] and then normalized using
mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].
NOTE: transforms.ToTensor() transforms the incoming data to range of [0, 1]. It also
converts [H x W x C] to [C x H x W], which is expected by PyTorch models.
"""
# For now we will use PyTorch model zoo models
pytorch_zoo_normaliser = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# inception_normaliser = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
class MyRandomCrop(object):
def __init__(self, size):
"""
This is a variant of torchvision's RandomCrop. This one pads image only if
the image is smaller than the intended size. Image will be padded to the
right and bottom.
:param size: tuple (width, height)
"""
self.size = size
def __call__(self, img):
width, height = img.size
target_width, target_height = self.size
pad_width = 0
pad_height = 0
do_padding = False
if width < target_width:
pad_width = target_width - width
do_padding = True
if height < target_height:
pad_height = target_height - height
do_padding = True
#
pad = (0, 0, pad_width, pad_height)
if do_padding:
img = ImageOps.expand(img, border=pad, fill=0)
width, height = img.size
if width == target_width and height == target_height:
return img
x1 = random.randint(0, width - target_width)
y1 = random.randint(0, height - target_height)
return img.crop((x1, y1, x1 + target_width, y1 + target_height))
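# Hedged usage sketch (not part of the original module): a small, unexecuted demo
# showing that MyRandomCrop pads images smaller than the target on the right/bottom
# before cropping; the 200x180 and 224x224 sizes are illustrative assumptions.
def _demo_my_random_crop():
    img = Image.new("RGB", (200, 180))        # smaller than the 224x224 target in both dimensions
    cropped = MyRandomCrop((224, 224))(img)   # padded up to the target size, then returned/cropped
    assert cropped.size == (224, 224)
    return cropped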
def get_transformer_crop(crop_img_size, # 224 or more expected by PyTorch model zoo
scale_img_size,
normaliser = pytorch_zoo_normaliser,
do_augment=False):
if do_augment:
        # This is an augmented transformation.
return transforms.Compose([transforms.Scale(scale_img_size),
MyRandomCrop((crop_img_size, crop_img_size)),
transforms.RandomHorizontalFlip(),
local_transforms.ColorJitter(0.4, 0.4, 0.4, 0),
# TODO - Add more transformations
transforms.ToTensor(),
normaliser])
else:
# This is a vanilla transformation
return transforms.Compose([transforms.Scale(scale_img_size),
MyRandomCrop((crop_img_size, crop_img_size)),
transforms.ToTensor(),
normaliser])
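# Hedged usage sketch (not part of the original module): builds an augmented training
# transformer and applies it to a dummy image. The 256/224 sizes are assumptions, and
# the sketch relies on the same (older) torchvision API the module already uses.
def _demo_transformer_crop():
    transformer = get_transformer_crop(crop_img_size=224, scale_img_size=256, do_augment=True)
    img = Image.new("RGB", (300, 400))
    tensor = transformer(img)   # FloatTensor of shape [3, 224, 224], normalised for model-zoo models
    return tensor.shape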
def get_transformer(img_size, # 224 or more expected by PyTorch model zoo
normaliser = pytorch_zoo_normaliser,
do_augment=False):
if do_augment:
        # This is an augmented transformation.
return transforms.Compose([transforms.Scale((img_size, img_size)),
transforms.RandomHorizontalFlip(),
local_transforms.ColorJitter(0.4, 0.4, 0.4, 0),
transforms.ToTensor(),
normaliser])
else:
# This is a vanilla transformation
return transforms.Compose([transforms.Scale((img_size, img_size)),
transforms.ToTensor(),
normaliser])
def get_test_valid_transformer_crop(crop_img_size,
scale_img_size,
normaliser=pytorch_zoo_normaliser):
"""Transformation for Validation and Test set"""
# TODO, implement TTA
# NOTE: With the below logic, one might want to do multiple inference on the same
# image, because there is some randomness, we do not know how big the image is
return transforms.Compose([transforms.Resize(scale_img_size),
MyRandomCrop((crop_img_size, crop_img_size)),
transforms.ToTensor(),
normaliser])
def get_test_valid_transformer(img_size,
normaliser=pytorch_zoo_normaliser):
"""Transformation for Validation and Test set"""
# TODO, implement TTA
# NOTE: With the below logic, one might want to do multiple inference on the same
# image, because there is some randomness, we do not know how big the image is
return transforms.Compose([transforms.Resize((img_size, img_size)),
transforms.ToTensor(),
normaliser]) |
py | 1a4f61be74eb492e5dd53a057b59e57888585bab | import unittest
import endurox as e
import exutils as u
class TestTpencrypt(unittest.TestCase):
# Test data encryption
def test_tpencrypt_ok(self):
w = u.NdrxStopwatch()
while w.get_delta_sec() < u.test_duratation():
# binary data:
buf=e.tpencrypt(b'\x00\x01\xff')
self.assertNotEqual(buf, b'\x00\x01\xff')
buf_org=e.tpdecrypt(buf)
self.assertEqual(buf_org, b'\x00\x01\xff')
# string based:
buf=e.tpencrypt("HELLO WORLD")
self.assertNotEqual(buf, "HELLO WORLD")
buf_org=e.tpdecrypt(buf)
self.assertEqual(buf_org, "HELLO WORLD")
if __name__ == '__main__':
unittest.main()
|
py | 1a4f61df6c6bb31263db7736d58be7e032f0a3b6 | # coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import absolute_import
import sys
import unittest
import datadog_api_client.v1
try:
from datadog_api_client.v1.model import widget_sort
except ImportError:
widget_sort = sys.modules[
'datadog_api_client.v1.model.widget_sort']
from datadog_api_client.v1.model.widget_field_sort import WidgetFieldSort
class TestWidgetFieldSort(unittest.TestCase):
"""WidgetFieldSort unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testWidgetFieldSort(self):
"""Test WidgetFieldSort"""
# FIXME: construct object with mandatory attributes with example values
# model = WidgetFieldSort() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a4f634abe7fae1fa03fdf0ca0ebfea8a298ec3d | import time, datetime
class Sensor():
def __init__(self, bme680):
        self.sensor = bme680.BME680()
def initialise(self,bme680):
self.sensor.set_humidity_oversample(bme680.OS_2X)
self.sensor.set_pressure_oversample(bme680.OS_4X)
self.sensor.set_temperature_oversample(bme680.OS_8X)
self.sensor.set_filter(bme680.FILTER_SIZE_3)
self.sensor.set_gas_status(bme680.ENABLE_GAS_MEAS)
self.sensor.set_gas_heater_temperature(320)
self.sensor.set_gas_heater_duration(150)
self.sensor.select_gas_heater_profile(0)
def getGasSensorBaseline(self):
start_time = time.time()
curr_time = time.time()
# takes 5 minutes to complete
burn_in_time = 300
burn_in_data = []
print("Collecting gas resistance burn-in data for 5 mins\n")
while curr_time - start_time < burn_in_time:
curr_time = time.time()
if self.sensor.get_sensor_data() and self.sensor.data.heat_stable:
gas = self.sensor.data.gas_resistance
burn_in_data.append(gas)
print("Gas: {0} Ohms".format(gas))
time.sleep(1)
gas_baseline = sum(burn_in_data[-50:]) / 50.0
#print("Gas baseline: {0} Ohms, humidity baseline: {1:.2f} %RH\n".format(gas_baseline, hum_baseline))
        return gas_baseline
def getAirQualityScore(self, gas_baseline):
gas = self.sensor.data.gas_resistance
gas_offset = gas_baseline - gas
# Set the humidity baseline to 40%, an optimal indoor humidity.
hum_baseline = 40.0
# This sets the balance between humidity and gas reading in the
# calculation of air_quality_score (25:75, humidity:gas)
hum_weighting = 0.25
hum = self.sensor.data.humidity
hum_offset = hum - hum_baseline
# Calculate hum_score as the distance from the hum_baseline.
if hum_offset > 0:
hum_score = (100 - hum_baseline - hum_offset) / (100 - hum_baseline) * (hum_weighting * 100)
else:
hum_score = (hum_baseline + hum_offset) / hum_baseline * (hum_weighting * 100)
# Calculate gas_score as the distance from the gas_baseline.
if gas_offset > 0:
gas_score = (gas / gas_baseline) * (100 - (hum_weighting * 100))
else:
gas_score = 100 - (hum_weighting * 100)
# Calculate air_quality_score
air_quality_score = hum_score + gas_score
        return air_quality_score
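    # Worked example (illustrative numbers only): with gas_baseline = 250000 Ohm,
    # a reading of gas = 200000 Ohm and humidity = 45 %RH gives
    #   hum_score = (100 - 40 - 5) / (100 - 40) * 25 ~= 22.9
    #   gas_score = (200000 / 250000) * 75     = 60.0
    # so air_quality_score ~= 82.9 out of 100 (higher meaning better air quality).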
def getData(self, gas_baseline):
        data_dict = {}
        output = ''
        if self.sensor.get_sensor_data() and self.sensor.data.heat_stable:
            data_dict['timestamp'] = datetime.datetime.now().replace(microsecond=0).isoformat()
            data_dict['temperature'] = self.sensor.data.temperature
            data_dict['pressure'] = self.sensor.data.pressure
            data_dict['humidity'] = self.sensor.data.humidity
            data_dict['airq'] = self.getAirQualityScore(gas_baseline)
        else:
            data_dict['timestamp'] = datetime.datetime.now().replace(microsecond=0).isoformat()
            data_dict['temperature'] = 0
            data_dict['pressure'] = 0
            data_dict['humidity'] = 0
            data_dict['airq'] = 0
        return data_dict
py | 1a4f635cb82f9242f8e43535c8ee03e58db81582 | # Generated by Django 2.2.7 on 2019-12-20 20:19
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('complaints', '0003_auto_20191221_0102'),
]
operations = [
migrations.AlterField(
model_name='complaint',
name='date',
field=models.DateTimeField(default=datetime.datetime(2019, 12, 21, 1, 49, 40, 3999)),
),
migrations.AlterField(
model_name='complaint',
name='status',
field=models.BooleanField(default=True),
),
]
|
py | 1a4f6484b4de730fdaacf485c96d934951154836 | import numpy as np
import pyqtgraph as pg
from scipy import signal
from acconeer_utils.clients.reg.client import RegClient
from acconeer_utils.clients.json.client import JSONClient
from acconeer_utils.clients import configs
from acconeer_utils import example_utils
from acconeer_utils.pg_process import PGProcess, PGProccessDiedException
def main():
args = example_utils.ExampleArgumentParser(num_sens=1).parse_args()
example_utils.config_logging(args)
if args.socket_addr:
client = JSONClient(args.socket_addr)
else:
port = args.serial_port or example_utils.autodetect_serial_port()
client = RegClient(port)
config = get_base_config()
config.sensor = args.sensors
client.setup_session(config)
pg_updater = PGUpdater(config)
pg_process = PGProcess(pg_updater)
pg_process.start()
client.start_streaming()
interrupt_handler = example_utils.ExampleInterruptHandler()
print("Press Ctrl-C to end session")
processor = PresenceDetectionProcessor(config)
while not interrupt_handler.got_signal:
info, sweep = client.get_next()
plot_data = processor.process(sweep)
if plot_data is not None:
try:
pg_process.put_data(plot_data)
except PGProccessDiedException:
break
print("Disconnecting...")
pg_process.close()
client.disconnect()
def get_base_config():
config = configs.IQServiceConfig()
config.range_interval = [0.4, 0.8]
config.sweep_rate = 60
config.gain = 0.6
return config
class PresenceDetectionProcessor:
def __init__(self, config):
self.config = config
# Settings
n_dft = 15 # Data length for frequency estimation [s] | 20
t_freq_est = 0.5 # Time between frequency estimations [s] | 2
tau_iq = 0.04 # Time constant low-pass filter on IQ-data [s] | 0.04
        self.f_s = self.config.sweep_rate  # Sweep frequency [Hz] | 150
self.D = 124 # Spatial or Range down sampling factor | 124
self.f_low = 0.1 # Lowest frequency of interest [Hz] | 0.1
self.f_high = 1.0 # Highest frequency of interest [Hz] | 1
self.M = int(self.f_s / 10) # Time down sampling for DFT | 40 f_s/M ~ 10 Hz
self.lambda_p = 40 # Threshold: spectral peak to noise ratio [1] | 50
self.lamda_05 = 6 # Threshold: ratio fundamental and half harmonic
self.interpolate = True # Interpolation between DFT points
self.delta_f = 1 / n_dft
self.dft_f_vec = np.arange(self.f_low, self.f_high, self.delta_f)
self.dft_points = np.size(self.dft_f_vec)
# Butterworth bandpass filter
f_n = self.f_s / 2
v_low = self.f_low / f_n
v_high = self.f_high / f_n
self.b, self.a = signal.butter(4, [v_low, v_high], btype="bandpass")
# Exponential lowpass filter
self.alpha_iq = np.exp(-2 / (self.f_s * tau_iq))
self.alpha_phi = np.exp(-2 * self.f_low / self.f_s)
# Parameter init
self.sweeps_in_block = int(np.ceil(n_dft * self.f_s))
self.new_sweeps_per_results = int(np.ceil(t_freq_est * self.f_s))
self.phi_vec = np.zeros((self.sweeps_in_block, 1))
self.f_est_vec = np.zeros(1)
self.f_dft_est_vec = np.zeros(1)
self.snr_vec = 0
self.sweep_index = 0
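    # Worked example (illustrative): with the sweep rate f_s = 60 Hz from
    # get_base_config() and tau_iq = 0.04 s, the IQ low-pass coefficient is
    #   alpha_iq = exp(-2 / (60 * 0.04)) ~= 0.435,
    # and with f_low = 0.1 Hz the phase low-pass coefficient is
    #   alpha_phi = exp(-2 * 0.1 / 60) ~= 0.9967.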
def process(self, sweep):
if self.sweep_index == 0:
delay_points = int(np.ceil(np.size(sweep) / self.D))
self.data_s_d_mat = np.zeros((self.sweeps_in_block, delay_points), dtype="complex")
self.data_s_d_mat[self.sweep_index, :] = self.downsample(sweep, self.D)
out_data = None
elif self.sweep_index < self.sweeps_in_block:
self.data_s_d_mat[self.sweep_index, :] = self.iq_lp_filter_time(
self.data_s_d_mat[self.sweep_index - 1, :],
self.downsample(sweep, self.D)
)
            self.phi_vec[self.sweep_index] = self.unwrap_phase(
                self.phi_vec[self.sweep_index - 1],
                self.data_s_d_mat[self.sweep_index, :],
                self.data_s_d_mat[self.sweep_index - 1, :]
            )
phi_filt = signal.lfilter(self.b, self.a, self.phi_vec, axis=0)
out_data = {
"phi_raw": self.phi_vec,
"phi_filt": phi_filt,
"power_spectrum": np.zeros(self.dft_points),
"x_dft": np.linspace(self.f_low, self.f_high, self.dft_points),
"f_dft_est_hist": self.f_dft_est_vec,
"f_est_hist": self.f_est_vec,
"f_dft_est": 0,
"f_est": 0,
"f_low": self.f_low,
"f_high": self.f_high,
"snr": 0,
"lambda_p": self.lambda_p,
"dist_range": self.config.range_interval,
"init_progress": round(100 * self.sweep_index / self.sweeps_in_block),
}
else:
# Lowpass filter IQ data downsampled in distance points
self.data_s_d_mat = np.roll(self.data_s_d_mat, -1, axis=0)
self.data_s_d_mat[-1, :] = self.iq_lp_filter_time(
self.data_s_d_mat[-1, :],
self.downsample(sweep, self.D)
)
# Phase unwrapping of IQ data
temp_phi = self.unwrap_phase(
self.phi_vec[-1],
self.data_s_d_mat[-1, :],
self.data_s_d_mat[-2, :]
)
self.phi_vec = np.roll(self.phi_vec, -1, axis=0)
self.phi_vec[-1] = temp_phi
if np.mod(self.sweep_index, self.new_sweeps_per_results - 1) == 0:
# Bandpass filter unwrapped data
phi_filt_vec = signal.lfilter(self.b, self.a, self.phi_vec, axis=0)
P, dft_est, _ = self.dft(self.downsample(phi_filt_vec, self.M))
f_breath_est, _, snr, _ = self.breath_freq_est(P)
self.f_est_vec = np.append(self.f_est_vec, f_breath_est)
self.f_dft_est_vec = np.append(self.f_dft_est_vec, dft_est)
self.snr_vec = np.append(self.snr_vec, snr)
out_data = {
"phi_raw": self.phi_vec,
"phi_filt": phi_filt_vec,
"power_spectrum": P,
"x_dft": np.linspace(self.f_low, self.f_high, self.dft_points),
"f_dft_est_hist": self.f_dft_est_vec,
"f_est_hist": self.f_est_vec,
"f_dft_est": dft_est,
"f_est": f_breath_est,
"f_low": self.f_low,
"f_high": self.f_high,
"snr": snr,
"lambda_p": self.lambda_p,
"dist_range": self.config.range_interval,
"init_progress": None,
}
else:
out_data = None
self.sweep_index += 1
return out_data
def downsample(self, data, n):
return data[::n]
def iq_lp_filter_time(self, state, new_data):
return self.alpha_iq * state + (1 - self.alpha_iq) * new_data
def unwrap_phase(self, phase_lp, data_1, data_2):
return phase_lp * self.alpha_phi + np.angle(np.mean(data_2 * np.conjugate(data_1)))
def dft(self, data):
data = np.squeeze(data)
n_vec = np.arange(data.size) * self.M
dft = np.exp((2j * np.pi / self.f_s) * np.outer(self.dft_f_vec, n_vec))
P = np.square(np.abs(np.matmul(dft, data)))
idx_f = np.argmax(P)
dft_est = self.dft_f_vec[idx_f]
return P, dft_est, P[idx_f]
def noise_est(self, P):
return np.mean(np.sort(P)[:(self.dft_points//2)-1])
def half_peak_frequency(self, P, f_est):
idx_half = int(f_est / (2 * self.delta_f))
if idx_half < self.f_low:
return 0
else:
return (1 / self.delta_f) * (
(self.dft_f_vec[idx_half+1] - f_est / 2) * P[idx_half]
+ (f_est/2 - self.dft_f_vec[idx_half]) * P[idx_half + 1]
)
def breath_freq_est(self, P):
f_idx = np.argmax(P)
P_peak = P[f_idx]
if self.interpolate:
f_est, P_peak = self.freq_quad_interpolation(P)
else:
f_est = self.dft_f_vec[f_idx]
P_half = self.half_peak_frequency(P, f_est)
if (P_peak < self.lamda_05 * P_half):
f_est = f_est / 2
P_peak = P_half
if self.f_low < f_est < self.f_high and P_peak > self.lambda_p*self.noise_est(P):
f_est_valid = True
else:
f_est_valid = False
f_est = 0
snr = P_peak / self.noise_est(P)
return f_est, P_peak, snr, f_est_valid
def freq_quad_interpolation(self, P):
f_idx = np.argmax(P)
        if 0 < f_idx < P.size - 1 and P.size > 3:
f_est = self.dft_f_vec[f_idx] \
+ self.delta_f / 2 * (
(np.log(P[f_idx+1])-np.log(P[f_idx-1]))
/ (2*np.log(P[f_idx]) - np.log(P[f_idx+1]) - np.log(P[f_idx-1]))
)
            P_peak = P[f_idx] * np.exp(
1/8 * np.square(np.log(P[f_idx+1]) - np.log(P[f_idx-1]))
/ (2*np.log(P[f_idx]) - np.log(P[f_idx+1]) - np.log(P[f_idx-1]))
)
if not (self.f_low < f_est < self.f_high):
f_est = 0
else:
f_est = 0
P_peak = 0
return f_est, P_peak
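# Worked example for the quadratic peak interpolation above (illustrative numbers):
# with neighbouring spectrum values P[k-1] = 2.0, P[k] = 8.0, P[k+1] = 4.0 the peak
# offset is delta_f/2 * (ln 4 - ln 2) / (2*ln 8 - ln 4 - ln 2) ~= 0.17 * delta_f,
# i.e. the refined estimate lies slightly above the DFT bin centre dft_f_vec[k],
# shifted towards the larger neighbour.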
class PGUpdater:
def __init__(self, config):
self.config = config
def setup(self, win):
win.resize(800, 600)
win.setWindowTitle("Acconeer sleep breathing estimation example")
phi_title = "Breathing motion (detection range: {} m to {} m)" \
.format(*self.config.range_interval)
self.phi_plot = win.addPlot(title=phi_title)
self.phi_plot.showGrid(x=True, y=True)
self.phi_plot.setLabel("left", "Amplitude")
self.phi_plot.setLabel("bottom", "Samples")
self.phi_plot.addLegend()
self.filt_phi_curve = self.phi_plot.plot(
pen=example_utils.pg_pen_cycler(0),
name="Filtered",
)
self.raw_phi_curve = self.phi_plot.plot(
pen=example_utils.pg_pen_cycler(1),
name="Raw",
)
win.nextRow()
self.spect_plot = win.addPlot(title="Power spectrum")
self.spect_plot.showGrid(x=True, y=True)
self.spect_plot.setLabel("left", "Power")
self.spect_plot.setLabel("bottom", "Frequency (Hz)")
self.spect_curve = self.spect_plot.plot(pen=example_utils.pg_pen_cycler(1))
self.spect_smax = example_utils.SmoothMax(self.config.sweep_rate / 15)
self.spect_dft_inf_line = pg.InfiniteLine(pen=example_utils.pg_pen_cycler(1, "--"))
self.spect_plot.addItem(self.spect_dft_inf_line)
self.spect_est_inf_line = pg.InfiniteLine(pen=example_utils.pg_pen_cycler(0, "--"))
self.spect_plot.addItem(self.spect_est_inf_line)
self.spect_plot.setXRange(0, 1)
self.spect_plot.setYRange(0, 1)
self.spect_text_item = pg.TextItem("Initiating...", anchor=(0.5, 0.5), color="k")
self.spect_text_item.setPos(0.5, 0.5)
self.spect_plot.addItem(self.spect_text_item)
win.nextRow()
self.fest_plot = win.addPlot(title="Breathing estimation history")
self.fest_plot.showGrid(x=True, y=True)
self.fest_plot.setLabel("left", "Frequency (Hz)")
self.fest_plot.setLabel("bottom", "Samples")
self.fest_plot.addLegend()
self.fest_curve = self.fest_plot.plot(
pen=example_utils.pg_pen_cycler(0),
name="Breathing est.",
)
self.fest_dft_curve = self.fest_plot.plot(
pen=example_utils.pg_pen_cycler(1),
name="DFT est.",
)
self.fest_plot.setXRange(0, 1)
self.fest_plot.setYRange(0, 1.2)
self.fest_text_item = pg.TextItem(anchor=(0, 0), color="k")
self.fest_text_item.setPos(0, 1.2)
self.fest_plot.addItem(self.fest_text_item)
def update(self, data):
self.filt_phi_curve.setData(np.squeeze(data["phi_filt"]))
self.raw_phi_curve.setData(np.squeeze(data["phi_raw"]))
if data["init_progress"] is not None:
self.spect_text_item.setText("Initiating: {} %".format(data["init_progress"]))
else:
snr = data["snr"]
if snr == 0:
s = "SNR: N/A | {:.0f} dB".format(10*np.log10(data["lambda_p"]))
else:
fmt = "SNR: {:.0f} | {:.0f} dB"
s = fmt.format(10*np.log10(snr), 10*np.log10(data["lambda_p"]))
self.spect_text_item.setText(s)
self.spect_text_item.setAnchor((0, 1))
self.spect_text_item.setPos(0, 0)
f_est = data["f_est"]
if f_est > 0:
s = "Latest frequency estimate: {:.2f} Hz | {:.0f} BPM".format(f_est, f_est*60)
self.fest_text_item.setText(s)
self.fest_plot.enableAutoRange(x=True)
self.spect_curve.setData(data["x_dft"], data["power_spectrum"])
self.spect_dft_inf_line.setValue(data["f_dft_est"])
self.spect_est_inf_line.setValue(data["f_est"])
self.spect_plot.setYRange(0, self.spect_smax.update(np.amax(data["power_spectrum"])))
self.fest_curve.setData(np.squeeze(data["f_est_hist"]))
self.fest_dft_curve.setData(np.squeeze(data["f_dft_est_hist"]))
if __name__ == "__main__":
main()
|
py | 1a4f64a55c4b723893963c37621ec5467eb7c517 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-06-10 03:05
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('reviews', '0002_remove_project_language'),
]
operations = [
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('design', models.IntegerField(blank=True, choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')], default=0)),
('usability', models.IntegerField(blank=True, choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')])),
('content', models.IntegerField(blank=True, choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')])),
('score', models.FloatField(blank=True, default=0)),
('design_average', models.FloatField(blank=True, default=0)),
('usability_average', models.FloatField(blank=True, default=0)),
('content_average', models.FloatField(blank=True, default=0)),
('project', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ratings', to='reviews.Project')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rater', to=settings.AUTH_USER_MODEL)),
],
),
]
|
gyp | 1a4f65334ec781232d1feddf396aa307beafd5f2 | {
'targets': [
{
'target_name': 'csound-api',
'include_dirs': [
'<!(node --eval "require(\'nan\')")'
],
'sources': [
'src/csound-api.cc'
],
'conditions': [
['OS == "mac"', {
# When creating an Xcode project, this lets Xcode find headers.
'include_dirs': [
'/usr/local/include'
],
'libraries': [
'/usr/local/lib/libcsnd6.6.0.dylib'
],
# This is needed so Boost can find the <atomic> header.
'xcode_settings': {
'OTHER_CPLUSPLUSFLAGS': [
'-stdlib=libc++'
],
'MACOSX_DEPLOYMENT_TARGET': '10.7'
}
}],
['OS == "linux"', {
'libraries': [
'-lcsound64'
]
}],
['OS == "win"', {
'defines': [
# This is needed due to the issue described at
# https://connect.microsoft.com/VisualStudio/feedback/details/1892487
'_ENABLE_ATOMIC_ALIGNMENT_FIX',
# Prevent min and max macros from being defined in windows.h.
'NOMINMAX'
],
'msvs_settings': {
'VCCLCompilerTool': {
'ExceptionHandling': 1 # /EHsc
}
}
}]
]
}
]
}
|
py | 1a4f65998d960bc5290f56aada647f83b3d7d2e2 | """Authors: Cody Baker and Ben Dichter."""
from pathlib import Path
from datetime import datetime
from typing import Optional
import spikeextractors as se
from pynwb import NWBHDF5IO
from nwb_conversion_tools import NWBConverter, CEDRecordingInterface
from nwb_conversion_tools.utils.spike_interface import write_recording
from .cedstimulusinterface import CEDStimulusInterface
def quick_write(
ced_file_path: str,
session_description: str,
    session_start: datetime,
save_path: str,
sorting: Optional[se.SortingExtractor] = None,
recording_lfp: Optional[se.RecordingExtractor] = None,
overwrite: bool = False,
):
"""Automatically extracts required session info from ced_file_path and writes NWBFile in spikeextractors."""
ced_file_path = Path(ced_file_path)
session_id = ced_file_path.stem
nwbfile_kwargs = dict(
session_description=session_description,
session_start_time=session_start.astimezone(),
session_id=session_id,
)
if sorting is not None:
se.NwbSortingExtractor.write_sorting(
sorting=sorting,
save_path=save_path,
overwrite=overwrite,
skip_properties=["mda_max_channel"],
skip_features=["waveforms"],
**nwbfile_kwargs
)
if recording_lfp is not None:
write_recording(recording=recording_lfp, save_path=save_path, write_as="lfp")
class CEDNWBConverter(NWBConverter):
data_interface_classes = dict(
CEDRecording=CEDRecordingInterface, CEDStimulus=CEDStimulusInterface
)
def __init__(self, source_data):
channel_info = self.data_interface_classes[
"CEDRecording"
].RX.get_all_channels_info(source_data["CEDRecording"]["file_path"])
rhd_channels = []
stim_channels = []
for ch, info in channel_info.items():
if "Rhd" in info["title"]:
rhd_channels.append(ch)
if info["title"] in ["CED_Mech", "MechTTL", "Laser"]:
stim_channels.append(ch)
source_data["CEDRecording"].update(smrx_channel_ids=rhd_channels)
source_data["CEDStimulus"].update(smrx_channel_ids=stim_channels)
super().__init__(source_data)
def get_metadata(self):
metadata = super().get_metadata()
smrx_file_path = Path(
self.data_interface_objects["CEDRecording"].source_data["file_path"]
)
session_id = smrx_file_path.stem
metadata["NWBFile"].update(
institution="EMBL - Heidelberg", lab="Mease", session_id=session_id
)
return metadata
|
py | 1a4f66488995ef0445f036da62ae4b6b704cdda2 | import numpy as np
import math
import torch.nn as nn
from .utils import unetConv2, unetUp, conv2DBatchNormRelu, conv2DBatchNorm
import torch
import torch.nn.functional as F
from models.layers.grid_attention_layer import GridAttentionBlock2D_TORR as AttentionBlock2D
from models.networks_other import init_weights
class sononet_grid_attention(nn.Module):
def __init__(self, feature_scale=4, n_classes=21, in_channels=3, is_batchnorm=True, n_convs=None,
nonlocal_mode='concatenation', aggregation_mode='concat'):
super(sononet_grid_attention, self).__init__()
self.in_channels = in_channels
self.is_batchnorm = is_batchnorm
self.feature_scale = feature_scale
self.n_classes= n_classes
self.aggregation_mode = aggregation_mode
self.deep_supervised = True
if n_convs is None:
n_convs = [3, 3, 3, 2, 2]
filters = [64, 128, 256, 512]
filters = [int(x / self.feature_scale) for x in filters]
####################
# Feature Extraction
self.conv1 = unetConv2(self.in_channels, filters[0], self.is_batchnorm, n=n_convs[0])
self.maxpool1 = nn.MaxPool2d(kernel_size=2)
self.conv2 = unetConv2(filters[0], filters[1], self.is_batchnorm, n=n_convs[1])
self.maxpool2 = nn.MaxPool2d(kernel_size=2)
self.conv3 = unetConv2(filters[1], filters[2], self.is_batchnorm, n=n_convs[2])
self.maxpool3 = nn.MaxPool2d(kernel_size=2)
self.conv4 = unetConv2(filters[2], filters[3], self.is_batchnorm, n=n_convs[3])
self.maxpool4 = nn.MaxPool2d(kernel_size=2)
self.conv5 = unetConv2(filters[3], filters[3], self.is_batchnorm, n=n_convs[4])
################
# Attention Maps
self.compatibility_score1 = AttentionBlock2D(in_channels=filters[2], gating_channels=filters[3],
inter_channels=filters[3], sub_sample_factor=(1,1),
mode=nonlocal_mode, use_W=False, use_phi=True,
use_theta=True, use_psi=True, nonlinearity1='relu')
self.compatibility_score2 = AttentionBlock2D(in_channels=filters[3], gating_channels=filters[3],
inter_channels=filters[3], sub_sample_factor=(1,1),
mode=nonlocal_mode, use_W=False, use_phi=True,
use_theta=True, use_psi=True, nonlinearity1='relu')
#########################
        # Aggregation Strategies
self.attention_filter_sizes = [filters[2], filters[3]]
if aggregation_mode == 'concat':
self.classifier = nn.Linear(filters[2]+filters[3]+filters[3], n_classes)
            self.aggregate = self.aggregation_concat
else:
self.classifier1 = nn.Linear(filters[2], n_classes)
self.classifier2 = nn.Linear(filters[3], n_classes)
self.classifier3 = nn.Linear(filters[3], n_classes)
self.classifiers = [self.classifier1, self.classifier2, self.classifier3]
if aggregation_mode == 'mean':
self.aggregate = self.aggregation_sep
elif aggregation_mode == 'deep_sup':
self.classifier = nn.Linear(filters[2] + filters[3] + filters[3], n_classes)
self.aggregate = self.aggregation_ds
elif aggregation_mode == 'ft':
self.classifier = nn.Linear(n_classes*3, n_classes)
self.aggregate = self.aggregation_ft
else:
raise NotImplementedError
####################
# initialise weights
for m in self.modules():
if isinstance(m, nn.Conv2d):
init_weights(m, init_type='kaiming')
elif isinstance(m, nn.BatchNorm2d):
init_weights(m, init_type='kaiming')
def aggregation_sep(self, *attended_maps):
return [ clf(att) for clf, att in zip(self.classifiers, attended_maps) ]
def aggregation_ft(self, *attended_maps):
preds = self.aggregation_sep(*attended_maps)
return self.classifier(torch.cat(preds, dim=1))
def aggregation_ds(self, *attended_maps):
preds_sep = self.aggregation_sep(*attended_maps)
pred = self.aggregation_concat(*attended_maps)
return [pred] + preds_sep
def aggregation_concat(self, *attended_maps):
return self.classifier(torch.cat(attended_maps, dim=1))
def forward(self, inputs):
# Feature Extraction
conv1 = self.conv1(inputs)
maxpool1 = self.maxpool1(conv1)
conv2 = self.conv2(maxpool1)
maxpool2 = self.maxpool2(conv2)
conv3 = self.conv3(maxpool2)
maxpool3 = self.maxpool3(conv3)
conv4 = self.conv4(maxpool3)
maxpool4 = self.maxpool4(conv4)
conv5 = self.conv5(maxpool4)
batch_size = inputs.shape[0]
pooled = F.adaptive_avg_pool2d(conv5, (1, 1)).view(batch_size, -1)
# Attention Mechanism
g_conv1, att1 = self.compatibility_score1(conv3, conv5)
g_conv2, att2 = self.compatibility_score2(conv4, conv5)
# flatten to get single feature vector
fsizes = self.attention_filter_sizes
g1 = torch.sum(g_conv1.view(batch_size, fsizes[0], -1), dim=-1)
g2 = torch.sum(g_conv2.view(batch_size, fsizes[1], -1), dim=-1)
return self.aggregate(g1, g2, pooled)
@staticmethod
def apply_argmax_softmax(pred):
log_p = F.softmax(pred, dim=1)
return log_p
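# Smoke-test sketch (hedged; single-channel 224x224 inputs and 8 classes are assumptions,
# adjust to the real data and the attention block implementation in this repo):
#
#   net = sononet_grid_attention(feature_scale=4, n_classes=8, in_channels=1,
#                                aggregation_mode='concat')
#   x = torch.randn(2, 1, 224, 224)
#   logits = net(x)   # expected shape (2, 8): classifier over concatenated g1, g2, pooled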
|
py | 1a4f6653ee15b618482dbd09c5c2b99c5bcc89a9 | """ Unit tests for the Driver base class."""
from __future__ import print_function
import unittest
import numpy as np
from openmdao.api import Problem, IndepVarComp, Group, ExplicitComponent
from openmdao.devtools.testutil import assert_rel_error
from openmdao.test_suite.components.sellar import SellarDerivatives
from openmdao.test_suite.components.simple_comps import DoubleArrayComp, NonSquareArrayComp
class TestDriver(unittest.TestCase):
def test_basic_get(self):
prob = Problem()
prob.model = model = SellarDerivatives()
model.add_design_var('z')
model.add_objective('obj')
model.add_constraint('con1', lower=0)
prob.set_solver_print(level=0)
prob.setup(check=False)
prob.run_driver()
designvars = prob.driver.get_design_var_values()
self.assertEqual(designvars['pz.z'][0], 5.0 )
designvars = prob.driver.get_objective_values()
self.assertEqual(designvars['obj_cmp.obj'], prob['obj'] )
designvars = prob.driver.get_constraint_values()
self.assertEqual(designvars['con_cmp1.con1'], prob['con1'] )
def test_scaled_design_vars(self):
prob = Problem()
prob.model = model = SellarDerivatives()
model.add_design_var('z', ref=5.0, ref0=3.0)
model.add_objective('obj')
model.add_constraint('con1', lower=0)
prob.set_solver_print(level=0)
prob.setup(check=False)
dv = prob.driver.get_design_var_values()
self.assertEqual(dv['pz.z'][0], 1.0)
self.assertEqual(dv['pz.z'][1], -0.5)
prob.driver.set_design_var('pz.z', np.array((2.0, -2.0)))
self.assertEqual(prob['z'][0], 7.0)
self.assertEqual(prob['z'][1], -1.0)
def test_scaled_constraints(self):
prob = Problem()
prob.model = model = SellarDerivatives()
model.add_design_var('z')
model.add_objective('obj')
model.add_constraint('con1', lower=0, ref=2.0, ref0=3.0)
prob.set_solver_print(level=0)
prob.setup(check=False)
prob.run_model()
cv = prob.driver.get_constraint_values()['con_cmp1.con1'][0]
base = prob['con1']
self.assertEqual((base-3.0)/(2.0-3.0), cv)
    def test_scaled_objectives(self):
prob = Problem()
prob.model = model = SellarDerivatives()
model.add_design_var('z')
model.add_objective('obj', ref=2.0, ref0=3.0)
model.add_constraint('con1', lower=0)
prob.set_solver_print(level=0)
prob.setup(check=False)
prob.run_model()
cv = prob.driver.get_objective_values()['obj_cmp.obj'][0]
base = prob['obj']
self.assertEqual((base-3.0)/(2.0-3.0), cv)
def test_scaled_derivs(self):
prob = Problem()
prob.model = model = SellarDerivatives()
model.add_design_var('z', ref=2.0, ref0=0.0)
model.add_objective('obj', ref=1.0, ref0=0.0)
model.add_constraint('con1', lower=0, ref=2.0, ref0=0.0)
prob.set_solver_print(level=0)
prob.setup(check=False)
prob.run_model()
base = prob.compute_total_derivs(of=['obj', 'con1'], wrt=['z'])
derivs = prob.driver._compute_total_derivs(of=['obj_cmp.obj', 'con_cmp1.con1'], wrt=['pz.z'],
return_format='dict')
assert_rel_error(self, base[('con1', 'z')][0], derivs['con_cmp1.con1']['pz.z'][0], 1e-5)
assert_rel_error(self, base[('obj', 'z')][0]*2.0, derivs['obj_cmp.obj']['pz.z'][0], 1e-5)
def test_vector_scaled_derivs(self):
prob = Problem()
prob.model = model = Group()
model.add_subsystem('px', IndepVarComp(name="x", val=np.ones((2, ))))
comp = model.add_subsystem('comp', DoubleArrayComp())
model.connect('px.x', 'comp.x1')
model.add_design_var('px.x', ref=np.array([2.0, 3.0]), ref0=np.array([0.5, 1.5]))
model.add_objective('comp.y1', ref=np.array([[7.0, 11.0]]), ref0=np.array([5.2, 6.3]))
model.add_constraint('comp.y2', lower=0.0, upper=1.0, ref=np.array([[2.0, 4.0]]), ref0=np.array([1.2, 2.3]))
prob.setup(check=False)
prob.run_driver()
derivs = prob.driver._compute_total_derivs(of=['comp.y1'], wrt=['px.x'],
return_format='dict')
oscale = np.array([1.0/(7.0-5.2), 1.0/(11.0-6.3)])
iscale = np.array([2.0-0.5, 3.0-1.5])
J = comp.JJ[0:2, 0:2]
        # doing this manually so that I don't inadvertently make an error in the vector math in both the code and test.
J[0, 0] *= oscale[0]*iscale[0]
J[0, 1] *= oscale[0]*iscale[1]
J[1, 0] *= oscale[1]*iscale[0]
J[1, 1] *= oscale[1]*iscale[1]
assert_rel_error(self, J, derivs['comp.y1']['px.x'], 1.0e-3)
obj = prob.driver.get_objective_values()
obj_base = np.array([ (prob['comp.y1'][0]-5.2)/(7.0-5.2), (prob['comp.y1'][1]-6.3)/(11.0-6.3) ])
assert_rel_error(self, obj['comp.y1'], obj_base, 1.0e-3)
con = prob.driver.get_constraint_values()
con_base = np.array([ (prob['comp.y2'][0]-1.2)/(2.0-1.2), (prob['comp.y2'][1]-2.3)/(4.0-2.3) ])
assert_rel_error(self, con['comp.y2'], con_base, 1.0e-3)
def test_vector_scaled_derivs_diff_sizes(self):
prob = Problem()
prob.model = model = Group()
model.add_subsystem('px', IndepVarComp(name="x", val=np.ones((2, ))))
comp = model.add_subsystem('comp', NonSquareArrayComp())
model.connect('px.x', 'comp.x1')
model.add_design_var('px.x', ref=np.array([2.0, 3.0]), ref0=np.array([0.5, 1.5]))
model.add_objective('comp.y1', ref=np.array([[7.0, 11.0, 2.0]]), ref0=np.array([5.2, 6.3, 1.2]))
model.add_constraint('comp.y2', lower=0.0, upper=1.0, ref=np.array([[2.0]]), ref0=np.array([1.2]))
prob.setup(check=False)
prob.run_driver()
derivs = prob.driver._compute_total_derivs(of=['comp.y1'], wrt=['px.x'],
return_format='dict')
oscale = np.array([1.0/(7.0-5.2), 1.0/(11.0-6.3), 1.0/(2.0-1.2)])
iscale = np.array([2.0-0.5, 3.0-1.5])
J = comp.JJ[0:3, 0:2]
        # doing this manually so that I don't inadvertently make an error in the vector math in both the code and test.
J[0, 0] *= oscale[0]*iscale[0]
J[0, 1] *= oscale[0]*iscale[1]
J[1, 0] *= oscale[1]*iscale[0]
J[1, 1] *= oscale[1]*iscale[1]
J[2, 0] *= oscale[2]*iscale[0]
J[2, 1] *= oscale[2]*iscale[1]
assert_rel_error(self, J, derivs['comp.y1']['px.x'], 1.0e-3)
obj = prob.driver.get_objective_values()
obj_base = np.array([ (prob['comp.y1'][0]-5.2)/(7.0-5.2), (prob['comp.y1'][1]-6.3)/(11.0-6.3), (prob['comp.y1'][2]-1.2)/(2.0-1.2) ])
assert_rel_error(self, obj['comp.y1'], obj_base, 1.0e-3)
con = prob.driver.get_constraint_values()
con_base = np.array([ (prob['comp.y2'][0]-1.2)/(2.0-1.2)])
assert_rel_error(self, con['comp.y2'], con_base, 1.0e-3)
if __name__ == "__main__":
unittest.main()
|
py | 1a4f666300684f7f1b769d235e02ea26513a0c7a | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import sys
from setuptools import Extension, find_packages, setup
if sys.version_info < (3, 6):
sys.exit("Sorry, Python >= 3.6 is required for fairseq.")
def write_version_py():
with open(os.path.join("fairseq", "version.txt")) as f:
version = f.read().strip()
# append latest commit hash to version string
try:
sha = (
subprocess.check_output(["git", "rev-parse", "HEAD"])
.decode("ascii")
.strip()
)
version += "+" + sha[:7]
except Exception:
pass
# write version info to fairseq/version.py
with open(os.path.join("fairseq", "version.py"), "w") as f:
f.write('__version__ = "{}"\n'.format(version))
return version
version = write_version_py()
with open("README.md") as f:
readme = f.read()
if sys.platform == "darwin":
extra_compile_args = ["-stdlib=libc++", "-O3"]
else:
extra_compile_args = ["-std=c++11", "-O3"]
class NumpyExtension(Extension):
"""Source: https://stackoverflow.com/a/54128391"""
def __init__(self, *args, **kwargs):
self.__include_dirs = []
super().__init__(*args, **kwargs)
@property
def include_dirs(self):
import numpy
return self.__include_dirs + [numpy.get_include()]
@include_dirs.setter
def include_dirs(self, dirs):
self.__include_dirs = dirs
extensions = [
Extension(
"fairseq.libbleu",
sources=[
"fairseq/clib/libbleu/libbleu.cpp",
"fairseq/clib/libbleu/module.cpp",
],
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.data_utils_fast",
sources=["fairseq/data/data_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.token_block_utils_fast",
sources=["fairseq/data/token_block_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
]
cmdclass = {}
try:
# torch is not available when generating docs
from torch.utils import cpp_extension
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libbase",
sources=[
"fairseq/clib/libbase/balanced_assignment.cpp",
],
)
]
)
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libnat",
sources=[
"fairseq/clib/libnat/edit_dist.cpp",
],
)
]
)
if "CUDA_HOME" in os.environ:
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libnat_cuda",
sources=[
"fairseq/clib/libnat_cuda/edit_dist.cu",
"fairseq/clib/libnat_cuda/binding.cpp",
],
),
cpp_extension.CppExtension(
"fairseq.ngram_repeat_block_cuda",
sources=[
"fairseq/clib/cuda/ngram_repeat_block_cuda.cpp",
"fairseq/clib/cuda/ngram_repeat_block_cuda_kernel.cu",
],
),
]
)
cmdclass["build_ext"] = cpp_extension.BuildExtension
except ImportError:
pass
if "READTHEDOCS" in os.environ:
# don't build extensions when generating docs
extensions = []
if "build_ext" in cmdclass:
del cmdclass["build_ext"]
# use CPU build of PyTorch
dependency_links = [
"https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
]
else:
dependency_links = []
if "clean" in sys.argv[1:]:
# Source: https://bit.ly/2NLVsgE
print("deleting Cython files...")
import subprocess
subprocess.run(
["rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd"],
shell=True,
)
extra_packages = []
if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
extra_packages.append("fairseq.model_parallel.megatron.mpu")
def do_setup(package_data):
setup(
name="fairseq",
version=version,
description="Facebook AI Research Sequence-to-Sequence Toolkit",
url="https://github.com/pytorch/fairseq",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
long_description=readme,
long_description_content_type="text/markdown",
setup_requires=[
"cython",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"setuptools>=18.0",
],
install_requires=[
"cffi",
"cython",
'dataclasses; python_version<"3.7"',
"hydra-core<1.1",
"omegaconf<2.1",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"regex",
"sacrebleu>=1.4.12",
"torch",
"tqdm",
],
dependency_links=dependency_links,
packages=find_packages(
exclude=[
"examples",
"examples.*",
"scripts",
"scripts.*",
"tests",
"tests.*",
]
)
+ extra_packages,
package_data=package_data,
ext_modules=extensions,
test_suite="tests",
entry_points={
"console_scripts": [
"fairseq-eval-lm = fairseq_cli.eval_lm:cli_main",
"fairseq-generate = fairseq_cli.generate:cli_main",
"fairseq-hydra-train = fairseq_cli.hydra_train:cli_main",
"fairseq-interactive = fairseq_cli.interactive:cli_main",
"fairseq-preprocess = fairseq_cli.preprocess:cli_main",
"fairseq-score = fairseq_cli.score:cli_main",
"fairseq-train = fairseq_cli.train:cli_main",
"fairseq-validate = fairseq_cli.validate:cli_main",
],
},
cmdclass=cmdclass,
zip_safe=False,
)
def get_files(path, relative_to="fairseq"):
all_files = []
for root, _dirs, files in os.walk(path, followlinks=True):
root = os.path.relpath(root, relative_to)
for file in files:
if file.endswith(".pyc"):
continue
all_files.append(os.path.join(root, file))
return all_files
if __name__ == "__main__":
try:
# symlink examples into fairseq package so package_data accepts them
fairseq_examples = os.path.join("fairseq", "examples")
if "build_ext" not in sys.argv[1:] and not os.path.exists(fairseq_examples):
os.symlink(os.path.join("..", "examples"), fairseq_examples)
package_data = {
"fairseq": (
get_files(fairseq_examples) + get_files(os.path.join("fairseq", "config"))
)
}
do_setup(package_data)
finally:
if "build_ext" not in sys.argv[1:] and os.path.islink(fairseq_examples):
os.unlink(fairseq_examples)
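# Typical developer install from the repo root (hedged; see the project README for the
# authoritative commands):
#   pip install --editable .
#   python setup.py build_ext --inplace   # pre-build the Cython/C++ extensions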
|
py | 1a4f67558140a4623f4bf36b372f9501e05ea75e | # Copyright (c) 2019 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
import ast
import builtins # To check against functions that are built-in in Python.
import math # Imported here so it can be used easily by the setting functions.
import uuid # Imported here so it can be used easily by the setting functions.
import base64 # Imported here so it can be used easily by the setting functions.
import hashlib # Imported here so it can be used easily by the setting functions.
from types import CodeType
from typing import Any, Callable, Dict, FrozenSet, NamedTuple, Optional, Set, TYPE_CHECKING
from UM.Settings.Interfaces import ContainerInterface
from UM.Settings.PropertyEvaluationContext import PropertyEvaluationContext
from UM.Logger import Logger
if TYPE_CHECKING:
from typing import FrozenSet
class IllegalMethodError(Exception):
pass
def _debug_value(value: Any) -> Any:
Logger.log("d", "Setting Function: %s", value)
return value
#
# This class is used to evaluate Python codes (or you can call them formulas) for a setting's property. If a setting's
# property is a static type, e.g., a string, an int, a float, etc., its value will just be interpreted as it is, but
# when it's a Python code (formula), the value needs to be evaluated via this class.
#
class SettingFunction:
## Constructor.
#
# \param code The Python code this function should evaluate.
def __init__(self, expression: str) -> None:
super().__init__()
self._code = expression
# Keys of all settings that are referenced to in this function.
self._used_keys = frozenset() # type: FrozenSet[str]
self._used_values = frozenset() # type: FrozenSet[str]
self._compiled = None # type: Optional[CodeType] #Actually an Optional['code'] object, but Python doesn't properly expose this 'code' object via any library.
self._valid = False # type: bool
try:
tree = ast.parse(self._code, "eval")
result = _SettingExpressionVisitor().visit(tree)
self._used_keys = frozenset(result.keys)
self._used_values = frozenset(result.values)
self._compiled = compile(self._code, repr(self), "eval")
self._valid = True
except (SyntaxError, TypeError) as e:
Logger.log("e", "Parse error in function ({1}) for setting: {0}".format(str(e), self._code))
except IllegalMethodError as e:
Logger.log("e", "Use of illegal method {0} in function ({1}) for setting".format(str(e), self._code))
except Exception as e:
Logger.log("e", "Exception in function ({0}) for setting: {1}".format(str(e), self._code))
## Call the actual function to calculate the value.
#
# \param value_provider The container from which to get setting values in
# the formula.
def __call__(self, value_provider: ContainerInterface, context: Optional[PropertyEvaluationContext] = None) -> Any:
if not value_provider:
return None
if not self._valid:
return None
locals = {} # type: Dict[str, Any]
# if there is a context, evaluate the values from the perspective of the original caller
if context is not None:
value_provider = context.rootStack()
for name in self._used_values:
value = value_provider.getProperty(name, "value", context)
if value is None:
continue
locals[name] = value
g = {} # type: Dict[str, Any]
g.update(globals())
g.update(self.__operators)
# override operators if there is any in the context
if context is not None:
g.update(context.context.get("override_operators", {}))
try:
if self._compiled:
return eval(self._compiled, g, locals)
Logger.log("e", "An error ocurred evaluating the function {0}.".format(self))
return 0
except Exception as e:
Logger.logException("d", "An exception occurred in inherit function {0}: {1}".format(self, str(e)))
return 0 # Settings may be used in calculations and they need a value
def __eq__(self, other: object) -> bool:
if not isinstance(other, SettingFunction):
return False
return self._code == other._code
def __hash__(self) -> int:
return hash(self._code)
## Returns whether the function is ready to be executed.
#
# \return True if the function is valid, or False if it's not.
def isValid(self) -> bool:
return self._valid
## Retrieve a set of the keys (strings) of all the settings used in this function.
#
# \return A set of the keys (strings) of all the settings used in this functions.
def getUsedSettingKeys(self) -> FrozenSet[str]:
return self._used_keys
def __str__(self) -> str:
return "={0}".format(self._code)
def __repr__(self) -> str:
return "<UM.Settings.SettingFunction (0x{0:x}) ={1} >".format(id(self), self._code)
## To support Pickle
#
# Pickle does not support the compiled code, so instead remove it from the state.
# We can re-compile it later on anyway.
def __getstate__(self) -> Dict[str, Any]:
state = self.__dict__.copy()
del state["_compiled"]
return state
def __setstate__(self, state: Dict[str, Any]) -> None:
self.__dict__.update(state)
self._compiled = compile(self._code, repr(self), "eval")
## Expose a custom function to the code executed by SettingFunction
#
# \param name What identifier to use in the executed code.
# \param operator A callable that implements the actual logic to execute.
@classmethod
def registerOperator(cls, name: str, operator: Callable) -> None:
cls.__operators[name] = operator
_SettingExpressionVisitor._knownNames.add(name)
__operators = {
"debug": _debug_value
}
_VisitResult = NamedTuple("_VisitResult", [("values", Set[str]), ("keys", Set[str])])
# Helper class used to analyze a parsed function.
#
# It walks a Python AST generated from a Python expression. It will analyze the AST and
# produce two sets, one set of "used keys" and one set of "used values". "used keys" are
# setting keys (strings) that are used by the expression, whereas "used values" are
# actual variable references that are needed for the function to be executed.
class _SettingExpressionVisitor(ast.NodeVisitor):
def __init__(self) -> None:
super().__init__()
self.values = set() # type: Set[str]
self.keys = set() # type: Set[str]
def visit(self, node: ast.AST) -> _VisitResult:
super().visit(node)
return _VisitResult(values = self.values, keys = self.keys)
def visit_Name(self, node: ast.Name) -> None: # [CodeStyle: ast.NodeVisitor requires this function name]
if node.id in self._blacklist:
raise IllegalMethodError(node.id)
if node.id not in self._knownNames and node.id not in dir(builtins):
self.values.add(node.id)
self.keys.add(node.id)
## This one is used before Python 3.8 to visit string types.
#
# visit_Str will be marked as deprecated from Python 3.8 and onwards.
def visit_Str(self, node: ast.AST) -> None:
if node.s not in self._knownNames and node.s not in dir(builtins): # type: ignore #AST uses getattr stuff, so ignore type of node.s.
self.keys.add(node.s) # type: ignore
## This one is used on Python 3.8+ to visit string types.
def visit_Constant(self, node: ast.AST) -> None:
if isinstance(node.value, str) and node.value not in self._knownNames and node.value not in dir(builtins): # type: ignore #AST uses getattr stuff, so ignore type of node.value.
self.keys.add(node.value) # type: ignore
_knownNames = {
"math",
"max",
"min",
"debug",
"sum",
"len",
"uuid",
"hashlib",
"base64"
} # type: Set[str]
_blacklist = {
"sys",
"os",
"import",
"__import__",
"eval",
"exec",
"subprocess",
} # type: Set[str]
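# Minimal usage sketch (hedged; evaluating a formula needs a ContainerInterface value
# provider, which is not shown here):
#
#   fn = SettingFunction("layer_height * 2")
#   fn.isValid()               # True: the expression parsed and compiled
#   fn.getUsedSettingKeys()    # frozenset({'layer_height'})
#   fn(stack)                  # evaluate against a setting container stack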
|
py | 1a4f6803e8d7b5dfbaad1ee0560b6b90bcc0b9fa | import json
import unittest
from linkml.generators.jsonschemagen import JsonSchemaGenerator
from tests.utils.test_environment import TestEnvironmentTestCase
from tests.test_issues.environment import env
# reported in https://github.com/linkml/linkml/issues/726
schema_str = """
id: http://example.org
name: issue-726
imports:
- https://w3id.org/linkml/types
prefixes:
x: http://example.org/
default_prefix: x
default_range: string
description: test
classes:
C:
tree_root: true
slots:
- s1
- s2
- s3
- s4
slot_usage:
s1:
equals_string: foo
s3:
equals_number: 32
D:
slots:
- s1
- s2
- s3
- s4
slots:
s1:
description: test slot that can be overridden with specific values
s2:
equals_string: bar
s3:
description: test override for equals_number
range: integer
s4:
equals_number: 7
range: integer
"""
class Issue726ConstCase(TestEnvironmentTestCase):
env = env
def test_jsonschema(self):
gen = JsonSchemaGenerator(schema_str)
output = gen.serialize()
print(output)
js = json.loads(output)
top_props = js['properties']
s1C = top_props['s1']
s2C = top_props['s2']
s3C = top_props['s3']
s4C = top_props['s4']
D = js['$defs']['D']['properties']
s1D = D['s1']
s2D = D['s2']
s3D = D['s3']
s4D = D['s4']
self.assertEqual(s1C['const'], 'foo')
self.assertEqual(s2C['const'], 'bar')
self.assertNotIn('const', s1D)
self.assertEqual(s2D['const'], 'bar')
self.assertEqual(s3C['const'], 32)
self.assertEqual(s4C['const'], 7)
self.assertNotIn('const', s3D)
self.assertEqual(s4D['const'], 7)
if __name__ == '__main__':
unittest.main()
|
py | 1a4f680c1ef960986cafaf88e236707846c2536f | from app import app
app.run(host=app.config.get('APP_HOST', '127.0.0.1'),
port=app.config.get('APP_PORT', 5000),
debug=app.config.get('APP_DEBUG', False)) |
py | 1a4f689d86a93807a72a469c3dcfbb71ff1821ab | from pathlib import Path
import os
import face_recognition
import numpy as np
import pickle
DIRECTORY_SAVE = 'biometric_systems/faces'
def get_features_from_face(frame: np.ndarray, face_locations: list[list]) -> list[float]:
return face_recognition.face_encodings(frame, known_face_locations=face_locations, model='large')[0]
class Faces:
def __init__(self, username: str):
self.username = username
self.__create_dir_if_not_exist()
self.__create_user_file_if_not_exist()
self.pre_defined_faces: list[list[float]] = []
self.__load_faces()
def add(self, new_face_features: list[float]):
self.pre_defined_faces.append(new_face_features)
def verify_user(self, face_cmp: list[float]) -> bool:
return face_recognition.compare_faces(self.pre_defined_faces, face_cmp, tolerance=0.4)
def save_faces(self):
with open(f"{DIRECTORY_SAVE}/{self.username}", 'wb') as file:
pickle.dump(self.pre_defined_faces, file)
def __load_faces(self):
try:
with open(f"{DIRECTORY_SAVE}/{self.username}", 'rb') as file:
self.pre_defined_faces = pickle.load(file)
except EOFError:
            # An empty file simply means no faces have been stored for this user yet.
            print(f'No stored faces for {self.username}; starting with an empty set.')
def __create_user_file_if_not_exist(self):
file = Path(f"{DIRECTORY_SAVE}/{self.username}")
file.touch(exist_ok=True)
@staticmethod
def __create_dir_if_not_exist():
os.makedirs(f"{DIRECTORY_SAVE}/", exist_ok=True)
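# Enrollment/verification sketch (hedged; `frame` is a placeholder for an image array
# loaded elsewhere, e.g. via face_recognition.load_image_file):
#
#   locations = face_recognition.face_locations(frame)
#   features = get_features_from_face(frame, locations)
#   faces = Faces("alice")
#   faces.add(features)
#   faces.save_faces()
#   matches = faces.verify_user(features)   # one boolean per stored face template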
|
py | 1a4f693150a2266d7dc7417286b55fede97076f9 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Vendor: Force10
# OS: SFTOS
# ---------------------------------------------------------------------
# Copyright (C) 2007-2013 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.profile.base import BaseProfile
class Profile(BaseProfile):
name = "Force10.SFTOS"
pattern_more = r"^--More-- or \(q\)uit"
pattern_unprivileged_prompt = r"^(?P<host>\S+?)>"
pattern_prompt = r"^(?P<host>\S+?)#"
pattern_syntax_error = r"% Invalid input detected at "
pattern_operation_error = r"% Error: "
command_disable_pager = "terminal length 0"
command_super = "enable"
command_enter_config = "configure"
command_leave_config = "exit"
command_save_config = "write memory"
command_submit = "\r"
convert_interface_name = BaseProfile.convert_interface_name_cisco
|
py | 1a4f69abc283c86e6555153fd2f3ade259613f1a | import subprocess
import sys
from gather_targets import gather_targets
targets = gather_targets(sys.argv[1:])
success = 0
failed_targets = list()
for target in targets:
print(f"\n\n~~~~~~~~~~~~~~~~~~ {target} ~~~~~~~~~~~~~~~~~~\n\n")
success += subprocess.call(['dbt','clean', '--profiles-dir','.'])
success += subprocess.call(['dbt','deps', '--profiles-dir','.'])
## this gnarly mess handles dbt's compile behavior, which is to compile everything including the excluded models.
success += subprocess.call(['dbt','run' ,
'--profiles-dir','.',
'--target', target,
'--exclude', f'tag:exclude_{target}',
'--vars', '{"xdb_allow_unsupported_macros":true}'])
success += subprocess.call(['dbt','test',
'--profiles-dir','.',
'--target', target,
'--exclude', f'tag:exclude_{target}', f'tag:exclude_{target}_tests',
'--vars', '{"xdb_allow_unsupported_macros":true}'])
print("\n\nTest unsupported macros throw compilation error...")
## test that excluded models throw a compilation error
exceptions_text = subprocess.Popen(['dbt','run',
'--profiles-dir','.',
'--target', target,
'--models', f'tag:exclude_{target}'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, _ = exceptions_text.communicate()
passed = bool(sum([val in out for val in (b'Compilation Error', b'WARNING: Nothing to do',)]))
print("\033[0;32mAnticipated error(s) correctly thrown, exceptions pass.\033[0m" if passed else "\033[0;31mExpected error not thrown!\033[0m")
success += int(not passed)
if success != 0:
failed_targets.append(target)
print(f"\n\033[0;32m All builds and tests successful! Tested against {','.join(targets)}.\033[0m\n" if success == 0 else
f"\n\033[0;31m Builds and/or tests failed :(. Tested against {','.join(failed_targets)}\033[0m\n")
sys.exit(success)
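# Example invocation (hedged; the script name and target names are placeholders that
# gather_targets resolves from argv):
#   python <this_script>.py postgres snowflake
# The accumulated exit status is non-zero if any target's build, tests or exception
# checks failed.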
|
py | 1a4f6ae2e8a7fac81ebda1e8a5f8bae3ae763cea | cancerlist = ["PANCANCER"]
input_file1 = []
output_file1 = []
threshold = 0.2
probe_count = 485577
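# For each cancer type, read the per-probe fold-change/p-value table, skip probes flagged
# "NoSamples" or missing statistics, and write out only the CpG sites whose third value
# (treated here as the p-value) is at most 0.05.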
for i in range(0, len(cancerlist)) :
input_file1.append(open(str(threshold) + ".Cutoff.FC.Pvalue." + cancerlist[i] + ".txt", 'r'))
output_file1.append(open(str(threshold) + ".MeaningfulCpGsitesByPvalue0.05.Without.Extreme." + cancerlist[i] + ".txt", 'w'))
input_file1[i].readline()
for j in range(0, probe_count) :
line1 = input_file1[i].readline().split()
site_id = line1.pop(0)
if(line1[0] == "NoSamples" or len(line1) == 1) : continue
if(float(line1[2]) > 0.05) : continue
printline = site_id
for k in range(0, len(line1)) :
printline += "\t" + line1[k]
printline += "\n"
output_file1[i].write(printline)
if(j % 10000 == 0) : print(cancerlist[i] + " %d completed." % j)
|
py | 1a4f6b40350f8bf4541914cc64b4c9ce24fb24d4 | from rest_framework.viewsets import ModelViewSet
from super_moite_moite.models import Categorie, Tache, PointTache, TrackTache
from super_moite_moite.serializers import LogementSerializer, CategorieSerializer, TacheSerializer, \
PointTacheSerializer, TrackTacheSerializer, TrackTacheSerializerSansProfil
class LogementView(ModelViewSet):
serializer_class = LogementSerializer
def get_queryset(self):
return self.request.user.profil.logements.all()
class CategorieView(ModelViewSet):
serializer_class = CategorieSerializer
def get_queryset(self):
return Categorie.objects.filter(logement__habitants=self.request.user.profil)
class TacheView(ModelViewSet):
serializer_class = TacheSerializer
def get_queryset(self):
return Tache.objects.filter(categorie__logement__habitants=self.request.user.profil)
class PointTacheView(ModelViewSet):
serializer_class = PointTacheSerializer
def get_queryset(self):
return PointTache.objects.filter(tache__categorie__logement__habitants=self.request.user.profil)
class TrackTacheView(ModelViewSet):
serializer_class = TrackTacheSerializer
def get_queryset(self):
return TrackTache.objects.filter(tache__categorie__logement__habitants=self.request.user.profil)
def get_serializer_class(self):
if self.action == 'update' or self.action == 'partial_update':
return super().get_serializer_class()
return TrackTacheSerializerSansProfil
def perform_create(self, serializer):
serializer.save(profil=self.request.user.profil)
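# URL wiring sketch (hedged; this belongs in urls.py and the route prefixes are
# assumptions). basename is required because these viewsets define get_queryset()
# instead of a queryset attribute:
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register('logements', LogementView, basename='logement')
#   router.register('taches', TacheView, basename='tache')
#   router.register('tracks', TrackTacheView, basename='tracktache')
#   urlpatterns = router.urls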
|
py | 1a4f6bb4b2e035a311133a0d00750966372723be | #!/usr/bin/python
import argparse
import os
import subprocess
class UnsafeName(Exception):
pass
class UnsafeInstallDirectory(Exception):
pass
class UnsafeCmd(Exception):
pass
TEMPLATE = """
### BEGIN INIT INFO
# Provides: GPIO Polling with web reporting
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: GPIO Polling
# Description: GPIO Polling with reporting to house-monitor
### END INIT INFO
#! /bin/bash
# /etc/init.d/PROJECT_NAME
# This init script created by:
# https://github.com/DonGar/github-service-wrapper.git
DAEMON_PATH="PROJECT_PATH"
DAEMON="PROJECT_CMD"
NAME="PROJECT_NAME"
DESC="My daemon description"
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
case "$1" in
start)
printf "%-50s" "Starting $NAME..."
cd $DAEMON_PATH
git pull
nohup ./$DAEMON > /dev/null 2>&1 &
PID=$!
if [ -z $PID ]; then
printf "%s\n" "Fail"
else
echo $PID > $PIDFILE
printf "%s\n" "Ok"
fi
;;
status)
printf "%-50s" "Checking $NAME..."
if [ -f $PIDFILE ]; then
PID=`cat $PIDFILE`
if [ -z "`ps axf | grep ${PID} | grep -v grep`" ]; then
printf "%s\n" "Process dead but pidfile exists"
else
echo "Running"
fi
else
printf "%s\n" "Service not running"
fi
;;
stop)
printf "%-50s" "Stopping $NAME"
PID=`cat $PIDFILE`
cd $DAEMON_PATH
if [ -f $PIDFILE ]; then
kill -HUP $PID
printf "%s\n" "Ok"
rm -f $PIDFILE
else
printf "%s\n" "pidfile not found"
fi
;;
restart)
$0 stop
$0 start
;;
*)
echo "Usage: $0 {status|start|stop|restart}"
exit 1
esac
"""
def parse_args():
parser = argparse.ArgumentParser(
description='Install new init script for github project.')
parser.add_argument('--name', help='Service name.')
parser.add_argument('--path',
help='Where the git repo is created (Default: /usr/local/<project>.')
parser.add_argument('url', help='URL to clone the git repo from.')
parser.add_argument('cmd', help='Command to run as the service.')
parser.add_argument('cmd_args', nargs=argparse.REMAINDER)
args = parser.parse_args()
# Discover the default name if there isn't an explicit one.
if args.name is None:
# 'https://github.com/DonGar/github-service-wrapper.git' >
# 'github-service-wrapper.git' ->
# ('github-service-wrapper', '.git') ->
# 'github-service-wrapper'
args.name = os.path.splitext(os.path.basename(args.url))[0]
if args.path is None:
args.path = os.path.join('/usr/local', args.name)
# Turn the install path into an absolute path.
args.path = os.path.abspath(args.path)
args.cmd = [args.cmd] + args.cmd_args
return args
def sanity_check(init_script_name, name, path):
# name should be a simple name with no path elements.
if os.path.dirname(name) != '':
raise UnsafeName('Name should be simple: %s' % name)
# If the init script already exists, ensure we created it previously.
if os.path.exists(init_script_name):
with open(init_script_name, 'r') as init_script:
contents = init_script.read()
# If doesn't contain this string, we didn't creat it.
if contents.find('github-service-wrapper') == -1:
raise UnsafeName('Trying to replace existing %s.' % init_script_name)
def clone_repo(url, path):
# If the git clone target dir exists, or is empty, do a clone.
if not os.path.exists(path) or not os.listdir(path):
subprocess.check_call(['git', 'clone', url, path])
return
# If the dir eixsts and is not a git repo, don't use it. It's dangerous.
if not os.path.exists(os.path.join(path, '.git')):
raise UnsafeInstallDirectory('Install dir is not empty: %s' % path)
# If the dir exists, and is a git repo, see if it points to our URL.
old_url = subprocess.check_output(['git', 'config', 'remote.origin.url'],
cwd=path)
old_url = old_url.strip()
if url != old_url:
raise UnsafeInstallDirectory('Install dir contains a checkout from: %s' %
old_url)
subprocess.check_call(['git', 'pull'], cwd=path)
def install_init_d(init_script_name, name, path, cmd):
# Create the init script by filling in values in TEMPLATE.
template = TEMPLATE
template = template.replace('PROJECT_NAME', name)
template = template.replace('PROJECT_PATH', path)
template = template.replace('PROJECT_CMD', ' '.join(cmd))
# Write out the template.
with open(init_script_name, 'w+') as init_script:
init_script.write(template)
# Make it executable.
os.chmod(init_script_name, 0755)
def sanity_check_cmd(path, cmd):
cmd_name = os.path.join(path, cmd[0])
if not os.path.isfile(cmd_name) or not os.access(cmd_name, os.X_OK):
raise UnsafeCmd('Command not present in checkout: "%s"' % cmd_name)
def main():
args = parse_args()
init_script_name = os.path.join('/etc/init.d/', args.name)
print 'Setting up: %s' % args.name
print ' Cloning from: %s' % args.url
print ' Into: %s' % args.path
print ' Script: %s' % init_script_name
print ' Daemon: %s' % ' '.join(args.cmd)
# Sanity check args
sanity_check(init_script_name, args.name, args.path)
# Setup the new repo.
clone_repo(args.url, args.path)
# Verify that the command exists in the repo.
sanity_check_cmd(args.path, args.cmd)
# Setup the init script, and set it to run.
install_init_d(init_script_name, args.name, args.path, args.cmd)
subprocess.check_call(['update-rc.d', args.name, 'defaults'])
if __name__ == "__main__":
main()
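# Example invocation (hedged; the script name, URL and daemon command are placeholders):
#   sudo ./github-service-wrapper.py --name house-monitor \
#       https://github.com/example/house-monitor.git run_server.sh --port 8080
# This clones the repo into /usr/local/house-monitor, writes /etc/init.d/house-monitor
# from the template above, and registers it with update-rc.d.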
|
py | 1a4f6cef54d45c48a093792e61900d725aa08d9b | import pytest
import time
import json
from swsscommon import swsscommon
CFG_VLAN_SUB_INTF_TABLE_NAME = "VLAN_SUB_INTERFACE"
CFG_PORT_TABLE_NAME = "PORT"
STATE_PORT_TABLE_NAME = "PORT_TABLE"
STATE_INTERFACE_TABLE_NAME = "INTERFACE_TABLE"
APP_INTF_TABLE_NAME = "INTF_TABLE"
ASIC_RIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE"
ASIC_ROUTE_ENTRY_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY"
ADMIN_STATUS = "admin_status"
class TestSubPortIntf(object):
PHYSICAL_PORT_UNDER_TEST = "Ethernet64"
SUB_PORT_INTERFACE_UNDER_TEST = "Ethernet64.10"
IPV4_ADDR_UNDER_TEST = "10.0.0.33/31"
IPV4_TOME_UNDER_TEST = "10.0.0.33/32"
IPV4_SUBNET_UNDER_TEST = "10.0.0.32/31"
IPV6_ADDR_UNDER_TEST = "fc00::41/126"
IPV6_TOME_UNDER_TEST = "fc00::41/128"
IPV6_SUBNET_UNDER_TEST = "fc00::40/126"
def connect_dbs(self, dvs):
self.config_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)
self.state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0)
self.appl_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
self.asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
def set_parent_port_admin_status(self, port_name, status):
fvs = swsscommon.FieldValuePairs([(ADMIN_STATUS, status)])
tbl = swsscommon.Table(self.config_db, CFG_PORT_TABLE_NAME)
tbl.set(port_name, fvs)
time.sleep(1)
def create_sub_port_intf_profile(self, sub_port_intf_name):
fvs = swsscommon.FieldValuePairs([(ADMIN_STATUS, "up")])
tbl = swsscommon.Table(self.config_db, CFG_VLAN_SUB_INTF_TABLE_NAME)
tbl.set(sub_port_intf_name, fvs)
time.sleep(1)
def add_sub_port_intf_ip_addr(self, sub_port_intf_name, ip_addr):
fvs = swsscommon.FieldValuePairs([("NULL", "NULL")])
tbl = swsscommon.Table(self.config_db, CFG_VLAN_SUB_INTF_TABLE_NAME)
tbl.set(sub_port_intf_name + "|" + ip_addr, fvs)
time.sleep(2)
def set_sub_port_intf_admin_status(self, sub_port_intf_name, status):
fvs = swsscommon.FieldValuePairs([(ADMIN_STATUS, status)])
tbl = swsscommon.Table(self.config_db, CFG_VLAN_SUB_INTF_TABLE_NAME)
tbl.set(sub_port_intf_name, fvs)
time.sleep(1)
def remove_sub_port_intf_profile(self, sub_port_intf_name):
tbl = swsscommon.Table(self.config_db, CFG_VLAN_SUB_INTF_TABLE_NAME)
tbl._del(sub_port_intf_name)
time.sleep(1)
def remove_sub_port_intf_ip_addr(self, sub_port_intf_name, ip_addr):
tbl = swsscommon.Table(self.config_db, CFG_VLAN_SUB_INTF_TABLE_NAME)
tbl._del(sub_port_intf_name + "|" + ip_addr)
time.sleep(1)
def get_oids(self, table):
tbl = swsscommon.Table(self.asic_db, table)
return set(tbl.getKeys())
def get_newly_created_oid(self, table, old_oids):
new_oids = self.get_oids(table)
oid = list(new_oids - old_oids)
assert len(oid) == 1, "Wrong # of newly created oids: %d, expected #: 1." % (len(oid))
return oid[0]
def check_sub_port_intf_key_existence(self, db, table_name, key):
tbl = swsscommon.Table(db, table_name)
keys = tbl.getKeys()
assert key in keys, "Key %s not exist" % (key)
def check_sub_port_intf_fvs(self, db, table_name, key, fv_dict):
tbl = swsscommon.Table(db, table_name)
keys = tbl.getKeys()
assert key in keys
(status, fvs) = tbl.get(key)
assert status == True
assert len(fvs) >= len(fv_dict)
for field, value in fvs:
if field in fv_dict:
assert fv_dict[field] == value, \
"Wrong value for field %s: %s, expected value: %s" % (field, value, fv_dict[field])
def check_sub_port_intf_route_entries(self):
ipv4_ip2me_found = False
ipv4_subnet_found = False
ipv6_ip2me_found = False
ipv6_subnet_found = False
tbl = swsscommon.Table(self.asic_db, ASIC_ROUTE_ENTRY_TABLE)
raw_route_entries = tbl.getKeys()
for raw_route_entry in raw_route_entries:
route_entry = json.loads(raw_route_entry)
if route_entry["dest"] == self.IPV4_TOME_UNDER_TEST:
ipv4_ip2me_found = True
elif route_entry["dest"] == self.IPV4_SUBNET_UNDER_TEST:
ipv4_subnet_found = True
elif route_entry["dest"] == self.IPV6_TOME_UNDER_TEST:
ipv6_ip2me_found = True
elif route_entry["dest"] == self.IPV6_SUBNET_UNDER_TEST:
ipv6_subnet_found = True
assert ipv4_ip2me_found and ipv4_subnet_found and ipv6_ip2me_found and ipv6_subnet_found
def check_sub_port_intf_key_removal(self, db, table_name, key):
tbl = swsscommon.Table(db, table_name)
keys = tbl.getKeys()
assert key not in keys, "Key %s not removed" % (key)
def check_sub_port_intf_route_entries_removal(self, removed_route_entries):
tbl = swsscommon.Table(self.asic_db, ASIC_ROUTE_ENTRY_TABLE)
raw_route_entries = tbl.getKeys()
for raw_route_entry in raw_route_entries:
route_entry = json.loads(raw_route_entry)
assert route_entry["dest"] not in removed_route_entries
def test_sub_port_intf_creation(self, dvs):
self.connect_dbs(dvs)
old_rif_oids = self.get_oids(ASIC_RIF_TABLE)
self.set_parent_port_admin_status(self.PHYSICAL_PORT_UNDER_TEST, "up")
self.create_sub_port_intf_profile(self.SUB_PORT_INTERFACE_UNDER_TEST)
# Verify that sub port interface state ok is pushed to STATE_DB by Intfmgrd
fv_dict = {
"state": "ok",
}
self.check_sub_port_intf_fvs(self.state_db, STATE_PORT_TABLE_NAME, self.SUB_PORT_INTERFACE_UNDER_TEST, fv_dict)
# Verify that sub port interface configuration is synced to APPL_DB INTF_TABLE by Intfmgrd
fv_dict = {
ADMIN_STATUS: "up",
}
self.check_sub_port_intf_fvs(self.appl_db, APP_INTF_TABLE_NAME, self.SUB_PORT_INTERFACE_UNDER_TEST, fv_dict)
# Verify that a sub port router interface entry is created in ASIC_DB
fv_dict = {
"SAI_ROUTER_INTERFACE_ATTR_TYPE": "SAI_ROUTER_INTERFACE_TYPE_SUB_PORT",
"SAI_ROUTER_INTERFACE_ATTR_OUTER_VLAN_ID": "10",
"SAI_ROUTER_INTERFACE_ATTR_ADMIN_V4_STATE": "true",
"SAI_ROUTER_INTERFACE_ATTR_ADMIN_V6_STATE": "true",
"SAI_ROUTER_INTERFACE_ATTR_MTU": "9100",
}
rif_oid = self.get_newly_created_oid(ASIC_RIF_TABLE, old_rif_oids)
self.check_sub_port_intf_fvs(self.asic_db, ASIC_RIF_TABLE, rif_oid, fv_dict)
# Remove a sub port interface
self.remove_sub_port_intf_profile(self.SUB_PORT_INTERFACE_UNDER_TEST)
def test_sub_port_intf_add_ip_addrs(self, dvs):
self.connect_dbs(dvs)
old_rif_oids = self.get_oids(ASIC_RIF_TABLE)
self.set_parent_port_admin_status(self.PHYSICAL_PORT_UNDER_TEST, "up")
self.create_sub_port_intf_profile(self.SUB_PORT_INTERFACE_UNDER_TEST)
self.add_sub_port_intf_ip_addr(self.SUB_PORT_INTERFACE_UNDER_TEST, self.IPV4_ADDR_UNDER_TEST)
self.add_sub_port_intf_ip_addr(self.SUB_PORT_INTERFACE_UNDER_TEST, self.IPV6_ADDR_UNDER_TEST)
rif_oid = self.get_newly_created_oid(ASIC_RIF_TABLE, old_rif_oids)
# Verify that ip address state ok is pushed to STATE_DB INTERFACE_TABLE by Intfmgrd
fv_dict = {
"state": "ok",
}
self.check_sub_port_intf_fvs(self.state_db, STATE_INTERFACE_TABLE_NAME, \
self.SUB_PORT_INTERFACE_UNDER_TEST + "|" + self.IPV4_ADDR_UNDER_TEST, fv_dict)
self.check_sub_port_intf_fvs(self.state_db, STATE_INTERFACE_TABLE_NAME, \
self.SUB_PORT_INTERFACE_UNDER_TEST + "|" + self.IPV6_ADDR_UNDER_TEST, fv_dict)
# Verify that ip address configuration is synced to APPL_DB INTF_TABLE by Intfmgrd
fv_dict = {
"scope": "global",
"family": "IPv4",
}
self.check_sub_port_intf_fvs(self.appl_db, APP_INTF_TABLE_NAME, \
self.SUB_PORT_INTERFACE_UNDER_TEST + ":" + self.IPV4_ADDR_UNDER_TEST, fv_dict)
fv_dict["family"] = "IPv6"
self.check_sub_port_intf_fvs(self.appl_db, APP_INTF_TABLE_NAME, \
self.SUB_PORT_INTERFACE_UNDER_TEST + ":" + self.IPV6_ADDR_UNDER_TEST, fv_dict)
# Verify that an IPv4 ip2me route entry is created in ASIC_DB
# Verify that an IPv4 subnet route entry is created in ASIC_DB
# Verify that an IPv6 ip2me route entry is created in ASIC_DB
# Verify that an IPv6 subnet route entry is created in ASIC_DB
self.check_sub_port_intf_route_entries()
# Remove IP addresses
self.remove_sub_port_intf_ip_addr(self.SUB_PORT_INTERFACE_UNDER_TEST, self.IPV4_ADDR_UNDER_TEST)
self.remove_sub_port_intf_ip_addr(self.SUB_PORT_INTERFACE_UNDER_TEST, self.IPV6_ADDR_UNDER_TEST)
# Remove a sub port interface
self.remove_sub_port_intf_profile(self.SUB_PORT_INTERFACE_UNDER_TEST)
def test_sub_port_intf_admin_status_change(self, dvs):
self.connect_dbs(dvs)
old_rif_oids = self.get_oids(ASIC_RIF_TABLE)
self.set_parent_port_admin_status(self.PHYSICAL_PORT_UNDER_TEST, "up")
self.create_sub_port_intf_profile(self.SUB_PORT_INTERFACE_UNDER_TEST)
self.add_sub_port_intf_ip_addr(self.SUB_PORT_INTERFACE_UNDER_TEST, self.IPV4_ADDR_UNDER_TEST)
self.add_sub_port_intf_ip_addr(self.SUB_PORT_INTERFACE_UNDER_TEST, self.IPV6_ADDR_UNDER_TEST)
fv_dict = {
ADMIN_STATUS: "up",
}
self.check_sub_port_intf_fvs(self.appl_db, APP_INTF_TABLE_NAME, self.SUB_PORT_INTERFACE_UNDER_TEST, fv_dict)
fv_dict = {
"SAI_ROUTER_INTERFACE_ATTR_ADMIN_V4_STATE": "true",
"SAI_ROUTER_INTERFACE_ATTR_ADMIN_V6_STATE": "true",
"SAI_ROUTER_INTERFACE_ATTR_MTU": "9100",
}
rif_oid = self.get_newly_created_oid(ASIC_RIF_TABLE, old_rif_oids)
self.check_sub_port_intf_fvs(self.asic_db, ASIC_RIF_TABLE, rif_oid, fv_dict)
# Change sub port interface admin status to down
self.set_sub_port_intf_admin_status(self.SUB_PORT_INTERFACE_UNDER_TEST, "down")
# Verify that sub port interface admin status change is synced to APPL_DB INTF_TABLE by Intfmgrd
fv_dict = {
ADMIN_STATUS: "down",
}
self.check_sub_port_intf_fvs(self.appl_db, APP_INTF_TABLE_NAME, self.SUB_PORT_INTERFACE_UNDER_TEST, fv_dict)
# Verify that sub port router interface entry in ASIC_DB has the updated admin status
fv_dict = {
"SAI_ROUTER_INTERFACE_ATTR_ADMIN_V4_STATE": "false",
"SAI_ROUTER_INTERFACE_ATTR_ADMIN_V6_STATE": "false",
"SAI_ROUTER_INTERFACE_ATTR_MTU": "9100",
}
rif_oid = self.get_newly_created_oid(ASIC_RIF_TABLE, old_rif_oids)
self.check_sub_port_intf_fvs(self.asic_db, ASIC_RIF_TABLE, rif_oid, fv_dict)
# Change sub port interface admin status to up
self.set_sub_port_intf_admin_status(self.SUB_PORT_INTERFACE_UNDER_TEST, "up")
# Verify that sub port interface admin status change is synced to APPL_DB INTF_TABLE by Intfmgrd
fv_dict = {
ADMIN_STATUS: "up",
}
self.check_sub_port_intf_fvs(self.appl_db, APP_INTF_TABLE_NAME, self.SUB_PORT_INTERFACE_UNDER_TEST, fv_dict)
# Verify that sub port router interface entry in ASIC_DB has the updated admin status
fv_dict = {
"SAI_ROUTER_INTERFACE_ATTR_ADMIN_V4_STATE": "true",
"SAI_ROUTER_INTERFACE_ATTR_ADMIN_V6_STATE": "true",
"SAI_ROUTER_INTERFACE_ATTR_MTU": "9100",
}
rif_oid = self.get_newly_created_oid(ASIC_RIF_TABLE, old_rif_oids)
self.check_sub_port_intf_fvs(self.asic_db, ASIC_RIF_TABLE, rif_oid, fv_dict)
# Remove IP addresses
self.remove_sub_port_intf_ip_addr(self.SUB_PORT_INTERFACE_UNDER_TEST, self.IPV4_ADDR_UNDER_TEST)
self.remove_sub_port_intf_ip_addr(self.SUB_PORT_INTERFACE_UNDER_TEST, self.IPV6_ADDR_UNDER_TEST)
# Remove a sub port interface
self.remove_sub_port_intf_profile(self.SUB_PORT_INTERFACE_UNDER_TEST)
def test_sub_port_intf_remove_ip_addrs(self, dvs):
self.connect_dbs(dvs)
old_rif_oids = self.get_oids(ASIC_RIF_TABLE)
self.set_parent_port_admin_status(self.PHYSICAL_PORT_UNDER_TEST, "up")
self.create_sub_port_intf_profile(self.SUB_PORT_INTERFACE_UNDER_TEST)
self.add_sub_port_intf_ip_addr(self.SUB_PORT_INTERFACE_UNDER_TEST, self.IPV4_ADDR_UNDER_TEST)
self.add_sub_port_intf_ip_addr(self.SUB_PORT_INTERFACE_UNDER_TEST, self.IPV6_ADDR_UNDER_TEST)
rif_oid = self.get_newly_created_oid(ASIC_RIF_TABLE, old_rif_oids)
# Remove IPv4 address
self.remove_sub_port_intf_ip_addr(self.SUB_PORT_INTERFACE_UNDER_TEST, self.IPV4_ADDR_UNDER_TEST)
# Verify that IPv4 address state ok is removed from STATE_DB INTERFACE_TABLE by Intfmgrd
self.check_sub_port_intf_key_removal(self.state_db, STATE_INTERFACE_TABLE_NAME, \
self.SUB_PORT_INTERFACE_UNDER_TEST + "|" + self.IPV4_ADDR_UNDER_TEST)
# Verify that IPv4 address configuration is removed from APPL_DB INTF_TABLE by Intfmgrd
self.check_sub_port_intf_key_removal(self.appl_db, APP_INTF_TABLE_NAME, \
self.SUB_PORT_INTERFACE_UNDER_TEST + ":" + self.IPV4_ADDR_UNDER_TEST)
# Verify that IPv4 subnet route entry is removed from ASIC_DB
# Verify that IPv4 ip2me route entry is removed from ASIC_DB
removed_route_entries = set([self.IPV4_TOME_UNDER_TEST, self.IPV4_SUBNET_UNDER_TEST])
self.check_sub_port_intf_route_entries_removal(removed_route_entries)
# Remove IPv6 address
self.remove_sub_port_intf_ip_addr(self.SUB_PORT_INTERFACE_UNDER_TEST, self.IPV6_ADDR_UNDER_TEST)
# Verify that IPv6 address state ok is removed from STATE_DB INTERFACE_TABLE by Intfmgrd
self.check_sub_port_intf_key_removal(self.state_db, STATE_INTERFACE_TABLE_NAME, \
self.SUB_PORT_INTERFACE_UNDER_TEST + "|" + self.IPV6_ADDR_UNDER_TEST)
# Verify that IPv6 address configuration is removed from APPL_DB INTF_TABLE by Intfmgrd
self.check_sub_port_intf_key_removal(self.appl_db, APP_INTF_TABLE_NAME, \
self.SUB_PORT_INTERFACE_UNDER_TEST + ":" + self.IPV6_ADDR_UNDER_TEST)
# Verify that IPv6 subnet route entry is removed from ASIC_DB
# Verify that IPv6 ip2me route entry is removed from ASIC_DB
removed_route_entries.update([self.IPV6_TOME_UNDER_TEST, self.IPV6_SUBNET_UNDER_TEST])
self.check_sub_port_intf_route_entries_removal(removed_route_entries)
# Verify that sub port router interface entry still exists in ASIC_DB
self.check_sub_port_intf_key_existence(self.asic_db, ASIC_RIF_TABLE, rif_oid)
# Remove a sub port interface
self.remove_sub_port_intf_profile(self.SUB_PORT_INTERFACE_UNDER_TEST)
def test_sub_port_intf_removal(self, dvs):
self.connect_dbs(dvs)
old_rif_oids = self.get_oids(ASIC_RIF_TABLE)
self.set_parent_port_admin_status(self.PHYSICAL_PORT_UNDER_TEST, "up")
self.create_sub_port_intf_profile(self.SUB_PORT_INTERFACE_UNDER_TEST)
self.add_sub_port_intf_ip_addr(self.SUB_PORT_INTERFACE_UNDER_TEST, self.IPV4_ADDR_UNDER_TEST)
self.add_sub_port_intf_ip_addr(self.SUB_PORT_INTERFACE_UNDER_TEST, self.IPV6_ADDR_UNDER_TEST)
rif_oid = self.get_newly_created_oid(ASIC_RIF_TABLE, old_rif_oids)
fv_dict = {
"state": "ok",
}
self.check_sub_port_intf_fvs(self.state_db, STATE_PORT_TABLE_NAME, self.SUB_PORT_INTERFACE_UNDER_TEST, fv_dict)
fv_dict = {
ADMIN_STATUS: "up",
}
self.check_sub_port_intf_fvs(self.appl_db, APP_INTF_TABLE_NAME, self.SUB_PORT_INTERFACE_UNDER_TEST, fv_dict)
# Remove IP addresses
self.remove_sub_port_intf_ip_addr(self.SUB_PORT_INTERFACE_UNDER_TEST, self.IPV4_ADDR_UNDER_TEST)
self.remove_sub_port_intf_ip_addr(self.SUB_PORT_INTERFACE_UNDER_TEST, self.IPV6_ADDR_UNDER_TEST)
# Remove a sub port interface
self.remove_sub_port_intf_profile(self.SUB_PORT_INTERFACE_UNDER_TEST)
# Verify that sub port interface state ok is removed from STATE_DB by Intfmgrd
self.check_sub_port_intf_key_removal(self.state_db, STATE_PORT_TABLE_NAME, self.SUB_PORT_INTERFACE_UNDER_TEST)
# Verify that sub port interface configuration is removed from APPL_DB INTF_TABLE by Intfmgrd
self.check_sub_port_intf_key_removal(self.appl_db, APP_INTF_TABLE_NAME, self.SUB_PORT_INTERFACE_UNDER_TEST)
# Verify that sub port router interface entry is removed from ASIC_DB
self.check_sub_port_intf_key_removal(self.asic_db, ASIC_RIF_TABLE, rif_oid)
|
py | 1a4f6cfef24d7f05106c26bb141dbc3bac554763 | # -*- coding: utf-8 -*-
"""These are the exceptions thrown by Ferenda. Any of the python
built-in exceptions may be thrown as well, but exceptions in used
third-party libraries should be wrapped in one of these."""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
class FerendaException(Exception):
"""Base class for anything that can go wrong in ferenda."""
class DownloadError(FerendaException):
"""Raised when a download fails in a non-recoverable way."""
class DownloadFileNotFoundError(DownloadError):
"""Raised when we had indication that a particular document should
exist (we have a basefile for it) but on closer examination, it
turns that it doesn't exist after all. This is used when we
can't raise a requests.exceptions.HTTPError 404 error for some
reason."""
pass
class ParseError(FerendaException):
"""Raised when :py:meth:`~ferenda.DocumentRepository.parse` fails in
any way.
"""
class FSMStateError(ParseError):
"""Raised whenever the current state and the current symbol in a
:py:class:`~ferenda.FSMParser` configuration does not have a
defined transition.
"""
class DocumentRemovedError(FerendaException):
"""Raised whenever a particular document has been found to be removed
-- this can happen either during
:py:meth:`~ferenda.DocumentRepository.download` or
:py:meth:`~ferenda.DocumentRepository.parse` (which may be the
case if there exists a physical document, but whose contents are
essentially a placeholder saying that the document has been
removed).
You can set the attribute ``dummyfile`` on this exception when
raising it, preferably to the parsed_path that would be created,
if not this exception had occurred.. If present,
``ferenda-build.py`` (or rather :meth:`ferenda.manager.run`) will
use this to create a dummy file at the indicated path. This
prevents endless re-parsing of expired documents.
"""
def __init__(self, value="", dummyfile=None):
super(DocumentRemovedError, self).__init__(value)
self.dummyfile = dummyfile
class DocumentSkippedError(DocumentRemovedError):
"""Raised if the document should not be processed (even though it may
exist) since it's not interesting."""
class DocumentRenamedError(FerendaException):
def __init__(self, value, returnvalue, oldbasefile, newbasefile):
super(DocumentRenamedError, self).__init__(value)
self.returnvalue = returnvalue
self.oldbasefile = oldbasefile
self.newbasefile = newbasefile
class PatchError(ParseError):
"""Raised if a patch cannot be applied by
:py:meth:`~ferenda.DocumentRepository.patch_if_needed`."""
class NoDownloadedFileError(ParseError):
"""Raised on an attempt to parse a basefile for which there doesn't
exist a downloaded file."""
class InvalidTree(ParseError):
"""Raised when the parsed XHTML tree fails internal validation."""
class AttachmentNameError(ValueError):
"""Raised whenever an invalid attachment name is used with any method
of :py:class:`~ferenda.DocumentStore`."""
class AttachmentPolicyError(ValueError):
"""Raised on any attempt to store an attachment using
:py:class:`~ferenda.DocumentStore` when ``storage_policy`` is not
set to ``dir``.
"""
class ArchivingPolicyError(ValueError):
"""Raised when calling archive_path with
:py:meth:`~ferenda.DocumentStore.archiving_policy` not set to ``zip``."""
class DocumentNotFound(FerendaException):
"""Raised whenever an attempt to remove all traces of a particular basefile failed since there were no traces of any such basefile."""
class ArchivingError(FerendaException):
"""Raised whenever an attempt to archive a document version using
:py:meth:`~ferenda.DocumentStore.archive` fails (for example,
because the archive version already exists).
"""
class ValidationError(FerendaException):
"""Raised whenever a created document doesn't validate using the
appropriate schema."""
class TransformError(FerendaException):
"""Raised whenever a XSLT transformation fails for any reason."""
class ExternalCommandError(FerendaException):
"""Raised whenever any invocation of an external commmand fails for
any reason."""
class ExternalCommandNotFound(FerendaException):
"""Raised whenever any invocation of an external commmand fails """
class ConfigurationError(FerendaException):
"""Raised when a configuration file cannot be found in it's expected
location or when it cannot be used due to corruption, file permissions
or other reasons"""
class TriplestoreError(FerendaException):
"""Raised whenever communications with the triple store fails, for
whatever reason."""
class SparqlError(TriplestoreError):
"""Raised whenever a SPARQL query fails. The Exception should contain
whatever error message that the Triple store returned, so the
exact formatting may be dependent on which store is used.
"""
class IndexingError(FerendaException):
"""Raised whenever an attempt to put text into the fulltext index fails."""
class SearchingError(FerendaException):
"""Raised whenever an attempt to do a full-text search fails."""
class SchemaConflictError(FerendaException):
"""Raised whenever a fulltext index is opened with repo arguments that
result in a different schema than what's currently in
use. Workaround this by removing the fulltext index and
recreating.
"""
class SchemaMappingError(FerendaException):
"""Raised whenever a given field in a schema cannot be mapped to or
from the underlying native field object in an actual
fulltextindex store.
"""
class MaxDownloadsReached(FerendaException):
"""Raised whenever a recursive download operation has reached a
globally set maximum number of requests.
"""
pass
class ResourceNotFound(FerendaException):
"""Raised when :py:class:`~ferenda.ResourceLoader` method is called
with the name of a non-existing resource. """
pass
class PDFFileIsEmpty(FerendaException):
"""Raised when
:py:class:`~ferenda.pdfreader.StreamingPDFReader.convert` tries to
parse the textual content of a PDF, but finds that it has no text
information (maybe because it only contains scanned images).
"""
class PDFDecodeError(FerendaException):
"""Raised when a BaseTextDecoder or subclass encounters a problem decoding a non-standard encoding"""
class RequestHandlerError(FerendaException):
"""Raised when :py:class:`~ferenda.RequestHandler` attempts to handle
an incoming request that it thinks it can support, but fails."""
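# A minimal, commented usage sketch of the exception classes above. The
# `repo`, `basefile` and `log` names are assumptions used only for illustration;
# just the exception classes come from this module:
#
#     try:
#         repo.parse(basefile)
#     except NoDownloadedFileError:
#         log.warning("%s: nothing downloaded yet, skipping" % basefile)
#     except ExternalCommandError as e:
#         log.error("%s: external tool failed: %s" % (basefile, e))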
|
py | 1a4f6d04b3328ebb491660fb7bb38843aeb5be30 | # Generated by Django 3.1.5 on 2021-01-09 05:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auctions', '0003_listing_interestedusers'),
]
operations = [
migrations.AlterField(
model_name='bid',
name='amount',
field=models.DecimalField(decimal_places=2, max_digits=19),
),
migrations.AlterField(
model_name='listing',
name='startingBid',
field=models.DecimalField(decimal_places=2, max_digits=19),
),
]
|
py | 1a4f6d8eae8699f972bada89572a8f1267ba502e | import json
import requests
import sys
import time
from argparse import ArgumentParser
from collections import deque
from os.path import isfile
from tabber import Tabber
def _argparse():
arg_parse = ArgumentParser(description="Crawl last.fm for finnish users, given a seed person or a reference to a "
"file containing one seed name per line. Either a seed name or a seed file "
"is required")
arg_parse.add_argument("api-key", type=str,
help="last.fm api key to use for crawling.")
arg_parse.add_argument("-n", "--name", type=str, default=None,
help="Seed name for crawling names")
arg_parse.add_argument("-i", "--input", type=str, default=None,
help="Seed file for crawling. One name per line.")
arg_parse.add_argument("-o", "--output", type=str, default="fi_names.txt",
help="Output file for the names. One name per line.")
return arg_parse
class User:
def __init__(self, api_key, user_name=None, password=None):
self.api_key = api_key
self.user_name = user_name
self.password = password
class Connection:
def __init__(self, user, base_url):
self.user = user
self.base_url = base_url
self.base_time = time.time()
def get(self, payload):
payload["api_key"] = self.user.api_key
payload["format"] = "json"
if self.base_time + 1 > time.time():
time.sleep(1)
self.base_time = time.time()
r = requests.get(self.base_url, params=payload)
sys.stderr.write("{}: Retrieved {}\n".format(r.status_code, r.url))
sys.stderr.flush()
if r.status_code == 200:
return json.loads(r.text)
return None
def get_user_info(conn : Connection, user_name):
return conn.get({"method": "user.getinfo", "user": user_name})
def get_user_friends(conn : Connection, user_name):
fp = conn.get({"method": "user.getfriends", "user": user_name})
if not fp:
return None
pc = int(fp["friends"]["@attr"]["totalPages"])
fl = fp["friends"]["user"]
if pc < 2:
return fl
for i in range(2, pc + 1):
fp = conn.get({"method": "user.getfriends", "user": user_name, "page": str(i)})
if fp:
fl.extend(fp["friends"]["user"])
return fl
def main(conn, entry_points):
fil = len(entry_points)
nfil = 0
people = 0
with Tabber("retrieved lists", "finns", "other") as tabb, open("fi_names.txt", 'w') as out_file:
out_file.write("".join(["{}\n".format(n) for n in entry_points]))
        found = set(entry_points)  # use a set so the membership checks and .add() calls below work even when the seed is a list
uq = deque()
uq.extend(entry_points)
while len(uq) > 0 and fil < 100000:
un = uq.popleft()
fl = get_user_friends(conn, un)
people += 1
if not fl:
continue
for fr in fl:
if "country" in fr and fr["country"] == "Finland" and "name" in fr and fr["name"] not in found:
out_file.write("{}\n".format(fr["name"]))
fil += 1
uq.append(fr["name"])
found.add(fr["name"])
elif "name" in fr and fr["name"] not in found:
found.add(fr["name"])
nfil += 1
tabb(people, fil, nfil)
print("Found {} people with country == Finland".format(fil))
print("Found {} people where not country == Finland".format(nfil))
def read_names(seed_file):
with open(seed_file) as in_file:
return {e[:-1] for e in in_file.readlines()}
if __name__ == "__main__":
args = _argparse().parse_args()
assert args.name or args.input, "either seed file or seed string needs to be supplied"
seed = [args.name]
if args.input and isfile(args.input):
seed = read_names(args.input)
main(Connection(User(sys.argv[1]), "http://ws.audioscrobbler.com/2.0/"), seed)
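# Example invocation (the script filename is an assumption and the API key is a
# placeholder); the positional api-key must come first, since the Connection is
# built from sys.argv[1]:
#
#     python lastfm_crawl.py <LASTFM_API_KEY> -n some_seed_user -o fi_names.txt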
|
py | 1a4f6dae64e77cc8d48bf7444d02243ce537acfb | ###############################################################################
# #
# MeerKAT 1-Million star target list: #
# download_large_query.py #
# #
# Written by Logan A. Pearce (2018) #
# #
###############################################################################
#
# This script queries the Gaia archive in batches of 3M results (the query limit
# without special authorization) for all Gaia targets meeting the data quality filters
# and returns the results as .csv files. Depending on the Gaia server speed, it takes
# around 12 hours to complete as written.
#
# Requires:
# python packages numpy, astroquery
#
# Input:
# none
#
# Output:
# .csv files containing the results of the queries in 3M row batches ordered
# by parallax value descending
#
# Useage:
# download_large_query.py
import numpy as np
from astroquery.gaia import Gaia
import time
start = time.time()
# Initial max parallax value:
para_i = 800
# this is set to 800 because it is larger than the parallax for Proxima Centauri, but smaller than parallaxes
# for the solar system objects in the catalog.
# Max interations: If we get a max of 3M results per query, and we eventually want 32M,
# count_max should be 11 to get 33M (the last one will be truncated)
count_max = 11
count = 0
while count<count_max:
start2 = time.time()
    print('Performing query', count + 1)
querystring = "SELECT source_id, ref_epoch, ra, ra_error, dec, dec_error, parallax, parallax_error, parallax_over_error, parallax_error/parallax AS frac_para_error, pmra, pmra_error, pmdec, pmdec_error, astrometric_n_obs_al, astrometric_chi2_al, astrometric_excess_noise, astrometric_excess_noise_sig, visibility_periods_used, phot_g_n_obs, phot_g_mean_flux, phot_g_mean_flux_error, phot_g_mean_flux_over_error, phot_g_mean_mag,phot_bp_mean_mag,phot_rp_mean_mag, bp_rp,bp_G,G_rp, radial_velocity,radial_velocity_error, l,b,ecl_lat,ecl_lon,priam_flags, teff_val,teff_percentile_lower,teff_percentile_upper, a_g_val, a_g_percentile_lower,a_g_percentile_upper, radius_val,radius_percentile_upper,radius_percentile_lower, lum_val,lum_percentile_upper,lum_percentile_lower \
FROM gaiadr2.gaia_source \
WHERE parallax < "+str(para_i)+" \
AND parallax_over_error > 20 \
AND phot_g_mean_flux_over_error>50 \
AND phot_rp_mean_flux_over_error>20 \
AND phot_bp_mean_flux_over_error>20 \
AND phot_bp_rp_excess_factor < 1.3+0.06*power(phot_bp_mean_mag-phot_rp_mean_mag,2) \
AND phot_bp_rp_excess_factor > 1.0+0.015*power(phot_bp_mean_mag-phot_rp_mean_mag,2) \
AND visibility_periods_used>=8 AND astrometric_chi2_al/(astrometric_n_good_obs_al-5)<1.44*greatest(1,exp(-0.4*(phot_g_mean_mag-19.5)))\
ORDER BY parallax DESC"
job = Gaia.launch_job_async(query=querystring,\
verbose=False, dump_to_file=True, output_format='csv')
c=job.get_results()
para_i = np.min(c['parallax'])
count=count+1
end2 = time.time()
    print('Took', (end2 - start2) / 60. / 60., 'hours')
    print('')
    print(job)
#jobid = raw_input('Enter Job ID')
#j = Gaia.remove_jobs(str(jobid))
print('Done.')
end = time.time()
print('Took', (end - start) / 60. / 60., 'hours')
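# A possible follow-up step (not part of this script): stacking the per-batch
# CSV dumps into one table. The file-name pattern below is an assumption, since
# launch_job_async() chooses the dump file names itself:
#
#     import glob
#     from astropy.table import Table, vstack
#     combined = vstack([Table.read(f, format='csv') for f in sorted(glob.glob('async_*.csv'))])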
|
py | 1a4f6eaaae6037660bf229c0b6ba84efc86638bb | #! /usr/bin/env python
# Author: Abhilash Raj
#
# This is the primary deployment script for the docker-mailman repo. It handles
# deployment for both stable and rolling releases. It should always be invoked,
# and it will make the deployment decision based on the environment variables
# that it sees.
#
# There are primarily two kinds of deployments:
# 1. Rolling tags, which are built from each commit. These are typically run
# in CI every day as well. These always update the "rolling" tag in the
# docker registry.
# 2. Stable tags, which are built from git tags of the form "va.b.c". We don't
# do the tag verification because for now, Circle CI does this for us. We
# will tag and release a stable version whenever the right ENV var is set.
#
# Publishing:
# We are typically publishing all the images to three repositories:
# 1. Docker Hub: This is now rate-limited and might cause issues for people
# pulling too frequently.
# 2. Quay: This is an old registry that we started publishing too, so let's
# continue publishing here too.
# 3. Github Registry: This is the newest one in the mix and supports free
# uploads and downloads (without very strict rate limits like Dockerhub.)
import os
import subprocess
#: Default user, which owns the repositories.
USER = 'maxking'
TAG_VAR = 'CIRCLE_TAG'
BRANCH_VAR = 'CIRCLE_BRANCH'
PRIMARY_BRANCH = 'master'
def tag(original, final):
"""Tag the source image with final tag."""
try:
print('Tagging {0} to {1}'.format(original, final))
subprocess.run(
['docker', 'tag', original, final],
check=True,
)
except subprocess.CalledProcessError:
print('Failed to tag {0}'.format(original))
def login(url):
"""Login to the registry."""
if 'quay' in url.lower():
password = os.environ['QUAY_PASSWORD']
elif 'docker' in url.lower():
password = os.environ['DOCKER_PASSWORD']
elif 'ghcr' in url.lower():
password = os.environ['GITHUB_PASSWORD']
else:
print('Password not found for {0}'.format(url))
return None
print('Logging in to {0}'.format(url))
subprocess.run(
['docker', 'login', '-u', USER, '-p', password, url],
check=True
)
print('Logged in to {0}'.format(url))
def push(image):
"""Push all the images."""
print('Pushing {}'.format(image))
subprocess.run(['docker', 'push', image], check=True)
def tag_and_push(image_names, url, img_tag):
"""Given the URL to repository, tag and push the images."""
# Tag recently built images.
source, final = image_names
tag(source, final)
    # Finally, push the tagged image.
push(final)
def main():
"""Primary entrypoint to this script."""
if os.environ.get(TAG_VAR) not in (None, ''):
img_tag = os.environ.get(TAG_VAR)
elif os.environ.get(BRANCH_VAR) == PRIMARY_BRANCH:
img_tag = 'rolling'
else:
        print('Not running on the master branch or a Git tag, so not publishing...')
exit(0)
for url in ('quay.io', 'docker.io', 'ghcr.io'):
core = ('maxking/mailman-core:rolling',
'{0}/maxking/mailman-core:{1}'.format(url, img_tag))
web = ('maxking/mailman-web:rolling',
'{0}/maxking/mailman-web:{1}'.format(url, img_tag))
postorius = ('maxking/postorius:rolling',
'{0}/maxking/postorius:{1}'.format(url, img_tag))
try:
login(url)
except subprocess.CalledProcessError:
print('Failed to login to {}'.format(url))
continue
tag_and_push(core, url, img_tag)
tag_and_push(web, url, img_tag)
tag_and_push(postorius, url, img_tag)
if __name__ == '__main__':
main()
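# Example local invocation (hypothetical; in CI, CircleCI provides CIRCLE_TAG or
# CIRCLE_BRANCH and the registry passwords come from project secrets). The tag
# value is only a placeholder following the "va.b.c" convention described above:
#
#     CIRCLE_TAG=v3.3.3 DOCKER_PASSWORD=... QUAY_PASSWORD=... GITHUB_PASSWORD=... \
#         python deploy.py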
|
py | 1a4f6ebe5b8856426cfd140b6f3f7139cb07f23b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function
import json
import os
import tempfile
import tarfile
import sys
from astropy.extern import six
from astropy.io import fits
from astropy import log
import astropy.units
import astropy.io.votable as votable
from ..query import BaseQuery
from ..utils import commons
from ..utils import async_to_sync
from . import conf
from ..exceptions import TableParseError
from .. import version
from astropy.coordinates.name_resolve import sesame_database
@async_to_sync
class ESASkyClass(BaseQuery):
URLbase = conf.urlBase
TIMEOUT = conf.timeout
DEFAULT_ROW_LIMIT = conf.row_limit
__FITS_STRING = ".fits"
__FTZ_STRING = ".FTZ"
__TAR_STRING = ".tar"
__ALL_STRING = "all"
__CATALOGS_STRING = "catalogs"
__OBSERVATIONS_STRING = "observations"
__MISSION_STRING = "mission"
__TAP_TABLE_STRING = "tapTable"
__TAP_NAME_STRING = "tapName"
__LABEL_STRING = "label"
__METADATA_STRING = "metadata"
__PRODUCT_URL_STRING = "product_url"
__SOURCE_LIMIT_STRING = "sourceLimit"
__POLYGON_NAME_STRING = "polygonNameTapColumn"
__POLYGON_RA_STRING = "polygonRaTapColumn"
__POLYGON_DEC_STRING = "polygonDecTapColumn"
__POS_TAP_STRING = "posTapColumn"
__ORDER_BY_STRING = "orderBy"
__IS_SURVEY_MISSION_STRING = "isSurveyMission"
__ZERO_ARCMIN_STRING = "0 arcmin"
__MIN_RADIUS_CATALOG_STRING = "5 arcsec"
__HERSCHEL_STRING = 'herschel'
__HST_STRING = 'hst'
__INTEGRAL_STRING = 'integral'
__HERSCHEL_FILTERS = {
'psw': '250',
'pmw': '350',
'plw': '500',
'mapb_blue': '70',
'mapb_green': '100',
'mapr_': '160'}
_MAPS_DOWNLOAD_DIR = "Maps"
_isTest = ""
def list_maps(self):
"""
Get a list of the mission names of the available observations in ESASky
"""
return self._json_object_field_to_list(
self._get_observation_json(), self.__MISSION_STRING)
def list_catalogs(self):
"""
Get a list of the mission names of the available catalogs in ESASky
"""
return self._json_object_field_to_list(
self._get_catalogs_json(), self.__MISSION_STRING)
def query_object_maps(self, position, missions=__ALL_STRING,
get_query_payload=False, cache=True):
"""
This method queries a chosen object or coordinate for all available maps
which have observation data on the chosen position. It returns a
TableList with all the found maps metadata for the chosen missions
and object.
Parameters
----------
position : str or `astropy.coordinates` object
Can either be a string of the location, eg 'M51', or the coordinates
of the object.
missions : string or list, optional
Can be either a specific mission or a list of missions (all mission
names are found in list_missions()) or 'all' to search in all
missions. Defaults to 'all'.
get_query_payload : bool, optional
When set to True the method returns the HTTP request parameters.
Defaults to False.
cache : bool, optional
When set to True the method will use a cache located at
.astropy/astroquery/cache. Defaults to True.
Returns
-------
table_list : `~astroquery.utils.TableList`
Each mission returns a `~astropy.table.Table` with the metadata
and observations available for the chosen missions and object.
It is structured in a TableList like this:
TableList with 8 tables:
'0:HERSCHEL' with 8 column(s) and 25 row(s)
'1:HST' with 8 column(s) and 735 row(s)
Examples
--------
query_object_maps("m101", "all")
query_object_maps("265.05, 69.0", "Herschel")
query_object_maps("265.05, 69.0", ["Herschel", "HST"])
"""
return self.query_region_maps(position=position,
radius=self.__ZERO_ARCMIN_STRING,
missions=missions,
get_query_payload=get_query_payload,
cache=cache)
def query_object_catalogs(self, position, catalogs=__ALL_STRING,
row_limit=DEFAULT_ROW_LIMIT,
get_query_payload=False, cache=True):
"""
This method queries a chosen object or coordinate for all available
catalogs and returns a TableList with all the found catalogs metadata
for the chosen missions and object. To account for errors in telescope
position, the method will look for any sources within a radius of
5 arcsec of the chosen position.
Parameters
----------
position : str or `astropy.coordinates` object
Can either be a string of the location, eg 'M51', or the coordinates
of the object.
catalogs : string or list, optional
Can be either a specific catalog or a list of catalogs (all catalog
names are found in list_catalogs()) or 'all' to search in all
catalogs. Defaults to 'all'.
row_limit : int, optional
Determines how many rows that will be fetched from the database
for each mission. Can be -1 to select maximum (currently 100 000).
Defaults to 10000.
get_query_payload : bool, optional
When set to True the method returns the HTTP request parameters.
Defaults to False.
cache : bool, optional
When set to True the method will use a cache located at
.astropy/astroquery/cache. Defaults to True.
Returns
-------
table_list : `~astroquery.utils.TableList`
Each mission returns a `~astropy.table.Table` with the metadata
of the catalogs available for the chosen mission and object.
It is structured in a TableList like this:
TableList with 8 tables:
'0:Gaia DR1 TGA' with 8 column(s) and 25 row(s)
'1:HSC' with 8 column(s) and 75 row(s)
Examples
--------
query_object_catalogs("m101", "all")
query_object_catalogs("265.05, 69.0", "Gaia DR1 TGA")
query_object_catalogs("265.05, 69.0", ["Gaia DR1 TGA", "HSC"])
"""
return self.query_region_catalogs(position=position,
radius=self.__ZERO_ARCMIN_STRING,
catalogs=catalogs,
row_limit=row_limit,
get_query_payload=get_query_payload,
cache=cache)
def query_region_maps(self, position, radius, missions=__ALL_STRING,
get_query_payload=False, cache=True):
"""
This method queries a chosen region for all available maps and returns a
TableList with all the found maps metadata for the chosen missions and
region.
Parameters
----------
position : str or `astropy.coordinates` object
Can either be a string of the location, eg 'M51', or the coordinates
of the object.
radius : str or `~astropy.units.Quantity`
The radius of a region.
missions : string or list, optional
Can be either a specific mission or a list of missions (all mission
names are found in list_missions()) or 'all' to search in all
missions. Defaults to 'all'.
get_query_payload : bool, optional
When set to True the method returns the HTTP request parameters.
Defaults to False.
cache : bool, optional
When set to True the method will use a cache located at
.astropy/astroquery/cache. Defaults to True.
Returns
-------
table_list : `~astroquery.utils.TableList`
Each mission returns a `~astropy.table.Table` with the metadata
and observations available for the chosen missions and region.
It is structured in a TableList like this:
TableList with 8 tables:
'0:HERSCHEL' with 8 column(s) and 25 row(s)
'1:HST' with 8 column(s) and 735 row(s)
Examples
--------
query_region_maps("m101", "14'", "all")
import astropy.units as u
query_region_maps("265.05, 69.0", 14*u.arcmin, "Herschel")
query_region_maps("265.05, 69.0", ["Herschel", "HST"])
"""
sanitized_position = self._sanitize_input_position(position)
sanitized_radius = self._sanitize_input_radius(radius)
sanitized_missions = self._sanitize_input_mission(missions)
query_result = {}
sesame_database.set('simbad')
coordinates = commons.parse_coordinates(sanitized_position)
self._store_query_result_maps(query_result, sanitized_missions,
coordinates, sanitized_radius,
get_query_payload, cache)
if (get_query_payload):
return query_result
return commons.TableList(query_result)
def query_region_catalogs(self, position, radius, catalogs=__ALL_STRING,
row_limit=DEFAULT_ROW_LIMIT,
get_query_payload=False, cache=True):
"""
This method queries a chosen region for all available catalogs and
returns a TableList with all the found catalogs metadata for the chosen
missions and region.
Parameters
----------
position : str or `astropy.coordinates` object
Can either be a string of the location, eg 'M51', or the coordinates
of the object.
radius : str or `~astropy.units.Quantity`
The radius of a region.
catalogs : string or list, optional
Can be either a specific catalog or a list of catalogs (all catalog
names are found in list_catalogs()) or 'all' to search in all
catalogs. Defaults to 'all'.
row_limit : int, optional
Determines how many rows that will be fetched from the database
for each mission. Can be -1 to select maximum (currently 100 000).
Defaults to 10000.
get_query_payload : bool, optional
When set to True the method returns the HTTP request parameters.
Defaults to False.
cache : bool, optional
When set to True the method will use a cache located at
.astropy/astroquery/cache. Defaults to True.
Returns
-------
table_list : `~astroquery.utils.TableList`
Each mission returns a `~astropy.table.Table` with the metadata of
the catalogs available for the chosen mission and region.
It is structured in a TableList like this:
TableList with 8 tables:
'0:Gaia DR1 TGA' with 8 column(s) and 25 row(s)
'1:HSC' with 8 column(s) and 75 row(s)
Examples
--------
query_region_catalogs("m101", "14'", "all")
import astropy.units as u
query_region_catalogs("265.05, 69.0", 14*u.arcmin, "Gaia DR1 TGA")
query_region_catalogs("265.05, 69.0", 14*u.arcmin, ["Gaia DR1 TGA", "HSC"])
"""
sanitized_position = self._sanitize_input_position(position)
sanitized_radius = self._sanitize_input_radius(radius)
sanitized_catalogs = self._sanitize_input_catalogs(catalogs)
sanitized_row_limit = self._sanitize_input_row_limit(row_limit)
sesame_database.set('simbad')
coordinates = commons.parse_coordinates(sanitized_position)
query_result = {}
self._store_query_result_catalogs(query_result, sanitized_catalogs,
coordinates, sanitized_radius,
sanitized_row_limit,
get_query_payload, cache)
if (get_query_payload):
return query_result
return commons.TableList(query_result)
def get_maps(self, query_table_list, missions=__ALL_STRING,
download_dir=_MAPS_DOWNLOAD_DIR, cache=True):
"""
This method takes the dictionary of missions and metadata as returned by
query_region_maps and downloads all maps to the selected folder.
The method returns a dictionary which is divided by mission.
All mission except Herschel returns a list of HDULists.
For Herschel each item in the list is a dictionary where the used
filter is the key and the HDUList is the value.
Parameters
----------
query_table_list : `~astroquery.utils.TableList`
A TableList with all the missions wanted and their respective
metadata. Usually the return value of query_region_maps.
missions : string or list, optional
Can be either a specific mission or a list of missions (all mission
names are found in list_missions()) or 'all' to search in all
missions. Defaults to 'all'.
download_dir : string, optional
The folder where all downloaded maps should be stored.
Defaults to a folder called 'Maps' in the current working directory.
cache : bool, optional
When set to True the method will use a cache located at
.astropy/astroquery/cache. Defaults to True.
Returns
-------
maps : `dict`
All mission except Herschel returns a list of HDULists.
For Herschel each item in the list is a dictionary where the used
filter is the key and the HDUList is the value.
It is structured in a dictionary like this:
dict: {
'HERSCHEL': [{'70': [HDUList], '160': [HDUList]}, {'70': [HDUList], '160': [HDUList]}, ...],
'HST':[[HDUList], [HDUList], [HDUList], [HDUList], [HDUList], ...],
'XMM-EPIC' : [[HDUList], [HDUList], [HDUList], [HDUList], ...]
...
}
Examples
--------
get_maps(query_region_catalogs("m101", "14'", "all"))
"""
sanitized_query_table_list = self._sanitize_input_table_list(query_table_list)
sanitized_missions = self._sanitize_input_mission(missions)
maps = dict()
for query_mission in sanitized_query_table_list.keys():
for mission in sanitized_missions:
# INTEGRAL does not have a product url yet.
if (query_mission.lower() == self.__INTEGRAL_STRING):
print("INTEGRAL does not yet support downloading of "
"fits files")
break
if (query_mission.lower() == mission.lower()):
maps[query_mission] = (
self._get_maps_for_mission(
sanitized_query_table_list[query_mission],
query_mission,
download_dir,
cache))
break
if (len(sanitized_query_table_list) > 0):
log.info("Maps available at %s" % os.path.abspath(download_dir))
else:
print("No maps found")
return maps
def get_images(self, position, radius=__ZERO_ARCMIN_STRING, missions=__ALL_STRING,
download_dir=_MAPS_DOWNLOAD_DIR, cache=True):
"""
This method gets the fits files available for the selected position and
mission and downloads all maps to the the selected folder.
The method returns a dictionary which is divided by mission.
All mission except Herschel returns a list of HDULists.
For Herschel each item in the list is a dictionary where the used
filter is the key and the HDUList is the value.
Parameters
----------
position : str or `astropy.coordinates` object
Can either be a string of the location, eg 'M51', or the coordinates
of the object.
radius : str or `~astropy.units.Quantity`, optional
The radius of a region. Defaults to 0.
missions : string or list, optional
Can be either a specific mission or a list of missions (all mission
names are found in list_missions()) or 'all' to search in all
missions. Defaults to 'all'.
download_dir : string, optional
The folder where all downloaded maps should be stored.
Defaults to a folder called 'Maps' in the current working directory.
cache : bool, optional
When set to True the method will use a cache located at
.astropy/astroquery/cache. Defaults to True.
Returns
-------
maps : `dict`
All mission except Herschel returns a list of HDULists.
For Herschel each item in the list is a dictionary where the used
filter is the key and the HDUList is the value.
It is structured in a dictionary like this:
dict: {
'HERSCHEL': [{'70': [HDUList], '160': [HDUList]}, {'70': [HDUList], '160': [HDUList]}, ...],
'HST':[[HDUList], [HDUList], [HDUList], [HDUList], [HDUList], ...],
'XMM-EPIC' : [[HDUList], [HDUList], [HDUList], [HDUList], ...]
...
}
Examples
--------
get_images("m101", "14'", "all")
"""
sanitized_position = self._sanitize_input_position(position)
sanitized_radius = self._sanitize_input_radius(radius)
sanitized_missions = self._sanitize_input_mission(missions)
maps = dict()
map_query_result = self.query_region_maps(sanitized_position,
sanitized_radius,
sanitized_missions,
get_query_payload=False,
cache=cache)
for query_mission in map_query_result.keys():
# INTEGRAL does not have a product url yet.
if (query_mission.lower() == self.__INTEGRAL_STRING):
print("INTEGRAL does not yet support downloading of "
"fits files")
continue
maps[query_mission] = (
self._get_maps_for_mission(
map_query_result[query_mission],
query_mission,
download_dir,
cache))
print("Maps available at %s" % os.path.abspath(download_dir))
return maps
def _sanitize_input_position(self, position):
if (isinstance(position, str) or isinstance(position,
commons.CoordClasses)):
return position
else:
raise ValueError("Position must be either a string or "
"astropy.coordinates")
def _sanitize_input_radius(self, radius):
if (isinstance(radius, str) or isinstance(radius,
astropy.units.Quantity)):
return radius
else:
raise ValueError("Radius must be either a string or "
"astropy.units.Quantity")
def _sanitize_input_mission(self, missions):
if (isinstance(missions, list)):
return missions
if (isinstance(missions, str)):
if (missions.lower() == self.__ALL_STRING):
return self.list_maps()
else:
return [missions]
raise ValueError("Mission must be either a string or a list of "
"missions")
def _sanitize_input_catalogs(self, catalogs):
if (isinstance(catalogs, list)):
return catalogs
if (isinstance(catalogs, str)):
if (catalogs.lower() == self.__ALL_STRING):
return self.list_catalogs()
else:
return [catalogs]
raise ValueError("Catalog must be either a string or a list of "
"catalogs")
def _sanitize_input_table_list(self, table_list):
if (isinstance(table_list, commons.TableList)):
return table_list
raise ValueError("Query_table_list must be an astropy.utils.TableList")
def _sanitize_input_row_limit(self, row_limit):
if (isinstance(row_limit, int)):
return row_limit
raise ValueError("Row_limit must be an integer")
def _get_maps_for_mission(self, maps_table, mission, download_dir, cache):
maps = []
if (len(maps_table[self.__PRODUCT_URL_STRING]) > 0):
mission_directory = self._create_mission_directory(mission,
download_dir)
print("Starting download of %s data. (%d files)"
% (mission, len(maps_table[self.__PRODUCT_URL_STRING])))
for index in range(len(maps_table)):
product_url = maps_table[self.__PRODUCT_URL_STRING][index].decode('utf-8')
if(mission.lower() == self.__HERSCHEL_STRING):
observation_id = maps_table["observation_id"][index].decode('utf-8')
else:
observation_id = (maps_table[self._get_tap_observation_id(mission)][index]
.decode('utf-8'))
print("Downloading Observation ID: %s from %s"
% (observation_id, product_url), end=" ")
sys.stdout.flush()
directory_path = mission_directory + "/"
if (mission.lower() == self.__HERSCHEL_STRING):
maps.append(self._get_herschel_map(
product_url,
directory_path,
cache))
else:
response = self._request(
'GET',
product_url,
cache=cache,
headers=self._get_header())
file_name = ""
if (product_url.endswith(self.__FITS_STRING)):
file_name = (directory_path +
self._extract_file_name_from_url(product_url))
else:
file_name = (directory_path +
self._extract_file_name_from_response_header(response.headers))
fits_data = response.content
with open(file_name, 'wb') as fits_file:
fits_file.write(fits_data)
fits_file.close()
maps.append(fits.open(file_name))
print("[Done]")
print("Downloading of %s data complete." % mission)
return maps
def _get_herschel_map(self, product_url, directory_path, cache):
observation = dict()
tar_file = tempfile.NamedTemporaryFile(delete=False)
response = self._request(
'GET',
product_url,
cache=cache,
headers=self._get_header())
tar_file.write(response.content)
tar_file.close()
with tarfile.open(tar_file.name, 'r') as tar:
i = 0
for member in tar.getmembers():
member_name = member.name.lower()
if ('hspire' in member_name or 'hpacs' in member_name):
herschel_filter = self._get_herschel_filter_name(member_name)
tar.extract(member, directory_path)
observation[herschel_filter] = fits.open(
directory_path +
member.name)
i += 1
os.remove(tar_file.name)
return observation
def _get_herschel_filter_name(self, member_name):
for herschel_filter in self.__HERSCHEL_FILTERS.keys():
if herschel_filter in member_name:
return self.__HERSCHEL_FILTERS[herschel_filter]
def _remove_extra_herschel_directory(self, file_and_directory_name,
directory_path):
full_directory_path = os.path.abspath(directory_path)
file_name = file_and_directory_name[file_and_directory_name.index("/") + 1:]
os.renames(os.path.join(full_directory_path, file_and_directory_name),
os.path.join(full_directory_path, file_name))
return file_name
def _create_mission_directory(self, mission, download_dir):
if (download_dir == self._MAPS_DOWNLOAD_DIR):
mission_directory = self._MAPS_DOWNLOAD_DIR + "/" + mission
else:
mission_directory = (download_dir + "/" + self._MAPS_DOWNLOAD_DIR +
"/" + mission)
if not os.path.exists(mission_directory):
os.makedirs(mission_directory)
return mission_directory
def _extract_file_name_from_response_header(self, headers):
content_disposition = headers.get('Content-Disposition')
filename_string = "filename="
start_index = (content_disposition.index(filename_string) +
len(filename_string))
if (content_disposition[start_index] == '\"'):
start_index += 1
if (self.__FITS_STRING in content_disposition[start_index:]):
end_index = (
content_disposition.index(self.__FITS_STRING, start_index + 1) +
len(self.__FITS_STRING))
return content_disposition[start_index: end_index]
elif (self.__FTZ_STRING in content_disposition[start_index:]):
end_index = (
content_disposition.index(self.__FTZ_STRING, start_index + 1) +
len(self.__FTZ_STRING))
return content_disposition[start_index: end_index]
elif (self.__TAR_STRING in content_disposition[start_index:]):
end_index = (
content_disposition.index(self.__TAR_STRING, start_index + 1) +
len(self.__TAR_STRING))
return content_disposition[start_index: end_index]
else:
raise ValueError("Could not find file name in header. "
"Content disposition: %s." % content_disposition)
def _extract_file_name_from_url(self, product_url):
start_index = product_url.rindex("/") + 1
return product_url[start_index:]
def _query_region_maps(self, coordinates, radius, observation_name,
get_query_payload, cache):
observation_tap_name = (
self._find_observation_tap_table_name(observation_name))
query = (
self._build_observation_query(coordinates, radius,
self._find_observation_parameters(observation_tap_name)))
request_payload = self._create_request_payload(query)
if (get_query_payload):
return request_payload
return self._get_and_parse_from_tap(request_payload, cache)
def _query_region_catalog(self, coordinates, radius, catalog_name, row_limit,
get_query_payload, cache):
catalog_tap_name = self._find_catalog_tap_table_name(catalog_name)
query = self._build_catalog_query(coordinates, radius, row_limit,
self._find_catalog_parameters(catalog_tap_name))
request_payload = self._create_request_payload(query)
if (get_query_payload):
return request_payload
return self._get_and_parse_from_tap(request_payload, cache)
def _build_observation_query(self, coordinates, radius, json):
raHours, dec = commons.coord_to_radec(coordinates)
ra = raHours * 15.0 # Converts to degrees
radiusDeg = commons.radius_to_unit(radius, unit='deg')
select_query = "SELECT DISTINCT "
metadata = json[self.__METADATA_STRING]
metadata_tap_names = ", ".join(["%s" % entry[self.__TAP_NAME_STRING]
for entry in metadata])
from_query = " FROM %s" % json[self.__TAP_TABLE_STRING]
if (radiusDeg != 0 or json[self.__IS_SURVEY_MISSION_STRING]):
if (json[self.__IS_SURVEY_MISSION_STRING]):
where_query = (" WHERE 1=CONTAINS(pos, CIRCLE('ICRS', %f, %f, %f));"
% (ra, dec, radiusDeg))
else:
where_query = (" WHERE 1=INTERSECTS(CIRCLE('ICRS', %f, %f, %f), fov);"
% (ra, dec, radiusDeg))
else:
where_query = (" WHERE 1=CONTAINS(POINT('ICRS', %f, %f), fov);"
% (ra, dec))
query = "".join([
select_query,
metadata_tap_names,
from_query,
where_query])
return query
def _build_catalog_query(self, coordinates, radius, row_limit, json):
raHours, dec = commons.coord_to_radec(coordinates)
ra = raHours * 15.0 # Converts to degrees
radiusDeg = commons.radius_to_unit(radius, unit='deg')
select_query = "SELECT "
if(row_limit > 0):
select_query = "".join([select_query, "TOP %s " % row_limit])
elif(not row_limit == -1):
raise ValueError("Invalid value of row_limit")
metadata = json[self.__METADATA_STRING]
metadata_tap_names = ", ".join(["%s" % entry[self.__TAP_NAME_STRING]
for entry in metadata])
from_query = " FROM %s" % json[self.__TAP_TABLE_STRING]
if (radiusDeg == 0):
where_query = (" WHERE 1=CONTAINS(POINT('ICRS', ra, dec), CIRCLE('ICRS', %f, %f, %f))"
% (ra,
dec,
commons.radius_to_unit(
self.__MIN_RADIUS_CATALOG_STRING,
unit='deg')))
else:
where_query = (" WHERE 1=CONTAINS(POINT('ICRS', ra, dec), CIRCLE('ICRS', %f, %f, %f))"
% (ra, dec, radiusDeg))
order_by_query = " ORDER BY %s;" % json[self.__ORDER_BY_STRING]
query = "".join([select_query, metadata_tap_names, from_query,
where_query, order_by_query])
return query
def _store_query_result_maps(self, query_result, missions, coordinates,
radius, get_query_payload, cache):
for mission in missions:
mission_table = self._query_region_maps(coordinates, radius,
mission, get_query_payload,
cache)
if (len(mission_table) > 0):
query_result[mission.upper()] = mission_table
def _store_query_result_catalogs(self, query_result, catalogs, coordinates,
radius, row_limit, get_query_payload, cache):
for catalog in catalogs:
catalog_table = self._query_region_catalog(coordinates, radius,
catalog, row_limit,
get_query_payload, cache)
if (len(catalog_table) > 0):
query_result[catalog.upper()] = catalog_table
def _find_observation_parameters(self, mission_name):
return self._find_mission_parameters_in_json(mission_name,
self._get_observation_json())
def _find_catalog_parameters(self, catalog_name):
return self._find_mission_parameters_in_json(catalog_name,
self._get_catalogs_json())
def _find_mission_parameters_in_json(self, mission_tap_name, json):
for mission in json:
if (mission[self.__TAP_TABLE_STRING] == mission_tap_name):
return mission
raise ValueError("Input tap name %s not available." % mission_tap_name)
def _find_observation_tap_table_name(self, mission_name):
return self._find_mission_tap_table_name(
self._fetch_and_parse_json(self.__OBSERVATIONS_STRING),
mission_name)
def _find_catalog_tap_table_name(self, mission_name):
return self._find_mission_tap_table_name(
self._fetch_and_parse_json(self.__CATALOGS_STRING),
mission_name)
def _find_mission_tap_table_name(self, json, mission_name):
for index in range(len(json)):
if (json[index][self.__MISSION_STRING].lower() == mission_name.lower()):
return json[index][self.__TAP_TABLE_STRING]
raise ValueError("Input %s not available." % mission_name)
return None
def _get_observation_json(self):
return self._fetch_and_parse_json(self.__OBSERVATIONS_STRING)
def _get_catalogs_json(self):
return self._fetch_and_parse_json(self.__CATALOGS_STRING)
def _fetch_and_parse_json(self, object_name):
url = self.URLbase + "/" + object_name
response = self._request(
'GET',
url,
cache=False,
headers=self._get_header())
string_response = response.content.decode('utf-8')
json_response = json.loads(string_response)
return json_response["descriptors"]
def _json_object_field_to_list(self, json, field_name):
response_list = []
for index in range(len(json)):
response_list.append(json[index][field_name])
return response_list
def _get_json_data_for_mission(self, json, mission):
for index in range(len(json)):
if(json[index][self.__MISSION_STRING].lower() == mission.lower()):
return json[index]
def _get_tap_observation_id(self, mission):
return self._get_json_data_for_mission(self._get_observation_json(), mission)["tapObservationId"]
def _create_request_payload(self, query):
return {'REQUEST': 'doQuery', 'LANG': 'ADQL', 'FORMAT': 'VOTABLE',
'QUERY': query}
def _get_and_parse_from_tap(self, request_payload, cache):
response = self._send_get_request("/tap/sync", request_payload, cache)
return self._parse_xml_table(response)
def _send_get_request(self, url_extension, request_payload, cache):
url = self.URLbase + url_extension
return self._request('GET',
url,
params=request_payload,
timeout=self.TIMEOUT,
cache=cache,
headers=self._get_header())
def _parse_xml_table(self, response):
# try to parse the result into an astropy.Table, else
# return the raw result with an informative error message.
try:
tf = six.BytesIO(response.content)
vo_table = votable.parse(tf, pedantic=False)
first_table = vo_table.get_first_table()
table = first_table.to_table(use_names_over_ids=True)
return table
except Exception as ex:
self.response = response
self.table_parse_error = ex
raise TableParseError(
"Failed to parse ESASky VOTABLE result! The raw response can be "
"found in self.response, and the error in "
"self.table_parse_error.")
def _get_header(self):
user_agent = 'astropy:astroquery.esasky.{vers} {isTest}'.format(
vers=version.version,
isTest=self._isTest)
return {'User-Agent': user_agent}
ESASky = ESASkyClass()
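# Minimal usage sketch, kept as a comment so importing this module stays free of
# network calls; it follows the docstring examples above (the target and mission
# names are illustrative):
#
#     maps_metadata = ESASky.query_object_maps("m101", missions=["Herschel", "HST"])
#     maps = ESASky.get_maps(maps_metadata, missions="Herschel", download_dir="Maps")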
|
py | 1a4f6f368c5a7cdeb720eac84c14a0fb9796763e | from django.contrib import admin
from django.urls import path, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('task.apis.urls')), # for apis
    path('', include('task.forms.urls')),  # for forms
path('auth/', include('rest_auth.urls')), # for login
path('accounts/', include('django.contrib.auth.urls'))
]
urlpatterns += staticfiles_urlpatterns()
|
py | 1a4f71bb3a8b2d6132229f95b5733ea903965d27 | import sys
import struct
import collections
from . import filter_nan
from .ins401_field_parser import decode_value
from ...framework.utils.print import print_yellow
from ...framework.context import APP_CONTEXT
# input packet
error_decode_packet = 0
def _format_string(data_buffer):
parsed = bytearray(data_buffer) if data_buffer and len(
data_buffer) > 0 else None
formatted = ''
if parsed is not None:
try:
if sys.version_info < (3, 0):
formatted = str(struct.pack(
'{0}B'.format(len(parsed)), *parsed))
else:
formatted = str(struct.pack(
'{0}B'.format(len(parsed)), *parsed), 'utf-8')
except UnicodeDecodeError:
APP_CONTEXT.get_logger().logger.error('Parse data as string failed')
formatted = ''
return formatted
def string_parser(payload, user_configuration):
error = False
data = ''
data_str = _format_string(payload)
if data_str and (data_str.find('INS401') > -1) \
and (data_str.find('RTK_INS App') > -1) \
and (data_str.find('Bootloader') > -1):
data = data_str
else:
error = True
return data, error
def get_all_parameters_parser(payload, user_configuration):
'''
gA parser
'''
error = False
data = []
data_len = 0
for parameter in user_configuration:
param_id = parameter['paramId']
param_type = parameter['type']
name = parameter['name']
if param_type == 'uint8' or param_type == 'int8':
value = decode_value(
param_type, payload[data_len:data_len + 1])
data_len = data_len + 1
elif param_type == 'uint16' or param_type == 'int16':
value = decode_value(
param_type, payload[data_len:data_len + 2])
data_len = data_len + 2
elif param_type == 'uint32' or param_type == 'int32' or param_type == 'float':
value = decode_value(
param_type, payload[data_len:data_len + 4])
data_len = data_len + 4
elif param_type == 'uint64' or param_type == 'int64' or param_type == 'double':
value = decode_value(
param_type, payload[data_len:data_len + 8])
data_len = data_len + 8
elif param_type == 'ip4':
value = decode_value(
param_type, payload[data_len:data_len + 4])
data_len = data_len + 4
elif param_type == 'ip6':
value = decode_value(
param_type, payload[data_len:data_len + 6])
data_len = data_len + 6
elif 'char' in param_type:
ctype_n = param_type.replace('char', '')
ctype_l = int(ctype_n)
value = decode_value(
param_type, payload[data_len:data_len + ctype_l])
data_len = data_len + ctype_l
else:
print(
"no [{0}] when unpack_input_packet".format(param_type))
value = False
data.append(
{"paramId": param_id, "name": name, "value": value})
return data, error
def get_parameters_by_block_parser(payload, user_configuration):
'''
gB parser
'''
data = []
error = False
start_param_id = payload[0]
end_param_id = payload[1]
data_len = 2
for i in range(start_param_id, end_param_id+1, 1):
exist_param_conf = next((param_conf for param_conf in user_configuration
if param_conf['paramId'] == i), None)
if exist_param_conf:
param_type = exist_param_conf['type']
if param_type == 'uint8' or param_type == 'int8':
value = decode_value(
param_type, payload[data_len:data_len + 1])
data_len = data_len + 1
elif param_type == 'uint16' or param_type == 'int16':
value = decode_value(
param_type, payload[data_len:data_len + 2])
data_len = data_len + 2
elif param_type == 'uint32' or param_type == 'int32' or param_type == 'float':
value = decode_value(
param_type, payload[data_len:data_len + 4], exist_param_conf)
data_len = data_len + 4
elif param_type == 'uint64' or param_type == 'int64' or param_type == 'double':
value = decode_value(
param_type, payload[data_len:data_len + 8])
data_len = data_len + 8
elif param_type == 'ip4':
value = decode_value(
param_type, payload[data_len:data_len + 4])
data_len = data_len + 4
elif param_type == 'ip6':
value = decode_value(
param_type, payload[data_len:data_len + 6])
data_len = data_len + 6
elif 'char' in param_type:
ctype_n = param_type.replace('char', '')
ctype_l = int(ctype_n)
value = decode_value(
param_type, payload[data_len:data_len + ctype_l])
data_len = data_len + ctype_l
else:
print(
"no [{0}] when unpack_input_packet".format(param_type))
value = False
data.append({
"paramId": i,
"name": exist_param_conf['name'],
"value": value
})
return data, error
def get_parameter_parser(payload, user_configuration):
'''
gP Parser
'''
data = None
error = False
param_id = decode_value('uint32', payload[0:4])
if param_id is not False:
param = filter(lambda item: item['paramId'] ==
param_id, user_configuration)
try:
first_item = next(iter(param), None)
param_value = decode_value(
first_item['type'], payload[4:12], first_item)
data = {"paramId": param_id,
"name": first_item['name'], "value": param_value}
except StopIteration:
error = True
except Exception:
error = True
else:
error = True
return data, error
def update_parameter_parser(payload, user_configuration):
'''
uP parser
'''
error = False
data = decode_value('int32', payload[0:4])
if data != 0:
error = True
return data, error
def update_parameters_parser(payload, user_configuration):
'''
uB parser
'''
error = False
data = decode_value('uint32', payload[0:4])
if data:
error = True
return data, error
def common_input_parser(payload, user_configuration):
'''
General input packet parser
'''
print('common_input_parser:', payload)
return payload, False
def read_eeprom_parser(payload, user_configuration=None):
return payload[3:], False
# output packet
def common_continuous_parser(payload, configuration):
'''
Unpack output packet
'''
if configuration is None:
return
data = None
is_list = 0
length = 0
pack_fmt = '<'
for value in configuration['payload']:
if value['type'] == 'float':
pack_fmt += 'f'
length += 4
elif value['type'] == 'uint32':
pack_fmt += 'I'
length += 4
elif value['type'] == 'int32':
pack_fmt += 'i'
length += 4
elif value['type'] == 'int16':
pack_fmt += 'h'
length += 2
elif value['type'] == 'uint16':
pack_fmt += 'H'
length += 2
elif value['type'] == 'double':
pack_fmt += 'd'
length += 8
elif value['type'] == 'int64':
pack_fmt += 'q'
length += 8
elif value['type'] == 'uint64':
pack_fmt += 'Q'
length += 8
elif value['type'] == 'char':
pack_fmt += 'c'
length += 1
elif value['type'] == 'uchar':
pack_fmt += 'B'
length += 1
elif value['type'] == 'uint8':
pack_fmt += 'B'
length += 1
len_fmt = '{0}B'.format(length)
has_list = configuration.__contains__('isList')
if has_list:
is_list = configuration['isList']
if is_list == 1:
packet_num = len(payload) // length
data = []
for i in range(packet_num):
payload_c = payload[i*length:(i+1)*length]
try:
pack_item = struct.pack(len_fmt, *payload_c)
item = struct.unpack(pack_fmt, pack_item)
out = [(value['name'], item[idx])
for idx, value in enumerate(configuration['payload'])]
item = collections.OrderedDict(out)
data.append(item)
except Exception as ex: # pylint: disable=broad-except
print(
"error happened when decode the payload, pls restart driver: {0}"
.format(ex))
else:
try:
pack_item = struct.pack(len_fmt, *payload)
data = struct.unpack(pack_fmt, pack_item)
out = [(
value['name'],
filter_nan(data[idx])
) for idx, value in enumerate(configuration['payload'])]
data = collections.OrderedDict(out)
except Exception as ex: # pylint: disable=broad-except
global error_decode_packet
error_decode_packet = error_decode_packet + 1
if error_decode_packet == 100 or error_decode_packet == 400 or error_decode_packet == 700:
print_yellow(
"warning: your firmware may not suitable for this driver, pls update firmware or driver")
if error_decode_packet % 300 == 0:
APP_CONTEXT.get_logger().logger.warning(
"error happened when decode the payload of packets, pls restart driver: {0}"
.format(ex))
return data
def other_output_parser(payload):
return payload
# packet handler
def match_command_handler(packet_type):
'''
Find the handler for specified packet
'''
parser_dict = {
b'\x01\xcc': string_parser,
b'\x02\xcc': get_parameter_parser,
b'\x03\xcc': update_parameter_parser,
b'\x04\xcc': update_parameter_parser,
b'\x01\x0b': common_input_parser,
b'\x02\x0b': common_input_parser
}
return parser_dict.get(packet_type)
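# Minimal dispatch sketch (assumption: the caller has already split a received
# frame into packet_type bytes and a payload, and holds the user_configuration
# parameter list used by the parsers above):
#
#     parser = match_command_handler(packet_type)  # e.g. b'\x02\xcc' for gP
#     if parser:
#         data, error = parser(payload, user_configuration)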
|
py | 1a4f72247d7c42f1d41bd9b8dc005620efdef184 | import datetime
import json
import os
from dotenv import load_dotenv
from Santander.SantanderScrapper import SantanderScrapper
load_dotenv(verbose=True)
from Clear.ClearScrapper import ClearScrapper
from GuiaBolso.GuiaBolsoScrapper import GuiaBolsoScrapper
from Rico.RicoScrapper import RicoScrapper
from SmarttBot.SmarttBotScrapper import SmarttBotScrapper
def save_output(provider, data):
date = datetime.datetime.today().strftime('%Y-%m-%d')
time = datetime.datetime.today().strftime('%X')
dir = 'output/' + date + '/'
if not os.path.exists(dir):
os.makedirs(dir)
f = open(dir + provider + '.json', 'w')
data['date'] = date
data['time'] = time
data['label'] = provider
f.write(json.dumps(data))
f.close()
def scrap_rico():
if os.getenv("RICO_USR"):
rico_scrapper = RicoScrapper(os.getenv("RICO_USR"), os.getenv("RICO_PWD"))
res = rico_scrapper.init()
save_output('rico', res)
def scrap_clear():
if os.getenv("CLEAR_CPF"):
clear_scrapper = ClearScrapper(os.getenv("CLEAR_CPF"), os.getenv("CLEAR_PWD"), os.getenv("CLEAR_BIRTHDATE"))
res = clear_scrapper.init()
save_output('clear', res)
def scrap_smartt_bot():
if os.getenv("SMARTT_BOT_USR"):
smartt_bot_scrapper = SmarttBotScrapper(os.getenv("SMARTT_BOT_USR"), os.getenv("SMARTT_BOT_PWD"))
res = smartt_bot_scrapper.init()
save_output('smartt_bot', res)
def scrap_guiabolso():
if os.getenv("GUIABOLSO_USR"):
guiabolso_scrapper = GuiaBolsoScrapper(os.getenv("GUIABOLSO_USR"), os.getenv("GUIABOLSO_PWD"))
res = guiabolso_scrapper.init()
save_output('guiabolso', res)
def scrap_santander():
guiabolso_scrapper = SantanderScrapper(os.getenv("SANTANDER_CPF"), os.getenv("SANTANDER_PWD"),
os.getenv("SANTANDER_LAST_DIGITS"))
    res = santander_scrapper.init()
def scrap_all():
scrap_guiabolso()
scrap_clear()
scrap_rico()
scrap_smartt_bot()
# scrap_smartt_bot()
# scrap_clear()
# scrap_guiabolso()
scrap_all()
# scrap_rico()
# scrap_santander()
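# Example .env layout read by load_dotenv() above; the keys are taken from the
# os.getenv() calls in this file and all values are placeholders:
#
#   RICO_USR=...
#   RICO_PWD=...
#   CLEAR_CPF=...
#   CLEAR_PWD=...
#   CLEAR_BIRTHDATE=...
#   SMARTT_BOT_USR=...
#   SMARTT_BOT_PWD=...
#   GUIABOLSO_USR=...
#   GUIABOLSO_PWD=...
#   SANTANDER_CPF=...
#   SANTANDER_PWD=...
#   SANTANDER_LAST_DIGITS=...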
|
py | 1a4f72ce6aafcc2f5cd87db3744a0abb1e32d82e | import string
from spacy.lang.pl import STOP_WORDS as stop_words
try:
import morfeusz2
morph = morfeusz2.Morfeusz()
except ImportError:
print('Warning: Morfeusz couldn\'t be imported')
morph = None
letters = string.ascii_letters + 'ąćęłńóśźż'
class Word:
def __init__(self, text):
self.text = text
self.lemma = self.lemma()
self.is_stop = self.is_stop()
@classmethod
def from_text(cls, text):
if len(text) == 0:
return None
return cls(text)
@classmethod
def generator(cls, text):
separators = string.whitespace
while len(text) > 0:
positions = {separator: text.find(separator) for separator in separators}
positions = {separator: positions[separator] for separator in separators if positions[separator] > -1}
position_extractor = lambda separator: positions[separator]
next_separator = min(positions, key=position_extractor) if len(positions) > 0 else None
if next_separator is None:
result = cls.from_text(text)
if result is not None:
yield result
return
result = cls.from_text(text[:positions[next_separator] + 1])
if result is not None:
yield result
text = text[positions[next_separator] + 1:]
def lemma(self):
        if any(char in string.digits for char in self.text):  # treat any token containing a digit as numeric; find(string.digits) only matched the full "0123456789" substring
return '#NUMERIC'
main_text = ''.join(char for char in self.text if char in letters)
if morph is None:
return main_text
morph_analysis = morph.analyse(main_text)
if len(morph_analysis) == 0:
return main_text
return morph_analysis[0][2][1].split(':')[0]
def is_stop(self):
return self.text in stop_words or self.lemma in stop_words
def __str__(self):
return self.text
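# Minimal usage sketch (the sample sentence is illustrative); guarded so it only
# runs when the module is executed directly and never on import:
if __name__ == '__main__':
    for word in Word.generator('Ala ma kota i psa'):
        print(word.text, word.lemma, word.is_stop)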
|
py | 1a4f730e4f81e8cd2e7f9dea4dde324b38ea7a6b | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "mission_pkg"
PROJECT_SPACE_DIR = "/home/master/Documents/Guerledan/ROSonPi/glider_dir/install"
PROJECT_VERSION = "0.0.0"
|
py | 1a4f731503f8e5e36ac9975194cc35d35a5c996e | from __future__ import unicode_literals
from mayan.apps.common.tests import BaseTestCase
from mayan.apps.documents.tests import DocumentTestMixin, TEST_HYBRID_DOCUMENT
from ..parsers import PopplerParser
from .literals import TEST_DOCUMENT_CONTENT
class ParserTestCase(DocumentTestMixin, BaseTestCase):
test_document_filename = TEST_HYBRID_DOCUMENT
def test_poppler_parser(self):
parser = PopplerParser()
parser.process_document_version(self.test_document.latest_version)
self.assertTrue(
TEST_DOCUMENT_CONTENT in self.test_document.pages.first().content.content
)
|
py | 1a4f73d6475e5ea984cf6824a657149d5977d2e7 | """
Streaming Parallel Data Processing
===================================================================
Neuraxle steps for streaming data in parallel in the pipeline
..
Copyright 2019, Neuraxio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import warnings
from abc import abstractmethod
from multiprocessing import Queue
from multiprocessing.context import Process
from threading import Thread
from typing import Tuple, List, Union, Iterable, Any
from neuraxle.base import NamedTupleList, ExecutionContext, BaseStep, MetaStep, BaseSaver, _FittableStep, \
BaseTransformer, NonFittableMixin
from neuraxle.data_container import DataContainer, ListDataContainer, AbsentValuesNullObject
from neuraxle.pipeline import Pipeline, MiniBatchSequentialPipeline, Joiner
from neuraxle.steps.numpy import NumpyConcatenateOuterBatch
class ObservableQueueMixin:
"""
A class to represent a step that can put items in a queue.
    It can also notify other queues that have subscribed to it using subscribe.
.. seealso::
:class:`BaseStep`,
:class:`QueuedPipelineTask`,
:class:`QueueWorker`,
:class:`BaseQueuedPipeline`,
:class:`ParallelQueuedPipeline`,
:class:`SequentialQueuedPipeline`
"""
def __init__(self, queue):
self.queue = queue
self.observers = []
self._add_observable_queue_step_saver()
def teardown(self):
self.queue = None
return self
def _add_observable_queue_step_saver(self):
if not hasattr(self, 'savers'):
warnings.warn(
                'Please initialize mixins in the correct order. ObservableQueueMixin should be initialized after '
                'appending the ObservableQueueStepSaver to the savers. Saving might fail.'
)
self.savers = [ObservableQueueStepSaver()]
else:
self.savers.append(ObservableQueueStepSaver())
def subscribe(self, observer_queue_worker: 'ObservableQueueMixin') -> 'ObservableQueueMixin':
"""
Subscribe a queue worker.
The subscribed queue workers get notified when :func:`~neuraxle.distributed.streaming.ObservableQueueMixin.notify` is called.
"""
self.observers.append(observer_queue_worker.queue)
return self
def get(self) -> 'QueuedPipelineTask':
"""
Get last item in queue.
"""
return self.queue.get()
def put(self, value: DataContainer):
"""
Put a queued pipeline task in queue.
"""
self.queue.put(QueuedPipelineTask(step_name=self.name, data_container=value.copy()))
def notify(self, value):
"""
Notify all subscribed queue workers
"""
for observer in self.observers:
observer.put(value)
class QueuedPipelineTask(object):
"""
Data object to contain the tasks processed by the queued pipeline.
.. seealso::
:class:`QueueWorker`,
:class:`BaseQueuedPipeline`,
:class:`ParallelQueuedPipeline`,
:class:`SequentialQueuedPipeline`
"""
def __init__(self, data_container, step_name=None):
self.step_name = step_name
self.data_container = data_container
class ObservableQueueStepSaver(BaseSaver):
"""
Saver for observable queue steps.
.. seealso::
:class:`QueueWorker`,
:class:`neuraxle.base.BaseSaver`,
:class:`BaseQueuedPipeline`,
:class:`ParallelQueuedPipeline`,
:class:`SequentialQueuedPipeline`
"""
def save_step(self, step: BaseTransformer, context: 'ExecutionContext') -> BaseTransformer:
step.queue = None
step.observers = []
return step
def can_load(self, step: BaseTransformer, context: 'ExecutionContext') -> bool:
return True
def load_step(self, step: 'BaseTransformer', context: 'ExecutionContext') -> 'BaseTransformer':
step.queue = Queue()
return step
class QueueWorker(ObservableQueueMixin, MetaStep):
"""
Start multiple Process or Thread that process items from the queue of batches to process.
It is both an observable, and observer.
It notifies the results of the wrapped step handle transform method.
It receives the next data container to process.
.. seealso::
:class:`Observer`,
:class:`Observable`,
:class:`MetaStepMixin`,
:class:`BaseStep`
"""
def __init__(
self,
wrapped: BaseTransformer,
max_queue_size: int,
n_workers: int,
use_threading: bool,
additional_worker_arguments=None,
use_savers=False
):
if not additional_worker_arguments:
additional_worker_arguments = [[] for _ in range(n_workers)]
MetaStep.__init__(self, wrapped)
ObservableQueueMixin.__init__(self, Queue(maxsize=max_queue_size))
self.use_threading: bool = use_threading
self.workers: List[Process] = []
self.n_workers: int = n_workers
self.observers: List[Queue] = []
self.additional_worker_arguments = additional_worker_arguments
self.use_savers = use_savers
def start(self, context: ExecutionContext):
"""
Start multiple processes or threads with the worker function as a target.
:param context: execution context
:type context: ExecutionContext
:return:
"""
target_function = worker_function
if self.use_savers:
self.save(context, full_dump=True)
target_function = worker_function
self.workers = []
for _, worker_arguments in zip(range(self.n_workers), self.additional_worker_arguments):
if self.use_threading:
p = Thread(target=target_function, args=(self, context, self.use_savers, worker_arguments))
else:
p = Process(target=target_function, args=(self, context, self.use_savers, worker_arguments))
p.daemon = True
p.start()
self.workers.append(p)
def teardown(self):
"""
Stop all processes on teardown.
        :return: self, after the workers have been stopped
"""
self.stop()
return self
def stop(self):
"""
Stop all of the workers.
:return:
"""
if not self.use_threading:
[w.terminate() for w in self.workers]
self.workers = []
self.observers = []
def worker_function(queue_worker: QueueWorker, context: ExecutionContext, use_savers: bool,
additional_worker_arguments):
"""
Worker function that transforms the items inside the queue of items to process.
:param queue_worker: step to transform
:param context: execution context
:param use_savers: use savers
:param additional_worker_arguments: any additional arguments that need to be passed to the workers
:return:
"""
step = queue_worker.get_step()
if use_savers:
saved_queue_worker: QueueWorker = context.load(queue_worker.get_name())
step = saved_queue_worker.get_step()
additional_worker_arguments = tuple(
additional_worker_arguments[i: i + 2] for i in range(0, len(additional_worker_arguments), 2)
)
for argument_name, argument_value in additional_worker_arguments:
step.__dict__.update({argument_name: argument_value})
while True:
try:
task: QueuedPipelineTask = queue_worker.get()
summary_id = task.data_container.summary_id
data_container = step.handle_transform(task.data_container, context)
data_container = data_container.set_summary_id(summary_id)
queue_worker.notify(QueuedPipelineTask(step_name=queue_worker.name, data_container=data_container))
except Exception as err:
queue_worker.notify(QueuedPipelineTask(step_name=queue_worker.name, data_container=err))
QueuedPipelineStepsTuple = Union[
BaseTransformer, # step
Tuple[int, BaseTransformer], # (n_workers, step)
Tuple[str, BaseTransformer], # (step_name, step)
Tuple[str, int, BaseTransformer], # (step_name, n_workers, step)
Tuple[str, int, int, BaseTransformer], # (step_name, n_workers, max_queue_size, step)
Tuple[str, int, List[Tuple], BaseTransformer], # (step_name, n_workers, additional_worker_arguments, step)
    Tuple[str, int, List[Tuple], int, BaseTransformer]  # (step_name, n_workers, additional_worker_arguments, max_queue_size, step)
]
class BaseQueuedPipeline(MiniBatchSequentialPipeline):
"""
Sub class of :class:`Pipeline`.
    Transform data in many pipeline steps at once, in parallel, using multiprocessing Queues.
Example usage :
.. code-block:: python
# step name, step
p = QueuedPipeline([
('step_a', Identity()),
('step_b', Identity()),
], n_workers=1, batch_size=10, max_queue_size=10)
# step name, number of workers, step
p = QueuedPipeline([
('step_a', 1, Identity()),
('step_b', 1, Identity()),
], batch_size=10, max_queue_size=10)
# step name, number of workers, and max size
p = QueuedPipeline([
('step_a', 1, 10, Identity()),
('step_b', 1, 10, Identity()),
], batch_size=10)
# step name, number of workers for each step, and additional argument for each worker
p = QueuedPipeline([
            ('step_a', 1, [('host', 'host1'), ('host', 'host2')], Identity())
], batch_size=10)
# step name, number of workers for each step, additional argument for each worker, and max size
p = QueuedPipeline([
('step_a', 1, [('host', 'host1'), ('host', 'host2')], 10, Identity())
], batch_size=10)
:param steps: pipeline steps
:param batch_size: number of elements to combine into a single batch
:param n_workers_per_step: number of workers to spawn per step
:param max_queue_size: max number of elements inside the processing queue
:param data_joiner: transformer step to join streamed batches together at the end of the pipeline
:param use_threading: (Optional.) use threading for parallel processing. multiprocessing.context.Process is used by default.
:param use_savers: use savers to serialize steps for parallel processing.
:param include_incomplete_batch: (Optional.) A bool representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
:param default_value_data_inputs: expected_outputs default fill value
for padding and values outside iteration range, or :class:`~neuraxle.data_container.DataContainer.AbsentValuesNullObject`
to trim absent values from the batch
:param default_value_expected_outputs: expected_outputs default fill value
for padding and values outside iteration range, or :class:`~neuraxle.data_container.DataContainer.AbsentValuesNullObject`
to trim absent values from the batch
:param cache_folder: cache_folder if its at the root of the pipeline
.. seealso::
:class:`QueueWorker`,
:class:`QueueJoiner`,
:class:`CustomPipelineMixin`,
:class:`Pipeline`
"""
def __init__(
self,
steps: List[QueuedPipelineStepsTuple],
batch_size: int,
n_workers_per_step: int = None,
max_queue_size: int = None,
data_joiner = None,
use_threading: bool = False,
use_savers: bool = False,
include_incomplete_batch: bool = False,
default_value_data_inputs: Union[Any, AbsentValuesNullObject] = None,
default_value_expected_outputs: Union[Any, AbsentValuesNullObject] = None,
cache_folder: str = None,
):
if data_joiner is None:
data_joiner = NumpyConcatenateOuterBatch()
self.data_joiner = data_joiner
self.max_queue_size = max_queue_size
self.batch_size = batch_size
self.n_workers_per_step = n_workers_per_step
self.use_threading = use_threading
self.use_savers = use_savers
self.batch_size: int = batch_size
self.include_incomplete_batch: bool = include_incomplete_batch
self.default_value_data_inputs: Union[Any, AbsentValuesNullObject] = default_value_data_inputs
self.default_value_expected_outputs: Union[Any, AbsentValuesNullObject] = default_value_expected_outputs
MiniBatchSequentialPipeline.__init__(
self,
steps=self._initialize_steps_as_tuple(steps),
cache_folder=cache_folder,
batch_size=batch_size,
include_incomplete_batch=include_incomplete_batch,
default_value_data_inputs=default_value_data_inputs,
default_value_expected_outputs=default_value_expected_outputs
)
self._refresh_steps()
def _initialize_steps_as_tuple(self, steps):
"""
Wrap each step by a :class:`QueueWorker` to allow data to flow in many pipeline steps at once in parallel.
:param steps: (name, n_workers, step)
        :type steps: List[QueuedPipelineStepsTuple]
:return: steps as tuple
:rtype: NamedTupleList
"""
steps_as_tuple: NamedTupleList = []
for step in steps:
queue_worker = self._create_queue_worker(step)
steps_as_tuple.append((queue_worker.name, queue_worker))
steps_as_tuple.append(('queue_joiner', QueueJoiner(batch_size=self.batch_size)))
return steps_as_tuple
def _create_queue_worker(self, step: QueuedPipelineStepsTuple):
name, n_workers, additional_worker_arguments, max_queue_size, actual_step = self._get_step_params(step)
return QueueWorker(
actual_step,
n_workers=n_workers,
use_threading=self.use_threading,
max_queue_size=max_queue_size,
additional_worker_arguments=additional_worker_arguments,
use_savers=self.use_savers
).set_name('QueueWorker{}'.format(name))
def _get_step_params(self, step):
"""
Return all params necessary to create the QueuedPipeline for the given step.
:param step: tuple
:type step: QueuedPipelineStepsTupleList
        :return: name, n_workers, additional_arguments, max_queue_size, actual_step
        :rtype: tuple(str, int, list, int, BaseStep)
"""
if isinstance(step, BaseTransformer):
actual_step = step
name = step.name
max_queue_size = self.max_queue_size
n_workers = self.n_workers_per_step
additional_arguments = []
elif len(step) == 2:
if isinstance(step[0], str):
name, actual_step = step
n_workers = self.n_workers_per_step
else:
n_workers, actual_step = step
name = actual_step.name
max_queue_size = self.max_queue_size
additional_arguments = []
elif len(step) == 3:
name, n_workers, actual_step = step
max_queue_size = self.max_queue_size
additional_arguments = []
elif len(step) == 4:
if isinstance(step[2], Iterable):
name, n_workers, additional_arguments, actual_step = step
max_queue_size = self.max_queue_size
else:
name, n_workers, max_queue_size, actual_step = step
additional_arguments = []
elif len(step) == 5:
name, n_workers, additional_arguments, max_queue_size, actual_step = step
else:
raise Exception('Invalid Queued Pipeline Steps Shape.')
return name, n_workers, additional_arguments, max_queue_size, actual_step
def _will_process(self, data_container: DataContainer, context: ExecutionContext) -> (
DataContainer, ExecutionContext):
"""
Setup streaming pipeline before any handler methods.
:param data_container: data container
:param context: execution context
:return:
"""
self.setup(context=context)
return data_container, context
def setup(self, context: ExecutionContext = None) -> 'BaseTransformer':
"""
Connect the queued workers together so that the data can correctly flow through the pipeline.
:param context: execution context
:return: step
:rtype: BaseStep
"""
if not self.is_initialized:
self.connect_queued_pipeline()
super().setup(context=context)
return self
def fit_transform_data_container(self, data_container: DataContainer, context: ExecutionContext) -> (
'Pipeline', DataContainer):
"""
Fit transform sequentially if any step is fittable. Otherwise transform in parallel.
:param data_container: data container
:type data_container: DataContainer
:param context: execution context
:type context: ExecutionContext
:return:
"""
all_steps_are_not_fittable = True
for _, step in self[:-1]:
if isinstance(step.get_step(), _FittableStep) and not isinstance(step.get_step(), NonFittableMixin):
all_steps_are_not_fittable = False
if all_steps_are_not_fittable:
data_container = self.transform_data_container(data_container, context)
data_container = self._did_transform(data_container, context)
return self, data_container
self.is_invalidated = True
return super().fit_transform_data_container(data_container, context)
def transform_data_container(self, data_container: DataContainer, context: ExecutionContext) -> DataContainer:
"""
Transform data container
:param data_container: data container to transform.
:type data_container: DataContainer
:param context: execution context
:type context: ExecutionContext
:return: data container
"""
data_container_batches = data_container.minibatches(
batch_size=self.batch_size,
include_incomplete_batch=self.include_incomplete_batch,
default_value_data_inputs=self.default_value_data_inputs,
default_value_expected_outputs=self.default_value_expected_outputs
)
n_batches = self.get_n_batches(data_container)
self[-1].set_n_batches(n_batches)
for name, step in self[:-1]:
step.start(context)
batch_index = 0
for data_container_batch in data_container_batches:
self.send_batch_to_queued_pipeline(batch_index=batch_index, data_container=data_container_batch)
batch_index += 1
data_container = self[-1].join(original_data_container=data_container)
return data_container
def _did_transform(self, data_container: DataContainer, context: ExecutionContext) -> DataContainer:
"""
Stop all of the workers after transform. Also, join the data using self.data_joiner.
:param data_container: data container
:type data_container: DataContainer
:param context: execution context
:type context: ExecutionContext
:return: data container
:rtype: DataContainer
"""
for name, step in self[:-1]:
step.stop()
return self.data_joiner.handle_transform(data_container, context)
@abstractmethod
def get_n_batches(self, data_container) -> int:
"""
Get the total number of batches that the queue joiner is supposed to receive.
:param data_container: data container to transform
:type data_container: DataContainer
:return:
"""
raise NotImplementedError()
@abstractmethod
def connect_queued_pipeline(self):
"""
Connect all the queued workers together so that the data can flow through each step.
:return:
"""
raise NotImplementedError()
@abstractmethod
def send_batch_to_queued_pipeline(self, batch_index: int, data_container: DataContainer):
"""
Send batches to queued pipeline. It is blocking if there is no more space available in the multiprocessing queues.
Workers might return batches in a different order, but the queue joiner will reorder them at the end.
The queue joiner will use the summary ids to reorder all of the received batches.
:param batch_index: batch index
:param data_container: data container batch
:return:
"""
raise NotImplementedError()
class SequentialQueuedPipeline(BaseQueuedPipeline):
"""
Using :class:`QueueWorker`, run all steps sequentially even if they are in separate processes or threads.
.. seealso::
:func:`~neuraxle.data_container.DataContainer.minibatches`,
:class:`~neuraxle.data_container.DataContainer.AbsentValuesNullObject`,
:class:`QueueWorker`,
:class:`BaseQueuedPipeline`,
:class:`ParallelQueuedPipeline`,
:class:`QueueJoiner`,
:class:`Observer`,
:class:`Observable`
"""
def get_n_batches(self, data_container) -> int:
"""
Get the number of batches to process.
:param data_container: data container to transform
:return: number of batches
"""
return data_container.get_n_batches(
batch_size=self.batch_size,
include_incomplete_batch=self.include_incomplete_batch
)
def connect_queued_pipeline(self):
"""
        Sequentially connect the queued workers.
:return:
"""
for i, (name, step) in enumerate(self[1:]):
self[i].subscribe(step)
def send_batch_to_queued_pipeline(self, batch_index: int, data_container: DataContainer):
"""
Send batches to process to the first queued worker.
:param batch_index: batch index
:param data_container: data container batch
:return:
"""
data_container = data_container.set_summary_id(data_container.hash_summary())
self[-1].summary_ids.append(data_container.summary_id)
self[0].put(data_container)
class ParallelQueuedFeatureUnion(BaseQueuedPipeline):
"""
Using :class:`QueueWorker`, run all steps in parallel using QueueWorkers.
.. seealso::
:class:`QueueWorker`,
:class:`BaseQueuedPipeline`,
:class:`SequentialQueuedPipeline`,
:class:`QueueJoiner`,
:class:`Observer`,
:class:`Observable`
"""
def get_n_batches(self, data_container):
"""
Get the number of batches to process by the queue joiner.
:return:
"""
return data_container.get_n_batches(self.batch_size) * (len(self) - 1)
def connect_queued_pipeline(self):
"""
Connect the queue joiner to all of the queued workers to process data in parallel.
:return:
"""
for name, step in self[:-1]:
step.subscribe(self[-1])
def send_batch_to_queued_pipeline(self, batch_index: int, data_container: DataContainer):
"""
Send batches to process to all of the queued workers.
:param batch_index: batch index
:param data_container: data container batch
:return:
"""
for name, step in self[:-1]:
data_container = data_container.set_summary_id(data_container.hash_summary())
self[-1].summary_ids.append(data_container.summary_id)
step.put(data_container)
class QueueJoiner(ObservableQueueMixin, Joiner):
"""
Observe the results of the queue worker of type :class:`QueueWorker`.
Synchronize all of the workers together.
.. seealso::
:class:`QueuedPipeline`,
:class:`Observer`,
:class:`ListDataContainer`,
:class:`DataContainer`
"""
def __init__(self, batch_size, n_batches=None):
self.n_batches_left_to_do = n_batches
self.summary_ids = []
self.result = {}
Joiner.__init__(self, batch_size=batch_size)
ObservableQueueMixin.__init__(self, Queue())
def teardown(self) -> 'BaseTransformer':
"""
Properly clean queue, summary ids, and results during teardown.
        :return: self, after teardown
"""
ObservableQueueMixin.teardown(self)
Joiner.teardown(self)
self.summary_ids = []
self.result = {}
return self
def set_n_batches(self, n_batches):
self.n_batches_left_to_do = n_batches
def join(self, original_data_container: DataContainer) -> DataContainer:
"""
Return the accumulated results received by the on next method of this observer.
:return: transformed data container
:rtype: DataContainer
"""
while self.n_batches_left_to_do > 0:
task: QueuedPipelineTask = self.queue.get()
self.n_batches_left_to_do -= 1
step_name = task.step_name
if step_name not in self.result:
if not isinstance(task.data_container, DataContainer):
summary_id = None
else:
summary_id = task.data_container.summary_id
self.result[step_name] = ListDataContainer(
current_ids=[],
data_inputs=[],
expected_outputs=[],
summary_id=summary_id
)
self.result[step_name].append_data_container_in_data_inputs(task.data_container)
data_containers = self._join_all_step_results()
self.result = {}
return original_data_container.set_data_inputs(data_containers)
def _join_all_step_results(self):
"""
Concatenate all resulting data containers together.
:return:
"""
results = []
for step_name, data_containers in self.result.items():
self._raise_exception_throwned_by_workers_if_needed(data_containers)
step_results = self._join_step_results(data_containers)
results.append(step_results)
return results
def _raise_exception_throwned_by_workers_if_needed(self, data_containers):
for dc in data_containers.data_inputs:
if isinstance(dc, Exception):
                # an exception has been thrown by a worker, so re-raise it here!
exception = dc
raise exception
def _join_step_results(self, data_containers):
# reorder results by summary id
data_containers.data_inputs.sort(key=lambda dc: self.summary_ids.index(dc.summary_id))
step_results = ListDataContainer.empty()
for data_container in data_containers.data_inputs:
data_container = data_container.set_summary_id(data_containers.data_inputs[-1].summary_id)
step_results.concat(data_container)
return step_results
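# Illustrative usage sketch: a minimal, hedged example of how the two concrete
# pipelines above could be driven. It assumes `Identity` can be imported from
# `neuraxle.base` (an assumption) and uses arbitrary worker counts and batch sizes.
if __name__ == '__main__':
    import numpy as np
    from neuraxle.base import Identity  # assumed import path
    data_inputs = np.arange(100)
    # Steps chained one after the other, each (name, n_workers, step) in its own worker.
    sequential = SequentialQueuedPipeline(
        [('a', 1, Identity()), ('b', 1, Identity())],
        batch_size=10, max_queue_size=5, use_threading=True
    )
    print(sequential.transform(data_inputs))
    # Every step receives every batch; the QueueJoiner merges all step results at the end.
    feature_union = ParallelQueuedFeatureUnion(
        [('a', 1, Identity()), ('b', 1, Identity())],
        batch_size=10, max_queue_size=5, use_threading=True
    )
    print(feature_union.transform(data_inputs))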
|
py | 1a4f7407b679a970e9237a756604ea5bd9a33420 | """This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
def tensor2im(input_image, index, imtype=np.uint8):
""""Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array
index (int) -- #-th image of a batch is going to be displayed
imtype (type) -- the desired type of the converted numpy array
"""
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[index].cpu().float().numpy() # convert it into a numpy array
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile(image_numpy, (3, 1, 1))
        image_numpy = (np.transpose(image_numpy, (1, 2, 0))) * 255.  # post-processing: transpose and scaling
        # image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
"""Calculate and print the mean of average absolute(gradients)
Parameters:
net (torch network) -- Torch network
name (str) -- the name of the network
"""
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
"""Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
image_path (str) -- the path of the image
"""
image_pil = Image.fromarray(image_numpy)
h, w, _ = image_numpy.shape
if aspect_ratio > 1.0:
image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
if aspect_ratio < 1.0:
image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
"""Print the mean, min, max, median, std, and size of a numpy array
Parameters:
val (bool) -- if print the values of the numpy array
shp (bool) -- if print the shape of the numpy array
"""
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
"""create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
"""create a single empty directory if it didn't exist
Parameters:
path (str) -- a single directory path
"""
if not os.path.exists(path):
os.makedirs(path)
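# Illustrative usage sketch: one plausible way to chain the helpers above, converting
# one image of a fake batch tensor and writing it to disk. The tensor shape, the index
# and the output path are placeholder assumptions, not values used by this project.
if __name__ == '__main__':
    _fake_batch = torch.rand(2, 3, 64, 64)          # values in [0, 1], matching the scaling above
    _image = tensor2im(_fake_batch, index=0)        # HWC uint8 numpy array
    mkdir('./_demo_output')                         # placeholder directory
    save_image(_image, './_demo_output/sample.png')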
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new |
py | 1a4f7436ffd3c19378005080371c3c4b4f00840d | #!/usr/bin/env python
import gzip, sys, threading
import crawle
class SaveURLHandler(crawle.Handler):
"""This handler simply saves all pages. into a gziped file. Any reponses
with status other than 200 is placed back on the queue.
This example also demonstrates the importance of synchronization as
multiple threads can attempt to write to the file conncurrently.
"""
def __init__(self, output):
self.output = gzip.open(output,'ab')
self.lock = threading.Lock()
self.exit = False
def process(self, req_res, queue):
if not req_res.response_status:
            print(req_res.error)
return
if req_res.response_status != 200:
print "%d - putting %s back on queue" % (req_res.response_status,
req_res.response_url)
queue.put(req_res.response_url)
else:
self.lock.acquire()
if self.exit:
self.lock.release()
return
self.output.write(req_res.response_body)
self.output.write("===*===\n")
self.lock.release()
def stop(self):
self.exit = True
self.output.close()
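class PrintStatusHandler(crawle.Handler):
    """A smaller, hedged sketch built on the same handler interface used above:
    it only prints URLs and status codes instead of saving response bodies.
    Only attributes already used by SaveURLHandler are assumed to exist.
    """
    def process(self, req_res, queue):
        if not req_res.response_status:
            print(req_res.error)
        else:
            print("%s -> %d" % (req_res.response_url, req_res.response_status))
    def stop(self):
        pass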
if __name__ == '__main__':
crawle.run_crawle(sys.argv, handler=SaveURLHandler('output.gz'))
|
py | 1a4f74919b3ca3d608826dbd9ef87239304d1440 | import numpy as np
import torch
import torch.nn.functional as F
from matplotlib import pyplot as plt
from skimage import morphology
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import precision_recall_curve
def get_roc_plot_and_threshold(predictions, gt_list):
# calculate image-level ROC AUC score
# img_scores = scores.reshape(scores.shape[0], -1).max(axis=1)
predictions = np.asarray(predictions)
gt_list = np.asarray(gt_list)
fpr, tpr, thresholds = roc_curve(gt_list, predictions)
img_roc_auc = roc_auc_score(gt_list, predictions)
fig, ax = plt.subplots(1, 1)
fig_img_rocauc = ax
fig_img_rocauc.plot(fpr, tpr, label="ROC Curve (area = {:.2f})".format(img_roc_auc))
ax.set_xlabel("FPR")
ax.set_ylabel("TPR")
ax.set_title('Receiver operating characteristic')
ax.legend(loc="lower right")
precision, recall, thresholds = precision_recall_curve(gt_list, predictions)
a = 2 * precision * recall
b = precision + recall
f1 = np.divide(a, b, out=np.zeros_like(a), where=b != 0)
best_threshold = thresholds[np.argmax(f1)]
return (fig, ax), best_threshold
def _embedding_concat(x, y):
B, C1, H1, W1 = x.size()
_, C2, H2, W2 = y.size()
s = int(H1 / H2)
x = F.unfold(x, kernel_size=s, dilation=1, stride=s)
x = x.view(B, C1, -1, H2, W2)
z = torch.zeros(B, C1 + C2, x.size(2), H2, W2)
for i in range(x.size(2)):
z[:, :, i, :, :] = torch.cat((x[:, :, i, :, :], y), 1)
z = z.view(B, -1, H2 * W2)
z = F.fold(z, kernel_size=s, output_size=(H1, W1), stride=s)
return z
def get_embedding(features_1, features_2, features_3, embedding_ids, device):
embedding = features_1
embedding = _embedding_concat(embedding, features_2).to(device)
embedding = _embedding_concat(embedding, features_3).to(device)
# Select a random amount of embeddings
embedding = torch.index_select(embedding, dim=1, index=embedding_ids)
return embedding
def create_mask(img_score: np.ndarray, threshold):
idx_above_threshold = img_score > threshold
idx_below_threshold = img_score <= threshold
mask = img_score
mask[idx_above_threshold] = 1
mask[idx_below_threshold] = 0
kernel = morphology.disk(4)
mask = morphology.opening(mask, kernel)
# mask *= 255
return mask
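# Illustrative usage sketch: how the helpers above are typically chained. The scores,
# labels and score map below are synthetic placeholders, not real detection outputs.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    scores = rng.random(100)                              # image-level anomaly scores
    labels = (scores > np.median(scores)).astype(int)     # fake binary ground truth
    (fig, ax), threshold = get_roc_plot_and_threshold(scores.tolist(), labels.tolist())
    print("best F1 threshold:", threshold)
    score_map = rng.random((64, 64)).astype(np.float32)   # per-pixel anomaly scores
    mask = create_mask(score_map, threshold)
    print("anomalous pixels:", int(mask.sum()))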
|
py | 1a4f74d96cfdfc0fa163c35d03d026cb11d55b6a | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test_parser_tensor_assign """
import pytest
import numpy as np
import mindspore as ms
from mindspore import context
from mindspore.nn import ReLU
from mindspore.nn import Cell
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
def setup_module():
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_parser_tensor_assign_slice():
class Net(Cell):
def __init__(self, U):
super(Net, self).__init__()
self.relu = ReLU()
self.U = U
def construct(self, x):
x = self.relu(x)
x[..., :2] = U
return x
input_np_x = np.random.rand(4, 4, 4)
input_me_x = Tensor(input_np_x, ms.float32)
U = 1.0
net = Net(U)
out_me = net(input_me_x)
input_np_x[..., :2] = U
assert np.allclose(out_me.asnumpy(), input_np_x, rtol=0.01, atol=0.01)
def test_parser_tensor_assign_slice_002():
class Net(Cell):
def __init__(self, U):
super(Net, self).__init__()
self.relu = ReLU()
self.U = U
def construct(self, x):
x = self.relu(x)
x[::, :, :1] = self.U
return x
input_np_x = np.random.rand(4, 4, 4)
input_me_x = Tensor(input_np_x, ms.float32)
U = 1.0
net = Net(U)
out_me = net(input_me_x)
input_np_x[::, :, :1] = U
assert np.allclose(out_me.asnumpy(), input_np_x, rtol=0.01, atol=0.01)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_parser_tensor_assign_bool():
class Net(Cell):
def __init__(self, U):
super(Net, self).__init__()
self.relu = ReLU()
self.U = U
def construct(self, x, tensorB):
x = self.relu(x)
x[tensorB] = self.U
return x
input_np_x = np.random.rand(4, 4, 4)
input_me_x = Tensor(input_np_x, ms.float32)
numpy_B = np.random.randn(4, 4, 4) > 0
tensor_B = Tensor(numpy_B)
U = np.array([1])
net = Net(Tensor(U))
out_me = net(input_me_x, tensor_B)
input_np_x[numpy_B] = U
assert np.allclose(out_me.asnumpy(), input_np_x, rtol=0.01, atol=0.01)
def test_parser_tensor_assign_bool_002():
class Net(Cell):
def __init__(self, U):
super(Net, self).__init__()
self.relu = ReLU()
self.U = U
self.fill = P.Fill()
def construct(self, x, tensorB):
x = self.relu(x)
x[tensorB] = self.U
return x
input_np_x = np.random.rand(2, 2, 2)
input_me_x = Tensor(input_np_x, ms.float32)
numpy_B = np.random.randn(2, 2, 2) > 0
tensor_B = Tensor(numpy_B)
U = 1
net = Net(U)
out_me = net(input_me_x, tensor_B)
input_np_x[numpy_B] = U
assert np.allclose(out_me.asnumpy(), input_np_x, rtol=0.01, atol=0.01)
|
py | 1a4f75e8029c85529564cf02bfaf31bd62fc8e58 | """
CEASIOMpy: Conceptual Aircraft Design Software
Developed by CFS ENGINEERING, 1015 Lausanne, Switzerland
Module containing the utility functions for the workflowcreator and optimization modules
Python version: >=3.6
| Author: Aidan Jungo
| Creation: 2020-02-25
| Last modification: 2020-04-24
TODO:
* ...
"""
#==============================================================================
# IMPORTS
#==============================================================================
import os
import subprocess
import shutil
import ceasiompy.utils.moduleinterfaces as mi
from ceasiompy.SettingsGUI.settingsgui import create_settings_gui
from ceasiompy.utils.ceasiomlogger import get_logger
log = get_logger(__file__.split('.')[0])
import ceasiompy.__init__
LIB_DIR = os.path.dirname(ceasiompy.__init__.__file__)
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
MODULE_NAME = os.path.basename(os.getcwd())
SU2_XPATH = '/cpacs/toolspecific/CEASIOMpy/aerodynamics/su2'
#==============================================================================
# FUNCTIONS
#==============================================================================
def copy_module_to_module(module_from, io_from, module_to, io_to):
""" Transfer CPACS file from one module to another.
    Function 'copy_module_to_module' copies the CPACS file from the ToolInput or
    ToolOutput folder of 'module_from' to the ToolInput or ToolOutput folder of 'module_to'.
    Args:
        module_from (str): Name of the module the CPACS file is copied from
        io_from (str): "in" or "out", for ToolInput or ToolOutput
        module_to (str): Name of the module where the CPACS file will be copied
        io_to (str): "in" or "out", for ToolInput or ToolOutput
"""
in_list = ['in','In','IN','iN','input','Input','INPUT','ToolInput','toolinput']
if io_from in in_list:
file_copy_from = mi.get_toolinput_file_path(module_from)
else: # 'out' or anything else ('out' by default)
file_copy_from = mi.get_tooloutput_file_path(module_from)
log.info('Copy CPACS from:'+ file_copy_from)
if io_to in in_list:
file_copy_to = mi.get_toolinput_file_path(module_to)
else: # 'out' or anything else ('out' by default)
file_copy_to = mi.get_tooloutput_file_path(module_to)
log.info('Copy CPACS to:'+ file_copy_to)
shutil.copy(file_copy_from,file_copy_to)
def run_subworkflow(module_to_run,cpacs_path_in='',cpacs_path_out=''):
"""Function to run a list of module in order.
Function 'run_subworkflow' will exectute in order all the module contained
in 'module_to_run' list. Every time the resuts of one module (generaly CPACS
file) will be copied as input for the next module.
Args:
module_to_run (list): List of mododule to run (in order)
cpacs_path_in (str): Path of the CPACS file use, if not already in the
ToolInput folder of the first submodule
cpacs_path_out (str): Path of the output CPACS file use, if not already
in the ToolInput folder of the first submodule
"""
if not module_to_run:
log.info('No module to run')
return 0
# Check non existing module
submodule_list = mi.get_submodule_list()
for module in module_to_run:
if module not in submodule_list:
raise ValueError('No module named "' + module + '"!')
# Copy the cpacs file in the first module
if cpacs_path_in:
shutil.copy(cpacs_path_in,mi.get_toolinput_file_path(module_to_run[0]))
log.info('The following modules will be executed: ' + str(module_to_run))
for m, module in enumerate(module_to_run):
log.info('\n')
log.info('######################################################################################')
log.info('Run module: ' + module)
log.info('######################################################################################\n')
# Go to the module directory
module_path = os.path.join(LIB_DIR,module)
print('\n Going to ',module_path,'\n')
os.chdir(module_path)
# Copy CPACS file from previous module to this one
if m > 0:
copy_module_to_module(module_to_run[m-1],'out',module,'in')
if module == 'SettingsGUI':
cpacs_path = mi.get_toolinput_file_path(module)
cpacs_out_path = mi.get_tooloutput_file_path(module)
create_settings_gui(cpacs_path,cpacs_out_path,module_to_run[m:])
else:
# Find the python file to run
for file in os.listdir(module_path):
if file.endswith('.py'):
if not file.startswith('__'):
main_python = file
# Run the module
error = subprocess.call(['python',main_python])
if error:
                raise ValueError('An error occurred in the module ' + module)
    # Copy the CPACS file of the last module to the given output path
if cpacs_path_out:
shutil.copy(mi.get_tooloutput_file_path(module_to_run[-1]),cpacs_path_out)
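# Illustrative usage sketch: one plausible way the helpers above could be called from a
# workflow script. The module list and the CPACS paths are placeholders, not values
# taken from this repository.
if __name__ == '__main__':
    example_modules = ['SettingsGUI', 'WeightConventional']    # hypothetical module order
    run_subworkflow(
        example_modules,
        cpacs_path_in='ToolInput/example_aircraft.xml',         # placeholder path
        cpacs_path_out='ToolOutput/example_aircraft_out.xml',   # placeholder path
    )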
|
py | 1a4f76edfb7e37e5b8ee9664268ffd3efeee64f7 | from __future__ import unicode_literals
import datetime
import decimal
from collections import defaultdict
from django.contrib.auth import get_permission_codename
from django.core.exceptions import FieldDoesNotExist
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.sql.constants import QUERY_TERMS
from django.forms.forms import pretty_name
from django.utils import formats, six, timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils.translation import ungettext
def lookup_needs_distinct(opts, lookup_path):
"""
Returns True if 'distinct()' should be used to query the given lookup path.
"""
lookup_fields = lookup_path.split('__')
# Remove the last item of the lookup path if it is a query term
if lookup_fields[-1] in QUERY_TERMS:
lookup_fields = lookup_fields[:-1]
# Now go through the fields (following all relations) and look for an m2m
for field_name in lookup_fields:
field = opts.get_field(field_name)
if hasattr(field, 'get_path_info'):
# This field is a relation, update opts to follow the relation
path_info = field.get_path_info()
opts = path_info[-1].to_opts
if any(path.m2m for path in path_info):
# This field is a m2m relation so we know we need to call distinct
return True
return False
def prepare_lookup_value(key, value):
"""
Returns a lookup value prepared to be used in queryset filtering.
"""
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
value = value.split(',')
# if key ends with __isnull, special case '' and the string literals 'false' and '0'
if key.endswith('__isnull'):
if value.lower() in ('', 'false', '0'):
value = False
else:
value = True
return value
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' and similarly problematic characters.
Similar to urllib.quote, except that the quoting is slightly different so
that it doesn't get automatically unquoted by the Web browser.
"""
if not isinstance(s, six.string_types):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"[]<>%\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
def flatten(fields):
"""Returns a list which is a single level of flattening of the
original list."""
flat = []
for field in fields:
if isinstance(field, (list, tuple)):
flat.extend(field)
else:
flat.append(field)
return flat
def flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
field_names = []
for name, opts in fieldsets:
field_names.extend(
flatten(opts['fields'])
)
return field_names
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
collector = NestedObjects(using=using)
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
if has_admin:
try:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.model_name),
None, (quote(obj._get_pk_val()),))
except NoReverseMatch:
# Change url doesn't exist -- don't display link to edit
return no_edit_link
p = '%s.%s' % (opts.app_label,
get_permission_codename('delete', opts))
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return format_html('{}: <a href="{}">{}</a>',
capfirst(opts.verbose_name),
admin_url,
obj)
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return no_edit_link
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, collector.model_count, perms_needed, protected
class NestedObjects(Collector):
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
self.model_count = defaultdict(int)
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source=None, source_attr=None, **kwargs):
for obj in objs:
if source_attr and not source_attr.endswith('+'):
related_name = source_attr % {
'class': source._meta.model_name,
'app_label': source._meta.app_label,
}
self.add_edge(getattr(obj, related_name), obj)
else:
self.add_edge(None, obj)
self.model_count[obj._meta.verbose_name_plural] += 1
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
"""
Return the graph as a nested list.
"""
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
def can_fast_delete(self, *args, **kwargs):
"""
We always want to load the objects into memory so that we can display
them to the user in confirm page.
"""
return False
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_text(opts.verbose_name),
'verbose_name_plural': force_text(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = _get_non_gfk_field(opts, name)
except FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and
hasattr(model_admin, name) and
not name == '__str__' and
not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def _get_non_gfk_field(opts, name):
"""
For historical reasons, the admin app relies on GenericForeignKeys as being
"not found" by get_field(). This could likely be cleaned up.
"""
field = opts.get_field(name)
if field.is_relation and field.many_to_one and not field.related_model:
raise FieldDoesNotExist()
return field
def label_for_field(name, model, model_admin=None, return_attr=False):
"""
Returns a sensible label for a field name. The name can be a callable,
property (but not created with @property decorator) or the name of an
object's attribute, as well as a genuine fields. If return_attr is
True, the resolved attribute (which could be a callable) is also returned.
This will be None if (and only if) the name refers to a field.
"""
attr = None
try:
field = _get_non_gfk_field(model._meta, name)
try:
label = field.verbose_name
except AttributeError:
# field is likely a ForeignObjectRel
label = field.related_model._meta.verbose_name
except FieldDoesNotExist:
if name == "__unicode__":
label = force_text(model._meta.verbose_name)
attr = six.text_type
elif name == "__str__":
label = force_str(model._meta.verbose_name)
attr = bytes
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__class__.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif (isinstance(attr, property) and
hasattr(attr, "fget") and
hasattr(attr.fget, "short_description")):
label = attr.fget.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
if return_attr:
return (label, attr)
else:
return label
def help_text_for_field(name, model):
help_text = ""
try:
field = _get_non_gfk_field(model._meta, name)
except FieldDoesNotExist:
pass
else:
if hasattr(field, 'help_text'):
help_text = field.help_text
return smart_text(help_text)
def display_for_field(value, field):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, models.DateTimeField):
return formats.localize(timezone.template_localtime(value))
elif isinstance(field, (models.DateField, models.TimeField)):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, (models.IntegerField, models.FloatField)):
return formats.number_format(value)
elif isinstance(field, models.FileField) and value:
return format_html('<a href="{}">{}</a>', value.url, value)
else:
return smart_text(value)
def display_for_value(value, boolean=False):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if boolean:
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(value, datetime.datetime):
return formats.localize(timezone.template_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, six.integer_types + (decimal.Decimal, float)):
return formats.number_format(value)
else:
return smart_text(value)
class NotRelationField(Exception):
pass
def get_model_from_relation(field):
if hasattr(field, 'get_path_info'):
return field.get_path_info()[-1].to_opts.model
else:
raise NotRelationField
def reverse_field_path(model, path):
""" Create a reversed field path.
E.g. Given (Order, "user__groups"),
return (Group, "user__order").
Final field must be a related model, not a data field.
"""
reversed_path = []
parent = model
pieces = path.split(LOOKUP_SEP)
for piece in pieces:
field = parent._meta.get_field(piece)
# skip trailing data field if extant:
if len(reversed_path) == len(pieces) - 1: # final iteration
try:
get_model_from_relation(field)
except NotRelationField:
break
# Field should point to another model
if field.is_relation and not (field.auto_created and not field.concrete):
related_name = field.related_query_name()
parent = field.remote_field.model
else:
related_name = field.field.name
parent = field.related_model
reversed_path.insert(0, related_name)
return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
""" Return list of Fields given path relative to model.
e.g. (ModelX, "user__groups__name") -> [
<django.db.models.fields.related.ForeignKey object at 0x...>,
<django.db.models.fields.related.ManyToManyField object at 0x...>,
<django.db.models.fields.CharField object at 0x...>,
]
"""
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field(piece))
return fields
def remove_trailing_data_field(fields):
""" Discard trailing non-relation field if extant. """
try:
get_model_from_relation(fields[-1])
except NotRelationField:
fields = fields[:-1]
return fields
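# Illustrative usage sketch: round-tripping a primary key through quote()/unquote() and
# preparing a couple of lookup values, to show what the helpers above produce. The
# values are arbitrary examples, not taken from any real model.
if __name__ == '__main__':
    pk = 'a/b_c:d'
    quoted = quote(pk)                                       # '/', '_' and ':' become '_XX' escapes
    assert unquote(quoted) == pk
    print(prepare_lookup_value('id__in', '1,2,3'))           # ['1', '2', '3']
    print(prepare_lookup_value('parent__isnull', 'false'))   # False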
|
py | 1a4f77134b6f737b1b2e740a69633cb575937853 | import datetime
import minerl
import namesgenerator
from sacred import Experiment
import basalt_utils.wrappers as wrapper_utils
from minerl.herobraine.wrappers.video_recording_wrapper import VideoRecordingWrapper
from basalt_utils.sb3_compat.policies import SpaceFlatteningActorCriticPolicy
from basalt_utils.sb3_compat.cnns import MAGICALCNN
from basalt_utils.wrappers import SaveObsAndActions
from basalt_utils.callbacks import BatchEndIntermediateRolloutEvaluator, MultiCallback, BCModelSaver
from stable_baselines3.common.policies import ActorCriticCnnPolicy
from stable_baselines3.common.vec_env import DummyVecEnv
import collections
from imitation.algorithms.bc import BC
import imitation.data.rollout as il_rollout
import logging
import torch as th
from basalt_utils import utils
import os
import imitation.util.logger as imitation_logger
from sacred.observers import FileStorageObserver
from stable_baselines3.common.utils import get_device
from time import time
bc_baseline = Experiment("basalt_bc_baseline")
WRAPPERS = [# Maps from a string version of enum (found in the dataset) to an int version (expected for spaces.Discrete)
(wrapper_utils.EnumStrToIntWrapper, dict()),
# Transforms continuous camera action into discrete up/down/no-change buckets on both pitch and yaw
(wrapper_utils.CameraDiscretizationWrapper, dict()),
# Flattens a Dict action space into a Box, but retains memory of how to expand back out
(wrapper_utils.ActionFlatteningWrapper, dict()),
# Pull out only the POV observation from the observation space; transpose axes for SB3 compatibility
(utils.ExtractPOVAndTranspose, dict())] #,
def make_unique_timestamp() -> str:
"""Make a timestamp along with a random word descriptor: e.g. 2021-06-06_1236_boring_wozniac"""
ISO_TIMESTAMP = "%Y%m%d_%H%M"
timestamp = datetime.datetime.now().strftime(ISO_TIMESTAMP)
return f"{timestamp}_{namesgenerator.get_random_name()}"
@bc_baseline.config
def default_config():
task_name = "MineRLBasaltFindCave-v0"
train_batches = None
train_epochs = None
log_interval = 1
# TODO fix this
data_root = os.getenv('MINERL_DATA_ROOT')
# SpaceFlatteningActorCriticPolicy is a policy that supports a flattened Dict action space by
# maintaining multiple sub-distributions and merging their results
policy_class = SpaceFlatteningActorCriticPolicy
wrappers = WRAPPERS
save_dir_base = "results/"
save_dir = None
policy_filename = 'trained_policy.pt'
use_rollout_callback = False
rollout_callback_batch_interval = 1000
policy_save_interval = 1000
callback_rollouts = 5
save_videos = True
mode = 'train'
test_policy_path = 'train/trained_policy.pt'
test_n_rollouts = 5
# Note that `batch_size` needs to be less than the number of trajectories available for the task you're training on
batch_size = 32
n_traj = None
buffer_size = 15000
lr = 1e-4
_ = locals()
del _
@bc_baseline.config
def default_save_dir(save_dir_base, save_dir, task_name):
"""
Calculates a save directory by combining the base `save_dir` ("results" by default) with
the task name and a timestamp that contains both the time and a random name
"""
if save_dir is None:
save_dir = os.path.join(save_dir_base, task_name, make_unique_timestamp())
_ = locals()
del _
@bc_baseline.named_config
def normal_policy_class():
"""
This is a sacred named_config, which means that when `normal_policy_class` is added as a parameter
to a call of this experiment, the policy class will be set to ActorCriticCnnPolicy
"Normal" here is just used to mean the default CNN policy from Stable Baselines, rather than the one explicitly designed
to deal with multimodal action spaces (SpaceFlatteningActorCriticPolicy)
"""
policy_class = ActorCriticCnnPolicy
_ = locals()
del _
@bc_baseline.main
def main(mode):
if mode == 'train':
train_bc()
if mode == 'test':
test_bc()
@bc_baseline.capture
def test_bc(task_name, data_root, wrappers, test_policy_path, test_n_rollouts, save_dir):
os.makedirs(save_dir, exist_ok=True)
# Add a wrapper to the environment that records video and saves it in the
# the `save_dir` we have constructed for this run.
wrappers = [(VideoRecordingWrapper, {'video_directory':
os.path.join(save_dir, 'videos')}),
(SaveObsAndActions, {'save_dir':
os.path.join(save_dir, 'obs_and_actions')})] + wrappers
data_pipeline, wrapped_env = utils.get_data_pipeline_and_env(task_name, data_root, wrappers, dummy=False)
vec_env = DummyVecEnv([lambda: wrapped_env])
policy = th.load(test_policy_path, map_location=th.device(get_device('auto')))
trajectories = il_rollout.generate_trajectories(policy, vec_env, il_rollout.min_episodes(test_n_rollouts))
stats = il_rollout.rollout_stats(trajectories)
stats = collections.OrderedDict([(key, stats[key])
for key in sorted(stats)])
# print it out
kv_message = '\n'.join(f" {key}={value}"
for key, value in stats.items())
logging.info(f"Evaluation stats on '{task_name}': {kv_message}")
@bc_baseline.capture
def train_bc(task_name, batch_size, data_root, wrappers, train_epochs, n_traj, lr,
policy_class, train_batches, log_interval, save_dir, policy_filename,
use_rollout_callback, rollout_callback_batch_interval, callback_rollouts, save_videos,
buffer_size, policy_save_interval):
# This code is designed to let you either train for a fixed number of batches, or for a fixed number of epochs
assert train_epochs is None or train_batches is None, \
"Only one of train_batches or train_epochs should be set"
assert not (train_batches is None and train_epochs is None), \
"You cannot have both train_batches and train_epochs set to None"
# If you've set the `save_videos` flag, add a VideoRecordingWrapper with a directory set
# to the current `save_dir` to the environment wrappers
if save_videos:
wrappers = [(VideoRecordingWrapper, {'video_directory':
os.path.join(save_dir, 'videos')}),
(SaveObsAndActions, {'save_dir':
os.path.join(save_dir, 'obs_and_actions')})] + wrappers
# This `get_data_pipeline_and_env` utility is designed to be shared across multiple baselines
# It takes in a task name, data root, and set of wrappers and returns
# (1) An env object with the same environment spaces as you'd getting from making the env associated
# with this task and wrapping it in `wrappers`. Depending on the parameter passed into `dummy`, this is
# either the real wrapped environment, or a dummy environment that displays the same spaces,
# but without having to actually start up Minecraft
# (2) A MineRL DataPipeline that can be used to construct a batch_iter used by BC, and also as a handle to clean
# up that iterator after training.
data_pipeline, wrapped_env = utils.get_data_pipeline_and_env(task_name, data_root, wrappers,
dummy=not use_rollout_callback)
# This utility creates a data iterator that is basically a light wrapper around the baseline MineRL data iterator
# that additionally:
# (1) Applies all observation and action transformations specified by the wrappers in `wrappers`, and
# (2) Calls `np.squeeze` recursively on all the nested dict spaces to remove the sequence dimension, since we're
# just doing single-frame BC here
data_iter = utils.create_data_iterator(wrapped_env,
data_pipeline=data_pipeline,
batch_size=batch_size,
num_epochs=train_epochs,
num_batches=train_batches,
buffer_size=buffer_size)
if policy_class == SpaceFlatteningActorCriticPolicy:
policy = policy_class(observation_space=wrapped_env.observation_space,
action_space=wrapped_env.action_space,
env=wrapped_env,
lr_schedule=lambda _: 1e-4,
features_extractor_class=MAGICALCNN)
else:
policy = policy_class(observation_space=wrapped_env.observation_space,
action_space=wrapped_env.action_space,
lr_schedule=lambda _: 1e-4,
features_extractor_class=MAGICALCNN)
os.makedirs(save_dir, exist_ok=True)
imitation_logger.configure(save_dir, ["stdout", "tensorboard"])
callbacks = [BCModelSaver(policy=policy,
save_dir=os.path.join(save_dir, 'policy_checkpoints'),
save_interval_batches=policy_save_interval)]
if use_rollout_callback:
callbacks.append(BatchEndIntermediateRolloutEvaluator(policy=policy,
env=wrapped_env,
save_dir=os.path.join(save_dir, 'policy_rollouts'),
evaluate_interval_batches=rollout_callback_batch_interval,
n_rollouts=callback_rollouts))
callback_op = MultiCallback(callbacks)
bc_trainer = BC(
observation_space=wrapped_env.observation_space,
action_space=wrapped_env.action_space,
policy_class= lambda **kwargs: policy,
policy_kwargs=None,
expert_data=data_iter,
device='auto',
optimizer_cls=th.optim.Adam,
optimizer_kwargs=dict(lr=lr),
ent_weight=1e-3,
l2_weight=1e-5)
bc_trainer.train(n_epochs=train_epochs,
n_batches=train_batches,
log_interval=log_interval,
on_batch_end=callback_op)
bc_trainer.save_policy(policy_path=os.path.join(save_dir, policy_filename))
bc_baseline.add_artifact(os.path.join(save_dir, policy_filename))
bc_baseline.log_scalar(f'run_location={save_dir}', 1)
print("Training complete; cleaning up data pipeline!")
data_iter.close()
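def _example_programmatic_run():
    """Hedged sketch, never called automatically: besides the command-line entry point
    below, a sacred Experiment can also be launched from Python. The config values here
    are placeholders that mirror keys defined in default_config()."""
    bc_baseline.run(
        config_updates={'task_name': 'MineRLBasaltFindCave-v0',
                        'train_batches': 100,
                        'use_savers': False},
    )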
if __name__ == "__main__":
bc_baseline.observers.append(FileStorageObserver("sacred_results"))
bc_baseline.run_commandline()
|
py | 1a4f777c48677c165c49e6b30c81bf325d4e9581 | import asyncio
import typing
import warnings
from ..utils.logger import logger
from .auto_reload import _auto_reload
CallableAwaitable = typing.Union[typing.Callable, typing.Awaitable]
class TaskManager:
def __init__(
self,
loop: asyncio.AbstractEventLoop = None,
*,
on_shutdown: typing.Callable = None,
on_startup: typing.Callable = None,
auto_reload: bool = False,
auto_reload_dir: str = ".",
asyncio_debug_mode: bool = False,
):
self.tasks: typing.List[typing.Callable] = []
self.loop: asyncio.AbstractEventLoop = loop or asyncio.get_event_loop()
self.on_shutdown: CallableAwaitable = on_shutdown
self.on_startup: CallableAwaitable = on_startup
self.auto_reload: bool = auto_reload
self.auto_reload_dir: str = auto_reload_dir
self.loop.set_debug(asyncio_debug_mode)
def run(
self, **abandoned,
):
if len(abandoned):
warnings.warn("Pass options through __init__")
for option in abandoned:
setattr(self, option, abandoned[option])
if len(self.tasks) < 1:
raise RuntimeError("Count of tasks - 0. Add tasks.")
try:
if self.on_startup is not None:
self.loop.run_until_complete(self.on_startup())
if self.auto_reload:
self.loop.create_task(_auto_reload(self.auto_reload_dir))
[self.loop.create_task(task) for task in self.tasks]
self.loop.run_forever()
except KeyboardInterrupt:
logger.info("Keyboard Interrupt")
self.close()
finally:
if self.on_shutdown is not None:
self.loop.run_until_complete(self.on_shutdown())
if not self.loop.is_running():
self.close()
def close(self):
self.loop.close()
def add_task(self, task: typing.Union[typing.Coroutine, typing.Callable]):
if asyncio.iscoroutinefunction(task):
self.tasks.append(task())
elif asyncio.iscoroutine(task):
self.tasks.append(task)
else:
raise RuntimeError("Unexpected task. Tasks may be only coroutine functions")
def run_task(self, task: typing.Union[typing.Coroutine, typing.Callable]):
if asyncio.iscoroutinefunction(task):
self.loop.create_task(task())
elif asyncio.iscoroutine(task):
self.loop.create_task(task)
else:
raise RuntimeError("Unexpected task. Tasks may be only coroutine functions")
|
py | 1a4f78274a2e080495a65521157d52636236e0cf | from torch import nn
from pytorch_widedeep.wdtypes import * # noqa: F403
from pytorch_widedeep.models.tab_mlp import MLP
from pytorch_widedeep.models.transformers._encoders import SaintEncoder
from pytorch_widedeep.models.transformers._embeddings_layers import (
CatAndContEmbeddings,
)
class SAINT(nn.Module):
r"""Defines a ``SAINT`` model
(`arXiv:2106.01342 <https://arxiv.org/abs/2106.01342>`_) that can be used
as the ``deeptabular`` component of a Wide & Deep model.
Parameters
----------
column_idx: Dict
Dict containing the index of the columns that will be passed through
the model. Required to slice the tensors. e.g.
{'education': 0, 'relationship': 1, 'workclass': 2, ...}
embed_input: List
List of Tuples with the column name and number of unique values
e.g. [('education', 11), ...]
embed_dropout: float, default = 0.1
Dropout to be applied to the embeddings matrix
full_embed_dropout: bool, default = False
Boolean indicating if an entire embedding (i.e. the representation of
one column) will be dropped in the batch. See:
:obj:`pytorch_widedeep.models.transformers._layers.FullEmbeddingDropout`.
If ``full_embed_dropout = True``, ``embed_dropout`` is ignored.
shared_embed: bool, default = False
The idea behind ``shared_embed`` is described in the Appendix A in the
`TabTransformer paper <https://arxiv.org/abs/2012.06678>`_: `'The
goal of having column embedding is to enable the model to distinguish
the classes in one column from those in the other columns'`. In other
words, the idea is to let the model learn which column is embedded
at the time.
add_shared_embed: bool, default = False
The two embedding sharing strategies are: 1) add the shared embeddings
to the column embeddings or 2) to replace the first
``frac_shared_embed`` with the shared embeddings.
See :obj:`pytorch_widedeep.models.transformers._layers.SharedEmbeddings`
frac_shared_embed: float, default = 0.25
The fraction of embeddings that will be shared (if ``add_shared_embed
= False``) by all the different categories for one particular
column.
continuous_cols: List, Optional, default = None
List with the name of the numeric (aka continuous) columns
embed_continuous_activation: str, default = None
String indicating the activation function to be applied to the
continuous embeddings, if any. ``tanh``, ``relu``, ``leaky_relu`` and
``gelu`` are supported.
cont_norm_layer: str, default = None,
Type of normalization layer applied to the continuous features before
they are embedded. Options are: ``layernorm``, ``batchnorm`` or
``None``.
input_dim: int, default = 32
        The so-called *dimension of the model*. In general, this is the number of
embeddings used to encode the categorical and/or continuous columns
n_heads: int, default = 8
Number of attention heads per Transformer block
use_bias: bool, default = False
Boolean indicating whether or not to use bias in the Q, K, and V
projection layers
n_blocks: int, default = 2
Number of SAINT-Transformer blocks. 1 in the paper.
attn_dropout: float, default = 0.2
Dropout that will be applied to the Multi-Head Attention column and
row layers
ff_dropout: float, default = 0.1
Dropout that will be applied to the FeedForward network
transformer_activation: str, default = "gelu"
Transformer Encoder activation function. ``tanh``, ``relu``,
``leaky_relu``, ``gelu``, ``geglu`` and ``reglu`` are supported
mlp_hidden_dims: List, Optional, default = None
MLP hidden dimensions. If not provided it will default to ``[l, 4*l,
2*l]`` where ``l`` is the MLP input dimension
mlp_activation: str, default = "relu"
MLP activation function. ``tanh``, ``relu``, ``leaky_relu`` and
``gelu`` are supported
mlp_dropout: float, default = 0.1
Dropout that will be applied to the final MLP
mlp_batchnorm: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
dense layers
mlp_batchnorm_last: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
last of the dense layers
mlp_linear_first: bool, default = False
        Boolean indicating the order of the operations in the dense
layer. If ``True: [LIN -> ACT -> BN -> DP]``. If ``False: [BN -> DP ->
LIN -> ACT]``
Attributes
----------
cat_and_cont_embed: ``nn.Module``
This is the module that processes the categorical and continuous columns
transformer_blks: ``nn.Sequential``
Sequence of SAINT-Transformer blocks
transformer_mlp: ``nn.Module``
MLP component in the model
output_dim: int
The output dimension of the model. This is a required attribute
        necessary to build the WideDeep class
Example
--------
>>> import torch
>>> from pytorch_widedeep.models import SAINT
>>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
>>> colnames = ['a', 'b', 'c', 'd', 'e']
>>> embed_input = [(u,i) for u,i in zip(colnames[:4], [4]*4)]
>>> continuous_cols = ['e']
>>> column_idx = {k:v for v,k in enumerate(colnames)}
>>> model = SAINT(column_idx=column_idx, embed_input=embed_input, continuous_cols=continuous_cols)
>>> out = model(X_tab)
"""
def __init__(
self,
column_idx: Dict[str, int],
embed_input: Optional[List[Tuple[str, int]]] = None,
embed_dropout: float = 0.1,
full_embed_dropout: bool = False,
shared_embed: bool = False,
add_shared_embed: bool = False,
frac_shared_embed: float = 0.25,
continuous_cols: Optional[List[str]] = None,
embed_continuous_activation: str = None,
cont_norm_layer: str = None,
input_dim: int = 32,
use_bias: bool = False,
n_heads: int = 8,
n_blocks: int = 2,
        attn_dropout: float = 0.2,
        ff_dropout: float = 0.1,
transformer_activation: str = "gelu",
mlp_hidden_dims: Optional[List[int]] = None,
mlp_activation: str = "relu",
mlp_dropout: float = 0.1,
mlp_batchnorm: bool = False,
mlp_batchnorm_last: bool = False,
mlp_linear_first: bool = True,
):
super(SAINT, self).__init__()
self.column_idx = column_idx
self.embed_input = embed_input
self.embed_dropout = embed_dropout
self.full_embed_dropout = full_embed_dropout
self.shared_embed = shared_embed
self.add_shared_embed = add_shared_embed
self.frac_shared_embed = frac_shared_embed
self.continuous_cols = continuous_cols
self.embed_continuous_activation = embed_continuous_activation
self.cont_norm_layer = cont_norm_layer
self.input_dim = input_dim
self.use_bias = use_bias
self.n_heads = n_heads
self.n_blocks = n_blocks
self.attn_dropout = attn_dropout
self.ff_dropout = ff_dropout
self.transformer_activation = transformer_activation
self.mlp_hidden_dims = mlp_hidden_dims
self.mlp_activation = mlp_activation
self.mlp_batchnorm = mlp_batchnorm
self.mlp_batchnorm_last = mlp_batchnorm_last
self.mlp_linear_first = mlp_linear_first
self.with_cls_token = "cls_token" in column_idx
self.n_cat = len(embed_input) if embed_input is not None else 0
self.n_cont = len(continuous_cols) if continuous_cols is not None else 0
self.n_feats = self.n_cat + self.n_cont
self.cat_and_cont_embed = CatAndContEmbeddings(
input_dim,
column_idx,
embed_input,
embed_dropout,
full_embed_dropout,
shared_embed,
add_shared_embed,
frac_shared_embed,
False, # use_embed_bias
continuous_cols,
True, # embed_continuous,
embed_continuous_activation,
True, # use_cont_bias
cont_norm_layer,
)
self.transformer_blks = nn.Sequential()
for i in range(n_blocks):
self.transformer_blks.add_module(
"saint_block" + str(i),
SaintEncoder(
input_dim,
n_heads,
use_bias,
attn_dropout,
ff_dropout,
transformer_activation,
self.n_feats,
),
)
attn_output_dim = (
self.input_dim if self.with_cls_token else self.n_feats * self.input_dim
)
if not mlp_hidden_dims:
mlp_hidden_dims = [
attn_output_dim,
attn_output_dim * 4,
attn_output_dim * 2,
]
else:
assert mlp_hidden_dims[0] == attn_output_dim, (
f"The input dim of the MLP must be {attn_output_dim}. "
f"Got {mlp_hidden_dims[0]} instead"
)
self.transformer_mlp = MLP(
mlp_hidden_dims,
mlp_activation,
mlp_dropout,
mlp_batchnorm,
mlp_batchnorm_last,
mlp_linear_first,
)
# the output_dim attribute will be used as input_dim when "merging" the models
self.output_dim = mlp_hidden_dims[-1]
def forward(self, X: Tensor) -> Tensor:
x_cat, x_cont = self.cat_and_cont_embed(X)
if x_cat is not None:
x = x_cat
if x_cont is not None:
x = torch.cat([x, x_cont], 1) if x_cat is not None else x_cont
x = self.transformer_blks(x)
if self.with_cls_token:
x = x[:, 0, :]
else:
x = x.flatten(1)
return self.transformer_mlp(x)
@property
def attention_weights(self) -> List:
r"""List with the attention weights. Each element of the list is a tuple
where the first and the second elements are the column and row
attention weights respectively
The shape of the attention weights is:
- column attention: :math:`(N, H, F, F)`
- row attention: :math:`(1, H, N, N)`
where *N* is the batch size, *H* is the number of heads and *F* is the
number of features/columns in the dataset
"""
attention_weights = []
for blk in self.transformer_blks:
attention_weights.append(
(blk.col_attn.attn_weights, blk.row_attn.attn_weights)
)
return attention_weights
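# Illustrative usage sketch (not part of the original module): inspecting the
# attention weights after a forward pass, continuing the docstring example above.
# Shapes follow the ``attention_weights`` property documentation.
#
#   out = model(X_tab)
#   col_attn, row_attn = model.attention_weights[0]   # first SAINT block
#   # col_attn: (N, H, F, F) column attention, row_attn: (1, H, N, N) row attention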
|
py | 1a4f79482b350654e66edb871327a97e0f8dbf7e | # coding: utf-8
from __future__ import print_function, unicode_literals
try:
import queue
except ImportError:
import Queue as queue
|
py | 1a4f79b94f7c4676b40205b026d3876b4065b244 | # Automatically generated by pb2py
# fmt: off
SupplyChange_Increase = 1
SupplyChange_Decrease = 2
|
py | 1a4f7a07cd7d40e03bb449594d892784a57ee768 | # -*- coding: utf-8 -*-
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Sphinx-Themes template'
copyright = '2018, sphinx-themes.org'
author = 'sphinx-themes.org'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '2.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# If you want to have a consistent, platform independent look
# sphinxemoji_style = 'twemoji'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"external_links": [
("Github", "https://github.com/romnnn/sphinx_press_theme")
]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# The default `html_sidebars` of Press theme: ['util/searchbox.html', 'util/sidetoc.html']
#
# html_sidebars = {'**': ['util/sidetoc.html']}
html_logo = 'https://mirrors.creativecommons.org/presskit/icons/heart.black.png'
#---sphinx-themes-----
html_theme = 'press'
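# Illustrative usage sketch (assumptions: this conf.py sits in the documentation
# source directory and the ``sphinx-press-theme`` package is installed):
#
#   sphinx-build -b html . _build/html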
|
py | 1a4f7c78d575a151c20c428b3ca0a311ecb5782b | import math
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
app = FastAPI()
app.mount("/assets", StaticFiles(directory="assets"), name="assets")
templates = Jinja2Templates(directory="templates")
@app.get("/{id}")
def e_to_the_x(id: str):
intValue = int(id)
return { "e to the power of x" : math.exp(intValue)}
@app.get("/html/{id}", response_class=HTMLResponse)
async def index(request: Request, id:str):
intValue = int(id)
message = "e to the power of x: " + str(math.exp(intValue))
return templates.TemplateResponse("index.html", {"request": request, "message" : message})
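# Illustrative usage sketch (assumptions: the module is saved as main.py and the
# ``assets``/``templates`` directories referenced above exist):
#
#   uvicorn main:app --reload
#   curl http://127.0.0.1:8000/2        # -> {"e to the power of x": 7.389056...}
#   curl http://127.0.0.1:8000/html/2   # -> rendered templates/index.html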
|
py | 1a4f7c966422077901602a280ca587bc2b5a21e2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=15
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=12
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=13
c.append(cirq.H.on(input_qubit[0])) # number=14
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=6
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=8
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=9
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=10
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=11
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2820
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma215.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() |
py | 1a4f7cad55b0616b9f8182b025debb710edcb542 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_explosion2754_all_of
except ImportError:
bt_explosion2754_all_of = sys.modules[
"onshape_client.oas.models.bt_explosion2754_all_of"
]
try:
from onshape_client.oas.models import bt_explosion_step_feature3008
except ImportError:
bt_explosion_step_feature3008 = sys.modules[
"onshape_client.oas.models.bt_explosion_step_feature3008"
]
try:
from onshape_client.oas.models import bt_microversion_id_and_configuration2338
except ImportError:
bt_microversion_id_and_configuration2338 = sys.modules[
"onshape_client.oas.models.bt_microversion_id_and_configuration2338"
]
try:
from onshape_client.oas.models import btm_assembly_feature887
except ImportError:
btm_assembly_feature887 = sys.modules[
"onshape_client.oas.models.btm_assembly_feature887"
]
try:
from onshape_client.oas.models import btm_feature134
except ImportError:
btm_feature134 = sys.modules["onshape_client.oas.models.btm_feature134"]
try:
from onshape_client.oas.models import btm_individual_query_with_occurrence_base904
except ImportError:
btm_individual_query_with_occurrence_base904 = sys.modules[
"onshape_client.oas.models.btm_individual_query_with_occurrence_base904"
]
try:
from onshape_client.oas.models import btm_parameter1
except ImportError:
btm_parameter1 = sys.modules["onshape_client.oas.models.btm_parameter1"]
class BTExplosion2754(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"explode_steps": (
[bt_explosion_step_feature3008.BTExplosionStepFeature3008],
), # noqa: E501
"starting_position_id": (
bt_microversion_id_and_configuration2338.BTMicroversionIdAndConfiguration2338,
), # noqa: E501
"feature_id": (str,), # noqa: E501
"feature_type": (str,), # noqa: E501
"import_microversion": (str,), # noqa: E501
"name": (str,), # noqa: E501
"namespace": (str,), # noqa: E501
"node_id": (str,), # noqa: E501
"parameters": ([btm_parameter1.BTMParameter1],), # noqa: E501
"return_after_subfeatures": (bool,), # noqa: E501
"sub_features": ([btm_feature134.BTMFeature134],), # noqa: E501
"suppressed": (bool,), # noqa: E501
"auxiliary_assembly_feature": (bool,), # noqa: E501
"feature_list_field_index": (int,), # noqa: E501
"occurrence_queries_from_all_configurations": (
[
btm_individual_query_with_occurrence_base904.BTMIndividualQueryWithOccurrenceBase904
],
), # noqa: E501
"version": (int,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"explode_steps": "explodeSteps", # noqa: E501
"starting_position_id": "startingPositionId", # noqa: E501
"feature_id": "featureId", # noqa: E501
"feature_type": "featureType", # noqa: E501
"import_microversion": "importMicroversion", # noqa: E501
"name": "name", # noqa: E501
"namespace": "namespace", # noqa: E501
"node_id": "nodeId", # noqa: E501
"parameters": "parameters", # noqa: E501
"return_after_subfeatures": "returnAfterSubfeatures", # noqa: E501
"sub_features": "subFeatures", # noqa: E501
"suppressed": "suppressed", # noqa: E501
"auxiliary_assembly_feature": "auxiliaryAssemblyFeature", # noqa: E501
"feature_list_field_index": "featureListFieldIndex", # noqa: E501
"occurrence_queries_from_all_configurations": "occurrenceQueriesFromAllConfigurations", # noqa: E501
"version": "version", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_explosion2754.BTExplosion2754 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
explode_steps ([bt_explosion_step_feature3008.BTExplosionStepFeature3008]): [optional] # noqa: E501
starting_position_id (bt_microversion_id_and_configuration2338.BTMicroversionIdAndConfiguration2338): [optional] # noqa: E501
feature_id (str): [optional] # noqa: E501
feature_type (str): [optional] # noqa: E501
import_microversion (str): [optional] # noqa: E501
name (str): [optional] # noqa: E501
namespace (str): [optional] # noqa: E501
node_id (str): [optional] # noqa: E501
parameters ([btm_parameter1.BTMParameter1]): [optional] # noqa: E501
return_after_subfeatures (bool): [optional] # noqa: E501
sub_features ([btm_feature134.BTMFeature134]): [optional] # noqa: E501
suppressed (bool): [optional] # noqa: E501
auxiliary_assembly_feature (bool): [optional] # noqa: E501
feature_list_field_index (int): [optional] # noqa: E501
occurrence_queries_from_all_configurations ([btm_individual_query_with_occurrence_base904.BTMIndividualQueryWithOccurrenceBase904]): [optional] # noqa: E501
version (int): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
"anyOf": [],
"allOf": [
bt_explosion2754_all_of.BTExplosion2754AllOf,
btm_assembly_feature887.BTMAssemblyFeature887,
],
"oneOf": [],
}
|
py | 1a4f7d207efa250726c6539bd72d67f0b697e20c | import re
from collections import Counter
def unique_words_counter(file_path):
with open(file_path, "r", encoding="utf-8") as file:
all_words = re.findall(r"[0-9a-zA-Z-']+", file.read())
all_words = [word.upper() for word in all_words]
print("Total Words: ", len(all_words))
words_counter = Counter()
for word in all_words:
words_counter[word] += 1
print("\nTop 20 words:")
for word in words_counter.most_common(20):
value, count = word
print(value, " : ", count)
if __name__ == "__main__":
unique_words_counter("data/shakespeare.txt")
|
py | 1a4f7e7909c7a2c75caffb851df06c3405ded1b5 | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from typing import (
Iterator,
List,
Optional,
Union,
)
from amundsen_rds.models import RDSModel
from amundsen_rds.models.table import TableFollower as RDSTableFollower
from amundsen_rds.models.user import User as RDSUser
from databuilder.models.graph_node import GraphNode
from databuilder.models.graph_relationship import GraphRelationship
from databuilder.models.graph_serializable import GraphSerializable
from databuilder.models.follower_constants import FOLLOWER_OF_OBJECT_RELATION_TYPE, FOLLOWER_RELATION_TYPE
from databuilder.models.table_metadata import TableMetadata
from databuilder.models.table_serializable import TableSerializable
from databuilder.models.user import User
class Follower(GraphSerializable, TableSerializable):
LABELS_PERMITTED_TO_HAVE_FOLLOWER = ['Table', 'Dashboard']
def __init__(
self,
start_label: str,
start_key: str,
follower_emails: Union[List, str],
) -> None:
if start_label not in Follower.LABELS_PERMITTED_TO_HAVE_FOLLOWER:
raise Exception(f'followers for {start_label} are not supported')
self.start_label = start_label
self.start_key = start_key
if isinstance(follower_emails, str):
follower_emails = follower_emails.split(',')
self.follower_emails = [
email.strip().lower() for email in follower_emails
]
self._node_iter = self._create_node_iterator()
self._relation_iter = self._create_relation_iterator()
self._record_iter = self._create_record_iterator()
def __repr__(self) -> str:
return f'follower({self.start_label!r}, {self.start_key!r}, {self.follower_emails!r})'
def create_next_node(self) -> Optional[GraphNode]:
try:
return next(self._node_iter)
except StopIteration:
return None
def create_next_relation(self) -> Optional[GraphRelationship]:
try:
return next(self._relation_iter)
except StopIteration:
return None
def create_next_record(self) -> Union[RDSModel, None]:
try:
return next(self._record_iter)
except StopIteration:
return None
def _create_node_iterator(self) -> Iterator[GraphNode]:
for email in self.follower_emails:
if email:
yield GraphNode(key=User.get_user_model_key(email=email),
label=User.USER_NODE_LABEL,
attributes={
User.USER_NODE_EMAIL: email,
})
def _create_relation_iterator(self) -> Iterator[GraphRelationship]:
for email in self.follower_emails:
if email:
yield GraphRelationship(
start_label=self.start_label,
start_key=self.start_key,
end_label=User.USER_NODE_LABEL,
end_key=User.get_user_model_key(email=email),
type=FOLLOWER_RELATION_TYPE,
reverse_type=FOLLOWER_OF_OBJECT_RELATION_TYPE,
attributes={})
def _create_record_iterator(self) -> Iterator[RDSModel]:
for email in self.follower_emails:
if email:
user_record = RDSUser(rk=User.get_user_model_key(email=email),
email=email)
yield user_record
if self.start_label == TableMetadata.TABLE_NODE_LABEL:
yield RDSTableFollower(
table_rk=self.start_key,
user_rk=User.get_user_model_key(email=email),
)
else:
raise Exception(
f'{self.start_label}<>follower relationship is not table serializable'
)
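# Illustrative usage sketch (not part of the original module; the table key below
# is only an example of Amundsen's key format):
#
#   follower = Follower(
#       start_label='Table',
#       start_key='hive://gold.test_schema/test_table',
#       follower_emails='user1@example.com, user2@example.com',
#   )
#   node = follower.create_next_node()          # one GraphNode per follower email
#   relation = follower.create_next_relation()  # Table <-> User follow relationship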
|
py | 1a4f7f139f62a29117b372bd7c14f20761be4989 | #!/usr/bin/env python
# coding=utf-8
import asyncio
import aiohttp
from .config import HEADERS, REQUEST_TIMEOUT, REQUEST_DELAY
async def _get_page(url, sleep):
"""
获取并返回网页内容
"""
async with aiohttp.ClientSession() as session:
try:
await asyncio.sleep(sleep)
async with session.get(
url, headers=HEADERS, timeout=REQUEST_TIMEOUT
) as resp:
return await resp.text()
except:
return ""
def requests(url, sleep=REQUEST_DELAY):
"""
请求方法,用于获取网页内容
:param url: 请求链接
:param sleep: 延迟时间(秒)
"""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
html = loop.run_until_complete(asyncio.gather(_get_page(url, sleep)))
loop.close()
if html:
return "".join(html)
|
py | 1a4f7f2c26636c142682b64fff446bec3bbdb675 | """
Organisation voicemail settings
"""
from typing import Optional
from ..api_child import ApiChild
from ..base import ApiModel
__all__ = ['OrganisationVoicemailSettings', 'OrganisationVoicemailSettingsAPI']
class OrganisationVoicemailSettings(ApiModel):
"""
voicemail settings for and organization.
"""
#: When enabled, you can set the deletion conditions for expired messages.
message_expiry_enabled: bool
#: Number of days after which messages expire.
number_of_days_for_message_expiry: int
#: When enabled, all read and unread voicemail messages will be deleted based on the time frame you set. When
#: disabled, all unread voicemail messages will be kept.
strict_deletion_enabled: Optional[bool]
#: When enabled, people in the organization can configure the email forwarding of voicemails.
voice_message_forwarding_enabled: Optional[bool]
@staticmethod
def default() -> 'OrganisationVoicemailSettings':
return OrganisationVoicemailSettings(message_expiry_enabled=False,
number_of_days_for_message_expiry=15,
strict_deletion_enabled=False,
voice_message_forwarding_enabled=False)
class OrganisationVoicemailSettingsAPI(ApiChild, base='telephony/config/voicemail/settings'):
"""
API for Organisation voicemail settings
"""
def read(self, *, org_id: str = None) -> OrganisationVoicemailSettings:
"""
Get Voicemail Settings
Retrieve the organization's voicemail settings.
Organizational voicemail settings determines what voicemail features a person can configure and automatic
message expiration.
Retrieving organization's voicemail settings requires a full, user or read-only administrator auth token with
a scope of spark-admin:telephony_config_read.
:param org_id: Retrieve voicemail settings for this organization.
:type org_id: str
:return: VM settings
:rtype: OrganisationVoicemailSettings
"""
params = org_id and {'orgId': org_id} or None
url = self.ep()
return OrganisationVoicemailSettings.parse_obj(self.get(url, params=params))
def update(self, *, settings: OrganisationVoicemailSettings, org_id: str = None):
"""
Update the organization's voicemail settings.
Organizational voicemail settings determines what voicemail features a person can configure and automatic
message expiration.
Updating organization's voicemail settings requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param settings: new settings
:type settings: OrganisationVoicemailSettings
:param org_id: Update voicemail settings for this organization.
:type org_id: str
"""
params = org_id and {'orgId': org_id} or None
url = self.ep()
data = settings.json()
self.put(url, data=data, params=params)
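# Illustrative usage sketch (assumption: ``vm_api`` is an
# OrganisationVoicemailSettingsAPI instance created by the parent SDK client with
# a valid admin token):
#
#   settings = vm_api.read()
#   settings.number_of_days_for_message_expiry = 30
#   vm_api.update(settings=settings)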
|
py | 1a4f7fb2536bc8773bdece08659be3ab42528392 | from setuptools import setup, find_packages
setup(
name="flexible-data-parser",
description="A configurable semi-structured data parser",
long_description=open("readme.md").read(),
url="https://github.com/glitchassassin/flexible-data-parser",
author="Jon Winsley",
author_email="[email protected]",
license="MIT",
version="v1.0",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Healthcare Industry",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
keywords="data parse",
packages=find_packages()
) |
py | 1a4f7fcd2c758a6f7cd2513b55c58946236e6602 | # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from concurrent import futures
from collections import namedtuple
import copy
import logging
import sys
import threading
from s3transfer.compat import MAXINT
from s3transfer.compat import six
from s3transfer.exceptions import CancelledError, TransferNotDoneError
from s3transfer.utils import FunctionContainer
from s3transfer.utils import TaskSemaphore
logger = logging.getLogger(__name__)
class TransferFuture(object):
def __init__(self, meta=None, coordinator=None):
"""The future associated to a submitted transfer request
:type meta: TransferMeta
:param meta: The metadata associated to the request. This object
is visible to the requester.
:type coordinator: TransferCoordinator
:param coordinator: The coordinator associated to the request. This
object is not visible to the requester.
"""
self._meta = meta
if meta is None:
self._meta = TransferMeta()
self._coordinator = coordinator
if coordinator is None:
self._coordinator = TransferCoordinator()
@property
def meta(self):
"""The metadata associated to the TransferFuture"""
return self._meta
def done(self):
"""Determines if a TransferFuture has completed
:returns: True if completed. False, otherwise.
"""
return self._coordinator.done()
def result(self):
"""Waits until TransferFuture is done and returns the result
If the TransferFuture succeeded, it will return the result. If the
TransferFuture failed, it will raise the exception associated to the
failure.
"""
try:
# Usually the result() method blocks until the transfer is done,
            # however if a KeyboardInterrupt is raised we want to exit
            # out of this and propagate the exception.
return self._coordinator.result()
except KeyboardInterrupt as e:
self.cancel()
raise e
def cancel(self):
"""Cancels the request associated with the TransferFuture"""
self._coordinator.cancel()
def set_exception(self, exception):
"""Sets the exception on the future."""
if not self.done():
raise TransferNotDoneError(
'set_exception can only be called once the transfer is '
'complete.')
self._coordinator.set_exception(exception, override=True)
class TransferMeta(object):
"""Holds metadata about the TransferFuture"""
def __init__(self, call_args=None, transfer_id=None):
self._call_args = call_args
self._transfer_id = transfer_id
self._size = None
self._user_context = {}
@property
def call_args(self):
"""The call args used in the transfer request"""
return self._call_args
@property
def transfer_id(self):
"""The unique id of the transfer"""
return self._transfer_id
@property
def size(self):
"""The size of the transfer request if known"""
return self._size
@property
def user_context(self):
"""A dictionary that requesters can store data in"""
return self._user_context
def provide_transfer_size(self, size):
"""A method to provide the size of a transfer request
By providing this value, the TransferManager will not try to
call HeadObject or use the use OS to determine the size of the
transfer.
"""
self._size = size
class TransferCoordinator(object):
"""A helper class for managing TransferFuture"""
def __init__(self, transfer_id=None):
self.transfer_id = transfer_id
self._status = 'not-started'
self._result = None
self._exception = None
self._associated_futures = set()
self._failure_cleanups = []
self._done_callbacks = []
self._done_event = threading.Event()
self._lock = threading.Lock()
self._associated_futures_lock = threading.Lock()
self._done_callbacks_lock = threading.Lock()
self._failure_cleanups_lock = threading.Lock()
def __repr__(self):
return '%s(transfer_id=%s)' % (
self.__class__.__name__, self.transfer_id)
@property
def exception(self):
return self._exception
@property
def associated_futures(self):
"""The list of futures associated to the inprogress TransferFuture
Once the transfer finishes this list becomes empty as the transfer
is considered done and there should be no running futures left.
"""
with self._associated_futures_lock:
# We return a copy of the list because we do not want to
# processing the returned list while another thread is adding
# more futures to the actual list.
return copy.copy(self._associated_futures)
@property
def failure_cleanups(self):
"""The list of callbacks to call when the TransferFuture fails"""
return self._failure_cleanups
@property
def status(self):
"""The status of the TransferFuture
The currently supported states are:
* not-started - Has yet to start. If in this state, a transfer
can be canceled immediately and nothing will happen.
* queued - SubmissionTask is about to submit tasks
* running - Is inprogress. In-progress as of now means that
the SubmissionTask that runs the transfer is being executed. So
there is no guarantee any transfer requests had been made to
S3 if this state is reached.
* cancelled - Was cancelled
* failed - An exception other than CancelledError was thrown
* success - No exceptions were thrown and is done.
"""
return self._status
def set_result(self, result):
"""Set a result for the TransferFuture
Implies that the TransferFuture succeeded. This will always set a
result because it is invoked on the final task where there is only
        ever one final task and it is run at the very end of a transfer
        process. So if a result is being set for this final task, the transfer
        succeeded even if something came along and canceled the transfer
on the final task.
"""
with self._lock:
self._exception = None
self._result = result
self._status = 'success'
def set_exception(self, exception, override=False):
"""Set an exception for the TransferFuture
Implies the TransferFuture failed.
:param exception: The exception that cause the transfer to fail.
:param override: If True, override any existing state.
"""
with self._lock:
if not self.done() or override:
self._exception = exception
self._status = 'failed'
def result(self):
"""Waits until TransferFuture is done and returns the result
If the TransferFuture succeeded, it will return the result. If the
TransferFuture failed, it will raise the exception associated to the
failure.
"""
# Doing a wait() with no timeout cannot be interrupted in python2 but
# can be interrupted in python3 so we just wait with the largest
        # possible integer value, which is on the scale of billions of
# years...
self._done_event.wait(MAXINT)
# Once done waiting, raise an exception if present or return the
# final result.
if self._exception:
raise self._exception
return self._result
def cancel(self, msg='', exc_type=CancelledError):
"""Cancels the TransferFuture
:param msg: The message to attach to the cancellation
:param exc_type: The type of exception to set for the cancellation
"""
with self._lock:
if not self.done():
should_announce_done = False
logger.debug('%s cancel(%s) called', self, msg)
self._exception = exc_type(msg)
if self._status == 'not-started':
should_announce_done = True
self._status = 'cancelled'
if should_announce_done:
self.announce_done()
def set_status_to_queued(self):
"""Sets the TransferFutrue's status to running"""
self._transition_to_non_done_state('queued')
def set_status_to_running(self):
"""Sets the TransferFuture's status to running"""
self._transition_to_non_done_state('running')
def _transition_to_non_done_state(self, desired_state):
with self._lock:
if self.done():
raise RuntimeError(
'Unable to transition from done state %s to non-done '
'state %s.' % (self.status, desired_state))
self._status = desired_state
def submit(self, executor, task, tag=None):
"""Submits a task to a provided executor
:type executor: s3transfer.futures.BoundedExecutor
:param executor: The executor to submit the callable to
:type task: s3transfer.tasks.Task
:param task: The task to submit to the executor
:type tag: s3transfer.futures.TaskTag
:param tag: A tag to associate to the submitted task
:rtype: concurrent.futures.Future
:returns: A future representing the submitted task
"""
logger.debug(
"Submitting task %s to executor %s for transfer request: %s." % (
task, executor, self.transfer_id)
)
future = executor.submit(task, tag=tag)
# Add this created future to the list of associated future just
# in case it is needed during cleanups.
self.add_associated_future(future)
future.add_done_callback(
FunctionContainer(self.remove_associated_future, future))
return future
def done(self):
"""Determines if a TransferFuture has completed
:returns: False if status is equal to 'failed', 'cancelled', or
'success'. True, otherwise
"""
return self.status in ['failed', 'cancelled', 'success']
def add_associated_future(self, future):
"""Adds a future to be associated with the TransferFuture"""
with self._associated_futures_lock:
self._associated_futures.add(future)
def remove_associated_future(self, future):
"""Removes a future's association to the TransferFuture"""
with self._associated_futures_lock:
self._associated_futures.remove(future)
def add_done_callback(self, function, *args, **kwargs):
"""Add a done callback to be invoked when transfer is done"""
with self._done_callbacks_lock:
self._done_callbacks.append(
FunctionContainer(function, *args, **kwargs)
)
def add_failure_cleanup(self, function, *args, **kwargs):
"""Adds a callback to call upon failure"""
with self._failure_cleanups_lock:
self._failure_cleanups.append(
FunctionContainer(function, *args, **kwargs))
def announce_done(self):
"""Announce that future is done running and run associated callbacks
This will run any failure cleanups if the transfer failed if not
they have not been run, allows the result() to be unblocked, and will
run any done callbacks associated to the TransferFuture if they have
not already been ran.
"""
if self.status != 'success':
self._run_failure_cleanups()
self._done_event.set()
self._run_done_callbacks()
def _run_done_callbacks(self):
# Run the callbacks and remove the callbacks from the internal
# list so they do not get ran again if done is announced more than
# once.
with self._done_callbacks_lock:
self._run_callbacks(self._done_callbacks)
self._done_callbacks = []
def _run_failure_cleanups(self):
# Run the cleanup callbacks and remove the callbacks from the internal
# list so they do not get ran again if done is announced more than
# once.
with self._failure_cleanups_lock:
self._run_callbacks(self.failure_cleanups)
self._failure_cleanups = []
def _run_callbacks(self, callbacks):
for callback in callbacks:
self._run_callback(callback)
def _run_callback(self, callback):
try:
callback()
# We do not want a callback interrupting the process, especially
        # in the failure cleanups. So log and catch the exception.
except Exception:
logger.debug("Exception raised in %s." % callback, exc_info=True)
class BoundedExecutor(object):
EXECUTOR_CLS = futures.ThreadPoolExecutor
def __init__(self, max_size, max_num_threads, tag_semaphores=None,
executor_cls=None):
"""An executor implentation that has a maximum queued up tasks
The executor will block if the number of tasks that have been
        submitted and are currently being worked on is past its maximum.
:params max_size: The maximum number of inflight futures. An inflight
future means that the task is either queued up or is currently
being executed. A size of None or 0 means that the executor will
have no bound in terms of the number of inflight futures.
:params max_num_threads: The maximum number of threads the executor
uses.
:type tag_semaphores: dict
:params tag_semaphores: A dictionary where the key is the name of the
tag and the value is the semaphore to use when limiting the
number of tasks the executor is processing at a time.
:type executor_cls: BaseExecutor
        :param executor_cls: The executor class that
            gets bounded by this executor. If None is provided, the
concurrent.futures.ThreadPoolExecutor class is used.
"""
self._max_num_threads = max_num_threads
if executor_cls is None:
executor_cls = self.EXECUTOR_CLS
self._executor = executor_cls(max_workers=self._max_num_threads)
self._semaphore = TaskSemaphore(max_size)
self._tag_semaphores = tag_semaphores
def submit(self, task, tag=None, block=True):
"""Submit a task to complete
:type task: s3transfer.tasks.Task
:param task: The task to run __call__ on
:type tag: s3transfer.futures.TaskTag
:param tag: An optional tag to associate to the task. This
is used to override which semaphore to use.
:type block: boolean
:param block: True if to wait till it is possible to submit a task.
False, if not to wait and raise an error if not able to submit
a task.
        :returns: The future associated with the submitted task
"""
semaphore = self._semaphore
# If a tag was provided, use the semaphore associated to that
# tag.
if tag:
semaphore = self._tag_semaphores[tag]
# Call acquire on the semaphore.
acquire_token = semaphore.acquire(task.transfer_id, block)
# Create a callback to invoke when task is done in order to call
# release on the semaphore.
release_callback = FunctionContainer(
semaphore.release, task.transfer_id, acquire_token)
# Submit the task to the underlying executor.
future = ExecutorFuture(self._executor.submit(task))
# Add the Semaphore.release() callback to the future such that
# it is invoked once the future completes.
future.add_done_callback(release_callback)
return future
def shutdown(self, wait=True):
self._executor.shutdown(wait)
class ExecutorFuture(object):
def __init__(self, future):
"""A future returned from the executor
Currently, it is just a wrapper around a concurrent.futures.Future.
However, this can eventually grow to implement the needed functionality
of concurrent.futures.Future if we move off of the library and not
affect the rest of the codebase.
:type future: concurrent.futures.Future
:param future: The underlying future
"""
self._future = future
def result(self):
return self._future.result()
def add_done_callback(self, fn):
"""Adds a callback to be completed once future is done
        :param fn: A callable that takes no arguments. Note that this is different
            from concurrent.futures.Future.add_done_callback, which requires
a single argument for the future.
"""
        # The done callback for concurrent.futures.Future will always pass
        # the future in as the only argument. So we need to create the
# proper signature wrapper that will invoke the callback provided.
def done_callback(future_passed_to_callback):
return fn()
self._future.add_done_callback(done_callback)
def done(self):
return self._future.done()
class BaseExecutor(object):
"""Base Executor class implementation needed to work with s3transfer"""
def __init__(self, max_workers=None):
pass
def submit(self, fn, *args, **kwargs):
raise NotImplementedError('submit()')
def shutdown(self, wait=True):
raise NotImplementedError('shutdown()')
class NonThreadedExecutor(BaseExecutor):
"""A drop-in replacement non-threaded version of ThreadPoolExecutor"""
def submit(self, fn, *args, **kwargs):
future = NonThreadedExecutorFuture()
try:
result = fn(*args, **kwargs)
future.set_result(result)
except Exception:
e, tb = sys.exc_info()[1:]
logger.debug(
'Setting exception for %s to %s with traceback %s',
future, e, tb
)
future.set_exception_info(e, tb)
return future
def shutdown(self, wait=True):
pass
class NonThreadedExecutorFuture(object):
"""The Future returned from NonThreadedExecutor
Note that this future is **not** thread-safe as it is being used
from the context of a non-threaded environment.
"""
def __init__(self):
self._result = None
self._exception = None
self._traceback = None
self._done = False
self._done_callbacks = []
def set_result(self, result):
self._result = result
self._set_done()
def set_exception_info(self, exception, traceback):
self._exception = exception
self._traceback = traceback
self._set_done()
def result(self, timeout=None):
if self._exception:
six.reraise(
type(self._exception), self._exception, self._traceback)
return self._result
def _set_done(self):
self._done = True
for done_callback in self._done_callbacks:
self._invoke_done_callback(done_callback)
self._done_callbacks = []
def _invoke_done_callback(self, done_callback):
return done_callback(self)
def done(self):
return self._done
def add_done_callback(self, fn):
if self._done:
self._invoke_done_callback(fn)
else:
self._done_callbacks.append(fn)
TaskTag = namedtuple('TaskTag', ['name'])
IN_MEMORY_UPLOAD_TAG = TaskTag('in_memory_upload')
IN_MEMORY_DOWNLOAD_TAG = TaskTag('in_memory_download')
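# Illustrative usage sketch (not part of the original module): bounding the number
# of in-flight tasks to 10 across 2 worker threads. ``some_task`` is assumed to be
# an s3transfer.tasks.Task instance (it must expose a ``transfer_id`` attribute).
#
#   executor = BoundedExecutor(max_size=10, max_num_threads=2)
#   future = executor.submit(some_task)
#   future.result()
#   executor.shutdown()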
|
py | 1a4f80d04d432e4868493ce2df28ca7ccda0d084 | from .starshaped_sample import Starshaped_Sample
from .hybrid_sampling import Hybrid_Sampling |
py | 1a4f81061a75db8ac18bba2be452cb33eb364dc8 | from a10sdk.common.A10BaseClass import A10BaseClass
class SslCert(A10BaseClass):
"""Class Description::
SSL Cert File(enter bulk when import an archive file).
Class ssl-cert supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param pfx_password: {"description": "The password for certificate file (pfx type only)", "format": "string", "minLength": 1, "optional": true, "maxLength": 128, "type": "string"}
:param csr_generate: {"default": 0, "optional": true, "type": "number", "description": "Generate CSR file", "format": "flag"}
:param remote_file: {"optional": true, "type": "string", "description": "profile name for remote url", "format": "url"}
:param use_mgmt_port: {"default": 0, "optional": true, "type": "number", "description": "Use management port as source port", "format": "flag"}
:param period: {"description": "Specify the period in second", "format": "number", "type": "number", "maximum": 31536000, "minimum": 60, "optional": true}
:param certificate_type: {"optional": true, "enum": ["pem", "der", "pfx", "p7b"], "type": "string", "description": "'pem': pem; 'der': der; 'pfx': pfx; 'p7b': p7b; ", "format": "enum"}
:param ssl_cert: {"description": "SSL Cert File(enter bulk when import an archive file)", "format": "string", "minLength": 1, "optional": false, "maxLength": 255, "type": "string"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/import-periodic/ssl-cert/{ssl_cert}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "ssl_cert"]
self.b_key = "ssl-cert"
self.a10_url="/axapi/v3/import-periodic/ssl-cert/{ssl_cert}"
self.DeviceProxy = ""
self.pfx_password = ""
self.csr_generate = ""
self.remote_file = ""
self.use_mgmt_port = ""
self.period = ""
self.certificate_type = ""
self.ssl_cert = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
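# Illustrative usage sketch (not part of the original module; field values are
# assumptions for demonstration only):
#
#   cert = SslCert(ssl_cert="my-cert", certificate_type="pem", period=3600)
#   # cert.a10_url -> "/axapi/v3/import-periodic/ssl-cert/{ssl_cert}"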
|
py | 1a4f81c22d00c17c0b76b75b032d3664b456ca80 | # -*- coding: utf-8 -*-
__title__ = 'stimson-web-scraper'
__author__ = 'Lucas Ou-Yang'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014, Lucas Ou-Yang'
__maintainer__ = "The Stimson Center"
__maintainer_email = "[email protected]"
VIDEOS_TAGS = ['iframe', 'embed', 'object', 'video']
VIDEO_PROVIDERS = ['youtube', 'youtu.be', 'twitch', 'vimeo', 'dailymotion', 'kewego']
class Video(object):
"""Video object
"""
def __init__(self):
# type of embed
# embed, object, iframe
self.embed_type = None
# video provider name
self.provider = None
# width
self.width = None
# height
self.height = None
# embed code
self.embed_code = None
# src
self.src = None
class VideoExtractor(object):
"""Extracts a list of video from Article top node
"""
def __init__(self, config, top_node):
self.config = config
self.parser = self.config.get_parser()
self.top_node = top_node
self.candidates = []
self.movies = []
def get_embed_code(self, node):
return "".join([
line.strip()
for line in self.parser.node_to_string(node).splitlines()])
def get_embed_type(self, node):
return self.parser.get_tag(node)
def get_width(self, node):
return self.parser.get_attribute(node, 'width')
def get_height(self, node):
return self.parser.get_attribute(node, 'height')
def get_src(self, node):
return self.parser.get_attribute(node, 'src')
# noinspection PyMethodMayBeStatic
def get_provider(self, src):
if src:
for provider in VIDEO_PROVIDERS:
if provider in src:
return provider
return None
def get_video(self, node):
"""Create a video object from a video embed
"""
video = Video()
video.embed_code = self.get_embed_code(node)
video.embed_type = self.get_embed_type(node)
video.width = self.get_width(node)
video.height = self.get_height(node)
video.src = self.get_src(node)
video.provider = self.get_provider(video.src)
return video
def get_iframe_tag(self, node):
return self.get_video(node)
# noinspection PyUnusedLocal,PyMethodMayBeStatic
def get_video_tag(self, node):
"""Extract html video tags
"""
return Video()
def get_embed_tag(self, node):
# embed node may have an object node as parent
# in this case we want to retrieve the object node
# instead of the embed
parent = self.parser.get_parent(node)
if parent is not None:
parent_tag = self.parser.get_tag(parent)
if parent_tag == 'object':
return self.get_object_tag(node)
return self.get_video(node)
def get_object_tag(self, node):
        # test if object tag has an embed child
# in this case we want to remove the embed from
# the candidate list to avoid parsing it twice
child_embed_tag = self.parser.get_elements_by_tag(node, 'embed')
if child_embed_tag and child_embed_tag[0] in self.candidates:
self.candidates.remove(child_embed_tag[0])
# get the object source
        # if we don't have a src node don't continue
src_node = self.parser.get_elements_by_tag(
node, tag="param", attr="name", value="movie")
if not src_node:
return None
src = self.parser.get_attribute(src_node[0], "value")
# check provider
provider = self.get_provider(src)
if not provider:
return None
video = self.get_video(node)
video.provider = provider
video.src = src
return video
def get_videos(self):
self.candidates = self.parser.get_elements_by_tags(
self.top_node, VIDEOS_TAGS)
# loop all candidates
# and check if src attribute belongs to a video provider
for candidate in self.candidates:
tag = self.parser.get_tag(candidate)
attr = "get_%s_tag" % tag
if hasattr(self, attr):
movie = getattr(self, attr)(candidate)
if movie is not None and movie.provider is not None:
self.movies.append(movie)
return list(self.movies)
# append movies list to article
# self.article.movies = list(self.movies)
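# Illustrative usage sketch (assumptions: ``config`` is a scraper configuration
# object exposing get_parser() and ``top_node`` is the article's parsed top node):
#
#   extractor = VideoExtractor(config, top_node)
#   for movie in extractor.get_videos():
#       print(movie.provider, movie.src, movie.width, movie.height)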
|
py | 1a4f821ffac9d93ea79dd165c9f8942ae2c1f5f2 | from setuptools import setup, find_packages
from os import path
__version__ = "0.3.15"
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
with open(path.join(here, "requirements.txt"), encoding="utf-8") as f:
dependencies = [line for line in f if line]
setup(
name="office365",
version=__version__,
description="A wrapper around O365 offering subclasses with additional utility methods.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/matthewgdv/office",
license="MIT",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.8",
],
packages=find_packages(exclude=["tests*"]),
install_requires=dependencies,
setup_requires=['setuptools_scm'],
include_package_data=True,
author="Matt GdV",
author_email="[email protected]"
)
|
py | 1a4f837768d2ed1be06d056097165375f11dcd1f | # File: S (Python 2.4)
from SCColorScheme import SCColorScheme
from otp.otpbase import OTPLocalizer
class SCSettings:
def __init__(self, eventPrefix, whisperMode = 0, colorScheme = None, submenuOverlap = OTPLocalizer.SCOsubmenuOverlap, topLevelOverlap = None):
self.eventPrefix = eventPrefix
self.whisperMode = whisperMode
if colorScheme is None:
colorScheme = SCColorScheme()
self.colorScheme = colorScheme
self.submenuOverlap = submenuOverlap
self.topLevelOverlap = topLevelOverlap
|
py | 1a4f8395f38c30bdb68791a6213e01433e530705 | from typing import Optional
from src.models.channel import Channel
from .exceptions import ChannelExistedException, ChannelNotExistException
class ChannelController:
@staticmethod
def get_all_channels():
return Channel.get_all_channels()
@staticmethod
def create_channel(channel: Channel):
if ChannelController.is_channel_existed(channel.id):
raise ChannelExistedException(channel.id)
channel.save()
@staticmethod
def get_channel(id: str) -> Optional[Channel]:
return Channel.get_channel_by_id(id)
@staticmethod
def delete_channel(id: str):
if not ChannelController.is_channel_existed(id):
raise ChannelNotExistException(id)
Channel.delete_by_id(id)
@staticmethod
def is_channel_existed(channel_id) -> bool:
return Channel.get_channel_by_id(channel_id) is not None
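# Illustrative usage sketch (assumption: Channel can be constructed with an ``id``
# field, as suggested by its usage above):
#
#   if not ChannelController.is_channel_existed("general"):
#       ChannelController.create_channel(Channel(id="general"))
#   channel = ChannelController.get_channel("general")
#   ChannelController.delete_channel("general")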
|
py | 1a4f84e2c0fd2f38b17053ff48939b68c496c2fe | import numpy as np
import pytest
import pandas as pd
from pandas.core.internals import BlockManager, SingleBlockManager
from pandas.core.internals.blocks import Block, NonConsolidatableMixIn
class CustomBlock(NonConsolidatableMixIn, Block):
_holder = np.ndarray
def formatting_values(self):
return np.array(["Val: {}".format(i) for i in self.values])
def concat_same_type(self, to_concat, placement=None):
"""
Always concatenate disregarding self.ndim as the values are
always 1D in this custom Block
"""
values = np.concatenate([blk.values for blk in to_concat])
return self.make_block_same_class(
values, placement=placement or slice(0, len(values), 1)
)
@pytest.fixture
def df():
df1 = pd.DataFrame({"a": [1, 2, 3]})
blocks = df1._data.blocks
values = np.arange(3, dtype="int64")
custom_block = CustomBlock(values, placement=slice(1, 2))
blocks = blocks + (custom_block,)
block_manager = BlockManager(blocks, [pd.Index(["a", "b"]), df1.index])
return pd.DataFrame(block_manager)
def test_custom_repr():
values = np.arange(3, dtype="int64")
# series
block = CustomBlock(values, placement=slice(0, 3))
s = pd.Series(SingleBlockManager(block, pd.RangeIndex(3)))
assert repr(s) == "0 Val: 0\n1 Val: 1\n2 Val: 2\ndtype: int64"
# dataframe
block = CustomBlock(values, placement=slice(0, 1))
blk_mgr = BlockManager([block], [["col"], range(3)])
df = pd.DataFrame(blk_mgr)
assert repr(df) == " col\n0 Val: 0\n1 Val: 1\n2 Val: 2"
def test_concat_series():
# GH17728
values = np.arange(3, dtype="int64")
block = CustomBlock(values, placement=slice(0, 3))
s = pd.Series(block, pd.RangeIndex(3), fastpath=True)
res = pd.concat([s, s])
assert isinstance(res._data.blocks[0], CustomBlock)
def test_concat_dataframe(df):
# GH17728
res = pd.concat([df, df])
assert isinstance(res._data.blocks[1], CustomBlock)
def test_concat_axis1(df):
# GH17954
df2 = pd.DataFrame({"c": [0.1, 0.2, 0.3]})
res = pd.concat([df, df2], axis=1)
assert isinstance(res._data.blocks[1], CustomBlock)
|
py | 1a4f86364b78e063b9cc072fd4d614897e9aea0e | from django.urls import path
from graphene_django.views import GraphQLView
from quiz.schema import schema
urlpatterns = [
    # Only a single URL to access GraphQL
path("quiz", GraphQLView.as_view(graphiql=True, schema=schema)),
] |
py | 1a4f866b1210384e2eaff165e5ad4581c1309e7b | # Generated by Django 2.2.6 on 2019-10-14 14:45
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('slug', models.CharField(max_length=255)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Essay',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('parsing_date', models.DateTimeField(auto_created=True)),
('name', models.CharField(max_length=255)),
('slug', models.CharField(max_length=255)),
('description', models.TextField()),
('published', models.DateField()),
('essay_source', models.URLField(max_length=255)),
('cat', models.ManyToManyField(to='catalog.Category')),
('tag', models.ManyToManyField(to='catalog.Tag')),
],
),
]
|
py | 1a4f86b7f32e3b07a7e5b5fd0b929302db924b5c | from typing import Optional
import pytest
from odmantic.field import Field
from odmantic.model import EmbeddedModel, Model
from odmantic.reference import Reference
def test_field_defined_as_primary_key_and_custom_name():
with pytest.raises(
ValueError, match="cannot specify a primary field with a custom key_name"
):
Field(primary_field=True, key_name="not _id")
def test_field_defined_as_primary_key_default_name():
f = Field(primary_field=True)
assert f.key_name == "_id"
def test_field_define_key_as__id_without_setting_as_primary():
with pytest.raises(
ValueError,
match="cannot specify key_name='_id' without defining the field as primary",
):
Field(key_name="_id")
def test_pos_key_name():
class M(Model):
field: int = Field(key_name="alternate_name")
assert +M.field == "alternate_name"
assert ++M.field == "$alternate_name"
def test_unknown_attr_embedded_model():
class E(EmbeddedModel):
...
class M(Model):
field: E
with pytest.raises(AttributeError, match="attribute unknown_attr not found in E"):
M.field.unknown_attr # type: ignore
@pytest.mark.parametrize("operator_name", ("lt", "lte", "gt", "gte", "match"))
def test_reference_field_operator_not_allowed(operator_name: str):
class E(Model):
...
class M(Model):
field: E = Reference()
with pytest.raises(
AttributeError,
match=f"operator {operator_name} not allowed for ODMReference fields",
):
getattr(M.field, operator_name)
def test_field_required_in_doc_without_default():
class M(Model):
field: str
assert M.__odm_fields__["field"].is_required_in_doc()
def test_field_required_in_doc_with_default():
class M(Model):
field: str = Field("hi")
assert not M.__odm_fields__["field"].is_required_in_doc()
def test_field_required_in_doc_implicit_optional_default():
class M(Model):
field: Optional[str]
assert not M.__odm_fields__["field"].is_required_in_doc()
def test_field_required_in_doc_default_factory_disabled():
class M(Model):
field: str = Field(default_factory=lambda: "hi")
assert M.__odm_fields__["field"].is_required_in_doc()
def test_field_required_in_doc_default_factory_enabled():
class M(Model):
field: str = Field(default_factory=lambda: "hi")
class Config:
parse_doc_with_default_factories = True
assert not M.__odm_fields__["field"].is_required_in_doc()
|
py | 1a4f86d2281df2996b23e23c0c6f372145ab96a1 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC HTTP basics."""
from test_framework.test_framework import MonicoinTestFramework
from test_framework.util import assert_equal, str_to_b64str
import http.client
import urllib.parse
class HTTPBasicsTest (MonicoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.supports_cli = False
def setup_network(self):
self.setup_nodes()
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urllib.parse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert b'"error":null' in out1
assert conn.sock is not None #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
        assert b'"error":null' in out1 #must also respond with a correct json-rpc message
assert conn.sock is not None #according to http/1.1 connection must still be open!
conn.close()
#same should be if we add keep-alive because this should be the std. behaviour
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert b'"error":null' in out1
assert conn.sock is not None #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
        assert b'"error":null' in out1 #must also respond with a correct json-rpc message
assert conn.sock is not None #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert b'"error":null' in out1
assert conn.sock is None #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert b'"error":null' in out1
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert b'"error":null' in out1
        assert conn.sock is not None #connection must still be open because monicoind should use keep-alive by default
# Check excessive request size
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
py | 1a4f87cd50f22cdf06f6a9c5a423e4708f123c58 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = ['VpnGatewayRoutePropagation']
class VpnGatewayRoutePropagation(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
route_table_id: Optional[pulumi.Input[str]] = None,
vpn_gateway_id: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Requests automatic route propagation between a VPN gateway and a route table.
> **Note:** This resource should not be used with a route table that has
the `propagating_vgws` argument set. If that argument is set, any route
propagation not explicitly listed in its value will be removed.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.ec2.VpnGatewayRoutePropagation("example",
vpn_gateway_id=aws_vpn_gateway["example"]["id"],
route_table_id=aws_route_table["example"]["id"])
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] route_table_id: The id of the `ec2.RouteTable` to propagate routes into.
:param pulumi.Input[str] vpn_gateway_id: The id of the `ec2.VpnGateway` to propagate routes from.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if route_table_id is None:
raise TypeError("Missing required property 'route_table_id'")
__props__['route_table_id'] = route_table_id
if vpn_gateway_id is None:
raise TypeError("Missing required property 'vpn_gateway_id'")
__props__['vpn_gateway_id'] = vpn_gateway_id
super(VpnGatewayRoutePropagation, __self__).__init__(
'aws:ec2/vpnGatewayRoutePropagation:VpnGatewayRoutePropagation',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
route_table_id: Optional[pulumi.Input[str]] = None,
vpn_gateway_id: Optional[pulumi.Input[str]] = None) -> 'VpnGatewayRoutePropagation':
"""
Get an existing VpnGatewayRoutePropagation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] route_table_id: The id of the `ec2.RouteTable` to propagate routes into.
:param pulumi.Input[str] vpn_gateway_id: The id of the `ec2.VpnGateway` to propagate routes from.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["route_table_id"] = route_table_id
__props__["vpn_gateway_id"] = vpn_gateway_id
return VpnGatewayRoutePropagation(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="routeTableId")
def route_table_id(self) -> pulumi.Output[str]:
"""
The id of the `ec2.RouteTable` to propagate routes into.
"""
return pulumi.get(self, "route_table_id")
@property
@pulumi.getter(name="vpnGatewayId")
def vpn_gateway_id(self) -> pulumi.Output[str]:
"""
The id of the `ec2.VpnGateway` to propagate routes from.
"""
return pulumi.get(self, "vpn_gateway_id")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | 1a4f883ca98a7f2f1862a06d45381e22308e1614 | """Views for observations of categories."""
from django.core.exceptions import PermissionDenied
from django.views.decorators.gzip import gzip_page
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from geokey.core.decorators import handle_exceptions_for_ajax
from geokey.users.models import User
from geokey.projects.models import Project
from geokey.core.exceptions import InputError
from ..renderers.geojson import GeoJsonRenderer
from ..parsers.geojson import GeoJsonParser
from .base import SingleAllContribution
from ..serializers import ContributionSerializer
class GZipView(object):
def dispatch(self, request, *args, **kwargs):
if not hasattr(self, 'META'):
setattr(self, 'META', {})
if request.META.get('HTTP_ACCEPT_ENCODING'):
self.META['HTTP_ACCEPT_ENCODING'] = request.META['HTTP_ACCEPT_ENCODING']
return super(GZipView, self).dispatch(request, *args, **kwargs)
class GeoJsonView(APIView):
renderer_classes = (GeoJsonRenderer,)
parser_classes = (GeoJsonParser,)
class ProjectObservations(GZipView, GeoJsonView):
"""
Public API endpoint to add new contributions to a project
/api/projects/:project_id/contributions
"""
@handle_exceptions_for_ajax
def post(self, request, project_id):
"""
Adds a new contribution to a project
Parameters
----------
request : rest_framework.request.Request
Represents the request
project_id : int
identifies the project in the data base
Returns
-------
        rest_framework.response.Response
Contains the serialised contribution
"""
user = request.user
if user.is_anonymous():
user = User.objects.get(display_name='AnonymousUser')
data = request.data
project = Project.objects.as_contributor(request.user, project_id)
if (not data.get('meta').get('status') == 'draft' and
project.can_moderate(user)):
data['meta']['status'] = 'active'
serializer = ContributionSerializer(
data=data, context={'user': user, 'project': project}
)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
@gzip_page
@handle_exceptions_for_ajax
def get(self, request, project_id):
"""
Handle GET request.
Return a list of all contributions of the project accessible to the
user.
Parameters
----------
request : rest_framework.request.Request
Represents the request.
project_id : int
Identifies the project in the database.
Returns
-------
        rest_framework.response.Response
Contains the serialized contributions.
"""
project = Project.objects.get_single(request.user, project_id)
try:
contributions = project.get_all_contributions(
request.user,
search=request.GET.get('search'),
subset=request.GET.get('subset'),
bbox=request.GET.get('bbox')
).select_related('location', 'creator', 'updator', 'category')
except InputError as e:
return Response(e, status=status.HTTP_406_NOT_ACCEPTABLE)
serializer = ContributionSerializer(
contributions,
many=True,
context={
'user': request.user,
'project': project,
'search': request.GET.get('search'),
'bbox': request.GET.get('bbox')
}
)
return Response(serializer.data, status=status.HTTP_200_OK)
# ############################################################################
#
# SINGLE CONTRIBUTION
#
# ############################################################################
class SingleContributionAPIView(GeoJsonView):
"""
Abstract APIView for handling requests to single observations
"""
def get_and_respond(self, request, observation):
"""
Returns a single contributions
Parameters
----------
request : rest_framework.request.Request
Represents the request
observation : geokey.contributions.models.Observation
Observation to be returned
Returns
-------
        rest_framework.response.Response
Contains the serialised observation
"""
serializer = ContributionSerializer(
observation,
context={'user': request.user, 'project': observation.project}
)
return Response(serializer.data, status=status.HTTP_200_OK)
def update_and_respond(self, request, observation):
"""
Updates and returns a single contributions
Parameters
----------
request : rest_framework.request.Request
Represents the request
observation : geokey.contributions.models.Observation
Observation to be returned
Returns
-------
        rest_framework.response.Response
Contains the updated serialised observation
"""
data = request.data
user = request.user
if user.is_anonymous():
user = User.objects.get(display_name='AnonymousUser')
new_status = None
if data.get('meta') is not None:
new_status = data.get('meta').get('status')
user_can_moderate = observation.project.can_moderate(user)
user_is_owner = (observation.creator == user)
under_review = observation.comments.filter(
review_status='open').exists()
if (new_status is not None and new_status != observation.status):
if not (
(new_status == 'pending' and
(user_is_owner or user_can_moderate)) or
(new_status == 'active' and
observation.status == 'draft' and user_is_owner) or
(new_status == 'active' and
observation.status == 'pending' and user_can_moderate)):
raise PermissionDenied('You are not allowed to update the '
'status of the contribution from "%s" '
'to "%s"' % (
observation.status,
new_status
))
elif not (user_is_owner or user_can_moderate):
            raise PermissionDenied('You are not allowed to update the '
'contribution')
if new_status == 'active' and under_review:
data['meta']['status'] = 'review'
if ((new_status == 'active' and observation.status == 'draft') and
not user_can_moderate):
default_status = observation.category.default_status
data['meta']['status'] = default_status
serializer = ContributionSerializer(
observation,
data=data,
context={'user': user, 'project': observation.project}
)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
def delete_and_respond(self, request, observation):
"""
Deletes a single observation
Parameters
----------
request : rest_framework.request.Request
Represents the request
observation : geokey.contributions.models.Observation
Observation to be deleted
Returns
-------
        rest_framework.response.Response
Empty response indicating successful delete
"""
if (observation.creator == request.user or
observation.project.can_moderate(request.user)):
observation.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
        raise PermissionDenied('You are not allowed to delete this '
'contribution')
class SingleAllContributionAPIView(
SingleAllContribution, SingleContributionAPIView):
"""
Public API endpoint for updating a single observation in a project
/api/projects/:project_id/observations/:observation_id
"""
@handle_exceptions_for_ajax
def get(self, request, project_id, observation_id):
"""
Returns a single contribution
Parameters
----------
request : rest_framework.request.Request
Represents the request
project_id : int
identifies the project in the data base
observation_id : int
identifies the observation in the data base
Returns
-------
        rest_framework.response.Response
Contains the serialised observation
"""
contribution = self.get_contribution(
request.user,
project_id,
observation_id
)
return self.get_and_respond(request, contribution)
@handle_exceptions_for_ajax
def patch(self, request, project_id, observation_id):
"""
Updates and returns a single contribution
Parameters
----------
request : rest_framework.request.Request
Represents the request
project_id : int
identifies the project in the data base
observation_id : int
identifies the observation in the data base
Returns
-------
        rest_framework.response.Response
Contains the updated serialised observation
"""
contribution = self.get_contribution(
request.user,
project_id,
observation_id
)
return self.update_and_respond(request, contribution)
@handle_exceptions_for_ajax
def delete(self, request, project_id, observation_id):
"""
Deletes a single contribution
Parameters
----------
request : rest_framework.request.Request
Represents the request
project_id : int
identifies the project in the data base
observation_id : int
identifies the observation in the data base
Returns
-------
        rest_framework.response.Response
Empty response indicating successful delete
"""
contribution = self.get_contribution(
request.user,
project_id,
observation_id
)
return self.delete_and_respond(request, contribution)
|
py | 1a4f887023fad437132a74dbbf9ffcd935622b83 | #!/usr/local/bin/python
from os import system
from sys import argv
cl = argv[1]
liste = open('/usr/local/share/operator/editor').read()
if "sudo" in argv[1:]:
print("Can't use sudo with operator")
elif ">" in argv[1:]:
print("Can't use > with operator")
elif cl in liste:
    print("Can't use %s with operator!" % argv[1])
else:
cmd = ''
for line in argv[1:]:
cmd += line + " "
system(cmd) |
py | 1a4f88c2822747ab931b60f2082d193506dd24ad | class TypeLibFuncFlags(Enum, IComparable, IFormattable, IConvertible):
"""
Describes the original settings of the FUNCFLAGS in the COM type library from where this method was imported.
enum (flags) TypeLibFuncFlags,values: FBindable (4),FDefaultBind (32),FDefaultCollelem (256),FDisplayBind (16),FHidden (64),FImmediateBind (4096),FNonBrowsable (1024),FReplaceable (2048),FRequestEdit (8),FRestricted (1),FSource (2),FUiDefault (512),FUsesGetLastError (128)
"""
def __eq__(self, *args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
FBindable = None
FDefaultBind = None
FDefaultCollelem = None
FDisplayBind = None
FHidden = None
FImmediateBind = None
FNonBrowsable = None
FReplaceable = None
FRequestEdit = None
FRestricted = None
FSource = None
FUiDefault = None
FUsesGetLastError = None
value__ = None
|
py | 1a4f89c511018443bfd366ba7278f3b194664184 | import unittest
import unittest.mock
import re
from g1.asyncs import agents
from g1.asyncs import kernels
from g1.asyncs.bases import locks
from g1.asyncs.bases import queues
from g1.asyncs.bases import tasks
from g1.asyncs.bases import timers
class SuperviseAgentsTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.main_task = None
self.agent_queue = tasks.CompletionQueue()
self.graceful_exit = locks.Event()
self.signal_queue = queues.Queue()
mock = unittest.mock.patch(agents.__name__ + '.signals').start()
mock.SignalSource().__enter__().get = self.signal_queue.get
self._assert_logs = self.assertLogs(agents.__name__, level='DEBUG')
self.cm = self._assert_logs.__enter__()
def tearDown(self):
unittest.mock.patch.stopall()
self._assert_logs.__exit__(None, None, None)
super().tearDown()
def assert_state(self, closed, queue_size, graceful_exit, log_patterns):
self.assertEqual(self.agent_queue.is_closed(), closed)
self.assertEqual(len(self.agent_queue), queue_size)
self.assertEqual(self.graceful_exit.is_set(), graceful_exit)
message = 'expect patterns %r in %r' % (log_patterns, self.cm.output)
if len(self.cm.output) != len(log_patterns):
self.fail(message)
for log_line, log_pattern in zip(self.cm.output, log_patterns):
if not re.search(log_pattern, log_line):
self.fail(message)
def run_supervisor(self):
self.main_task = tasks.spawn(
agents.supervise_agents(self.agent_queue, self.graceful_exit, 5)
)
kernels.run(timeout=0.01)
@kernels.with_kernel
def test_graceful_exit_by_user(self):
self.graceful_exit.set()
self.run_supervisor()
self.assert_state(True, 0, True, [r'graceful exit: requested by user'])
self.assertIsNone(self.main_task.get_result_nonblocking())
self.assertFalse(tasks.get_all_tasks())
@kernels.with_kernel
def test_signal(self):
self.signal_queue.put_nonblocking(1)
self.run_supervisor()
self.assert_state(True, 0, True, [r'graceful exit: receive signal: 1'])
self.assertIsNone(self.main_task.get_result_nonblocking())
self.assertFalse(tasks.get_all_tasks())
@kernels.with_kernel
def test_repeated_signals(self):
sleep_task = self.agent_queue.spawn(timers.sleep(99))
self.assert_state(False, 1, False, [])
self.signal_queue.put_nonblocking(1)
with self.assertRaises(kernels.KernelTimeout):
self.run_supervisor()
self.assert_state(True, 1, True, [r'graceful exit: receive signal: 1'])
self.signal_queue.put_nonblocking(2)
kernels.run(timeout=1)
self.assert_state(True, 0, True, [r'graceful exit: receive signal: 1'])
with self.assertRaisesRegex(
agents.SupervisorError,
r'receive signal during graceful exit: 2',
):
self.main_task.get_result_nonblocking()
with self.assertRaises(tasks.Cancelled):
sleep_task.get_result_nonblocking()
self.assertFalse(tasks.get_all_tasks())
@kernels.with_kernel
def test_agent_exit(self):
noop_task = self.agent_queue.spawn(noop)
self.assert_state(False, 1, False, [])
self.run_supervisor()
self.assert_state(
True, 0, True, [r'no op', r'graceful exit: agent exit: ']
)
self.assertIsNone(noop_task.get_result_nonblocking())
self.assertFalse(tasks.get_all_tasks())
@kernels.with_kernel
def test_agent_error(self):
raises_task = self.agent_queue.spawn(raises(ValueError('some error')))
self.assert_state(False, 1, False, [])
self.run_supervisor()
self.assert_state(True, 0, False, [])
with self.assertRaisesRegex(
agents.SupervisorError,
r'agent err out: ',
):
self.main_task.get_result_nonblocking()
with self.assertRaisesRegex(ValueError, r'some error'):
raises_task.get_result_nonblocking()
self.assertFalse(tasks.get_all_tasks())
# Make self._assert_logs.__exit__ happy.
agents.LOG.debug('dummy')
@kernels.with_kernel
def test_grace_period_exceeded(self):
self.graceful_exit.set()
sleep_task = self.agent_queue.spawn(timers.sleep(99))
self.assert_state(False, 1, True, [])
self.main_task = tasks.spawn(
agents.supervise_agents(self.agent_queue, self.graceful_exit, 0)
)
kernels.run(timeout=0.01)
self.assert_state(True, 0, True, [r'graceful exit: requested by user'])
with self.assertRaisesRegex(
agents.SupervisorError,
r'grace period exceeded',
):
self.main_task.get_result_nonblocking()
with self.assertRaises(tasks.Cancelled):
sleep_task.get_result_nonblocking()
self.assertFalse(tasks.get_all_tasks())
async def noop():
agents.LOG.debug('no op')
async def raises(exc):
raise exc
if __name__ == '__main__':
unittest.main()
|
py | 1a4f8a8be83188a11a84cf2c602108cfd1b934f5 | # This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by GitHub's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.21 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
from typing import Callable, Dict
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = ""
cfg.versionfile_source = "prefect_email/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen(
[command] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r"\d", r):
continue
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
TAG_PREFIX_REGEX = "*"
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
TAG_PREFIX_REGEX = r"\*"
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s%s" % (tag_prefix, TAG_PREFIX_REGEX),
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
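# Illustrative sketch, not part of the generated versioneer file: a worked
# example of the "local version identifier" format described in the
# render_pep440 docstring above. The sample tag, distance and short hash
# below are hypothetical values.
_EXAMPLE_PIECES = {"closest-tag": "1.2.3", "distance": 4, "short": "abc1234", "dirty": True}
_EXAMPLE_RENDERED = render_pep440(_EXAMPLE_PIECES)  # -> "1.2.3+4.gabc1234.dirty"
# i.e. the tag, then "+<commits since tag>.g<short hash>", then ".dirty" for a
# modified working tree.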
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver):
"""Split pep440 version string at the post-release segment.
Returns the release segments before the post-release and the
post-release version number (or -1 if no post-release segment is present).
"""
vc = str.split(ver, ".post")
return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"])
else:
rendered += ".post0.dev%d" % (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
|
py | 1a4f8af6d1907eead43ec30e445e923e6e4a3fd7 | #coding=utf-8
from facebook.modules.profile.user.models import TestUser
from facebook.graph import GraphAPIError
from django.utils import simplejson
class TestUsers(object):
def __init__(self, graph):
self.graph = graph
# Friend requests need user access token
def update_access_token(self, access_token):
self.graph.access_token = access_token
def generate_new_test_user(self, installed=True, permissions=[]):
response = self.graph.request('%s/accounts/test-users' % self.graph.app_id, None,
{'installed': installed, 'permissions': ', '.join(permissions) })
user = TestUser()
user.save_from_facebook(response, app_id=self.graph.app_id)
return user
def get_test_users(self, login_url_required=False):
""" users is a dict array with the fields access_token, login_url and id. """
response = self.graph.request('%s/accounts/test-users' % self.graph.app_id,
{'access_token': self.graph.access_token })['data']
users=[]
for item in response:
# Facebook sometimes does not deliver a login-url. Ignore those users.
try:
testuser, created = TestUser.objects.get_or_create(id=item['id'],
defaults={'id': item['id'], 'login_url': item['login_url'],
'belongs_to': self.graph.app_id,
'_graph': simplejson.dumps(item) })
if created:
testuser.save_from_facebook(item, app_id=self.graph.app_id)
else:
testuser.login_url = item['login_url']
testuser._graph = simplejson.dumps(item)
testuser.save()
users.append(testuser)
except KeyError:
pass
# cleanup db
users_ids=[int(i['id']) for i in response]
testusers = TestUser.objects.select_related(depth=1).filter(belongs_to=self.graph.app_id)
for user in testusers:
if user.id not in users_ids:
user.delete()
elif not user._name and user.access_token:
self.graph.access_token = user.access_token
response = user.get_from_facebook(graph=self.graph, save=True)
return testusers
def friend_request(self, user1, user2):
graph = self.graph
graph.access_token = user1.access_token
return graph.request('%s/friends/%s' % (user1.id, user2.id), None, {})
def make_friends_with(self, user1, user2):
response = []
self.update_access_token(user1.access_token)
try:
response.append(self.friend_request(user1, user2))
except GraphAPIError as error: #No access token if the user is not authorized.
response.append(error)
self.update_access_token(user2.access_token)
try:
response.append(self.friend_request(user2, user1))
except GraphAPIError as error:
response.append(error)
return response
def unfriend(self, user1, user2):
pass
|
py | 1a4f8b521328036322ea78c88c72b5d686a708f6 | '''
Created by auto_sdk on 2020.01.14
'''
from dingtalk.api.base import RestApi
class OapiIndustryPackGetRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.industry.pack.get'
|
py | 1a4f8bef75f2d07dc707f7d846a6333472fb9090 | import unittest
from intcode import Intcode
class Hull:
def __init__(self, intc, initial = 0):
self.computer = Intcode(intc, initial)
self.computer.max_outputs = 2
self.computer.robot = self
self.x = 0
self.y = 0
self.dir = 'up' # down right left
        self.map = {}  # maps (x, y) pairs to colors; the origin is at (0, 0)
self.computer.process()
def interpret(self, outputs):
color, direction = outputs
self.paint(color)
self.turn_and_move(direction)
return self.getcolor()
def paint(self, color):
self.map[(self.x, self.y)] = color
def turn_and_move(self, direction):
'''
direction the robot should turn: 0 means it should turn left 90 degrees, and 1 means it should turn right 90 degrees.
'''
if self.dir == 'up':
self.dir = 'left' if direction == 0 else 'right'
elif self.dir == 'down':
self.dir = 'right' if direction == 0 else 'left'
elif self.dir == 'left':
self.dir = 'down' if direction == 0 else 'up'
elif self.dir == 'right':
self.dir = 'up' if direction == 0 else 'down'
if self.dir == 'up':
self.y -=1
elif self.dir == 'down':
self.y +=1
elif self.dir == 'right':
self.x +=1
elif self.dir == 'left':
self.x -=1
def getcolor(self):
try:
return self.map[(self.x, self.y)]
except KeyError:
return 0
def showmap(self):
l = [str() for _ in range(6)]
for k, v in sorted(self.map.items()):
x, y = k
l[y] += '.' if v == 0 else '#'
return '\n'.join(l)
if __name__ == '__main__':
input_data = [3,8,1005,8,318,1106,0,11,0,0,0,104,1,104,0,3,8,102,-1,8,10,1001,10,1,10,4,10,1008,8,1,10,4,10,101,0,8,29,1,107,12,10,2,1003,8,10,3,8,102,-1,8,10,1001,10,1,10,4,10,1008,8,0,10,4,10,1002,8,1,59,1,108,18,10,2,6,7,10,2,1006,3,10,3,8,1002,8,-1,10,1001,10,1,10,4,10,1008,8,1,10,4,10,1002,8,1,93,1,1102,11,10,3,8,102,-1,8,10,1001,10,1,10,4,10,108,1,8,10,4,10,101,0,8,118,2,1102,10,10,3,8,102,-1,8,10,101,1,10,10,4,10,1008,8,0,10,4,10,101,0,8,145,1006,0,17,1006,0,67,3,8,1002,8,-1,10,101,1,10,10,4,10,1008,8,0,10,4,10,101,0,8,173,2,1109,4,10,1006,0,20,3,8,102,-1,8,10,1001,10,1,10,4,10,108,0,8,10,4,10,102,1,8,201,3,8,1002,8,-1,10,1001,10,1,10,4,10,1008,8,0,10,4,10,1002,8,1,224,1006,0,6,1,1008,17,10,2,101,5,10,3,8,1002,8,-1,10,1001,10,1,10,4,10,108,1,8,10,4,10,1001,8,0,256,2,1107,7,10,1,2,4,10,2,2,12,10,1006,0,82,3,8,1002,8,-1,10,1001,10,1,10,4,10,1008,8,1,10,4,10,1002,8,1,294,2,1107,2,10,101,1,9,9,1007,9,988,10,1005,10,15,99,109,640,104,0,104,1,21102,1,837548352256,1,21102,335,1,0,1105,1,439,21102,1,47677543180,1,21102,346,1,0,1106,0,439,3,10,104,0,104,1,3,10,104,0,104,0,3,10,104,0,104,1,3,10,104,0,104,1,3,10,104,0,104,0,3,10,104,0,104,1,21102,1,235190374592,1,21101,393,0,0,1105,1,439,21102,3451060455,1,1,21102,404,1,0,1105,1,439,3,10,104,0,104,0,3,10,104,0,104,0,21102,837896909668,1,1,21102,1,427,0,1105,1,439,21102,1,709580555020,1,21102,438,1,0,1105,1,439,99,109,2,21201,-1,0,1,21102,1,40,2,21102,1,470,3,21102,460,1,0,1106,0,503,109,-2,2105,1,0,0,1,0,0,1,109,2,3,10,204,-1,1001,465,466,481,4,0,1001,465,1,465,108,4,465,10,1006,10,497,1101,0,0,465,109,-2,2105,1,0,0,109,4,1201,-1,0,502,1207,-3,0,10,1006,10,520,21101,0,0,-3,21202,-3,1,1,22101,0,-2,2,21101,1,0,3,21101,0,539,0,1106,0,544,109,-4,2105,1,0,109,5,1207,-3,1,10,1006,10,567,2207,-4,-2,10,1006,10,567,21202,-4,1,-4,1105,1,635,22101,0,-4,1,21201,-3,-1,2,21202,-2,2,3,21101,0,586,0,1105,1,544,22102,1,1,-4,21102,1,1,-1,2207,-4,-2,10,1006,10,605,21102,1,0,-1,22202,-2,-1,-2,2107,0,-3,10,1006,10,627,21202,-1,1,1,21101,627,0,0,105,1,502,21202,-2,-1,-2,22201,-4,-2,-4,109,-5,2105,1,0]
robot = Hull(input_data)
#print(robot.computer.count_inputs)
#print(robot.computer.outputlist)
#print(robot.map)
print(len(robot.map.keys()))
robot = Hull(input_data, initial = 1)
print(robot.showmap())
|
py | 1a4f8bfd09d033ec64bc5dbc7c59391aa2346961 | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import time
import weakref
from typing import Dict, List, Optional
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.utils.events import EventStorage, get_event_storage
from detectron2.utils.logger import _log_api_usage
__all__ = ["HookBase", "TrainerBase", "SimpleTrainer", "AMPTrainer"]
class HookBase:
"""
Base class for hooks that can be registered with :class:`TrainerBase`.
Each hook can implement 4 methods. The way they are called is demonstrated
in the following snippet:
::
hook.before_train()
for iter in range(start_iter, max_iter):
hook.before_step()
trainer.run_step()
hook.after_step()
iter += 1
hook.after_train()
Notes:
1. In the hook method, users can access ``self.trainer`` to access more
properties about the context (e.g., model, current iteration, or config
if using :class:`DefaultTrainer`).
2. A hook that does something in :meth:`before_step` can often be
implemented equivalently in :meth:`after_step`.
If the hook takes non-trivial time, it is strongly recommended to
implement the hook in :meth:`after_step` instead of :meth:`before_step`.
The convention is that :meth:`before_step` should only take negligible time.
Following this convention will allow hooks that do care about the difference
between :meth:`before_step` and :meth:`after_step` (e.g., timer) to
function properly.
"""
trainer: "TrainerBase" = None
"""
A weak reference to the trainer object. Set by the trainer when the hook is registered.
"""
def before_train(self):
"""
Called before the first iteration.
"""
pass
def after_train(self):
"""
Called after the last iteration.
"""
pass
def before_step(self):
"""
Called before each iteration.
"""
pass
def after_step(self):
"""
Called after each iteration.
"""
pass
def state_dict(self):
"""
Hooks are stateless by default, but can be made checkpointable by
implementing `state_dict` and `load_state_dict`.
"""
return {}
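# Illustrative sketch, not part of the original module: a minimal hook that
# follows the lifecycle documented in HookBase above. The class name and the
# scalar key are hypothetical; the code only assumes the `time` import at the
# top of this file and EventStorage.put_scalar from detectron2's event storage.
class _ExampleIterationTimerHook(HookBase):
    """Records the wall-clock duration of each iteration into the event storage."""
    def before_step(self):
        # Keep before_step negligible, per the HookBase convention.
        self._step_start = time.perf_counter()
    def after_step(self):
        # ``self.trainer`` is the weak proxy set by ``register_hooks``.
        self.trainer.storage.put_scalar(
            "example/iter_time_seconds", time.perf_counter() - self._step_start
        )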
class TrainerBase:
"""
Base class for iterative trainer with hooks.
The only assumption we made here is: the training runs in a loop.
A subclass can implement what the loop is.
We made no assumptions about the existence of dataloader, optimizer, model, etc.
Attributes:
iter(int): the current iteration.
start_iter(int): The iteration to start with.
By convention the minimum possible value is 0.
max_iter(int): The iteration to end training.
storage(EventStorage): An EventStorage that's opened during the course of training.
"""
def __init__(self) -> None:
self._hooks: List[HookBase] = []
self.iter: int = 0
self.start_iter: int = 0
self.max_iter: int
self.storage: EventStorage
_log_api_usage("trainer." + self.__class__.__name__)
def register_hooks(self, hooks: List[Optional[HookBase]]) -> None:
"""
Register hooks to the trainer. The hooks are executed in the order
they are registered.
Args:
hooks (list[Optional[HookBase]]): list of hooks
"""
hooks = [h for h in hooks if h is not None]
for h in hooks:
assert isinstance(h, HookBase)
# To avoid circular reference, hooks and trainer cannot own each other.
# This normally does not matter, but will cause memory leak if the
# involved objects contain __del__:
# See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
h.trainer = weakref.proxy(self)
self._hooks.extend(hooks)
def train(self, start_iter: int, max_iter: int):
"""
Args:
start_iter, max_iter (int): See docs above
"""
logger = logging.getLogger(__name__)
logger.info("Starting training from iteration {}".format(start_iter))
self.iter = self.start_iter = start_iter
self.max_iter = max_iter
with EventStorage(start_iter) as self.storage:
try:
self.before_train()
for self.iter in range(start_iter, max_iter):
self.before_step()
self.run_step()
self.after_step()
# self.iter == max_iter can be used by `after_train` to
# tell whether the training successfully finished or failed
# due to exceptions.
self.iter += 1
except Exception:
logger.exception("Exception during training:")
raise
finally:
self.after_train()
def before_train(self):
for h in self._hooks:
h.before_train()
def after_train(self):
self.storage.iter = self.iter
for h in self._hooks:
h.after_train()
def before_step(self):
# Maintain the invariant that storage.iter == trainer.iter
# for the entire execution of each step
self.storage.iter = self.iter
for h in self._hooks:
h.before_step()
def after_step(self):
for h in self._hooks:
h.after_step()
def run_step(self):
raise NotImplementedError
def state_dict(self):
ret = {"iteration": self.iter}
hooks_state = {}
for h in self._hooks:
sd = h.state_dict()
if sd:
name = type(h).__qualname__
if name in hooks_state:
# TODO handle repetitive stateful hooks
continue
hooks_state[name] = sd
if hooks_state:
ret["hooks"] = hooks_state
return ret
def load_state_dict(self, state_dict):
logger = logging.getLogger(__name__)
self.iter = state_dict["iteration"]
for key, value in state_dict.get("hooks", {}).items():
for h in self._hooks:
try:
name = type(h).__qualname__
except AttributeError:
continue
if name == key:
h.load_state_dict(value)
break
else:
logger.warning(f"Cannot find the hook '{key}', its state_dict is ignored.")
class SimpleTrainer(TrainerBase):
"""
A simple trainer for the most common type of task:
single-cost single-optimizer single-data-source iterative optimization,
optionally using data-parallelism.
It assumes that every step, you:
1. Compute the loss with data from the data_loader.
2. Compute the gradients with the above loss.
3. Update the model with the optimizer.
All other tasks during training (checkpointing, logging, evaluation, LR schedule)
are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`.
If you want to do anything fancier than this,
either subclass TrainerBase and implement your own `run_step`,
or write your own training loop.
"""
def __init__(self, model, data_loader, optimizer):
"""
Args:
model: a torch Module. Takes data from data_loader and returns a
dict of losses.
data_loader: an iterable. Contains data to be used to call model.
optimizer: a torch optimizer.
"""
super().__init__()
"""
We set the model to training mode in the trainer.
However it's valid to train a model that's in eval mode.
If you want your model (or a submodule of it) to behave
like evaluation during training, you can overwrite its train() method.
"""
model.train()
self.model = model
self.data_loader = data_loader
self._data_loader_iter = iter(data_loader)
self.optimizer = optimizer
def run_step(self):
"""
Implement the standard training logic described above.
"""
assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
start = time.perf_counter()
"""
If you want to do something with the data, you can wrap the dataloader.
"""
data = next(self._data_loader_iter)
data_time = time.perf_counter() - start
"""
If you want to do something with the losses, you can wrap the model.
"""
loss_dict = self.model(data)
if isinstance(loss_dict, torch.Tensor):
losses = loss_dict
loss_dict = {"total_loss": loss_dict}
else:
losses = sum(loss_dict.values())
"""
If you need to accumulate gradients or do something similar, you can
wrap the optimizer with your custom `zero_grad()` method.
"""
self.optimizer.zero_grad()
losses.backward()
self._write_metrics(loss_dict, data_time)
"""
If you need gradient clipping/scaling or other processing, you can
wrap the optimizer with your custom `step()` method. But it is
suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4
"""
self.optimizer.step()
def _write_metrics(
self,
loss_dict: Dict[str, torch.Tensor],
data_time: float,
prefix: str = "",
):
"""
Args:
loss_dict (dict): dict of scalar losses
data_time (float): time taken by the dataloader iteration
"""
metrics_dict = {k: v.detach().cpu().item() for k, v in loss_dict.items()}
metrics_dict["data_time"] = data_time
# Gather metrics among all workers for logging
# This assumes we do DDP-style training, which is currently the only
# supported method in detectron2.
all_metrics_dict = comm.gather(metrics_dict)
if comm.is_main_process():
storage = get_event_storage()
# data_time among workers can have high variance. The actual latency
# caused by data_time is the maximum among workers.
data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
storage.put_scalar("data_time", data_time)
# average the remaining metrics
metrics_dict = {
k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()
}
total_losses_reduced = sum(metrics_dict.values())
if not np.isfinite(total_losses_reduced):
raise FloatingPointError(
f"Loss became infinite or NaN at iteration={self.iter}!\n"
f"loss_dict = {metrics_dict}"
)
storage.put_scalar("{}total_loss".format(prefix), total_losses_reduced)
if len(metrics_dict) > 1:
storage.put_scalars(**metrics_dict)
def state_dict(self):
ret = super().state_dict()
ret["optimizer"] = self.optimizer.state_dict()
return ret
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self.optimizer.load_state_dict(state_dict["optimizer"])
class AMPTrainer(SimpleTrainer):
"""
Like :class:`SimpleTrainer`, but uses PyTorch's native automatic mixed precision
in the training loop.
"""
def __init__(self, model, data_loader, optimizer, grad_scaler=None):
"""
Args:
model, data_loader, optimizer: same as in :class:`SimpleTrainer`.
grad_scaler: torch GradScaler to automatically scale gradients.
"""
unsupported = "AMPTrainer does not support single-process multi-device training!"
if isinstance(model, DistributedDataParallel):
assert not (model.device_ids and len(model.device_ids) > 1), unsupported
assert not isinstance(model, DataParallel), unsupported
super().__init__(model, data_loader, optimizer)
if grad_scaler is None:
from torch.cuda.amp import GradScaler
grad_scaler = GradScaler()
self.grad_scaler = grad_scaler
def run_step(self):
"""
Implement the AMP training logic.
"""
assert self.model.training, "[AMPTrainer] model was changed to eval mode!"
assert torch.cuda.is_available(), "[AMPTrainer] CUDA is required for AMP training!"
from torch.cuda.amp import autocast
start = time.perf_counter()
data = next(self._data_loader_iter)
data_time = time.perf_counter() - start
with autocast():
loss_dict = self.model(data)
if isinstance(loss_dict, torch.Tensor):
losses = loss_dict
loss_dict = {"total_loss": loss_dict}
else:
losses = sum(loss_dict.values())
self.optimizer.zero_grad()
self.grad_scaler.scale(losses).backward()
self._write_metrics(loss_dict, data_time)
self.grad_scaler.step(self.optimizer)
self.grad_scaler.update()
def state_dict(self):
ret = super().state_dict()
ret["grad_scaler"] = self.grad_scaler.state_dict()
return ret
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self.grad_scaler.load_state_dict(state_dict["grad_scaler"])
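# A minimal wiring sketch (illustrative; ``model``, ``data_loader`` and ``optimizer``
# are placeholders supplied by the caller, not names defined in this file):
#
#   trainer = SimpleTrainer(model, data_loader, optimizer)
#   trainer.register_hooks([_IterTimerSketchHook()])   # e.g. the sketch hook above
#   trainer.train(start_iter=0, max_iter=1000)
#
# AMPTrainer is a drop-in replacement when mixed-precision training is wanted.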
|
py | 1a4f8c7cc3e46821d4851ac8ea8b89921eaf6357 | #!/usr/bin/env python
# Valid arguments
setValueArgs = {"-ld" : "LD",
"-ldflags" : "LDFLAGS",
"-optldflags" : "OPTLDFLAGS",
"-impfilename" : "IMPFILENAME",
"-expflag" : "EXPFLAG",
"-maxlinelength" : "maxlinelength",
"-verbose" : "chatty",
"-directory" : "CHDIR" }
validArgs = setValueArgs.keys()
# Little function to generate a usage statement.
def printUsage():
print "Usage: generateImportFile <target lib base name> [object/lib files ...] [-option=value ...]"
# Helper to execute the given command string in the host OS.
def execCommand(command, chatty):
if chatty:
print command
import os
os.system(command)
# Default values for the important vars
LD = "xlC_r"
LDFLAGS = "-G -qmkshrobj"
OPTLDFLAGS = ""
EXPFLAG = "-qexpfile"
maxlinelength = 4095
OBJS = ""
chatty = None
import sys
nargs = len(sys.argv)
if nargs < 2:
printUsage()
exit(1)
# Get the name of the lib target.
LIBTARGET = sys.argv[1]
if LIBTARGET[-3:] != ".so":
LIBTARGET += ".so"
# The default name for the export and import files we'll be generating.
EXPFILENAME = LIBTARGET[:-3] + ".exp"
IMPFILENAME = LIBTARGET[:-3] + ".imp"
CHDIR = "."
# Parse optional arguments
for iarg in xrange(2, nargs):
arg = sys.argv[iarg]
# Is this a -thingy=thingyValue argument?
if arg[0] == "-":
assert arg.count("=") == 1
argKey, argValue = arg.split("=")
if argKey not in validArgs:
printUsage()
exit(1)
exec("%s = '%s'" % (setValueArgs[argKey], argValue))
print "Setting " + setValueArgs[argKey] + " to " + argValue
# Is this an object file/lib that needs to be linked in?
elif arg[-2:] == ".o" or arg[-2:] == ".a" or arg[-3:] == ".so":
OBJS += arg + " "
else:
printUsage()
exit(1)
import os
os.chdir(CHDIR)
# Generate the export file by creating the shared obj library, which we then
# promptly delete.
command = LD + " " + LDFLAGS + " " + OPTLDFLAGS + " " + \
EXPFLAG + "=" + EXPFILENAME + " " + \
OBJS + " -o " + LIBTARGET
execCommand(command, chatty)
execCommand("rm -f " + LIBTARGET, chatty)
# Create the import file, and put the required tag as the first line indicating
# the name of the archive which is exporting these symbols.
impfile = open(IMPFILENAME, 'w')
impfile.write("#!" + LIBTARGET + "\n")
# Attach the list of symbols being exported to the import file.
expfile = open(EXPFILENAME, 'r')
symbol = expfile.readline()
while symbol:
if len(symbol) <= maxlinelength:
if symbol[-1] != '\n':
symbol += '\n'
impfile.write(symbol)
else:
print 'skipping', str(len(symbol)) + '-character symbol:',str(symbol)
symbol = expfile.readline()
impfile.close()
expfile.close()
#command = "cat " + EXPFILENAME + " >> " + IMPFILENAME
#execCommand(command, chatty)
# Remove the export file.
execCommand("rm -f " + EXPFILENAME, chatty)
|
py | 1a4f8ca35666697928a6be0b3ddb291125a8a512 | import asyncio
from ...exceptions import NodeJSNotRunning
from ...exceptions import NoMtProtoClientSet
from ...exceptions import NotInGroupCallError
from ...scaffold import Scaffold
from ...types import NotInGroupCall
from ...types.session import Session
class ResumeStream(Scaffold):
async def resume_stream(
self,
chat_id: int,
):
"""Resume the paused stream
This method allows resuming a previously paused stream
Parameters:
chat_id (``int``):
Unique identifier (int) of the target chat.
Raises:
NoMtProtoClientSet: In case you try
to call this method without any MtProto client
NodeJSNotRunning: In case you try
to call this method without calling
:meth:`~pytgcalls.PyTgCalls.start` first
NotInGroupCallError: In case you try
to interact with a non-joined group call
Returns:
``bool``:
On success, True is returned if the stream was resumed
Example:
.. code-block:: python
:emphasize-lines: 10-12
from pytgcalls import PyTgCalls
from pytgcalls import idle
...
app = PyTgCalls(client)
app.start()
... # Call API methods
app.resume_stream(
-1001185324811,
)
idle()
"""
if self._app is not None:
if self._wait_until_run is not None:
solver_id = Session.generate_session_id(24)
async def internal_sender():
if not self._wait_until_run.done():
await self._wait_until_run
await self._binding.send({
'action': 'resume',
'chat_id': chat_id,
'solver_id': solver_id,
})
active_call = self._call_holder.get_active_call(chat_id)
asyncio.ensure_future(internal_sender())
result = await self._wait_result.wait_future_update(
solver_id,
)
if isinstance(result, NotInGroupCall):
raise NotInGroupCallError()
return active_call.status == 'paused'
else:
raise NodeJSNotRunning()
else:
raise NoMtProtoClientSet()
|
py | 1a4f8d423f3a743758cc9bfadd4d2cfcf32b546d | import functools
import operator
import os
import os.path
import sys
import numpy as np
# Bamboo utilities
current_file = os.path.realpath(__file__)
print("Current file:",current_file, __file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.
# Data
np.random.seed(201910244)
_num_samples = 128
_sample_size = 64
_sample_dims = (2,2,8)
_sample_size = functools.reduce(operator.mul, _sample_dims)
_samples = np.random.normal(size=(_num_samples,_sample_size)).astype(np.float32)
# Sample access functions
def get_sample(index):
return _samples[index,:]
def num_samples():
return _num_samples
def sample_dims():
return (_sample_size,)
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann):
"""Construct LBANN experiment.
Args:
lbann (module): Module for LBANN Python frontend
"""
mini_batch_size = num_samples() // 2
trainer = lbann.Trainer(mini_batch_size)
model = construct_model(lbann)
data_reader = construct_data_reader(lbann)
optimizer = lbann.NoOptimizer()
return trainer, model, data_reader, optimizer
def construct_model(lbann):
"""Construct LBANN model.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Input data
# Note: Sum with a weights layer so that gradient checking will
# verify that error signals are correct.
x_weights = lbann.Weights(optimizer=lbann.SGD(),
initializer=lbann.ConstantInitializer(value=0.0),
name='input_weights')
x = lbann.Sum(lbann.Reshape(lbann.Input(data_field='samples'),
dims=tools.str_list(_sample_dims)),
lbann.WeightsLayer(weights=x_weights,
dims=tools.str_list(_sample_dims)))
x_lbann = x
# Objects for LBANN model
obj = []
metrics = []
callbacks = []
# ------------------------------------------
# Data-parallel layout
# ------------------------------------------
# LBANN implementation
x = x_lbann
y = lbann.Identity(x, data_layout='data_parallel')
slice_points = (0, 4, 8)
x_slice = lbann.Slice(x, axis=2, slice_points=tools.str_list(slice_points),parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':True})
branch1 = lbann.Identity(x_slice, data_layout='data_parallel',parallel_strategy = {'sub_branch_tag':1,'enable_subgraph':True})
branch2 = lbann.Identity(x_slice, data_layout='data_parallel',parallel_strategy = {'sub_branch_tag':2,'enable_subgraph':True})
branch1 = lbann.L2Norm2(branch1)
branch2 = lbann.L2Norm2(branch2)
sum_branch = lbann.Sum([branch1,branch2],parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':True})
z = lbann.Identity(sum_branch)
obj.append(z)
metrics.append(lbann.Metric(z, name='data-parallel layout'))
# NumPy implementation
vals = []
for i in range(num_samples()):
x = get_sample(i).reshape(_sample_dims).astype(np.float64)
y = []
cross_sum = 0
for j in range(len(slice_points)-1):
x_slice = x[:,:,slice_points[j]:slice_points[j+1]]
x_l2 = tools.numpy_l2norm2(x_slice)
if(j==0):
cross_sum = x_l2
else:
cross_sum += x_l2
z = cross_sum
vals.append(z)
val = np.mean(vals)
tol = 8 * val * np.finfo(np.float32).eps
callbacks.append(lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test'))
# ------------------------------------------
# Gradient checking
# ------------------------------------------
callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))
# ------------------------------------------
# Construct model
# ------------------------------------------
num_epochs = 0
return lbann.Model(num_epochs,subgraph_communication=lbann.SubgraphCommunication.COLL_OPT,
layers=lbann.traverse_layer_graph(x_lbann),
objective_function=obj,
metrics=metrics,
callbacks=callbacks)
def construct_data_reader(lbann):
"""Construct Protobuf message for Python data reader.
The Python data reader will import the current Python file to
access the sample access functions.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Note: The training data reader should be removed when
# https://github.com/LLNL/lbann/issues/1098 is resolved.
message = lbann.reader_pb2.DataReader()
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'train'
)
])
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'test'
)
])
return message
# ==============================================
# Setup PyTest
# ==============================================
# Create test functions that can interact with PyTest
for _test_func in tools.create_tests(setup_experiment, __file__):
globals()[_test_func.__name__] = _test_func
|
py | 1a4f8e0bfa9c25bd65e196679007efd1b52ad005 | # Development-stage settings file
"""
Django settings for meiduo_mall project.
Generated by 'django-admin startproject' using Django 1.11.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os, sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(BASE_DIR, 'apps')) # also add apps/ to the project's import search path
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#lx2s*0y_37hs9zlz!7ji)a9b@o$j8gcbi%jcts_3nny*a%gu7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# Registered applications
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users.apps.UsersConfig', # users module app
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'meiduo_mall.urls'
# Template configuration
TEMPLATES = [
{
'BACKEND': 'django.template.backends.jinja2.Jinja2', # Jinja2 template engine
'DIRS': [os.path.join(BASE_DIR, 'templates')], # template load path
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
# Supplemental Jinja2 template engine environment
'environment': 'meiduo_mall.utils.jinja2_env.jinja2_environment',
},
},
]
WSGI_APPLICATION = 'meiduo_mall.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# MySQL database configuration
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # database engine
'HOST': '127.0.0.1', # database host
'PORT': 3306, # database port
'USER': 'yy', # database username
'PASSWORD': '123456', # database password
'NAME': 'meiduo_mall' # database name
},
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
# URL prefix for accessing static files
STATIC_URL = '/static/'
# Static file load paths
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
# Redis database configuration
CACHES = {
"default": { # default
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/0",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
"session": { # session
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
"verify_code": { # image captcha codes
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/2",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
}
# Session configuration
SESSION_ENGINE = "django.contrib.sessions.backends.cache" # cache-backed sessions
SESSION_CACHE_ALIAS = "session" # cached in Redis database 1
# Logging configuration
LOGGING = {
'version': 1, # version
'disable_existing_loggers': False, # whether to disable existing loggers
'formatters': { # log message display formats
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(lineno)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(module)s %(lineno)d %(message)s'
},
},
'filters': { # log filters
'require_debug_true': { # only emit logs when Django runs in debug mode
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': { # log handlers
'console': { # log to the terminal
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'file': { # log to a file
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(os.path.dirname(BASE_DIR), 'logs/meiduo.log'), # log file location
'maxBytes': 300 * 1024 * 1024,
'backupCount': 10,
'formatter': 'verbose'
},
},
'loggers': { # loggers
'django': { # defines a logger named "django"
'handlers': ['console', 'file'], # log to both the terminal and the file
'propagate': True, # whether to propagate log records
'level': 'INFO', # lowest log level accepted by this logger
},
}
}
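# Illustrative usage of the logger configured above (message text is arbitrary):
#   import logging
#   logger = logging.getLogger('django')
#   logger.info('written to both the console and logs/meiduo.log')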
# Replace the user model used by Django's authentication system
AUTH_USER_MODEL = 'users.User'
|
py | 1a4f8f5ceeb40ea924aafa0e58ee9cabb6416e47 | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import division
from builtins import object
import ngraph as ng
class Rectlin(object):
"""
Rectified Linear Unit (ReLu) activation function, :math:`f(x) = \max(x, 0)`.
Can optionally set a slope which will make this a Leaky ReLu.
"""
def __init__(self, slope=0, **kwargs):
"""
Class constructor.
Arguments:
slope (float, optional): Slope for negative domain. Defaults to 0.
"""
self.slope = slope
def __call__(self, x):
"""
Returns the Rectified Linear activation
Arguments:
x (Tensor or optree): Input value
Returns:
Tensor or optree: output activation
"""
return ng.maximum(x, 0) + self.slope * ng.minimum(0, x)
class Rectlinclip(object):
"""
Clipped ReLu activation function
Computes the function :math:`f(x) = min(max(0, x),cutoff)`
"""
def __init__(self, slope=0, cutoff=20.0, name=None):
self.cutoff = cutoff
self.slope = slope
def __call__(self, x):
"""
Returns the Clipped Rectified Linear activation
Arguments:
x (Tensor or optree): Input value
Returns:
Tensor or optree: output activation
"""
return ng.minimum(ng.maximum(x, 0) + self.slope * ng.minimum(x, 0),
self.cutoff)
class Identity(object):
"""Identity activation function, :math:`f(x) = x`"""
def __call__(self, x):
"""
Returns the input as output.
Arguments:
x (Tensor or optree): input value
Returns:
Tensor or optree: identical to input
"""
return x
class Explin(object):
"""
Exponential Linear activation function, :math:`f(x) = \max(x, 0) + \\alpha (e^{\min(x, 0)}-1)`
From: Clevert, Unterthiner and Hochreiter, ICLR 2016.
"""
def __init__(self, alpha=1.0):
"""
Class constructor.
Arguments:
alpha (float): weight of exponential factor for negative values (default: 1.0).
"""
self.alpha = alpha
def __call__(self, x):
"""
Returns the Exponential Linear activation
Arguments:
x (Tensor or optree): input value
Returns:
Tensor or optree: output activation
"""
return ng.maximum(x, 0) + self.alpha * (ng.exp(ng.minimum(x, 0)) - 1)
class Normalizer(object):
"""Normalize inputs by a fixed divisor."""
def __init__(self, divisor=128.):
"""
Class constructor.
Arguments:
divisor (float, optional): Normalization factor (default: 128)
"""
self.divisor = divisor
def __call__(self, x):
"""
Returns the normalized value.
Arguments:
x (Tensor or optree): Input value
Returns:
Tensor or optree: Output :math:`x / N`
"""
return x / self.divisor
class Softmax(object):
"""SoftMax activation function. Ensures that the activation output sums to 1."""
def __call__(self, x):
"""
Returns the Softmax value.
Arguments:
x (Tensor or optree): Input value
Returns:
Tensor or optree: Output activation
"""
return ng.softmax(x)
class Tanh(object):
"""Hyperbolic tangent activation function, :math:`f(x) = \\tanh(x)`."""
def __call__(self, x):
"""
Returns the hyperbolic tangent.
Arguments:
x (Tensor or optree): Input value
Returns:
Tensor or optree: Output activation
"""
return ng.tanh(x)
class Logistic(object):
"""
Logistic sigmoid activation function, :math:`f(x) = 1 / (1 + \exp(-x))`
Squashes the input from range :math:`[-\infty,+\infty]` to :math:`[0, 1]`
"""
def __call__(self, x):
"""
Returns the sigmoidal activation.
Arguments:
x (Tensor or optree): Input value
Returns:
Tensor or optree: Output activation
"""
return ng.sigmoid(x)
|
py | 1a4f8f9eb2c6c0d214e04412d4777cbf2ef03c94 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:kerasexa
Description: Keras example
https://keras.io/getting-started/sequential-model-guide/
Email : [email protected]
Date:2018/1/1
"""
import keras
import numpy as np
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.optimizers import SGD
# Generate dummy data
x_train = np.random.random((1000, 20))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
x_test = np.random.random((100, 20))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
model = Sequential()
# Dense(64) is a fully-connected layer with 64 hidden units.
# in the first layer, you must specify the expected input data shape:
# here, 20-dimensional vectors.
model.add(Dense(64, activation='relu', input_dim=20))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.fit(x_train, y_train, epochs=200, batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
print(score)
|
py | 1a4f90304d2d0481ad79a455e4a29ea99c786de9 | #
# Django documentation on forms:
# https://docs.djangoproject.com/fr/2.0/topics/db/models/
#
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from core.models import Comment, ContactMessage
class ContactForm(forms.ModelForm):
"""
Form for the contact page
"""
class Meta:
model = ContactMessage
fields = ["first_name", "last_name", "email", "subject", "message"]
widgets = {
"first_name": forms.TextInput(attrs={"class": "form-control", "placeholder": "Prénom"}),
"last_name": forms.TextInput(attrs={"class": "form-control", "placeholder": "Nom"}),
"email": forms.EmailInput(attrs={"class": "form-control", "placeholder": "[email protected]", "value": ""}),
"subject": forms.TextInput(attrs={"class": "form-control", "placeholder": "Sujet"}),
"message": forms.Textarea(
attrs={"class": "form-control", "placeholder": "Entrez votre commentaire ici...", "rows": 3})
}
class LoginForm(AuthenticationForm):
"""
Form for the login page
"""
username = forms.CharField(
widget=forms.TextInput(attrs={"class": "form-control", "placeholder": "Nom d'utilisateur"}), max_length=30,
label="Nom d'utilisateur")
password = forms.CharField(
widget=forms.PasswordInput(attrs={"class": "form-control", "placeholder": "Mot de passe"}),
label="Mot de passe")
remember_me = forms.BooleanField(widget=forms.CheckboxInput(attrs={"class": "custom-control-input"}),
label="Se souvenir de moi", required=False)
class RegisterForm(forms.ModelForm):
"""
Form for the registration page
"""
class Meta:
model = User
fields = ["username", "email", "password"]
widgets = {
"username": forms.TextInput(attrs={"class": "form-control", "placeholder": "Nom d'utilisateur"}),
"email": forms.EmailInput(attrs={"class": "form-control", "placeholder": "[email protected]"}),
"password": forms.PasswordInput(attrs={"class": "form-control", "placeholder": "Mot de passe"})
}
email_confirm = forms.EmailField(
widget=forms.EmailInput(attrs={"class": "form-control", "placeholder": "[email protected]"}), max_length=100,
label="Confirmer l'adresse e-mail")
password_confirm = forms.CharField(
widget=forms.PasswordInput(attrs={"class": "form-control", "placeholder": "Mot de passe"}),
label="Confirmer le mot de passe")
terms = forms.BooleanField(widget=forms.CheckboxInput(attrs={"class": "custom-control-input"}))
def clean_email_confirm(self):
"""
Check that the email field and its confirmation field are valid and consistent.
Adds an error if the "email" and "email_confirm" fields do not contain the same value.
"""
email = self.cleaned_data.get("email")
email_confirm = self.cleaned_data.get("email_confirm")
if email and email_confirm and email_confirm != email:
self.add_error(
"email_confirm",
"L'email ne correspond pas"
)
return email_confirm
def clean_password_confirm(self):
"""
Check that the password field and its confirmation field are valid and consistent.
Adds an error if the "password" and "password_confirm" fields do not contain the same value.
"""
password = self.cleaned_data.get("password")
password_confirm = self.cleaned_data.get("password_confirm")
if password and password_confirm and password_confirm != password:
self.add_error(
"password_confirm",
"Le mot de passe ne correspond pas"
)
return password_confirm
def save(self, commit=True):
"""
Override of the "save" method.
Retrieves the User object via the "save" method without committing the data, sets the user's
password, saves the data and returns the User object.
Using the "set_password" method to set a User object's password is mandatory: the password is
hashed so that its value is not readable in the database; see the documentation on password
hashing: https://docs.djangoproject.com/fr/2.0/topics/auth/passwords/
Django documentation on the "save" method:
https://docs.djangoproject.com/fr/2.0/topics/forms/modelforms/#the-save-method
"""
user = super(RegisterForm, self).save(commit=False)
user.set_password(self.cleaned_data["password"])
if commit:
user.save()
return user
class CommentForm(forms.ModelForm):
"""
Form for entering comments
"""
class Meta:
model = Comment
fields = ["content", ]
widgets = {
"content": forms.Textarea(
attrs={"class": "form-control", "placeholder": "Entrez votre commentaire ici...", "rows": 3})
}
class SettingsForm(forms.ModelForm):
"""
Form for the user settings page
"""
class Meta:
model = User
fields = ["username", "first_name", "last_name", "email", "password"]
labels = {
"email": "Nouvelle adresse e-mail",
"password": "Nouveau mot de passe"
}
widgets = {
"first_name": forms.TextInput(attrs={"class": "form-control", "placeholder": "Prénom"}),
"last_name": forms.TextInput(attrs={"class": "form-control", "placeholder": "Nom"}),
"email": forms.EmailInput(attrs={"class": "form-control", "placeholder": "[email protected]", "value": ""}),
"password": forms.PasswordInput(attrs={"class": "form-control", "placeholder": "Mot de passe"})
}
username = forms.CharField(disabled=True)
email_confirm = forms.EmailField(
widget=forms.EmailInput(attrs={"class": "form-control", "placeholder": "[email protected]"}), max_length=100,
label="Confirmer la nouvelle adresse e-mail", required=False)
password_confirm = forms.CharField(
widget=forms.PasswordInput(attrs={"class": "form-control", "placeholder": "Mot de passe"}),
label="Confirmer le nouveau mot de passe", required=False)
password_save = forms.CharField(
widget=forms.PasswordInput(attrs={"class": "form-control", "placeholder": "Mot de passe"}),
label="Mot de passe actuel")
def __init__(self, *args, **kwargs):
super(SettingsForm, self).__init__(*args, **kwargs)
self.fields["password"].required = False
def clean_email_confirm(self):
"""
Check that the email field and its confirmation field are valid and consistent when the email field is set.
Adds an error if the "email" and "email_confirm" fields do not contain the same value.
"""
email = self.cleaned_data.get("email")
email_confirm = self.cleaned_data.get("email_confirm")
if email and email_confirm != email:
self.add_error(
"email_confirm",
"L'email ne correspond pas"
)
return email_confirm
def clean_password_confirm(self):
"""
Check that the password field and its confirmation field are valid and consistent when the password field is set.
Adds an error if the "password" and "password_confirm" fields do not contain the same value.
"""
password = self.cleaned_data.get("password")
password_confirm = self.cleaned_data.get("password_confirm")
if password and password_confirm != password:
self.add_error(
"password_confirm",
"Le mot de passe ne correspond pas"
)
return password_confirm
def clean_password_save(self):
"""
Check that the password field and the user's current password are valid and consistent.
Adds an error if the "password_save" field does not match the user's current password.
"""
if not self.instance.check_password(self.cleaned_data["password_save"]):
self.add_error(
"password_save",
"Le mot de passe ne correspond pas au mot de passe actuel"
)
def save(self, commit=True):
"""
Override of the "save" method.
Retrieves the User object via the "save" method without committing the data, then:
- Sets the User object's "first_name" property if the "first_name" field is set
- Sets the User object's "last_name" property if the "last_name" field is set
- Sets the User object's "email" property if the "email" field is set
- Sets the User object's password if the "password" field is set
- Then saves the data and returns the User object
"""
user = super(SettingsForm, self).save(commit=False)
if self.cleaned_data["first_name"]:
user.save(update_fields=["first_name"])
if self.cleaned_data["last_name"]:
user.save(update_fields=["last_name"])
if self.cleaned_data["email"]:
user.save(update_fields=["email"])
if self.cleaned_data["password"]:
user.set_password(self.cleaned_data["password"])
user.save(update_fields=["password"])
return user
|
py | 1a4f90b5439fc08c3fb0cb484eaa9385630b8495 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import json
with open('ocrd-tool.json', 'r') as f:
version = json.load(f)['version']
setup(
name='ocrd-anybaseocr',
version=version,
author="DFKI",
author_email="[email protected], [email protected]",
url="https://github.com/OCR-D/ocrd_anybaseocr",
license='Apache License 2.0',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
install_requires=open('requirements.txt').read().split('\n'),
packages=find_packages(exclude=["work_dir", "src"]),
package_data={
'': ['*.json']
},
entry_points={
'console_scripts': [
'ocrd-anybaseocr-binarize = ocrd_anybaseocr.cli.ocrd_anybaseocr_binarize:cli',
'ocrd-anybaseocr-deskew = ocrd_anybaseocr.cli.ocrd_anybaseocr_deskew:cli',
'ocrd-anybaseocr-crop = ocrd_anybaseocr.cli.ocrd_anybaseocr_cropping:cli',
'ocrd-anybaseocr-dewarp = ocrd_anybaseocr.cli.ocrd_anybaseocr_dewarp:cli',
'ocrd-anybaseocr-tiseg = ocrd_anybaseocr.cli.ocrd_anybaseocr_tiseg:cli',
'ocrd-anybaseocr-textline = ocrd_anybaseocr.cli.ocrd_anybaseocr_textline:cli',
'ocrd-anybaseocr-layout-analysis = ocrd_anybaseocr.cli.ocrd_anybaseocr_layout_analysis:cli',
'ocrd-anybaseocr-block-segmentation = ocrd_anybaseocr.cli.ocrd_anybaseocr_block_segmentation:cli'
]
},
)
|
py | 1a4f90bf285eb3425f843a03e75d0f5b9ee8f9a3 | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'noted.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py | 1a4f90d64bace12535bf0e3cdbefe7e98dd12eb9 | from __future__ import print_function
import os
import torch
from torch.utils.ffi import create_extension
sources = ['src/roi_pooling.cpp']
headers = ['src/roi_pooling.h']
defines = []
with_cuda = False
if torch.cuda.is_available():
print('Including CUDA code.')
sources += ['src/roi_pooling_cuda.cpp']
headers += ['src/roi_pooling_cuda.h']
defines += [('WITH_CUDA', None)]
with_cuda = True
this_file = os.path.dirname(os.path.realpath(__file__))
print(this_file)
extra_objects = ['src/roi_pooling.cu.o']
extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
ffi = create_extension(
'_ext.roi_pooling',
headers=headers,
sources=sources,
define_macros=defines,
relative_to=__file__,
with_cuda=with_cuda,
extra_objects=extra_objects,
libraries=["ATen", '_C', 'cudart']
)
if __name__ == '__main__':
ffi.build()
|
py | 1a4f9158f764c26445c111dbcbe27e3dcd750d2d | import os
import torch
import torch.nn.functional as F
import torch.distributed as dist
from torch.autograd import Variable
import numpy as np
# https://github.com/ikostrikov/pytorch-ddpg-naf/blob/master/ddpg.py#L11
def soft_update(target, source, tau):
"""
Perform DDPG soft update (move target params toward source based on weight
factor tau)
Inputs:
target (torch.nn.Module): Net to copy parameters to
source (torch.nn.Module): Net whose parameters to copy
tau (float, 0 < x < 1): Weight factor for update
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
# https://github.com/ikostrikov/pytorch-ddpg-naf/blob/master/ddpg.py#L15
def hard_update(target, source):
"""
Copy network parameters from source to target
Inputs:
target (torch.nn.Module): Net to copy parameters to
source (torch.nn.Module): Net whose parameters to copy
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
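# Illustrative usage (``target_net`` and ``source_net`` are hypothetical, identically
# shaped torch modules):
#   hard_update(target_net, source_net)        # exact copy of all parameters
#   soft_update(target_net, source_net, 0.01)  # one Polyak-averaging step with tau=0.01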
# https://github.com/seba-1511/dist_tuto.pth/blob/gh-pages/train_dist.py
def average_gradients(model):
""" Gradient averaging. """
size = float(dist.get_world_size())
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM, group=0)
param.grad.data /= size
# https://github.com/seba-1511/dist_tuto.pth/blob/gh-pages/train_dist.py
def init_processes(rank, size, fn, backend='gloo'):
""" Initialize the distributed environment. """
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend, rank=rank, world_size=size)
fn(rank, size)
def onehot_from_logits(logits, eps=0.0):
"""
Given batch of logits, return one-hot sample using epsilon greedy strategy
(based on given epsilon)
"""
# get best (according to current policy) actions in one-hot form
argmax_acs = (logits == logits.max(1, keepdim=True)[0]).float()
if eps == 0.0:
return argmax_acs
# get random actions in one-hot form
rand_acs = Variable(torch.eye(logits.shape[1])[[np.random.choice(
range(logits.shape[1]), size=logits.shape[0])]], requires_grad=False)
# chooses between best and random actions using epsilon greedy
return torch.stack([argmax_acs[i] if r > eps else rand_acs[i] for i, r in
enumerate(torch.rand(logits.shape[0]))])
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def sample_gumbel(shape, eps=1e-20, tens_type=torch.FloatTensor):
"""Sample from Gumbel(0, 1)"""
U = Variable(tens_type(*shape).uniform_(), requires_grad=False)
return -torch.log(-torch.log(U + eps) + eps)
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def gumbel_softmax_sample(logits, temperature):
""" Draw a sample from the Gumbel-Softmax distribution"""
gumbSamp = sample_gumbel(logits.shape, tens_type=type(logits.data))
if logits.is_cuda:
gumbSamp = gumbSamp.cuda()
y = logits + gumbSamp
return F.softmax(y / temperature, dim=-1)
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def gumbel_softmax(logits, temperature=1.0, hard=False):
"""Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
temperature: non-negative scalar
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
be a probability distribution that sums to 1 across classes
"""
y = gumbel_softmax_sample(logits, temperature)
if hard:
y_hard = onehot_from_logits(y)
y = (y_hard - y).detach() + y
return y
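# A minimal usage sketch (illustrative, not part of the original module): draws a
# soft sample and a hard straight-through sample from random logits.
if __name__ == "__main__":
    example_logits = Variable(torch.randn(4, 6))
    soft_sample = gumbel_softmax(example_logits, temperature=0.5)             # rows sum to 1
    hard_sample = gumbel_softmax(example_logits, temperature=0.5, hard=True)  # one-hot rows
    print(soft_sample.sum(dim=1))
    print(hard_sample)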
|
py | 1a4f916b1caa1d5626b014450e576f1ec71b6e56 | """
Django settings for test2 project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!8y&(a9+ub0%hta57meh%9-&y5yzl^h7*o*n!joh*0(+$(z^gh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'test2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'test2',
'USER': 'root',
'PASSWORD': 'mysql',
'HOST': '192.168.1.106',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
py | 1a4f91cef8fd73e2b0d105d6e712202d3759b0ca | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
def csp_protected_view(view, info):
"""
A view deriver which adds Content-Security-Policy headers to responses.
By default, a global policy is applied to every view.
Individual views can opt out of CSP altogether by specifying a view option
``csp_insecure_optout=True``. This is not recommended.
"""
if not info.registry.settings.get("csp.enabled", False):
return view
# Views can set ``csp_insecure_optout=True`` in their view options to
# disable CSP for the view.
if info.options.get("csp_insecure_optout"):
return view
policy = info.registry.settings.get("csp", {})
clauses = [
" ".join([directive] + values) for directive, values in sorted(policy.items())
]
header_value = "; ".join(clauses)
if info.registry.settings.get("csp.report_only", False):
header_name = "Content-Security-Policy-Report-Only"
else:
header_name = "Content-Security-Policy"
def wrapper_view(context, request):
resp = view(context, request)
resp.headers[header_name] = header_value
return resp
return wrapper_view
csp_protected_view.options = ("csp_insecure_optout",)
def includeme(config):
config.add_view_deriver(csp_protected_view)
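# Illustrative example (settings values are hypothetical): with
#   "csp.enabled": True
#   "csp": {"default-src": ["'self'"], "script-src": ["'self'", "https://cdn.example.com"]}
# every wrapped response gets the header
#   Content-Security-Policy: default-src 'self'; script-src 'self' https://cdn.example.com
# Directives are emitted in sorted order; setting "csp.report_only" switches to the
# Content-Security-Policy-Report-Only header instead.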
|
py | 1a4f92bb231bdc20b61f3ef44fc4d8bdb48be303 | from fastapi import APIRouter
router = APIRouter()
@router.get("/")
def touch():
return {"status": "ok"}
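# Illustrative wiring (the application object and prefix are hypothetical, not part of
# this module):
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(router, prefix="/health")
# A GET request to /health/ would then return {"status": "ok"}.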
|
py | 1a4f93965a566bd98f889f2affbee7ba6bd82353 | """
This file contains the fundamental BrickBreaker game logic.
"""
import pygame
from pygame.locals import *
from GameElements import Paddle, Ball, Brick, Special, SpecialText, \
SpecialType, to_drop_special, choose_random_special, BOUNCE_OFF_VECTORS
from Player import Player
from LevelGenerator import LevelGenerator
from GameElements import Movement
from enum import Enum
from DatabaseInteract import DatabaseInteract
from GameState import GameState
from Constants import DISPLAY_WIDTH, DISPLAY_HEIGHT, WHITE, BLUE
from UIElement import TextElement
from pygame.sprite import RenderUpdates
from HighscorePage import highscore
import os
DEFAULT_CLOCK_SPEED = 60
CLOCK_SPEED_CHANGE_FACTOR = 1.5
class RectSide(Enum):
""" Enum indicating different sides of a rectangle """
TOP = 0
BOTTOM = 1
LEFT = 3
RIGHT = 3
class CollisionType(Enum):
""" Enum indicating the possible brick collision types """
HORIZONTAL = 0
VERTICAL = 1
class Brickbreaker:
def __init__(self):
"""
description:
- Create a new instance of the Brickbreaker class.
- Initialize all attributes.
"""
self.clock_speed = DEFAULT_CLOCK_SPEED
self.screen = pygame.display.set_mode((DISPLAY_WIDTH, DISPLAY_HEIGHT))
self.bricks = []
self.number_unbreakable_bricks = 0
self.paddle = Paddle()
self.ball = Ball()
self.present_specials = []
self.active_special = None
self.spcl_text = None
pygame.font.init()
self.font = pygame.font.SysFont("Arial", 25)
self.player = Player(current_level=1)
def start_game(self):
"""
description:
- Create new level.
- Position the paddle to the middle of the screen.
- Call method to choose starting angle.
:return: nothing
"""
self.create_blocks()
self.paddle.reset_position()
self.reset_ball()
def reset_ball(self):
"""
description:
- Center the ball over the paddle and give the player the opportunity to choose the initial angle.
- Loop:
- Switch angles using the custom-set left and right keys. The selected angle is displayed.
- Shoot ball using custom set key.
:return: nothing
"""
if not (self.active_special is None):
self.remove_special()
dbi = DatabaseInteract()
sets = dbi.get_settings()
key_left = int(sets[2])
key_right = int(sets[4])
key_shoot = int(sets[9])
self.ball.center_over_paddle(self.paddle.get_center())
vector_indicator_start = (self.ball.form.centerx, self.ball.form.centery - 5)
current_index = int(len(BOUNCE_OFF_VECTORS)/2) - 1
clock = pygame.time.Clock()
vector_selected = False
while not vector_selected:
clock.tick(60)
self.draw_all()
self.draw_start_text()
currently_selected_vector = BOUNCE_OFF_VECTORS[current_index]
events = pygame.event.get()
for event in events:
if event.type == QUIT:
os._exit(1)
if event.type == pygame.KEYDOWN:
if event.key == key_left:
if current_index > 0:
current_index -= 1
elif event.key == key_right:
if current_index < len(BOUNCE_OFF_VECTORS) - 1:
current_index += 1
elif event.key == key_shoot:
self.ball.vector = currently_selected_vector
vector_selected = True
break
elif event.key == pygame.K_ESCAPE:
return GameState.TITLE
vector_indicator_end = (vector_indicator_start[0] + 10 * currently_selected_vector[0],
vector_indicator_start[1] + 10 * currently_selected_vector[1])
pygame.draw.line(self.screen, WHITE, vector_indicator_start, vector_indicator_end, 3)
pygame.display.flip()
def create_blocks(self):
"""
description:
- Create the bricks for the player's current level using the LevelGenerator-Class.
:return: nothing
"""
self.bricks, self.number_unbreakable_bricks = LevelGenerator().create_level(self.player.current_level)
def check_ball_collisions(self):
"""
description:
- Checks all possible collisions that can occur for the ball.
- Bounce off at left, right and top edge.
- Bounce off from paddle using paddle.hitzones' vectors.
- Check for brick collision and delegate handling.
- Check if player dropped the ball.
- if lives drop to 0 --> game over --> save score --> restart
:return:
"""
# collision left or right edge
if self.ball.form.x <= 0 or self.ball.form.x >= DISPLAY_WIDTH:
self.ball.collide_vertical()
if self.ball.form.x <= 0:
self.ball.form.x = 1
else:
self.ball.form.x = DISPLAY_WIDTH - 1
# collision top edge
if self.ball.form.y <= 0:
self.ball.form.y = 1
self.ball.collide_horizontal()
# collision with paddle
for paddle_part in self.paddle.hitzones:
if paddle_part[0].colliderect(self.ball.form):
self.ball.vector = paddle_part[1]
break
# brick collisions
collision_bricks = []
for brick in self.bricks:
if brick.rect.colliderect(self.ball.form):
collision_bricks.append(brick)
if len(collision_bricks) > 0:
self.handle_brick_collisions(collision_bricks)
# collision bottom edge --> lost
if self.ball.form.y > DISPLAY_HEIGHT:
self.player.lives -= 1
if self.player.lives == 0:
highscore(self.screen, self.player.score)
self.player.set_lives()
self.player.score = 0
self.player.current_level = 1
self.start_game()
else:
self.reset_ball()
def check_previously_horizontally_outside(self, brick_rect, horizontal_movement):
"""
description:
- Check whether the ball did not horizontally overlap with the brick currently hit in the previous frame.
- Aligned edges do not count as overlap.
:param brick_rect: pygame.Rect-Object representing the hit brick's position.
:param horizontal_movement: Movement-Enum value indicating left or right movement
:return: true if no overlap, false otherwise
"""
ball_pos_previous = self.ball.get_previous_position()
ball_rect_previous = pygame.Rect(ball_pos_previous[0], ball_pos_previous[1], self.ball.form.width,
self.ball.form.height)
if horizontal_movement == Movement.RIGHT:
return ball_rect_previous.right <= brick_rect.left
else:
return ball_rect_previous.left >= brick_rect.right
def check_previously_vertically_outside(self, brick_rect, vertical_movement):
"""
description:
- Check whether the ball did not vertically overlap with the brick currently hit in the previous frame.
- Aligned edges do not count as overlap.
:param brick_rect: pygame.Rect-Object representing the hit brick's position.
:param vertical_movement: Movement-Enum value indicating up or down movement
:return: true if no overlap, false otherwise
"""
ball_pos_previous = self.ball.get_previous_position()
ball_rect_previous = pygame.Rect(ball_pos_previous[0], ball_pos_previous[1], self.ball.form.width,
self.ball.form.height)
if vertical_movement == Movement.DOWN:
return ball_rect_previous.bottom <= brick_rect.top
else:
return ball_rect_previous.top >= brick_rect.bottom
def handle_brick_collisions(self, collision_bricks):
"""
description:
- Handle the brick-collision based on the number of bricks hit.
- If only one brick was hit: Call function to perform brick collision with determined collision type
- More than one (basically working with the first 2,
edge-case of more than 2 ignored due to unlikelihood and complexity):
- Determine expected collision type based on the relative position of the 2 bricks.
- Determine calculated collision type for 2 bricks.
- Perform brick collision with the brick matching the expected collision type.
- If none matches: choose one (irrelevant for user experience) to perform the brick collision with, using the
expected collision type.
:param collision_bricks: list of Brick-objects hit by the ball
:return: nothing
"""
if len(collision_bricks) == 1:
self.perform_brick_collision(collision_bricks[0], self.determine_collision_type(collision_bricks[0]))
else:
if collision_bricks[0].rect.x == collision_bricks[1].rect.x: # above each other
collision_required = CollisionType.VERTICAL
else: # next to each other
collision_required = CollisionType.HORIZONTAL
brick1_collision = self.determine_collision_type(collision_bricks[0])
brick2_collision = self.determine_collision_type(collision_bricks[1])
if brick1_collision == collision_required:
self.perform_brick_collision(collision_bricks[0], brick1_collision)
elif brick2_collision == collision_required:
self.perform_brick_collision(collision_bricks[1], brick2_collision)
else:
self.perform_brick_collision(collision_bricks[0], collision_required)
def determine_collision_type(self, brick_hit):
"""
description:
- Determine the collision type based on the movement and overlap in the previous frame.
:param brick_hit: Brick-object to determine the theoretical collision type for.
:return: CollisionType-enum value
"""
horizontal_movement = self.ball.get_horizontal_movement()
vertical_movement = self.ball.get_vertical_movement()
previously_horizontally_outside = self.check_previously_horizontally_outside(brick_hit.rect,
horizontal_movement)
previously_vertically_outside = self.check_previously_vertically_outside(brick_hit.rect, vertical_movement)
# neither horizontal nor vertical overlap in the previous frame
# --> compare ratio of horizontal and vertical overlap in the current frame
if previously_horizontally_outside and previously_vertically_outside:
horizontal_delta = (self.ball.form.right - brick_hit.rect.left) if horizontal_movement == Movement.RIGHT \
else (brick_hit.rect.right - self.ball.form.left)
vertical_delta = (self.ball.form.bottom - brick_hit.rect.top) if vertical_movement == Movement.DOWN \
else (brick_hit.rect.bottom - self.ball.form.top)
if horizontal_delta > vertical_delta:
return CollisionType.HORIZONTAL
else:
return CollisionType.VERTICAL
# horizontal overlap but no vertical overlap in the previous frame --> vertical collision
elif previously_horizontally_outside and not previously_vertically_outside:
return CollisionType.VERTICAL
# no horizontal overlap but vertical overlap in the previous frame --> horizontal collision
elif not previously_horizontally_outside and previously_vertically_outside:
return CollisionType.HORIZONTAL
# horizontal overlap and vertical overlap in the previous frame
# --> irrelevant here because the collision would have already happened and been handled in the previous frame.
def perform_brick_collision(self, brick_hit, collision_type):
"""
description:
- Call function to change ball's movement direction based on the collision_type.
- Call Brick's get_hit() function.
- Destroy brick, increase score if brick was destroyed and create a special with a certain probability.
:param brick_hit: Brick-object to perform the collision with
:param collision_type: CollisionType-Enum
:return: nothing
"""
if collision_type == CollisionType.HORIZONTAL:
self.ball.collide_horizontal()
else:
self.ball.collide_vertical()
if brick_hit.get_hit():
self.bricks.remove(brick_hit)
self.player.score += 1
if to_drop_special():
spcl = choose_random_special()
txt = spcl.get_german_name()
self.spcl_text = SpecialText(txt, self.clock_speed)
self.present_specials.append(Special(brick_hit.rect.topleft, spcl))
def check_special_collisions(self):
"""
description:
- Check whether any special (i.e. its rect) currently present on the screen is caught by the paddle.
- To be caught the special has to be completely within the paddle's horizontal width and the paddle's
height.
- Remove active special if new special is caught.
- Activate special on self or paddle based on its type.
- Remove the special from the currently present specials and set self.active_special.
- If special is off screen, remove it.
:return: nothing
"""
        if len(self.present_specials) > 0:
            # iterate over a copy because specials may be removed while iterating
            for special in list(self.present_specials):
if (self.paddle.get_top_edge() < special.rect.bottom <= self.paddle.get_bottom_edge()) \
and self.paddle.get_left_edge() <= special.rect.left \
and self.paddle.get_right_edge() >= special.rect.right:
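                    # only one special can be active at a time; a newly caught one replaces the active one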
if not (self.active_special is None):
self.remove_special()
if special.is_paddle_special():
self.paddle.activate_special(special)
else:
self.activate_special(special)
self.present_specials.remove(special)
self.active_special = special
self.active_special.activate(self.clock_speed)
elif special.rect.top > DISPLAY_HEIGHT:
self.present_specials.remove(special)
def activate_special(self, special):
"""
description:
- Activate a caught non-paddle special.
        - Either add a bonus life or adjust the clock speed, based on special.special_type.
:param special: the caught special
:return: nothing
"""
if special.special_type == SpecialType.BONUS_LIFE:
self.player.lives += 1
elif special.special_type == SpecialType.FASTER:
self.clock_speed = DEFAULT_CLOCK_SPEED * CLOCK_SPEED_CHANGE_FACTOR
elif special.special_type == SpecialType.SLOWER:
self.clock_speed = DEFAULT_CLOCK_SPEED / CLOCK_SPEED_CHANGE_FACTOR
def remove_special(self):
"""
description:
- Remove the currently active special and negate its effect.
        - If is_paddle_special: remove the special from the paddle
- else: reset self.clock_speed
:return: nothing
"""
if self.active_special.is_paddle_special():
self.paddle.remove_special()
else:
self.clock_speed = DEFAULT_CLOCK_SPEED
self.active_special = None
def draw_all(self):
"""
description:
        - Called every tick.
        - Draw the screen with every element.
        :return: nothing
"""
self.screen.fill(BLUE)
for brick in self.bricks:
brick.show_brick(self.screen)
for paddle_part in self.paddle.hitzones:
pygame.draw.rect(self.screen, WHITE, paddle_part[0])
for triangle in self.paddle.triangle_views:
pygame.draw.polygon(self.screen, WHITE, triangle)
for special in self.present_specials:
special.fall()
special.show_special(self.screen)
self.player.draw_lives(self.screen)
pygame.draw.rect(self.screen, WHITE, self.ball.form)
self.screen.blit(self.font.render(str(self.player.score), -1, WHITE), (400, 550))
self.draw_spcl_txt()
def draw_spcl_txt(self):
"""
description:
- Write the type of the special that just dropped to the top of the screen.
:return: nothing
"""
if self.spcl_text is not None:
info = TextElement(
center_position=(590, 10),
font_size=16,
bg_rgb=BLUE,
text_rgb=WHITE,
text=f"Spezial: {self.spcl_text.text} aufgetaucht",
)
elems = RenderUpdates(info)
elems.draw(self.screen)
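            # tick() counts down the text's display time and returns True once it has elapsed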
if self.spcl_text.tick():
self.spcl_text = None
def level_completed(self):
"""
description:
- Called when the player completes a level.
- If level 10 was completed: show Highscore Page
- Else: increase level, add bonus life
        :return: GameState.TITLE after the final level, otherwise nothing
"""
if self.player.current_level == 10:
highscore(self.screen, self.player.score)
return GameState.TITLE
else:
self.player.current_level += 1
self.player.lives += 1
self.start_game()
def pause_elems(self):
"""
description:
        - Create the text elements shown while the game is paused.
:return: elements to be drawn during pause mode
"""
dbi = DatabaseInteract()
sets = dbi.get_settings()
heading = TextElement(
center_position=(400, 400),
font_size=18,
bg_rgb=BLUE,
text_rgb=WHITE,
text=f"Spiel Pausiert, zum Fortsetzen '{sets[5]}' drücken, zum Beenden 'ESC' drücken ",
)
elems = RenderUpdates(heading)
return elems
def draw_start_text(self):
"""
description:
        - Create and draw the text elements shown before the ball is launched.
:return: nothing
"""
dbi = DatabaseInteract()
sets = dbi.get_settings()
key_left = sets[1]
key_right = sets[3]
key_shoot = sets[8]
heading1 = TextElement(
center_position=(400, 400),
font_size=18,
bg_rgb=BLUE,
text_rgb=WHITE,
text=f"Startwinkel mit '{key_left}' und '{key_right}' auswählen",
)
heading2 = TextElement(
center_position=(400, 450),
font_size=18,
bg_rgb=BLUE,
text_rgb=WHITE,
text=f"Mit '{key_shoot}' Ball abschiessen, zum Beenden 'ESC' drücken ",
)
elems = RenderUpdates(heading1,heading2)
elems.draw(self.screen)
def main(self):
"""
description:
- Contains game logic.
- Process game events by calling corresponding functions.
- Update the UI.
- Check whether level was completed.
:return: nothing
"""
clock = pygame.time.Clock()
self.start_game()
dbi = DatabaseInteract()
sets = dbi.get_settings()
key_left = sets[2]
key_right = sets[4]
pause_key = sets[6]
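        # movement and pause keys come from the stored settings row; the numeric indices
        # are assumed to follow the layout used by DatabaseInteract.get_settings()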
while True:
clock.tick(self.clock_speed)
for event in pygame.event.get():
if event.type == QUIT:
os._exit(1)
if event.type == pygame.KEYDOWN:
if event.key == int(pause_key):
elems = self.pause_elems()
game_paused = True
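                    # blocking pause loop: resume on the pause key, quit to the title screen on ESC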
while game_paused:
elems.draw(self.screen)
events = pygame.event.get()
for event in events:
if event.type == QUIT:
os._exit(1)
if event.type == pygame.KEYDOWN:
if event.key == int(pause_key):
game_paused = False
break
elif event.key == pygame.K_ESCAPE:
return GameState.TITLE
pygame.display.update()
keys = pygame.key.get_pressed()
if keys[int(key_left)]:
self.paddle.move(-1)
if keys[int(key_right)]:
self.paddle.move(1)
if keys[pygame.K_ESCAPE]:
return GameState.TITLE
# update ball
self.ball.move()
self.check_ball_collisions()
# update specials
if not (self.active_special is None):
if self.active_special.tick():
self.remove_special()
self.check_special_collisions()
# Update screen
self.draw_all()
pygame.display.flip()
if len(self.bricks) == self.number_unbreakable_bricks:
if self.level_completed() == GameState.TITLE:
return GameState.TITLE
|
py | 1a4f93d7d2b2c0ecf26ced5c1661412d10717425 | import asyncio
import typing as t
from contextlib import asynccontextmanager
from nbclient import NotebookClient
from nbformat import NotebookNode
from nbclient.exceptions import CellExecutionComplete, DeadKernelError, CellControlSignal
from nbclient.util import run_hook
from appyter.ext.asyncio.event_loop import get_event_loop
from appyter.ext.asyncio.helpers import ensure_async
class NotebookClientIOPubHook(NotebookClient):
''' A notebook client with the ability to hook into iopub updates
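  Illustrative usage sketch (assumes ``nb`` is an already-loaded ``nbformat`` notebook and
  that this runs inside an async context; the hook receives the executing cell and its
  index after every iopub message)::

      async def iopub_hook(cell, cell_index):
          # e.g. stream the partially filled cell.outputs somewhere
          print(cell_index, len(cell.get('outputs', [])))

      client = NotebookClientIOPubHook(nb, iopub_hook=iopub_hook)
      async with client.async_setup_kernel():
          for index, cell in enumerate(nb.cells):
              await client.async_execute_cell(cell, index)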
'''
def __init__(self, *args, iopub_hook=None, **kwargs):
super().__init__(*args, **kwargs)
self.iopub_hook = iopub_hook
async def _async_poll_output_msg(
self,
parent_msg_id,
cell,
cell_index
):
assert self.kc is not None
complete = False
while not complete:
msg = await ensure_async(self.kc.iopub_channel.get_msg(timeout=None))
if msg['parent_header'].get('msg_id') == parent_msg_id:
try:
# Will raise CellExecutionComplete when completed
self.process_message(msg, cell, cell_index)
except CellExecutionComplete:
complete = True
finally:
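          # give the hook a chance to observe the (possibly partial) cell outputs
          # after every iopub message, even when processing raised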
if self.iopub_hook is not None:
await self.iopub_hook(cell, cell_index)
def _kc_execute(self, *args, **kwargs):
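    # thin wrapper around kc.execute so it can be passed through ensure_async()
    # in async_execute_cell below (kc.execute is typically a plain synchronous call)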
return self.kc.execute(*args, **kwargs)
async def async_execute_cell(
self,
cell: NotebookNode,
cell_index: int,
execution_count: t.Optional[int] = None,
store_history: bool = True) -> NotebookNode:
"""
Executes a single code cell.
To execute all cells see :meth:`execute`.
Parameters
----------
cell : nbformat.NotebookNode
The cell which is currently being processed.
cell_index : int
The position of the cell within the notebook object.
execution_count : int
The execution count to be assigned to the cell (default: Use kernel response)
store_history : bool
            Determines if history should be stored in the kernel (default: True).
Specific to ipython kernels, which can store command histories.
Raises
------
CellExecutionError
If execution failed and should raise an exception, this will be raised
with defaults about the failure.
Returns
-------
cell : NotebookNode
The cell which was just processed.
"""
assert self.kc is not None
if cell.cell_type != 'code' or not cell.source.strip():
self.log.debug("Skipping non-executing cell %s", cell_index)
return cell
if self.record_timing and 'execution' not in cell['metadata']:
cell['metadata']['execution'] = {}
self.log.debug("Executing cell:\n%s", cell.source)
parent_msg_id = await ensure_async(self._kc_execute)(
cell.source,
store_history=store_history,
stop_on_error=not self.allow_errors
)
# We launched a code cell to execute
self.code_cells_executed += 1
exec_timeout = self._get_timeout(cell)
cell.outputs = []
self.clear_before_next_output = False
task_poll_kernel_alive = asyncio.ensure_future(
self._async_poll_kernel_alive()
)
task_poll_output_msg = asyncio.ensure_future(
self._async_poll_output_msg(parent_msg_id, cell, cell_index)
)
self.task_poll_for_reply = asyncio.ensure_future(
self._async_poll_for_reply(
parent_msg_id, cell, exec_timeout, task_poll_output_msg, task_poll_kernel_alive
)
)
try:
exec_reply = await self.task_poll_for_reply
except asyncio.CancelledError:
# can only be cancelled by task_poll_kernel_alive when the kernel is dead
task_poll_output_msg.cancel()
raise DeadKernelError("Kernel died")
except Exception as e:
# Best effort to cancel request if it hasn't been resolved
try:
# Check if the task_poll_output is doing the raising for us
if not isinstance(e, CellControlSignal):
task_poll_output_msg.cancel()
finally:
raise
if execution_count:
cell['execution_count'] = execution_count
await self._check_raise_for_error(cell, cell_index, exec_reply)
self.nb['cells'][cell_index] = cell
return cell
@asynccontextmanager
async def async_setup_kernel(self, **kwargs) -> t.AsyncGenerator:
"""
Context manager for setting up the kernel to execute a notebook.
This assigns the Kernel Manager (``self.km``) if missing and Kernel Client(``self.kc``).
When control returns from the yield it stops the client's zmq channels, and shuts
down the kernel.
"""
# by default, cleanup the kernel client if we own the kernel manager
# and keep it alive if we don't
cleanup_kc = kwargs.pop('cleanup_kc', self.owns_km)
if self.km is None:
self.km = self.create_kernel_manager()
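    # presumably called for its side effect of initializing the shared appyter event loop;
    # the return value itself is not used below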
loop = get_event_loop()
if not self.km.has_kernel:
await self.async_start_new_kernel(**kwargs)
await self.async_start_new_kernel_client()
try:
yield
except RuntimeError as e:
await run_hook(self.on_notebook_error, notebook=self.nb)
raise e
finally:
if cleanup_kc:
await self._async_cleanup_kernel()
await run_hook(self.on_notebook_complete, notebook=self.nb)
|