filename (string, 13–19 chars) | text (string, 134–1.04M chars)
---|---|
the-stack_0_3321 | from setuptools import find_packages
from setuptools import setup
package_name = 'ament_cppcheck'
setup(
name=package_name,
version='0.8.0',
packages=find_packages(exclude=['test']),
data_files=[
('share/' + package_name, ['package.xml']),
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
],
install_requires=['setuptools'],
zip_safe=True,
author='Dirk Thomas',
author_email='[email protected]',
maintainer='Dirk Thomas',
maintainer_email='[email protected]',
url='https://github.com/ament/ament_lint',
download_url='https://github.com/ament/ament_lint/releases',
keywords=['ROS'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description='Static code analysis on C/C++ code using Cppcheck.',
long_description="""\
The ability to perform static code analysis on C/C++ code using Cppcheck
and generate xUnit test result files.""",
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'ament_cppcheck = ament_cppcheck.main:main',
],
},
)
|
the-stack_0_3322 | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php.
#
# mininode.py - Bitcoin P2P network half-a-node
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of https://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a bitcoin node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# bitcoin/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
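#
# Illustrative usage sketch (not part of the original file): a test typically
# wires these pieces together roughly as follows, assuming a node is listening
# on 127.0.0.1:18444 and `rpc` is an RPC handle for it (NodeConn only stores
# the handle, so None also works here):
#
#     callback = NodeConnCB()
#     callback.create_callback_map()
#     conn = NodeConn('127.0.0.1', 18444, rpc, callback)
#     NetworkThread().start()   # services mininode_socket_map via asyncore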
import struct
import socket
import asyncore
import time
import sys
import random
from binascii import hexlify
from io import BytesIO
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
from pyblake2 import blake2b
try:
import yescrypt
except ImportError as e:
exit("Please run 'sudo pip install https://github.com/wo01/yescrypt_python/archive/master.zip'")
BIP0031_VERSION = 60000
SPROUT_PROTO_VERSION = 170002 # past bip-31 for ping/pong
OVERWINTER_PROTO_VERSION = 170003
SAPLING_PROTO_VERSION = 170006
BLOSSOM_PROTO_VERSION = 170008
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
SPROUT_VERSION_GROUP_ID = 0x00000000
OVERWINTER_VERSION_GROUP_ID = 0x02E7D970
SAPLING_VERSION_GROUP_ID = 0x9023E50A
# No transaction format change in Blossom.
MAX_INV_SZ = 50000
COIN = 100000000 # 1 zec in zatoshis
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
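# Illustrative sketch (assumption: `test_node` is a NodeConnCB subclass that
# records incoming messages on itself): test code should hold mininode_lock
# while touching that shared state, e.g.
#
#     with mininode_lock:
#         received = list(test_node.received_messages)  # hypothetical attribute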
# Serialization/deserialization tools
def bfh(x):
if sys.version_info[0] >= 3:
return bytes.fromhex(x)
else:
return x.decode("hex")
def rev_hex(s):
return bh2u(bfh(s)[::-1])
def bh2u(x):
    return hexlify(x).decode('ascii')
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def nuparams(branch_id, height):
return '-nuparams=%x:%d' % (branch_id, height)
def fundingstream(idx, start_height, end_height, addrs):
return '-fundingstream=%d:%d:%d:%s' % (idx, start_height, end_height, ",".join(addrs))
def ser_compactsize(n):
if n < 253:
return struct.pack("B", n)
elif n < 0x10000:
return struct.pack("<BH", 253, n)
elif n < 0x100000000:
return struct.pack("<BI", 254, n)
return struct.pack("<BQ", 255, n)
def deser_string(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return f.read(nit)
def ser_string(s):
if len(s) < 253:
return struct.pack("B", len(s)) + s
elif len(s) < 0x10000:
return struct.pack("<BH", 253, len(s)) + s
elif len(s) < 0x100000000:
return struct.pack("<BI", 254, len(s)) + s
return struct.pack("<BQ", 255, len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
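# Round-trip property of the two helpers above (illustrative): for any
# 0 <= n < 2**256, uint256_from_str(ser_uint256(n)) == n.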
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
def block_work_from_compact(c):
target = uint256_from_compact(c)
return 2**256 // (target + 1)
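# Example (illustrative): the classic minimum-difficulty compact value
# 0x1d00ffff expands to a target of 0xffff << 208, so
# block_work_from_compact(0x1d00ffff) == 2**256 // ((0xffff << 208) + 1).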
def deser_vector(f, c):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
def ser_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += struct.pack("<i", i)
return r
def deser_char_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in range(nit):
t = struct.unpack("<B", f.read(1))[0]
r.append(t)
return r
def ser_char_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += struct.pack("B", i)
return r
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv(object):
typemap = {
0: b"Error",
1: b"TX",
2: b"Block"}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = SPROUT_PROTO_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%r)" \
% (self.nVersion, repr(self.vHave))
class SpendDescription(object):
def __init__(self):
self.cv = None
self.anchor = None
self.nullifier = None
self.rk = None
self.zkproof = None
self.spendAuthSig = None
def deserialize(self, f):
self.cv = deser_uint256(f)
self.anchor = deser_uint256(f)
self.nullifier = deser_uint256(f)
self.rk = deser_uint256(f)
self.zkproof = f.read(192)
self.spendAuthSig = f.read(64)
def serialize(self):
r = b""
r += ser_uint256(self.cv)
r += ser_uint256(self.anchor)
r += ser_uint256(self.nullifier)
r += ser_uint256(self.rk)
r += self.zkproof
r += self.spendAuthSig
return r
def __repr__(self):
return "SpendDescription(cv=%064x, anchor=%064x, nullifier=%064x, rk=%064x, zkproof=%064x, spendAuthSig=%064x)" \
% (self.cv, self.anchor, self.nullifier, self.rk, self.zkproof, self.spendauthsig)
class OutputDescription(object):
def __init__(self):
self.cv = None
self.cmu = None
self.ephemeralKey = None
self.encCiphertext = None
self.outCiphertext = None
self.zkproof = None
def deserialize(self, f):
self.cv = deser_uint256(f)
self.cmu = deser_uint256(f)
self.ephemeralKey = deser_uint256(f)
self.encCiphertext = f.read(580)
self.outCiphertext = f.read(80)
self.zkproof = f.read(192)
def serialize(self):
r = b""
r += ser_uint256(self.cv)
r += ser_uint256(self.cmu)
r += ser_uint256(self.ephemeralKey)
r += self.encCiphertext
r += self.outCiphertext
r += self.zkproof
return r
def __repr__(self):
return "OutputDescription(cv=%064x, cmu=%064x, ephemeralKey=%064x, encCiphertext=%064x, outCiphertext=%064x, zkproof=%064x)" \
% (self.cv, self.cmu, self.ephemeralKey, self.encCiphertext, self.outCiphertext, self.zkproof)
G1_PREFIX_MASK = 0x02
G2_PREFIX_MASK = 0x0a
class ZCProof(object):
def __init__(self):
self.g_A = None
self.g_A_prime = None
self.g_B = None
self.g_B_prime = None
self.g_C = None
self.g_C_prime = None
self.g_K = None
self.g_H = None
def deserialize(self, f):
        def deser_g1(f):
leadingByte = struct.unpack("<B", f.read(1))[0]
return {
'y_lsb': leadingByte & 1,
'x': f.read(32),
}
        def deser_g2(f):
leadingByte = struct.unpack("<B", f.read(1))[0]
return {
'y_gt': leadingByte & 1,
'x': f.read(64),
}
self.g_A = deser_g1(f)
self.g_A_prime = deser_g1(f)
self.g_B = deser_g2(f)
self.g_B_prime = deser_g1(f)
self.g_C = deser_g1(f)
self.g_C_prime = deser_g1(f)
self.g_K = deser_g1(f)
self.g_H = deser_g1(f)
def serialize(self):
        def ser_g1(p):
            return struct.pack("B", G1_PREFIX_MASK | p['y_lsb']) + p['x']
        def ser_g2(p):
            return struct.pack("B", G2_PREFIX_MASK | p['y_gt']) + p['x']
r = b""
r += ser_g1(self.g_A)
r += ser_g1(self.g_A_prime)
r += ser_g2(self.g_B)
r += ser_g1(self.g_B_prime)
r += ser_g1(self.g_C)
r += ser_g1(self.g_C_prime)
r += ser_g1(self.g_K)
r += ser_g1(self.g_H)
return r
def __repr__(self):
return "ZCProof(g_A=%r g_A_prime=%r g_B=%r g_B_prime=%r g_C=%r g_C_prime=%r g_K=%r g_H=%r)" \
% (self.g_A, self.g_A_prime,
self.g_B, self.g_B_prime,
self.g_C, self.g_C_prime,
self.g_K, self.g_H)
ZC_NUM_JS_INPUTS = 2
ZC_NUM_JS_OUTPUTS = 2
ZC_NOTEPLAINTEXT_LEADING = 1
ZC_V_SIZE = 8
ZC_RHO_SIZE = 32
ZC_R_SIZE = 32
ZC_MEMO_SIZE = 512
ZC_NOTEPLAINTEXT_SIZE = (
ZC_NOTEPLAINTEXT_LEADING +
ZC_V_SIZE +
ZC_RHO_SIZE +
ZC_R_SIZE +
ZC_MEMO_SIZE
)
NOTEENCRYPTION_AUTH_BYTES = 16
ZC_NOTECIPHERTEXT_SIZE = (
ZC_NOTEPLAINTEXT_SIZE +
NOTEENCRYPTION_AUTH_BYTES
)
class JSDescription(object):
def __init__(self):
self.vpub_old = 0
self.vpub_new = 0
self.anchor = 0
self.nullifiers = [0] * ZC_NUM_JS_INPUTS
self.commitments = [0] * ZC_NUM_JS_OUTPUTS
self.onetimePubKey = 0
self.randomSeed = 0
self.macs = [0] * ZC_NUM_JS_INPUTS
self.proof = None
self.ciphertexts = [None] * ZC_NUM_JS_OUTPUTS
def deserialize(self, f):
self.vpub_old = struct.unpack("<q", f.read(8))[0]
self.vpub_new = struct.unpack("<q", f.read(8))[0]
self.anchor = deser_uint256(f)
self.nullifiers = []
for i in range(ZC_NUM_JS_INPUTS):
self.nullifiers.append(deser_uint256(f))
self.commitments = []
for i in range(ZC_NUM_JS_OUTPUTS):
self.commitments.append(deser_uint256(f))
self.onetimePubKey = deser_uint256(f)
self.randomSeed = deser_uint256(f)
self.macs = []
for i in range(ZC_NUM_JS_INPUTS):
self.macs.append(deser_uint256(f))
self.proof = ZCProof()
self.proof.deserialize(f)
self.ciphertexts = []
for i in range(ZC_NUM_JS_OUTPUTS):
self.ciphertexts.append(f.read(ZC_NOTECIPHERTEXT_SIZE))
def serialize(self):
r = b""
r += struct.pack("<q", self.vpub_old)
r += struct.pack("<q", self.vpub_new)
r += ser_uint256(self.anchor)
for i in range(ZC_NUM_JS_INPUTS):
r += ser_uint256(self.nullifiers[i])
for i in range(ZC_NUM_JS_OUTPUTS):
r += ser_uint256(self.commitments[i])
r += ser_uint256(self.onetimePubKey)
r += ser_uint256(self.randomSeed)
for i in range(ZC_NUM_JS_INPUTS):
r += ser_uint256(self.macs[i])
r += self.proof.serialize()
for i in range(ZC_NUM_JS_OUTPUTS):
            r += self.ciphertexts[i]
return r
def __repr__(self):
return "JSDescription(vpub_old=%i vpub_new=%i anchor=%064x onetimePubKey=%064x randomSeed=%064x proof=%r)" \
% (self.vpub_old, self.vpub_new, self.anchor,
self.onetimePubKey, self.randomSeed, self.proof)
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (self.prevout, hexlify(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // 100000000, self.nValue % 100000000,
hexlify(self.scriptPubKey))
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.fOverwintered = True
self.nVersion = 4
self.nVersionGroupId = SAPLING_VERSION_GROUP_ID
self.vin = []
self.vout = []
self.nLockTime = 0
self.nExpiryHeight = 0
self.valueBalance = 0
self.shieldedSpends = []
self.shieldedOutputs = []
self.vJoinSplit = []
self.joinSplitPubKey = None
self.joinSplitSig = None
self.bindingSig = None
self.sha256 = None
self.hash = None
else:
self.fOverwintered = tx.fOverwintered
self.nVersion = tx.nVersion
self.nVersionGroupId = tx.nVersionGroupId
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.nExpiryHeight = tx.nExpiryHeight
self.valueBalance = tx.valueBalance
self.shieldedSpends = copy.deepcopy(tx.shieldedSpends)
self.shieldedOutputs = copy.deepcopy(tx.shieldedOutputs)
self.vJoinSplit = copy.deepcopy(tx.vJoinSplit)
self.joinSplitPubKey = tx.joinSplitPubKey
self.joinSplitSig = tx.joinSplitSig
self.bindingSig = tx.bindingSig
self.sha256 = None
self.hash = None
def deserialize(self, f):
header = struct.unpack("<I", f.read(4))[0]
self.fOverwintered = bool(header >> 31)
self.nVersion = header & 0x7FFFFFFF
self.nVersionGroupId = (struct.unpack("<I", f.read(4))[0]
if self.fOverwintered else 0)
isOverwinterV3 = (self.fOverwintered and
self.nVersionGroupId == OVERWINTER_VERSION_GROUP_ID and
self.nVersion == 3)
isSaplingV4 = (self.fOverwintered and
self.nVersionGroupId == SAPLING_VERSION_GROUP_ID and
self.nVersion == 4)
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
if isOverwinterV3 or isSaplingV4:
self.nExpiryHeight = struct.unpack("<I", f.read(4))[0]
if isSaplingV4:
self.valueBalance = struct.unpack("<q", f.read(8))[0]
self.shieldedSpends = deser_vector(f, SpendDescription)
self.shieldedOutputs = deser_vector(f, OutputDescription)
if self.nVersion >= 2:
self.vJoinSplit = deser_vector(f, JSDescription)
if len(self.vJoinSplit) > 0:
self.joinSplitPubKey = deser_uint256(f)
self.joinSplitSig = f.read(64)
if isSaplingV4 and not (len(self.shieldedSpends) == 0 and len(self.shieldedOutputs) == 0):
self.bindingSig = f.read(64)
self.sha256 = None
self.hash = None
def serialize(self):
header = (int(self.fOverwintered)<<31) | self.nVersion
isOverwinterV3 = (self.fOverwintered and
self.nVersionGroupId == OVERWINTER_VERSION_GROUP_ID and
self.nVersion == 3)
isSaplingV4 = (self.fOverwintered and
self.nVersionGroupId == SAPLING_VERSION_GROUP_ID and
self.nVersion == 4)
r = b""
r += struct.pack("<I", header)
if self.fOverwintered:
r += struct.pack("<I", self.nVersionGroupId)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
if isOverwinterV3 or isSaplingV4:
r += struct.pack("<I", self.nExpiryHeight)
if isSaplingV4:
r += struct.pack("<q", self.valueBalance)
r += ser_vector(self.shieldedSpends)
r += ser_vector(self.shieldedOutputs)
if self.nVersion >= 2:
r += ser_vector(self.vJoinSplit)
if len(self.vJoinSplit) > 0:
r += ser_uint256(self.joinSplitPubKey)
r += self.joinSplitSig
if isSaplingV4 and not (len(self.shieldedSpends) == 0 and len(self.shieldedOutputs) == 0):
r += self.bindingSig
return r
def rehash(self):
self.sha256 = None
self.calc_sha256()
def calc_sha256(self):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize()))
self.hash = hash256(self.serialize())[::-1].hex()
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 214160000 * 100000000:
return False
return True
def __repr__(self):
r = ("CTransaction(fOverwintered=%r nVersion=%i nVersionGroupId=0x%08x "
"vin=%r vout=%r nLockTime=%i nExpiryHeight=%i "
"valueBalance=%i shieldedSpends=%r shieldedOutputs=%r"
% (self.fOverwintered, self.nVersion, self.nVersionGroupId,
self.vin, self.vout, self.nLockTime, self.nExpiryHeight,
self.valueBalance, self.shieldedSpends, self.shieldedOutputs))
if self.nVersion >= 2:
r += " vJoinSplit=%r" % (self.vJoinSplit,)
if len(self.vJoinSplit) > 0:
r += " joinSplitPubKey=%064x joinSplitSig=%064x" \
% (self.joinSplitPubKey, self.joinSplitSig)
if len(self.shieldedSpends) > 0 or len(self.shieldedOutputs) > 0:
r += " bindingSig=%064x" % (self.bindingSig,)
r += ")"
return r
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.hashFinalSaplingRoot = header.hashFinalSaplingRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 4
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.hashFinalSaplingRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.hashFinalSaplingRoot = deser_uint256(f)
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.hashFinalSaplingRoot)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.hashFinalSaplingRoot)
self.sha256 = uint256_from_str(hash256(r))
self.hash = hash256(r)[::-1].hex()
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x hashFinalSaplingRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, self.hashFinalSaplingRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self):
r = b""
r += super(CBlock, self).serialize()
r += ser_vector(self.vtx)
return r
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def solve(self):
target = uint256_from_compact(self.nBits)
self.nNonce = 0
while True:
_powhash = rev_hex(bh2u(yescrypt.getPoWHash(super(CBlock, self).serialize())))
pow = int('0x' + _powhash, 16)
if pow <= target:
self.rehash()
return
self.nNonce += 1
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x hashFinalSaplingRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%r)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
self.hashFinalSaplingRoot, time.ctime(self.nTime), self.nBits,
self.nNonce, self.vtx)
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
def __init__(self, protocol_version=SPROUT_PROTO_VERSION):
self.nVersion = protocol_version
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
self.addrTo, self.addrFrom, self.nNonce,
self.strSubVer, self.nStartingHeight)
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%r)" % (self.addrs,)
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
        self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_notfound(object):
command = b"notfound"
def __init__(self):
self.inv = []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_notfound(inv=%r)" % (self.inv,)
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
    def __init__(self, tx=None):
        self.tx = CTransaction() if tx is None else tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
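# Illustrative usage sketch (assumes `conn` is a connected NodeConn and
# `tip_hash` is a known block hash as an integer):
#
#     m = msg_getheaders()
#     m.locator.vHave = [tip_hash]
#     m.hashstop = 0          # 0 => return as many headers as possible
#     conn.send_message(m)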
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_filteradd(object):
command = b"filteradd"
def __init__(self):
self.data = b""
def deserialize(self, f):
self.data = deser_string(f)
def serialize(self):
return ser_string(self.data)
def __repr__(self):
return "msg_filteradd(data=%r)" % (self.data,)
class msg_filterclear(object):
command = b"filterclear"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_filterclear()"
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
def __init__(self):
self.verack_received = False
# Derived classes should call this function once to set the message map
# which associates the derived classes' functions to incoming messages
def create_callback_map(self):
self.cbmap = {
b"version": self.on_version,
b"verack": self.on_verack,
b"addr": self.on_addr,
b"alert": self.on_alert,
b"inv": self.on_inv,
b"getdata": self.on_getdata,
b"notfound": self.on_notfound,
b"getblocks": self.on_getblocks,
b"tx": self.on_tx,
b"block": self.on_block,
b"getaddr": self.on_getaddr,
b"ping": self.on_ping,
b"pong": self.on_pong,
b"headers": self.on_headers,
b"getheaders": self.on_getheaders,
b"reject": self.on_reject,
b"mempool": self.on_mempool
}
def deliver(self, conn, message):
with mininode_lock:
try:
self.cbmap[message.command](conn, message)
except:
print("ERROR delivering %r (%s)" % (message,
sys.exc_info()[0]))
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(SPROUT_PROTO_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_notfound(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_block(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_reject(self, conn, message): pass
def on_close(self, conn): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
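# Illustrative sketch (not part of the original framework): a minimal callback
# that just counts received blocks could look like
#
#     class BlockCounter(NodeConnCB):
#         def __init__(self):
#             NodeConnCB.__init__(self)
#             self.create_callback_map()
#             self.blocks_received = 0
#         def on_block(self, conn, message):
#             self.blocks_received += 1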
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"notfound": msg_notfound,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool
}
MAGIC_BYTES = {
"mainnet": b"\x4b\x6f\x74\x6f", # mainnet
"testnet3": b"\x54\x6f\x6b\x6f", # testnet3
"regtest": b"\x52\x65\x6b\x6f" # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", protocol_version=SAPLING_PROTO_VERSION):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
# stuff version msg into sendbuf
vt = msg_version(protocol_version)
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print('MiniNode: Connecting to Bitcoin Node IP # ' + dstaddr + ':' \
+ str(dstport) + ' using version ' + str(protocol_version))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def show_debug_msg(self, msg):
self.log.debug(msg)
def handle_connect(self):
self.show_debug_msg("MiniNode: Connected & Listening: \n")
self.state = b"connected"
def handle_close(self):
self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
% (self.dstaddr, self.dstport))
self.state = b"closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
length = len(self.sendbuf)
return (length > 0)
def handle_write(self):
with mininode_lock:
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %r" % (self.recvbuf,))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum %r" % (self.recvbuf,))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
self.show_debug_msg("Unknown command: '" + command + "' " +
repr(msg))
except Exception as e:
print('got_data:', repr(e))
# import traceback
# traceback.print_tb(sys.exc_info()[2])
def send_message(self, message, pushbuf=False):
if self.state != b"connected" and not pushbuf:
return
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self.show_debug_msg("Recv %s" % repr(message))
self.cb.deliver(self, message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
the-stack_0_3325 | from __future__ import unicode_literals
from django.test import TestCase
from .models import Article, Car, Driver, Reporter
class ManyToOneNullTests(TestCase):
def setUp(self):
# Create a Reporter.
self.r = Reporter(name='John Smith')
self.r.save()
# Create an Article.
self.a = Article(headline="First", reporter=self.r)
self.a.save()
# Create an Article via the Reporter object.
self.a2 = self.r.article_set.create(headline="Second")
# Create an Article with no Reporter by passing "reporter=None".
self.a3 = Article(headline="Third", reporter=None)
self.a3.save()
# Create another article and reporter
self.r2 = Reporter(name='Paul Jones')
self.r2.save()
self.a4 = self.r2.article_set.create(headline='Fourth')
def test_get_related(self):
self.assertEqual(self.a.reporter.id, self.r.id)
# Article objects have access to their related Reporter objects.
r = self.a.reporter
self.assertEqual(r.id, self.r.id)
def test_created_via_related_set(self):
self.assertEqual(self.a2.reporter.id, self.r.id)
def test_related_set(self):
# Reporter objects have access to their related Article objects.
self.assertQuerysetEqual(self.r.article_set.all(),
['<Article: First>', '<Article: Second>'])
self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='Fir'),
['<Article: First>'])
self.assertEqual(self.r.article_set.count(), 2)
def test_created_without_related(self):
self.assertEqual(self.a3.reporter, None)
# Need to reget a3 to refresh the cache
a3 = Article.objects.get(pk=self.a3.pk)
with self.assertRaises(AttributeError):
getattr(a3.reporter, 'id')
# Accessing an article's 'reporter' attribute returns None
# if the reporter is set to None.
self.assertEqual(a3.reporter, None)
# To retrieve the articles with no reporters set, use "reporter__isnull=True".
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
['<Article: Third>'])
# We can achieve the same thing by filtering for the case where the
# reporter is None.
self.assertQuerysetEqual(Article.objects.filter(reporter=None),
['<Article: Third>'])
# Set the reporter for the Third article
self.assertQuerysetEqual(self.r.article_set.all(),
['<Article: First>', '<Article: Second>'])
self.r.article_set.add(a3)
self.assertQuerysetEqual(self.r.article_set.all(),
['<Article: First>', '<Article: Second>', '<Article: Third>'])
# Remove an article from the set, and check that it was removed.
self.r.article_set.remove(a3)
self.assertQuerysetEqual(self.r.article_set.all(),
['<Article: First>', '<Article: Second>'])
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
['<Article: Third>'])
def test_remove_from_wrong_set(self):
self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>'])
# Try to remove a4 from a set it does not belong to
with self.assertRaises(Reporter.DoesNotExist):
self.r.article_set.remove(self.a4)
self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>'])
def test_set(self):
# Use manager.set() to allocate ForeignKey. Null is legal, so existing
# members of the set that are not in the assignment set are set to null.
self.r2.article_set.set([self.a2, self.a3])
self.assertQuerysetEqual(self.r2.article_set.all(),
['<Article: Second>', '<Article: Third>'])
# Use manager.set(clear=True)
self.r2.article_set.set([self.a3, self.a4], clear=True)
self.assertQuerysetEqual(self.r2.article_set.all(),
['<Article: Fourth>', '<Article: Third>'])
# Clear the rest of the set
self.r2.article_set.set([])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
['<Article: Fourth>', '<Article: Second>', '<Article: Third>'])
def test_assign_clear_related_set(self):
# Use descriptor assignment to allocate ForeignKey. Null is legal, so
# existing members of the set that are not in the assignment set are
# set to null.
self.r2.article_set.set([self.a2, self.a3])
self.assertQuerysetEqual(self.r2.article_set.all(),
['<Article: Second>', '<Article: Third>'])
# Clear the rest of the set
self.r.article_set.clear()
self.assertQuerysetEqual(self.r.article_set.all(), [])
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
['<Article: First>', '<Article: Fourth>'])
def test_assign_with_queryset(self):
# Ensure that querysets used in reverse FK assignments are pre-evaluated
# so their value isn't affected by the clearing operation in
# RelatedManager.set() (#19816).
self.r2.article_set.set([self.a2, self.a3])
qs = self.r2.article_set.filter(headline="Second")
self.r2.article_set.set(qs)
self.assertEqual(1, self.r2.article_set.count())
self.assertEqual(1, qs.count())
def test_add_efficiency(self):
r = Reporter.objects.create()
articles = []
for _ in range(3):
articles.append(Article.objects.create())
with self.assertNumQueries(1):
r.article_set.add(*articles)
self.assertEqual(r.article_set.count(), 3)
def test_clear_efficiency(self):
r = Reporter.objects.create()
for _ in range(3):
r.article_set.create()
with self.assertNumQueries(1):
r.article_set.clear()
self.assertEqual(r.article_set.count(), 0)
def test_related_null_to_field(self):
c1 = Car.objects.create()
d1 = Driver.objects.create()
self.assertIs(d1.car, None)
with self.assertNumQueries(0):
self.assertEqual(list(c1.drivers.all()), [])
|
the-stack_0_3326 | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
from typing import Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from tfx.utils.dsl_utils import external_input
_pipeline_name = 'chicago_taxi_portable_beam'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils.py')
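# (Illustrative sketch, based on the standard TFX taxi example rather than this
# file: taxi_utils.py is expected to expose roughly the following hooks, with
# the exact signatures treated as an assumption.)
#
#     def preprocessing_fn(inputs):
#         # tf.Transform feature engineering; returns a dict of transformed
#         # features keyed by name.
#         ...
#
#     def trainer_fn(trainer_fn_args, schema):
#         # Returns a dict with 'estimator', 'train_spec', 'eval_spec' and
#         # 'eval_input_receiver_fn' for the estimator-based Trainer.
#         ...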
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
'metadata.db')
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
module_file: Text, serving_model_dir: Text,
metadata_path: Text,
worker_parallelism: int) -> pipeline.Pipeline:
"""Implements the chicago taxi pipeline with TFX."""
examples = external_input(data_root)
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input=examples)
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
# Generates schema based on statistics files.
schema_gen = SchemaGen(
statistics=statistics_gen.outputs['statistics'],
infer_feature_shape=False)
# Performs anomaly detection based on statistics and data schema.
example_validator = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
# Performs transformations and feature engineering in training and serving.
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=module_file)
# Uses user-provided Python function that implements a model using TF-Learn.
trainer = Trainer(
module_file=module_file,
transformed_examples=transform.outputs['transformed_examples'],
schema=schema_gen.outputs['schema'],
transform_graph=transform.outputs['transform_graph'],
train_args=trainer_pb2.TrainArgs(num_steps=10000),
eval_args=trainer_pb2.EvalArgs(num_steps=5000))
# Get the latest blessed model for model validation.
model_resolver = ResolverNode(
instance_name='latest_blessed_model_resolver',
resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
model=Channel(type=Model),
model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
# perform quality validation of a candidate model (compared to a baseline).
eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(signature_name='eval')],
slicing_specs=[
tfma.SlicingSpec(),
tfma.SlicingSpec(feature_keys=['trip_start_hour'])
],
metrics_specs=[
tfma.MetricsSpec(
thresholds={
'binary_accuracy':
tfma.config.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.6}),
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10}))
})
])
evaluator = Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
baseline_model=model_resolver.outputs['model'],
# Change threshold will be ignored if there is no baseline (first run).
eval_config=eval_config)
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[
example_gen, statistics_gen, schema_gen, example_validator, transform,
trainer, model_resolver, evaluator, pusher
],
enable_cache=True,
metadata_connection_config=metadata.sqlite_metadata_connection_config(
metadata_path),
# LINT.IfChange
beam_pipeline_args=[
# -------------------------- Beam Args --------------------------.
'--runner=PortableRunner',
# Points to the job server started in
# setup_beam_on_{flink, spark}.sh
'--job_endpoint=localhost:8099',
'--environment_type=LOOPBACK',
'--sdk_worker_parallelism=%d' % worker_parallelism,
'--experiments=use_loopback_process_worker=True',
# Setting environment_cache_millis to practically infinity enables
# continual reuse of Beam SDK workers, improving performance.
'--environment_cache_millis=1000000',
# TODO(BEAM-7199): Obviate the need for setting pre_optimize=all. # pylint: disable=g-bad-todo
'--experiments=pre_optimize=all',
          # Note: We use 100 worker threads to mitigate the issue with
# scheduling work between the Beam runner and SDK harness. Flink
# and Spark can process unlimited work items concurrently while
# SdkHarness can only process 1 work item per worker thread.
# Having 100 threads will let 100 tasks execute concurrently
# avoiding scheduling issue in most cases. In case the threads are
# exhausted, beam prints the relevant message in the log.
# TODO(BEAM-8151) Remove worker_threads=100 after we start using a # pylint: disable=g-bad-todo
# virtually unlimited thread pool by default.
'--experiments=worker_threads=100',
# ---------------------- End of Beam Args -----------------------.
# --------- Flink runner Args (ignored by Spark runner) ---------.
'--parallelism=%d' % worker_parallelism,
# TODO(FLINK-10672): Obviate setting BATCH_FORCED. # pylint: disable=g-bad-todo
'--execution_mode_for_batch=BATCH_FORCED',
# ------------------ End of Flink runner Args -------------------.
],
# LINT.ThenChange(setup/setup_beam_on_spark.sh)
# LINT.ThenChange(setup/setup_beam_on_flink.sh)
)
# To run this pipeline from the python CLI:
# $python taxi_pipeline_portable_beam.py
if __name__ == '__main__':
absl.logging.set_verbosity(absl.logging.INFO)
# LINT.IfChange
try:
parallelism = multiprocessing.cpu_count()
except NotImplementedError:
parallelism = 1
absl.logging.info('Using %d process(es) for Beam pipeline execution.' %
parallelism)
# LINT.ThenChange(setup/setup_beam_on_flink.sh)
BeamDagRunner().run(
_create_pipeline(
pipeline_name=_pipeline_name,
pipeline_root=_pipeline_root,
data_root=_data_root,
module_file=_module_file,
serving_model_dir=_serving_model_dir,
metadata_path=_metadata_path,
worker_parallelism=parallelism))
|
the-stack_0_3329 | # -*- coding: utf-8 -*-#
'''
# Name: dnn_regression-keras
# Description:
# Author: super
# Date: 2020/6/2
'''
from HelperClass2.MnistImageDataReader import *
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
def load_data():
train_file = "../data/ch09.train.npz"
test_file = "../data/ch09.test.npz"
dataReader = DataReader_2_0(train_file, test_file)
dataReader.ReadData()
# dr.NormalizeX()
# dr.NormalizeY(YNormalizationMethod.Regression)
dataReader.Shuffle()
dataReader.GenerateValidationSet()
x_train, y_train = dataReader.XTrain, dataReader.YTrain
x_test, y_test = dataReader.XTest, dataReader.YTest
x_val, y_val = dataReader.XDev, dataReader.YDev
return x_train, y_train, x_test, y_test, x_val, y_val
def build_model():
model = Sequential()
model.add(Dense(4, activation='sigmoid', input_shape=(1, )))
model.add(Dense(1, activation='linear'))
model.compile(optimizer='Adam',
loss='mean_squared_error')
return model
# Plot the training and validation loss recorded over the course of training
def draw_train_history(history):
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
if __name__ == '__main__':
x_train, y_train, x_test, y_test, x_val, y_val = load_data()
# print(x_train.shape)
# print(x_test.shape)
# print(x_val.shape)
model = build_model()
history = model.fit(x_train, y_train, epochs=50, batch_size=10, validation_data=(x_val, y_val))
draw_train_history(history)
loss = model.evaluate(x_test, y_test)
print("test loss: {}".format(loss))
weights = model.get_weights()
print("weights: ", weights) |
the-stack_0_3332 | from theano import function, config, shared, tensor
import numpy
import time
vlen = 10 * 30 * 768 # 10 x #cores x # threads per core
iters = 1000
rng = numpy.random.RandomState(22)
x = shared(numpy.asarray(rng.rand(vlen), config.floatX))
f = function([], tensor.exp(x))
print(f.maker.fgraph.toposort())
t0 = time.time()
for i in range(iters):
r = f()
t1 = time.time()
print("Looping %d times took %f seconds" % (iters, t1 - t0))
print("Result is %s" % (r,))
if numpy.any([isinstance(x.op, tensor.Elemwise) and
('Gpu' not in type(x.op).__name__)
for x in f.maker.fgraph.toposort()]):
print('Used the cpu')
else:
print('Used the gpu')
|
the-stack_0_3334 | # flake8: noqa: E402
import time
from kube_hunter.conf import Config, set_config
set_config(Config())
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import K8sVersionDisclosure
from kube_hunter.modules.hunting.cves import (
K8sClusterCveHunter,
ServerApiVersionEndPointAccessPE,
ServerApiVersionEndPointAccessDos,
CveUtils,
)
cve_counter = 0
def test_K8sCveHunter():
global cve_counter
# because the hunter unregisters itself, we manually remove this option, so we can test it
K8sClusterCveHunter.__new__ = lambda self, cls: object.__new__(self)
e = K8sVersionDisclosure(version="1.10.1", from_endpoint="/version")
h = K8sClusterCveHunter(e)
h.execute()
time.sleep(0.01)
assert cve_counter == 2
cve_counter = 0
# test patched version
e = K8sVersionDisclosure(version="v1.13.6-gke.13", from_endpoint="/version")
h = K8sClusterCveHunter(e)
h.execute()
time.sleep(0.01)
assert cve_counter == 0
cve_counter = 0
@handler.subscribe(ServerApiVersionEndPointAccessPE)
class test_CVE_2018_1002105:
def __init__(self, event):
global cve_counter
cve_counter += 1
@handler.subscribe(ServerApiVersionEndPointAccessDos)
class test_CVE_2019_1002100:
def __init__(self, event):
global cve_counter
cve_counter += 1
class TestCveUtils:
def test_is_downstream(self):
test_cases = (
("1", False),
("1.2", False),
("1.2-3", True),
("1.2-r3", True),
("1.2+3", True),
("1.2~3", True),
("1.2+a3f5cb2", True),
("1.2-9287543", True),
("v1", False),
("v1.2", False),
("v1.2-3", True),
("v1.2-r3", True),
("v1.2+3", True),
("v1.2~3", True),
("v1.2+a3f5cb2", True),
("v1.2-9287543", True),
("v1.13.9-gke.3", True),
)
for version, expected in test_cases:
actual = CveUtils.is_downstream_version(version)
assert actual == expected
def test_ignore_downstream(self):
test_cases = (
("v2.2-abcd", ["v1.1", "v2.3"], False),
("v2.2-abcd", ["v1.1", "v2.2"], False),
("v1.13.9-gke.3", ["v1.14.8"], False),
)
for check_version, fix_versions, expected in test_cases:
actual = CveUtils.is_vulnerable(fix_versions, check_version, True)
assert actual == expected
|
the-stack_0_3340 | ### Simulate a large number of coin flips using Python ###
from random import randint
def coingame(numflips: int, gamenum: int):
flips = []
for _ in range(0, numflips):
flips.append(randint(0, 1))
heads = flips.count(0)
tails = flips.count(1)
    # Print the results of this game; a pie-graph sketch of the distribution is included below
print(f"Game {gamenum + 1} | Heads: {heads:,} | Tails: {tails:,} | Total: {heads + tails:,}")
if __name__ == '__main__':
# Call the function with the number of games and flips
games = 5
flips = 1000000
print("< Python >")
for i in range(0, games):
coingame(flips, i)
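
# A minimal sketch of the pie-graph visualization referred to inside coingame()
# (an assumption: matplotlib is not imported or required by the original
# script). It would sit at the end of coingame(), where heads/tails are defined:
#
#   import matplotlib.pyplot as plt
#   plt.pie([heads, tails], labels=["Heads", "Tails"], autopct="%1.1f%%")
#   plt.title(f"Game {gamenum + 1}: {heads + tails:,} flips")
#   plt.show()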
|
the-stack_0_3341 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add master_addresses to bay
Revision ID: 6f21dc998bb
Revises: 421102d1f2d2
Create Date: 2015-08-20 13:57:14.863292
"""
# revision identifiers, used by Alembic.
revision = '6f21dc998bb'
down_revision = '421102d1f2d2'
from alembic import op # noqa: E402
from magnum.db.sqlalchemy import models # noqa: E402
import sqlalchemy as sa # noqa: E402
def upgrade():
op.add_column(
'bay',
sa.Column('master_addresses',
models.JSONEncodedList(),
nullable=True)
)
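

# The original revision defines no downgrade. If one were needed, a minimal
# sketch (an assumption, not part of this migration) would drop the column
# again:
#
#   def downgrade():
#       op.drop_column('bay', 'master_addresses')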
|
the-stack_0_3344 | import mock
import csv
import furl
import pytz
import pytest
from datetime import datetime, timedelta
from nose import tools as nt
from django.test import RequestFactory
from django.http import Http404
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.contrib.auth.models import Permission
from tests.base import AdminTestCase
from website import settings
from framework.auth import Auth
from osf.models.user import OSFUser
from osf.models.tag import Tag
from osf_tests.factories import (
UserFactory,
AuthUserFactory,
ProjectFactory,
TagFactory,
UnconfirmedUserFactory
)
from admin_tests.utilities import setup_view, setup_log_view, setup_form_view
from admin.users import views
from admin.users.forms import WorkshopForm, UserSearchForm
from osf.models.admin_log_entry import AdminLogEntry
pytestmark = pytest.mark.django_db
class TestUserView(AdminTestCase):
def test_no_guid(self):
request = RequestFactory().get('/fake_path')
view = views.UserView()
view = setup_view(view, request)
with nt.assert_raises(AttributeError):
view.get_object()
def test_load_data(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get('/fake_path')
view = views.UserView()
view = setup_view(view, request, guid=guid)
res = view.get_object()
nt.assert_is_instance(res, dict)
def test_name_data(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get('/fake_path')
view = views.UserView()
view = setup_view(view, request, guid=guid)
temp_object = view.get_object()
view.object = temp_object
res = view.get_context_data()
nt.assert_equal(res[views.UserView.context_object_name], temp_object)
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(reverse('users:user', kwargs={'guid': guid}))
request.user = user
with self.assertRaises(PermissionDenied):
views.UserView.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
view_permission = Permission.objects.get(codename='view_osfuser')
user.user_permissions.add(view_permission)
user.save()
request = RequestFactory().get(reverse('users:user', kwargs={'guid': guid}))
request.user = user
response = views.UserView.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestResetPasswordView(AdminTestCase):
def setUp(self):
super(TestResetPasswordView, self).setUp()
self.user = UserFactory()
self.request = RequestFactory().get('/fake_path')
self.request.user = self.user
self.plain_view = views.ResetPasswordView
self.view = setup_view(self.plain_view(), self.request, guid=self.user._id)
def test_get_initial(self):
self.view.user = self.user
self.view.get_initial()
res = self.view.initial
nt.assert_is_instance(res, dict)
nt.assert_equal(res['guid'], self.user._id)
nt.assert_equal(res['emails'], [(r, r) for r in self.user.emails.values_list('address', flat=True)])
def test_reset_password_context(self):
self.view.user = self.user
res = self.view.get_context_data()
nt.assert_is_instance(res, dict)
nt.assert_in((self.user.emails.first().address, self.user.emails.first().address), self.view.initial['emails'])
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(reverse('users:reset_password', kwargs={'guid': guid}))
request.user = user
with self.assertRaises(PermissionDenied):
views.ResetPasswordView.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
change_permission = Permission.objects.get(codename='change_osfuser')
user.user_permissions.add(change_permission)
user.save()
request = RequestFactory().get(reverse('users:reset_password', kwargs={'guid': guid}))
request.user = user
response = views.ResetPasswordView.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestDisableUser(AdminTestCase):
def setUp(self):
self.user = UserFactory()
self.request = RequestFactory().post('/fake_path')
self.view = views.UserDeleteView
self.view = setup_log_view(self.view, self.request, guid=self.user._id)
def test_get_object(self):
obj = self.view().get_object()
nt.assert_is_instance(obj, OSFUser)
def test_get_context(self):
res = self.view().get_context_data(object=self.user)
nt.assert_in('guid', res)
nt.assert_equal(res.get('guid'), self.user._id)
def test_disable_user(self):
settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
count = AdminLogEntry.objects.count()
self.view().delete(self.request)
self.user.reload()
nt.assert_true(self.user.is_disabled)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
def test_reactivate_user(self):
settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
self.view().delete(self.request)
count = AdminLogEntry.objects.count()
self.view().delete(self.request)
self.user.reload()
nt.assert_false(self.user.is_disabled)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
def test_no_user(self):
view = setup_view(views.UserDeleteView(), self.request, guid='meh')
with nt.assert_raises(Http404):
view.delete(self.request)
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(reverse('users:disable', kwargs={'guid': guid}))
request.user = user
with self.assertRaises(PermissionDenied):
self.view.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
change_permission = Permission.objects.get(codename='change_osfuser')
user.user_permissions.add(change_permission)
user.save()
request = RequestFactory().get(reverse('users:disable', kwargs={'guid': guid}))
request.user = user
response = self.view.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestHamUserRestore(AdminTestCase):
def setUp(self):
self.user = UserFactory()
self.request = RequestFactory().post('/fake_path')
self.view = views.HamUserRestoreView
self.view = setup_log_view(self.view, self.request, guid=self.user._id)
self.spam_confirmed, created = Tag.objects.get_or_create(name='spam_confirmed')
self.ham_confirmed, created = Tag.objects.get_or_create(name='ham_confirmed')
def test_get_object(self):
obj = self.view().get_object()
nt.assert_is_instance(obj, OSFUser)
def test_get_context(self):
res = self.view().get_context_data(object=self.user)
nt.assert_in('guid', res)
nt.assert_equal(res.get('guid'), self.user._id)
def test_enable_user(self):
self.user.disable_account()
self.user.save()
nt.assert_true(self.user.is_disabled)
self.view().delete(self.request)
self.user.reload()
nt.assert_false(self.user.is_disabled)
nt.assert_false(self.user.all_tags.filter(name=self.spam_confirmed.name).exists())
nt.assert_true(self.user.all_tags.filter(name=self.ham_confirmed.name).exists())
class TestDisableSpamUser(AdminTestCase):
def setUp(self):
self.user = UserFactory()
self.public_node = ProjectFactory(creator=self.user, is_public=True)
self.private_node = ProjectFactory(creator=self.user, is_public=False)
self.request = RequestFactory().post('/fake_path')
self.view = views.SpamUserDeleteView
self.view = setup_log_view(self.view, self.request, guid=self.user._id)
def test_get_object(self):
obj = self.view().get_object()
nt.assert_is_instance(obj, OSFUser)
def test_get_context(self):
res = self.view().get_context_data(object=self.user)
nt.assert_in('guid', res)
nt.assert_equal(res.get('guid'), self.user._id)
def test_disable_spam_user(self):
settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
count = AdminLogEntry.objects.count()
self.view().delete(self.request)
self.user.reload()
self.public_node.reload()
nt.assert_true(self.user.is_disabled)
nt.assert_true(self.user.all_tags.filter(name='spam_confirmed').exists())
nt.assert_false(self.public_node.is_public)
nt.assert_equal(AdminLogEntry.objects.count(), count + 3)
def test_no_user(self):
view = setup_view(self.view(), self.request, guid='meh')
with nt.assert_raises(Http404):
view.delete(self.request)
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(reverse('users:spam_disable', kwargs={'guid': guid}))
request.user = user
with self.assertRaises(PermissionDenied):
self.view.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
change_permission = Permission.objects.get(codename='change_osfuser')
user.user_permissions.add(change_permission)
user.save()
request = RequestFactory().get(reverse('users:spam_disable', kwargs={'guid': guid}))
request.user = user
response = self.view.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class SpamUserListMixin(object):
def setUp(self):
spam_flagged = TagFactory(name='spam_flagged')
spam_confirmed = TagFactory(name='spam_confirmed')
ham_confirmed = TagFactory(name='ham_confirmed')
self.flagged_user = UserFactory()
self.flagged_user.tags.add(spam_flagged)
self.flagged_user.save()
self.spam_user = UserFactory()
self.spam_user.tags.add(spam_confirmed)
self.spam_user.save()
self.ham_user = UserFactory()
self.ham_user.tags.add(ham_confirmed)
self.ham_user.save()
self.request = RequestFactory().post('/fake_path')
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(self.url)
request.user = user
with self.assertRaises(PermissionDenied):
self.plain_view.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
view_permission = Permission.objects.get(codename='view_osfuser')
spam_permission = Permission.objects.get(codename='view_spam')
user.user_permissions.add(view_permission)
user.user_permissions.add(spam_permission)
user.save()
request = RequestFactory().get(self.url)
request.user = user
response = self.plain_view.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestFlaggedSpamUserList(SpamUserListMixin, AdminTestCase):
def setUp(self):
super(TestFlaggedSpamUserList, self).setUp()
self.plain_view = views.UserFlaggedSpamList
self.view = setup_log_view(self.plain_view(), self.request)
self.url = reverse('users:flagged-spam')
def test_get_queryset(self):
qs = self.view.get_queryset()
nt.assert_equal(qs.count(), 1)
nt.assert_equal(qs[0]._id, self.flagged_user._id)
class TestConfirmedSpamUserList(SpamUserListMixin, AdminTestCase):
def setUp(self):
super(TestConfirmedSpamUserList, self).setUp()
self.plain_view = views.UserKnownSpamList
self.view = setup_log_view(self.plain_view(), self.request)
self.url = reverse('users:known-spam')
def test_get_queryset(self):
qs = self.view.get_queryset()
nt.assert_equal(qs.count(), 1)
nt.assert_equal(qs[0]._id, self.spam_user._id)
class TestConfirmedHamUserList(SpamUserListMixin, AdminTestCase):
def setUp(self):
super(TestConfirmedHamUserList, self).setUp()
self.plain_view = views.UserKnownHamList
self.view = setup_log_view(self.plain_view(), self.request)
self.url = reverse('users:known-ham')
def test_get_queryset(self):
qs = self.view.get_queryset()
nt.assert_equal(qs.count(), 1)
nt.assert_equal(qs[0]._id, self.ham_user._id)
class TestRemove2Factor(AdminTestCase):
def setUp(self):
super(TestRemove2Factor, self).setUp()
self.user = AuthUserFactory()
self.request = RequestFactory().post('/fake_path')
self.view = views.User2FactorDeleteView
self.setup_view = setup_log_view(self.view(), self.request, guid=self.user._id)
self.url = reverse('users:remove2factor', kwargs={'guid': self.user._id})
@mock.patch('osf.models.user.OSFUser.delete_addon')
def test_remove_two_factor_get(self, mock_delete_addon):
self.setup_view.delete(self.request)
mock_delete_addon.assert_called_with('twofactor')
def test_integration_delete_two_factor(self):
user_addon = self.user.get_or_add_addon('twofactor')
nt.assert_not_equal(user_addon, None)
user_settings = self.user.get_addon('twofactor')
nt.assert_not_equal(user_settings, None)
count = AdminLogEntry.objects.count()
self.setup_view.delete(self.request)
post_addon = self.user.get_addon('twofactor')
nt.assert_equal(post_addon, None)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
def test_no_user_permissions_raises_error(self):
guid = self.user._id
request = RequestFactory().get(self.url)
request.user = self.user
with self.assertRaises(PermissionDenied):
self.view.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
guid = self.user._id
change_permission = Permission.objects.get(codename='change_osfuser')
self.user.user_permissions.add(change_permission)
self.user.save()
request = RequestFactory().get(self.url)
request.user = self.user
response = self.view.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestUserWorkshopFormView(AdminTestCase):
def setUp(self):
self.user_1 = AuthUserFactory()
self.auth_1 = Auth(self.user_1)
self.view = views.UserWorkshopFormView()
self.workshop_date = timezone.now()
self.data = [
['none', 'date', 'none', 'none', 'none', 'email', 'none'],
[None, self.workshop_date.strftime('%m/%d/%y'), None, None, None, self.user_1.username, None],
]
self.user_exists_by_name_data = [
['number', 'date', 'location', 'topic', 'name', 'email', 'other'],
[None, self.workshop_date.strftime('%m/%d/%y'), None, None, self.user_1.fullname, '[email protected]', None],
]
self.user_not_found_data = [
['none', 'date', 'none', 'none', 'none', 'email', 'none'],
[None, self.workshop_date.strftime('%m/%d/%y'), None, None, None, '[email protected]', None],
]
self.mock_data = mock.patch.object(
csv,
'reader',
# parse data into the proper format handling None values as csv reader would
side_effect=(lambda values: [[item or '' for item in value] for value in values])
)
self.mock_data.start()
def tearDown(self):
self.mock_data.stop()
def _create_and_parse_test_file(self, data):
result_csv = self.view.parse(data)
return result_csv
def _create_nodes_and_add_logs(self, first_activity_date, second_activity_date=None):
node_one = ProjectFactory(creator=self.user_1)
node_one.date_created = first_activity_date
node_one.add_log(
'log_added', params={'project': node_one._id}, auth=self.auth_1, log_date=first_activity_date, save=True
)
if second_activity_date:
node_two = ProjectFactory(creator=self.user_1)
node_two.date_created = second_activity_date
node_two.add_log(
'log_added', params={'project': node_two._id}, auth=self.auth_1, log_date=second_activity_date, save=True
)
def test_correct_number_of_columns_added(self):
added_columns = ['OSF ID', 'Logs Since Workshop', 'Nodes Created Since Workshop', 'Last Log Data']
result_csv = self._create_and_parse_test_file(self.data)
nt.assert_equal(len(self.data[0]) + len(added_columns), len(result_csv[0]))
def test_user_activity_day_of_workshop_only(self):
self._create_nodes_and_add_logs(first_activity_date=self.workshop_date)
result_csv = self._create_and_parse_test_file(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_logs_since_workshop, 0)
nt.assert_equal(user_nodes_created_since_workshop, 0)
def test_user_activity_before_workshop_only(self):
activity_date = timezone.now() - timedelta(days=1)
self._create_nodes_and_add_logs(first_activity_date=activity_date)
result_csv = self._create_and_parse_test_file(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_logs_since_workshop, 0)
nt.assert_equal(user_nodes_created_since_workshop, 0)
def test_user_activity_after_workshop_only(self):
activity_date = timezone.now() + timedelta(hours=25)
self._create_nodes_and_add_logs(first_activity_date=activity_date)
result_csv = self._create_and_parse_test_file(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_logs_since_workshop, 1)
nt.assert_equal(user_nodes_created_since_workshop, 1)
def test_user_activity_day_of_workshop_and_before(self):
activity_date = timezone.now() - timedelta(days=1)
self._create_nodes_and_add_logs(
first_activity_date=self.workshop_date,
second_activity_date=activity_date
)
result_csv = self._create_and_parse_test_file(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_logs_since_workshop, 0)
nt.assert_equal(user_nodes_created_since_workshop, 0)
def test_user_activity_day_of_workshop_and_after(self):
activity_date = timezone.now() + timedelta(hours=25)
self._create_nodes_and_add_logs(
first_activity_date=self.workshop_date,
second_activity_date=activity_date
)
result_csv = self._create_and_parse_test_file(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_logs_since_workshop, 1)
nt.assert_equal(user_nodes_created_since_workshop, 1)
def test_user_activity_before_workshop_and_after(self):
before_activity_date = timezone.now() - timedelta(days=1)
after_activity_date = timezone.now() + timedelta(hours=25)
self._create_nodes_and_add_logs(
first_activity_date=before_activity_date,
second_activity_date=after_activity_date
)
result_csv = self._create_and_parse_test_file(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
# One log before workshop, one after, only should show the one after
nt.assert_equal(user_logs_since_workshop, 1)
nt.assert_equal(user_nodes_created_since_workshop, 1)
def test_user_osf_account_not_found(self):
result_csv = self._create_and_parse_test_file(self.user_not_found_data)
user_id = result_csv[1][-4]
last_log_date = result_csv[1][-1]
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_id, '')
nt.assert_equal(last_log_date, '')
nt.assert_equal(user_logs_since_workshop, 0)
nt.assert_equal(user_nodes_created_since_workshop, 0)
def test_user_found_by_name(self):
result_csv = self._create_and_parse_test_file(self.user_exists_by_name_data)
user_id = result_csv[1][-4]
last_log_date = result_csv[1][-1]
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_id, self.user_1.id)
nt.assert_equal(last_log_date, '')
nt.assert_equal(user_logs_since_workshop, 0)
nt.assert_equal(user_nodes_created_since_workshop, 0)
def test_form_valid(self):
request = RequestFactory().post('/fake_path')
data = [
['none', 'date', 'none', 'none', 'none', 'email', 'none'],
[None, '9/1/16', None, None, None, self.user_1.username, None],
]
uploaded = SimpleUploadedFile('test_name', bytes(csv.reader(data)), content_type='text/csv')
form = WorkshopForm(data={'document': uploaded})
form.is_valid()
form.cleaned_data['document'] = uploaded
setup_form_view(self.view, request, form)
class TestUserSearchView(AdminTestCase):
def setUp(self):
self.user_1 = AuthUserFactory(fullname='Broken Matt Hardy')
self.user_2 = AuthUserFactory(fullname='Jeff Hardy')
self.user_3 = AuthUserFactory(fullname='Reby Sky')
self.user_4 = AuthUserFactory(fullname='King Maxel Hardy')
self.user_2_alternate_email = '[email protected]'
self.user_2.emails.create(address=self.user_2_alternate_email)
self.user_2.save()
self.request = RequestFactory().get('/fake_path')
self.view = views.UserFormView()
self.view = setup_form_view(self.view, self.request, form=UserSearchForm())
def test_search_user_by_guid(self):
form_data = {
'guid': self.user_1.guids.first()._id
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, '/users/{}/'.format(self.user_1.guids.first()._id))
def test_search_user_by_name(self):
form_data = {
'name': 'Hardy'
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, '/users/search/Hardy/')
def test_search_user_by_username(self):
form_data = {
'email': self.user_1.username
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, '/users/{}/'.format(self.user_1.guids.first()._id))
def test_search_user_by_alternate_email(self):
form_data = {
'email': self.user_2_alternate_email
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, '/users/{}/'.format(self.user_2.guids.first()._id))
def test_search_user_list(self):
view = views.UserSearchList()
view = setup_view(view, self.request)
view.kwargs = {'name': 'Hardy'}
results = view.get_queryset()
nt.assert_equal(len(results), 3)
for user in results:
nt.assert_in('Hardy', user.fullname)
def test_search_user_list_case_insensitive(self):
view = views.UserSearchList()
view = setup_view(view, self.request)
view.kwargs = {'name': 'hardy'}
results = view.get_queryset()
nt.assert_equal(len(results), 3)
for user in results:
nt.assert_in('Hardy', user.fullname)
class TestGetLinkView(AdminTestCase):
def test_get_user_confirmation_link(self):
user = UnconfirmedUserFactory()
request = RequestFactory().get('/fake_path')
view = views.GetUserConfirmationLink()
view = setup_view(view, request, guid=user._id)
user_token = user.email_verifications.keys()[0]
ideal_link_path = '/confirm/{}/{}/'.format(user._id, user_token)
link = view.get_link(user)
link_path = str(furl.furl(link).path)
nt.assert_equal(link_path, ideal_link_path)
def test_get_user_confirmation_link_with_expired_token(self):
user = UnconfirmedUserFactory()
request = RequestFactory().get('/fake_path')
view = views.GetUserConfirmationLink()
view = setup_view(view, request, guid=user._id)
old_user_token = user.email_verifications.keys()[0]
user.email_verifications[old_user_token]['expiration'] = datetime.utcnow().replace(tzinfo=pytz.utc) - timedelta(hours=24)
user.save()
link = view.get_link(user)
new_user_token = user.email_verifications.keys()[0]
link_path = str(furl.furl(link).path)
ideal_link_path = '/confirm/{}/{}/'.format(user._id, new_user_token)
nt.assert_equal(link_path, ideal_link_path)
def test_get_password_reset_link(self):
user = UnconfirmedUserFactory()
request = RequestFactory().get('/fake_path')
view = views.GetPasswordResetLink()
view = setup_view(view, request, guid=user._id)
link = view.get_link(user)
user_token = user.verification_key_v2.get('token')
nt.assert_is_not_none(user_token)
ideal_link_path = '/resetpassword/{}/{}'.format(user._id, user_token)
link_path = str(furl.furl(link).path)
nt.assert_equal(link_path, ideal_link_path)
def test_get_unclaimed_node_links(self):
project = ProjectFactory()
unregistered_contributor = project.add_unregistered_contributor(fullname='Brother Nero', email='[email protected]', auth=Auth(project.creator))
project.save()
request = RequestFactory().get('/fake_path')
view = views.GetUserClaimLinks()
view = setup_view(view, request, guid=unregistered_contributor._id)
links = view.get_claim_links(unregistered_contributor)
unclaimed_records = unregistered_contributor.unclaimed_records
nt.assert_equal(len(links), 1)
nt.assert_equal(len(links), len(unclaimed_records.keys()))
link = links[0]
nt.assert_in(project._id, link)
nt.assert_in(unregistered_contributor.unclaimed_records[project._id]['token'], link)
class TestUserReindex(AdminTestCase):
def setUp(self):
super(TestUserReindex, self).setUp()
self.request = RequestFactory().post('/fake_path')
self.user = AuthUserFactory()
@mock.patch('website.search.search.update_user')
def test_reindex_user_elastic(self, mock_reindex_elastic):
count = AdminLogEntry.objects.count()
view = views.UserReindexElastic()
view = setup_log_view(view, self.request, guid=self.user._id)
view.delete(self.request)
nt.assert_true(mock_reindex_elastic.called)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
|
the-stack_0_3345 | #!/usr/bin/env python
#
# Public Domain 2014-2016 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# test_cursor08.py
# Log cursors with compression
#
import fnmatch, os, shutil, run, time
from suite_subprocess import suite_subprocess
from wiredtiger import stat, WiredTigerError
from wtscenario import multiply_scenarios, number_scenarios, check_scenarios
import wttest
class test_cursor08(wttest.WiredTigerTestCase, suite_subprocess):
logmax = "100K"
tablename = 'test_cursor08'
uri = 'table:' + tablename
nkeys = 500
reopens = check_scenarios([
('regular', dict(reopen=False)),
('reopen', dict(reopen=True))
])
compress = check_scenarios([
('nop', dict(compress='nop')),
('snappy', dict(compress='snappy')),
('zlib', dict(compress='zlib')),
('none', dict(compress='none')),
])
scenarios = number_scenarios(multiply_scenarios('.', reopens, compress))
# Load the compression extension, and enable it for logging.
def conn_config(self, dir):
return 'log=(archive=false,enabled,file_max=%s,' % self.logmax + \
'compressor=%s),' % self.compress + \
'transaction_sync="(method=dsync,enabled)",' + \
self.extensionArg(self.compress)
# Return the wiredtiger_open extension argument for a shared library.
def extensionArg(self, name):
if name == None or name == 'none':
return ''
testdir = os.path.dirname(__file__)
extdir = os.path.join(run.wt_builddir, 'ext/compressors')
extfile = os.path.join(
extdir, name, '.libs', 'libwiredtiger_' + name + '.so')
if not os.path.exists(extfile):
self.skipTest('compression extension "' + extfile + '" not built')
return ',extensions=["' + extfile + '"]'
def test_log_cursor(self):
# print "Creating %s with config '%s'" % (self.uri, self.create_params)
create_params = 'key_format=i,value_format=S'
self.session.create(self.uri, create_params)
c = self.session.open_cursor(self.uri, None)
# A binary value.
value = u'\u0001\u0002abcd\u0003\u0004'
self.session.begin_transaction()
for k in range(self.nkeys):
c[k] = value
self.session.commit_transaction()
c.close()
if self.reopen:
self.reopen_conn()
# Check for these values via a log cursor
c = self.session.open_cursor("log:", None)
count = 0
while c.next() == 0:
# lsn.file, lsn.offset, opcount
keys = c.get_key()
# txnid, rectype, optype, fileid, logrec_key, logrec_value
values = c.get_value()
try:
if value in str(values[5]): # logrec_value
count += 1
except:
pass
c.close()
self.assertEqual(count, self.nkeys)
if __name__ == '__main__':
wttest.run()
|
the-stack_0_3346 | """
pyEngine_problem
"""
# =============================================================================
# Imports
# =============================================================================
from .pyAero_problem import AeroProblem
class EngineProblem(AeroProblem):
"""
The EngineProblem class inherits from the AeroProblem class so that
aerodynamic solvers (AeroSolver) and engine models (EngineModelSMT) can
reference the same flight condition without needing to define redundant
information. The EngineProblem layer simply adds a few possible design
variables and handles some stuff with derivatives.
Parameters
----------
name : str
Name of this Engine problem.
evalFuncs : iterable object containing strings
The names of the functions the user wants evaluated for this
engineProblem.
throttle : float
Initial value for throttle variable
ISA : float
Initial value for ISA temperature variable"""
def __init__(self, name, throttle=1.0, ISA=0.0, **kwargs):
# Initialize AeroProblem
super().__init__(name, **kwargs)
# Set initial throttle or ISA
self.throttle = throttle
self.ISA = ISA
# Update AeroProblem variable sets with possible engine variables
newVars = ["throttle", "ISA"]
self.allVarFuncs += newVars
self.possibleDVs.update(newVars)
self.possibleFunctions.update(newVars)
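

# A minimal usage sketch (an assumption, not part of the original module). It
# relies on the keyword arguments and the addDV() design-variable interface
# inherited from AeroProblem; the flight-condition values are illustrative:
#
#   ep = EngineProblem("cruise", mach=0.78, altitude=10668.0,
#                      areaRef=45.0, chordRef=3.25,
#                      throttle=0.8, ISA=0.0,
#                      evalFuncs=["thrust", "fuelburn"])
#   ep.addDV("throttle", lower=0.0, upper=1.0, scale=1.0)
#   ep.addDV("ISA", lower=-30.0, upper=30.0, scale=0.1)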
|
the-stack_0_3347 | import mock
import pytest
from ocflib.ucb.cas import verify_ticket
@pytest.yield_fixture
def mock_get():
with mock.patch('requests.get') as mock_get:
yield mock_get
GOOD_RESPONSE = """
<cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'>
<cas:authenticationSuccess>
<cas:user>1034192</cas:user>
</cas:authenticationSuccess>
</cas:serviceResponse>"""
BAD_RESPONSE = """
<cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'>
<cas:authenticationFailure code='INVALID_TICKET'>
ticket 'ST-832595-ZOm6NYCTBJO0d41jjL6l-ncas-p3.calnet.berkeley.edu' not recognized
</cas:authenticationFailure>
</cas:serviceResponse>"""
class TestVerifyTicket:
def test_good_ticket(self, mock_get):
mock_get.return_value.text = GOOD_RESPONSE
assert verify_ticket(
'some-ticket',
'https://accounts.ocf.berkeley.edu/',
) == '1034192'
called_url = mock_get.call_args[0][0]
start = 'https://auth.berkeley.edu/cas/serviceValidate?'
assert called_url.startswith(start)
params = called_url[len(start):].split('&')
assert sorted(params) == [
'service=https%3A%2F%2Faccounts.ocf.berkeley.edu%2F',
'ticket=some-ticket',
]
@pytest.mark.parametrize('response', [
BAD_RESPONSE,
'',
'hello world',
])
def test_bad_ticket(self, response, mock_get):
mock_get.return_value.text = response
assert verify_ticket(
'some-ticket',
'https://accounts.ocf.berkeley.edu/',
) is None
|
the-stack_0_3350 | import logging
from Request import *
from RandomUtil import *
# Extremely basic check to determine if a post is what we are looking for
def determineExchangeType(submission):
opFlair = submission.link_flair_text
opTitle = submission.title.lower()
opTitle = opTitle.split("[w]")[0]
# Check to ensure the exchange hasn't already been completed
if opFlair is not None and opFlair.lower() == "closed":
return ""
for cardType in searchDict:
if len(searchDict[cardType]) > 0:
if any(string in opTitle for string in hitWordDict[cardType]):
logging.info("Found a valid %s post: %s", cardType, opTitle)
return cardType
return "" |
the-stack_0_3351 | from models.network import Net
from learning.learning import create_learners, train_model, test_model, Trainer
from learning.testing import CorrelationMatrix, ResidualStatistics
from data.load_data import load_synth_spectra, split_data
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.autograd import Variable
#from utils.errorfuncs import corr_matrix_relresids
plt.rcParams["font.family"] = "serif"
# first train on the npca=6 set
# then test on the npca=15 set
# also perform the splitting for better comparison
wave_grid, qso_cont_npca6, qso_flux_npca6 = load_synth_spectra(small=False, npca=6)
X_train6, X_valid6, X_test6, y_train6, y_valid6, y_test6 = split_data(qso_flux_npca6, qso_cont_npca6)
wave_grid15, qso_cont_npca15, qso_flux_npca15 = load_synth_spectra(small=False, npca=15)
X_train15, X_valid15, X_test15, y_train15, y_valid15, y_test15 = split_data(qso_flux_npca15, qso_cont_npca15)
n_feature = len(X_train6[1])
n_output = len(y_train6[1])
net = Net(n_feature, 100, n_output)
optimizer, criterion = create_learners(net.parameters())
trainer = Trainer(net, optimizer, criterion, batch_size=1000, num_epochs=400)
trainer.train(wave_grid, X_train6, y_train6, X_valid6, y_valid6)
#running_loss, mse_loss_valid, scaler_X, scaler_y = train_model(wave_grid, X_train6, y_train6,\
# X_valid6, y_valid6, net, optimizer,\
# criterion, batch_size=1000, num_epochs=400)
#epochs = np.arange(1, len(running_loss)+1)
# plot the test statistics as a function of wavelength
Stats = ResidualStatistics(X_test15, y_test15, trainer.scaler_X, trainer.scaler_y, net)
fig0, ax0 = Stats.plot_means(wave_grid)
fig0.show()
# test the final model and print the result
#mse_test, corr_matrix = test_model(X_test, y_test, scaler_X, scaler_y, net)
#print ("MSE on test set:", mse_test)
fig, ax = trainer.plot_loss()
#ax.plot(epochs, running_loss, label="Training set")
#ax.plot(epochs, mse_loss_valid, label="Validation set")
#ax.legend()
#ax.set_xlabel("Epoch number")
#ax.set_ylabel("MSE")
#ax.set_yscale("log")
#ax.set_title("Mean squared error on the normalised spectra")
fig.show()
# now plot an example result on the npca = 15 TEST set
rand_indx = np.random.randint(len(X_test15))
rescaled_result = net.full_predict(X_test15[rand_indx], trainer.scaler_X, trainer.scaler_y)
#test_input_normed = normalise(scaler_X, X_test[rand_indx])
#test_input_normed_var = Variable(torch.FloatTensor(test_input_normed.numpy()))
#normed_result = net(test_input_normed_var)
#rescaled_result = scaler_y.backward(normed_result)
fig2, ax2 = plt.subplots(figsize=(7,5), dpi=320)
ax2.plot(wave_grid, X_test15[rand_indx], alpha=0.8, lw=2, label="Input")
ax2.plot(wave_grid, y_test15[rand_indx], alpha=0.8, lw=2, label="Target")
ax2.plot(wave_grid, rescaled_result, alpha=0.8, lw=2, label="Output")
ax2.set_xlabel("Rest-frame wavelength ($\AA$)")
ax2.set_ylabel("Flux (a.u.)")
ax2.legend()
ax2.grid()
ax2.set_title("Example of a predicted quasar spectrum")
fig2.show()
# visualise the correlation matrix for the npca = 15 TEST set
CorrMat = CorrelationMatrix(X_test15, y_test15, trainer.scaler_X, trainer.scaler_y, net)
CorrMat.show(wave_grid)
#fig3, ax3 = plt.subplots()
#im = ax3.pcolormesh(wave_grid, wave_grid, corr_matrix)
#fig3.show() |
the-stack_0_3352 | import tkinter as tk
import tkinter.messagebox as msg
import os
import sqlite3
class Todo(tk.Tk):
def __init__(self, tasks=None):
super().__init__()
if not tasks:
self.tasks = []
else:
self.tasks = tasks
self.tasks_canvas = tk.Canvas(self)
self.tasks_frame = tk.Frame(self.tasks_canvas)
self.text_frame = tk.Frame(self)
self.scrollbar = tk.Scrollbar(self.tasks_canvas, orient="vertical", command=self.tasks_canvas.yview)
self.tasks_canvas.configure(yscrollcommand=self.scrollbar.set)
self.title("To-Do App v3")
self.geometry("300x400")
self.task_create = tk.Text(self.text_frame, height=3, bg="white", fg="black")
self.tasks_canvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
self.canvas_frame = self.tasks_canvas.create_window((0, 0), window=self.tasks_frame, anchor="n")
self.task_create.pack(side=tk.BOTTOM, fill=tk.X)
self.text_frame.pack(side=tk.BOTTOM, fill=tk.X)
self.task_create.focus_set()
self.colour_schemes = [{"bg": "lightgrey", "fg": "black"}, {"bg": "grey", "fg": "white"}]
current_tasks = self.load_tasks()
for task in current_tasks:
task_text = task[0]
self.add_task(None, task_text, True)
self.bind("<Return>", self.add_task)
self.bind("<Configure>", self.on_frame_configure)
self.bind_all("<MouseWheel>", self.mouse_scroll)
self.bind_all("<Button-4>", self.mouse_scroll)
self.bind_all("<Button-5>", self.mouse_scroll)
self.tasks_canvas.bind("<Configure>", self.task_width)
def add_task(self, event=None, task_text=None, from_db=False):
if not task_text:
task_text = self.task_create.get(1.0, tk.END).strip()
if len(task_text) > 0:
new_task = tk.Label(self.tasks_frame, text=task_text, pady=10)
self.set_task_colour(len(self.tasks), new_task)
new_task.bind("<Button-1>", self.remove_task)
new_task.pack(side=tk.TOP, fill=tk.X)
self.tasks.append(new_task)
if not from_db:
self.save_task(task_text)
self.task_create.delete(1.0, tk.END)
def remove_task(self, event):
task = event.widget
if msg.askyesno("Really Delete?", "Delete " + task.cget("text") + "?"):
self.tasks.remove(event.widget)
delete_task_query = "DELETE FROM tasks WHERE task=?"
delete_task_data = (task.cget("text"),)
self.runQuery(delete_task_query, delete_task_data)
event.widget.destroy()
self.recolour_tasks()
def recolour_tasks(self):
for index, task in enumerate(self.tasks):
self.set_task_colour(index, task)
def set_task_colour(self, position, task):
_, task_style_choice = divmod(position, 2)
my_scheme_choice = self.colour_schemes[task_style_choice]
task.configure(bg=my_scheme_choice["bg"])
task.configure(fg=my_scheme_choice["fg"])
def on_frame_configure(self, event=None):
self.tasks_canvas.configure(scrollregion=self.tasks_canvas.bbox("all"))
def task_width(self, event):
canvas_width = event.width
self.tasks_canvas.itemconfig(self.canvas_frame, width = canvas_width)
def mouse_scroll(self, event):
if event.delta:
self.tasks_canvas.yview_scroll(int(-1*(event.delta/120)), "units")
else:
if event.num == 5:
move = 1
else:
move = -1
self.tasks_canvas.yview_scroll(move, "units")
def save_task(self, task):
insert_task_query = "INSERT INTO tasks VALUES (?)"
insert_task_data = (task,)
self.runQuery(insert_task_query, insert_task_data)
def load_tasks(self):
load_tasks_query = "SELECT task FROM tasks"
my_tasks = self.runQuery(load_tasks_query, receive=True)
return my_tasks
@staticmethod
def runQuery(sql, data=None, receive=False):
conn = sqlite3.connect("tasks.db")
cursor = conn.cursor()
if data:
cursor.execute(sql, data)
else:
cursor.execute(sql)
        if receive:
            # Fetch the rows before closing so the connection is not leaked
            results = cursor.fetchall()
            conn.close()
            return results
        else:
            conn.commit()
            conn.close()
@staticmethod
def firstTimeDB():
create_tables = "CREATE TABLE tasks (task TEXT)"
Todo.runQuery(create_tables)
default_task_query = "INSERT INTO tasks VALUES (?)"
default_task_data = ("--- Add Items Here ---",)
Todo.runQuery(default_task_query, default_task_data)
if __name__ == "__main__":
if not os.path.isfile("tasks.db"):
Todo.firstTimeDB()
todo = Todo()
todo.mainloop() |
the-stack_0_3353 | import io
from notifypy.cli import entry
import os
import sys
from setuptools import Command, find_packages, setup
# import notifypy
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="notify_py",
version="0.3.2",
author="Mustafa Mohamed",
author_email="[email protected]",
description="Cross-platform desktop notification library for Python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ms7m/notify-py",
python_requires=">=3.6.0",
packages=find_packages(
exclude=["testing", "*.testing", "*.testing.*", "testing.*", "tests"]
),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={"console_scripts": ["notifypy = notifypy.cli:entry"]},
include_package_data=True,
install_requires=["loguru", "jeepney ; platform_system=='Linux'"],
)
|
the-stack_0_3357 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import re
import unittest
from datetime import timedelta
from unittest import mock
from urllib.parse import quote_plus
from airflow import settings
from airflow.api.common.experimental.trigger_dag import trigger_dag
from airflow.models import DagBag, DagRun, Pool, TaskInstance
from airflow.models.serialized_dag import SerializedDagModel
from airflow.settings import Session
from airflow.utils.timezone import datetime, parse as parse_datetime, utcnow
from airflow.version import version
from airflow.www import app as application
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_pools
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, os.pardir, os.pardir)
)
class TestBase(unittest.TestCase):
@conf_vars({('api', 'enable_experimental_api'): 'true'})
def setUp(self):
self.app = application.create_app(testing=True)
self.appbuilder = self.app.appbuilder # pylint: disable=no-member
self.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'
self.app.config['SECRET_KEY'] = 'secret_key'
self.app.config['CSRF_ENABLED'] = False
self.app.config['WTF_CSRF_ENABLED'] = False
self.client = self.app.test_client()
settings.configure_orm()
self.session = Session
def assert_deprecated(self, resp):
assert 'true' == resp.headers['Deprecation']
assert re.search(
r'\<.+/stable-rest-api/migration.html\>; ' 'rel="deprecation"; type="text/html"',
resp.headers['Link'],
)
class TestApiExperimental(TestBase):
@classmethod
def setUpClass(cls):
super().setUpClass()
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
dagbag = DagBag(include_examples=True)
for dag in dagbag.dags.values():
dag.sync_to_db()
SerializedDagModel.write_dag(dag)
def tearDown(self):
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
super().tearDown()
def test_info(self):
url = '/api/experimental/info'
resp_raw = self.client.get(url)
resp = json.loads(resp_raw.data.decode('utf-8'))
assert version == resp['version']
self.assert_deprecated(resp_raw)
def test_task_info(self):
url_template = '/api/experimental/dags/{}/tasks/{}'
response = self.client.get(url_template.format('example_bash_operator', 'runme_0'))
self.assert_deprecated(response)
assert '"email"' in response.data.decode('utf-8')
assert 'error' not in response.data.decode('utf-8')
assert 200 == response.status_code
response = self.client.get(url_template.format('example_bash_operator', 'DNE'))
assert 'error' in response.data.decode('utf-8')
assert 404 == response.status_code
response = self.client.get(url_template.format('DNE', 'DNE'))
assert 'error' in response.data.decode('utf-8')
assert 404 == response.status_code
def test_get_dag_code(self):
url_template = '/api/experimental/dags/{}/code'
response = self.client.get(url_template.format('example_bash_operator'))
self.assert_deprecated(response)
assert 'BashOperator(' in response.data.decode('utf-8')
assert 200 == response.status_code
response = self.client.get(url_template.format('xyz'))
assert 404 == response.status_code
def test_dag_paused(self):
pause_url_template = '/api/experimental/dags/{}/paused/{}'
paused_url_template = '/api/experimental/dags/{}/paused'
paused_url = paused_url_template.format('example_bash_operator')
response = self.client.get(pause_url_template.format('example_bash_operator', 'true'))
self.assert_deprecated(response)
assert 'ok' in response.data.decode('utf-8')
assert 200 == response.status_code
paused_response = self.client.get(paused_url)
assert 200 == paused_response.status_code
assert {"is_paused": True} == paused_response.json
response = self.client.get(pause_url_template.format('example_bash_operator', 'false'))
assert 'ok' in response.data.decode('utf-8')
assert 200 == response.status_code
paused_response = self.client.get(paused_url)
assert 200 == paused_response.status_code
assert {"is_paused": False} == paused_response.json
def test_trigger_dag(self):
url_template = '/api/experimental/dags/{}/dag_runs'
run_id = 'my_run' + utcnow().isoformat()
response = self.client.post(
url_template.format('example_bash_operator'),
data=json.dumps({'run_id': run_id}),
content_type="application/json",
)
self.assert_deprecated(response)
assert 200 == response.status_code
response_execution_date = parse_datetime(json.loads(response.data.decode('utf-8'))['execution_date'])
assert 0 == response_execution_date.microsecond
# Check execution_date is correct
response = json.loads(response.data.decode('utf-8'))
dagbag = DagBag()
dag = dagbag.get_dag('example_bash_operator')
dag_run = dag.get_dagrun(response_execution_date)
dag_run_id = dag_run.run_id
assert run_id == dag_run_id
assert dag_run_id == response['run_id']
# Test error for nonexistent dag
response = self.client.post(
url_template.format('does_not_exist_dag'), data=json.dumps({}), content_type="application/json"
)
assert 404 == response.status_code
def test_trigger_dag_for_date(self):
url_template = '/api/experimental/dags/{}/dag_runs'
dag_id = 'example_bash_operator'
execution_date = utcnow() + timedelta(hours=1)
datetime_string = execution_date.isoformat()
# Test correct execution with execution date
response = self.client.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': datetime_string}),
content_type="application/json",
)
self.assert_deprecated(response)
assert 200 == response.status_code
assert datetime_string == json.loads(response.data.decode('utf-8'))['execution_date']
dagbag = DagBag()
dag = dagbag.get_dag(dag_id)
dag_run = dag.get_dagrun(execution_date)
assert dag_run, f'Dag Run not found for execution date {execution_date}'
# Test correct execution with execution date and microseconds replaced
response = self.client.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': datetime_string, 'replace_microseconds': 'true'}),
content_type="application/json",
)
assert 200 == response.status_code
response_execution_date = parse_datetime(json.loads(response.data.decode('utf-8'))['execution_date'])
assert 0 == response_execution_date.microsecond
dagbag = DagBag()
dag = dagbag.get_dag(dag_id)
dag_run = dag.get_dagrun(response_execution_date)
assert dag_run, f'Dag Run not found for execution date {execution_date}'
# Test error for nonexistent dag
response = self.client.post(
url_template.format('does_not_exist_dag'),
data=json.dumps({'execution_date': datetime_string}),
content_type="application/json",
)
assert 404 == response.status_code
# Test error for bad datetime format
response = self.client.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': 'not_a_datetime'}),
content_type="application/json",
)
assert 400 == response.status_code
def test_task_instance_info(self):
url_template = '/api/experimental/dags/{}/dag_runs/{}/tasks/{}'
dag_id = 'example_bash_operator'
task_id = 'also_run_this'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(datetime(1990, 1, 1, 1, 1, 1).isoformat())
# Create DagRun
trigger_dag(dag_id=dag_id, run_id='test_task_instance_info_run', execution_date=execution_date)
# Test Correct execution
response = self.client.get(url_template.format(dag_id, datetime_string, task_id))
self.assert_deprecated(response)
assert 200 == response.status_code
assert 'state' in response.data.decode('utf-8')
assert 'error' not in response.data.decode('utf-8')
# Test error for nonexistent dag
response = self.client.get(
url_template.format('does_not_exist_dag', datetime_string, task_id),
)
assert 404 == response.status_code
assert 'error' in response.data.decode('utf-8')
# Test error for nonexistent task
response = self.client.get(url_template.format(dag_id, datetime_string, 'does_not_exist_task'))
assert 404 == response.status_code
assert 'error' in response.data.decode('utf-8')
# Test error for nonexistent dag run (wrong execution_date)
response = self.client.get(url_template.format(dag_id, wrong_datetime_string, task_id))
assert 404 == response.status_code
assert 'error' in response.data.decode('utf-8')
# Test error for bad datetime format
response = self.client.get(url_template.format(dag_id, 'not_a_datetime', task_id))
assert 400 == response.status_code
assert 'error' in response.data.decode('utf-8')
def test_dagrun_status(self):
url_template = '/api/experimental/dags/{}/dag_runs/{}'
dag_id = 'example_bash_operator'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(datetime(1990, 1, 1, 1, 1, 1).isoformat())
# Create DagRun
trigger_dag(dag_id=dag_id, run_id='test_task_instance_info_run', execution_date=execution_date)
# Test Correct execution
response = self.client.get(url_template.format(dag_id, datetime_string))
self.assert_deprecated(response)
assert 200 == response.status_code
assert 'state' in response.data.decode('utf-8')
assert 'error' not in response.data.decode('utf-8')
# Test error for nonexistent dag
response = self.client.get(
url_template.format('does_not_exist_dag', datetime_string),
)
assert 404 == response.status_code
assert 'error' in response.data.decode('utf-8')
# Test error for nonexistent dag run (wrong execution_date)
response = self.client.get(url_template.format(dag_id, wrong_datetime_string))
assert 404 == response.status_code
assert 'error' in response.data.decode('utf-8')
# Test error for bad datetime format
response = self.client.get(url_template.format(dag_id, 'not_a_datetime'))
assert 400 == response.status_code
assert 'error' in response.data.decode('utf-8')
class TestLineageApiExperimental(TestBase):
PAPERMILL_EXAMPLE_DAGS = os.path.join(ROOT_FOLDER, "airflow", "providers", "papermill", "example_dags")
@classmethod
def setUpClass(cls):
super().setUpClass()
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
dagbag = DagBag(include_examples=False, dag_folder=cls.PAPERMILL_EXAMPLE_DAGS)
for dag in dagbag.dags.values():
dag.sync_to_db()
SerializedDagModel.write_dag(dag)
@mock.patch("airflow.settings.DAGS_FOLDER", PAPERMILL_EXAMPLE_DAGS)
def test_lineage_info(self):
url_template = '/api/experimental/lineage/{}/{}'
dag_id = 'example_papermill_operator'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(datetime(1990, 1, 1, 1, 1, 1).isoformat())
# create DagRun
trigger_dag(dag_id=dag_id, run_id='test_lineage_info_run', execution_date=execution_date)
# test correct execution
response = self.client.get(url_template.format(dag_id, datetime_string))
self.assert_deprecated(response)
assert 200 == response.status_code
assert 'task_ids' in response.data.decode('utf-8')
assert 'error' not in response.data.decode('utf-8')
# Test error for nonexistent dag
response = self.client.get(
url_template.format('does_not_exist_dag', datetime_string),
)
assert 404 == response.status_code
assert 'error' in response.data.decode('utf-8')
# Test error for nonexistent dag run (wrong execution_date)
response = self.client.get(url_template.format(dag_id, wrong_datetime_string))
assert 404 == response.status_code
assert 'error' in response.data.decode('utf-8')
# Test error for bad datetime format
response = self.client.get(url_template.format(dag_id, 'not_a_datetime'))
assert 400 == response.status_code
assert 'error' in response.data.decode('utf-8')
class TestPoolApiExperimental(TestBase):
USER_POOL_COUNT = 2
TOTAL_POOL_COUNT = USER_POOL_COUNT + 1 # including default_pool
@classmethod
def setUpClass(cls):
super().setUpClass()
def setUp(self):
super().setUp()
clear_db_pools()
self.pools = [Pool.get_default_pool()]
for i in range(self.USER_POOL_COUNT):
name = f'experimental_{i + 1}'
pool = Pool(
pool=name,
slots=i,
description=name,
)
self.session.add(pool)
self.pools.append(pool)
self.session.commit()
self.pool = self.pools[-1]
def _get_pool_count(self):
response = self.client.get('/api/experimental/pools')
assert response.status_code == 200
return len(json.loads(response.data.decode('utf-8')))
def test_get_pool(self):
response = self.client.get(
f'/api/experimental/pools/{self.pool.pool}',
)
self.assert_deprecated(response)
assert response.status_code == 200
assert json.loads(response.data.decode('utf-8')) == self.pool.to_json()
def test_get_pool_non_existing(self):
response = self.client.get('/api/experimental/pools/foo')
assert response.status_code == 404
assert json.loads(response.data.decode('utf-8'))['error'] == "Pool 'foo' doesn't exist"
def test_get_pools(self):
response = self.client.get('/api/experimental/pools')
self.assert_deprecated(response)
assert response.status_code == 200
pools = json.loads(response.data.decode('utf-8'))
assert len(pools) == self.TOTAL_POOL_COUNT
for i, pool in enumerate(sorted(pools, key=lambda p: p['pool'])):
assert pool == self.pools[i].to_json()
def test_create_pool(self):
response = self.client.post(
'/api/experimental/pools',
data=json.dumps(
{
'name': 'foo',
'slots': 1,
'description': '',
}
),
content_type='application/json',
)
self.assert_deprecated(response)
assert response.status_code == 200
pool = json.loads(response.data.decode('utf-8'))
assert pool['pool'] == 'foo'
assert pool['slots'] == 1
assert pool['description'] == ''
assert self._get_pool_count() == self.TOTAL_POOL_COUNT + 1
def test_create_pool_with_bad_name(self):
for name in ('', ' '):
response = self.client.post(
'/api/experimental/pools',
data=json.dumps(
{
'name': name,
'slots': 1,
'description': '',
}
),
content_type='application/json',
)
assert response.status_code == 400
assert json.loads(response.data.decode('utf-8'))['error'] == "Pool name shouldn't be empty"
assert self._get_pool_count() == self.TOTAL_POOL_COUNT
def test_delete_pool(self):
response = self.client.delete(
f'/api/experimental/pools/{self.pool.pool}',
)
self.assert_deprecated(response)
assert response.status_code == 200
assert json.loads(response.data.decode('utf-8')) == self.pool.to_json()
assert self._get_pool_count() == self.TOTAL_POOL_COUNT - 1
def test_delete_pool_non_existing(self):
response = self.client.delete(
'/api/experimental/pools/foo',
)
assert response.status_code == 404
assert json.loads(response.data.decode('utf-8'))['error'] == "Pool 'foo' doesn't exist"
def test_delete_default_pool(self):
clear_db_pools()
response = self.client.delete(
'/api/experimental/pools/default_pool',
)
assert response.status_code == 400
assert json.loads(response.data.decode('utf-8'))['error'] == "default_pool cannot be deleted"
|
the-stack_0_3358 | # Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# [email protected]
################################################################################
""" Feature module
Provides:
o Feature - class to wrap Bio.SeqFeature objects with drawing information
For drawing capabilities, this module uses reportlab to define colors:
http://www.reportlab.com
For dealing with biological information, the package uses BioPython:
http://www.biopython.org
"""
# ReportLab imports
from reportlab.lib import colors
# GenomeDiagram imports
from ._Colors import ColorTranslator
class Feature(object):
""" Class to wrap Bio.SeqFeature objects for GenomeDiagram
Provides:
Methods:
o __init__(self, parent=None, feature_id=None, feature=None,
color=colors.lightgreen) Called when the feature is
instantiated
o set_feature(self, feature) Wrap the passed feature
o get_feature(self) Return the unwrapped Bio.SeqFeature object
o set_color(self, color) Set the color in which the feature will
be drawn (accepts multiple formats: reportlab color.Color()
tuple and color.name, or integer representing Artemis color
o get_color(self) Returns color.Color tuple of the feature's color
o __getattr__(self, name) Catches attribute requests and passes them to
the wrapped Bio.SeqFeature object
Attributes:
o parent FeatureSet, container for the object
o id Unique id
o color color.Color, color to draw the feature
o hide Boolean for whether the feature will be drawn or not
o sigil String denoting the type of sigil to use for the feature.
Currently either "BOX" or "ARROW" are supported.
o arrowhead_length Float denoting length of the arrow head to be drawn,
relative to the bounding box height. The arrow shaft
takes up the remainder of the bounding box's length.
o arrowshaft_height Float denoting length of the representative arrow
shaft to be drawn, relative to the bounding box height.
The arrow head takes the full height of the bound box.
o name_qualifiers List of Strings, describes the qualifiers that may
contain feature names in the wrapped Bio.SeqFeature object
o label Boolean, 1 if the label should be shown
o label_font String describing the font to use for the feature label
o label_size Int describing the feature label font size
o label_color color.Color describing the feature label color
o label_angle Float describing the angle through which to rotate the
feature label in degrees (default = 45, linear only)
o label_position String, 'start', 'end' or 'middle' denoting where
to place the feature label. Leave as None for the default
which is 'start' for linear diagrams, and at the bottom of
the feature as drawn on circular diagrams.
o label_strand Integer -1 or +1 to explicitly place the label on the
    forward or reverse strand. Default (None) follows the
feature's strand. Use -1 to put labels under (linear) or
inside (circular) the track, +1 to put them above (linear)
or outside (circular) the track.
o locations List of tuples of (start, end) ints describing where the
feature and any subfeatures start and end
o type String denoting the feature type
o name String denoting the feature name
o strand Int describing the strand on which the feature is found
"""
def __init__(self, parent=None, feature_id=None, feature=None,
color=colors.lightgreen, label=0, border=None, colour=None):
""" __init__(self, parent=None, feature_id=None, feature=None,
color=colors.lightgreen, label=0)
o parent FeatureSet containing the feature
o feature_id Unique id for the feature
o feature Bio.SeqFeature object to be wrapped
o color color.Color Color to draw the feature (overridden
by backwards compatible argument with UK spelling,
colour). Either argument is overridden if 'color'
is found in feature qualifiers
o border color.Color Color to draw the feature border, use
None for the same as the fill color, False for no border.
o label Boolean, 1 if the label should be shown
"""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
self._colortranslator = ColorTranslator()
# Initialise attributes
self.parent = parent
self.id = feature_id
self.color = color # default color to draw the feature
self.border = border
self._feature = None # Bio.SeqFeature object to wrap
self.hide = 0 # show by default
self.sigil = 'BOX'
self.arrowhead_length = 0.5 # 50% of the box height
self.arrowshaft_height = 0.4 # 40% of the box height
self.name_qualifiers = ['gene', 'label', 'name', 'locus_tag', 'product']
self.label = label
self.label_font = 'Helvetica'
self.label_size = 6
self.label_color = colors.black
self.label_angle = 45
self.label_position = None #Expect 'start', 'middle', or 'end' (plus aliases)
self.label_strand = None #Expect +1 or -1 if overriding this
if feature is not None:
self.set_feature(feature)
def set_feature(self, feature):
""" set_feature(self, feature)
o feature Bio.SeqFeature object to be wrapped
Defines the Bio.SeqFeature object to be wrapped
"""
self._feature = feature
self.__process_feature()
def __process_feature(self):
""" __process_feature(self)
Examine the feature to be wrapped, and set some of the Feature's
properties accordingly
"""
self.locations = []
bounds = []
#This will be a list of length one for simple FeatureLocation:
for location in self._feature.location.parts:
start = location.nofuzzy_start
end = location.nofuzzy_end
#if start > end and self.strand == -1:
# start, end = end, start
self.locations.append((start, end))
bounds += [start, end]
self.type = str(self._feature.type) # Feature type
#TODO - Strand can vary with subfeatures (e.g. mixed strand tRNA)
if self._feature.strand is None:
#This is the SeqFeature default (None), but the drawing code
#only expects 0, +1 or -1.
self.strand = 0
else:
self.strand = int(self._feature.strand) # Feature strand
if 'color' in self._feature.qualifiers: # Artemis color (if present)
self.color = self._colortranslator.artemis_color(
self._feature.qualifiers['color'][0])
self.name = self.type
for qualifier in self.name_qualifiers:
if qualifier in self._feature.qualifiers:
self.name = self._feature.qualifiers[qualifier][0]
break
#Note will be 0 to N for origin wrapping feature on genome of length N
self.start, self.end = min(bounds), max(bounds)
def get_feature(self):
""" get_feature(self) -> Bio.SeqFeature
Returns the unwrapped Bio.SeqFeature object
"""
return self._feature
def set_colour(self, colour):
"""Backwards compatible variant of set_color(self, color) using UK spelling."""
color = self._colortranslator.translate(colour)
self.color = color
def set_color(self, color):
""" set_color(self, color)
o color The color to draw the feature - either a colors.Color
object, an RGB tuple of floats, or an integer
corresponding to colors in colors.txt
Set the color in which the feature will be drawn
"""
#TODO - Make this into the set method for a color property?
color = self._colortranslator.translate(color)
self.color = color
def __getattr__(self, name):
""" __getattr__(self, name) -> various
If the Feature class doesn't have the attribute called for,
check in self._feature for it
"""
return getattr(self._feature, name) # try to get the attribute from the feature
################################################################################
# RUN AS SCRIPT
################################################################################
if __name__ == '__main__':
# Test code
gdf = Feature()
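    # A slightly fuller sketch (illustrative only: the SeqFeature coordinates,
    # qualifiers and color below are assumptions, not part of the original test).
    from Bio.SeqFeature import SeqFeature, FeatureLocation
    sf = SeqFeature(FeatureLocation(50, 250, strand=+1), type="CDS",
                    qualifiers={"gene": ["example_gene"]})
    gdf2 = Feature(feature_id="F1", feature=sf, color=colors.red, label=1)
    print(gdf2.name, gdf2.locations, gdf2.strand)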
|
the-stack_0_3360 | # -*- coding: utf-8 -*-
"""
Setup
-----
Install troposphere in the current python environment.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
# ---- Future
from __future__ import print_function
from __future__ import with_statement
# ---- System
import os
from setuptools import setup
# ----------------------------------------------------------------------------
# Helper Functions
# ----------------------------------------------------------------------------
def file_contents(file_name):
"""Given a file name to a valid file returns the file object."""
curr_dir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(curr_dir, file_name)) as the_file:
contents = the_file.read()
return contents
def get_version():
curr_dir = os.path.abspath(os.path.dirname(__file__))
with open(curr_dir + "/troposphere/__init__.py", "r") as init_version:
for line in init_version:
if "__version__" in line:
return str(line.split("=")[-1].strip(" ")[1:-2])
# ----------------------------------------------------------------------------
# Setup
# ----------------------------------------------------------------------------
setup(
name='troposphere',
version=get_version(),
description="AWS CloudFormation creation library",
long_description=file_contents("README.rst"),
long_description_content_type='text/x-rst',
author="Mark Peek",
author_email="[email protected]",
license="New BSD license",
url="https://github.com/cloudtools/troposphere",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: BSD",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 2.7",
],
packages=[
'troposphere',
'troposphere.openstack',
'troposphere.helpers'
],
scripts=[
'scripts/cfn',
'scripts/cfn2py'
],
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
install_requires=file_contents("requirements.txt"),
test_suite="tests",
tests_require=["awacs>=0.8"],
extras_require={'policy': ['awacs>=0.8']},
use_2to3=True,
)
|
the-stack_0_3362 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
def __init__(self, gamma=0, alpha=None, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
if isinstance(alpha,(float,int)): self.alpha = torch.Tensor([alpha,1-alpha])
if isinstance(alpha,list): self.alpha = torch.Tensor(alpha)
self.size_average = size_average
def forward(self, input, target):
if input.dim()>2:
input = input.view(input.size(0),input.size(1),-1) # N,C,H,W => N,C,H*W
input = input.transpose(1,2) # N,C,H*W => N,H*W,C
input = input.contiguous().view(-1,input.size(2)) # N,H*W,C => N*H*W,C
target = target.view(-1,1)
        logpt = F.log_softmax(input, dim=1)
logpt = logpt.gather(1,target)
logpt = logpt.view(-1)
pt = Variable(logpt.data.exp())
if self.alpha is not None:
if self.alpha.type()!=input.data.type():
self.alpha = self.alpha.type_as(input.data)
at = self.alpha.gather(0,target.data.view(-1))
logpt = logpt * Variable(at)
loss = -1 * (1-pt)**self.gamma * logpt
if self.size_average: return loss.mean()
else: return loss.sum()
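# Minimal usage sketch (hedged: the shapes and gamma/alpha values below are
# illustrative assumptions, not part of the original module).
if __name__ == '__main__':
    logits = torch.randn(8, 5, requires_grad=True)   # N=8 samples, C=5 classes
    targets = torch.randint(0, 5, (8,))              # ground-truth class indices
    criterion = FocalLoss(gamma=2, alpha=0.25)
    loss = criterion(logits, targets)
    loss.backward()
    print(loss.item())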
|
the-stack_0_3364 | # Copyright 2019-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions for constructing/calculating the means, variances and covariances of
Gaussian states.
"""
from itertools import product
from scipy.special import factorial
import numpy as np
from .._hafnian import hafnian, reduction
from .._torontonian import threshold_detection_prob
from .conversions import (
reduced_gaussian,
Qmat,
Xmat,
complex_to_real_displacements
)
def photon_number_mean(mu, cov, j, hbar=2):
r""" Calculate the mean photon number of mode j of a Gaussian state.
Args:
mu (array): vector of means of the Gaussian state using the ordering
:math:`[q_1, q_2, \dots, q_n, p_1, p_2, \dots, p_n]`
cov (array): the covariance matrix of the Gaussian state
j (int): the j :sup:`th` mode
hbar (float): the ``hbar`` convention used in the commutation
relation :math:`[q, p]=i\hbar`
Returns:
float: the mean photon number in mode :math:`j`.
"""
num_modes = len(mu) // 2
return (
mu[j] ** 2
+ mu[j + num_modes] ** 2
+ cov[j, j]
+ cov[j + num_modes, j + num_modes]
- hbar
) / (2 * hbar)
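# Quick sanity check for photon_number_mean (hedged example, not part of the library):
# for the single-mode vacuum with hbar=2, mu = np.zeros(2) and cov = np.identity(2),
# so the formula above gives (0 + 0 + 1 + 1 - 2) / (2 * 2) = 0 photons, as expected.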
def photon_number_mean_vector(mu, cov, hbar=2):
r""" Calculate the mean photon number of each of the modes in a Gaussian state
Args:
mu (array): vector of means of the Gaussian state using the ordering
:math:`[q_1, q_2, \dots, q_n, p_1, p_2, \dots, p_n]`
cov (array): the covariance matrix of the Gaussian state
hbar (float): the ``hbar`` convention used in the commutation
relation :math:`[q, p]=i\hbar`
Returns:
array: the vector of means of the photon number distribution
"""
N = len(mu) // 2
return np.array([photon_number_mean(mu, cov, j, hbar=hbar) for j in range(N)])
def photon_number_covar(mu, cov, j, k, hbar=2):
r""" Calculate the variance/covariance of the photon number distribution
of a Gaussian state.
Implements the covariance matrix of the photon number distribution of a
Gaussian state according to the Last two eq. of Part II. in
`'Multidimensional Hermite polynomials and photon distribution for polymode
mixed light', Dodonov et al. <https://journals.aps.org/pra/abstract/10.1103/PhysRevA.50.813>`_
.. math::
\sigma_{n_j n_j} &= \frac{1}{2}\left(T_j^2 - 2d_j - \frac{1}{2}\right)
+ \left<\mathbf{Q}_j\right>\mathcal{M}_j\left<\mathbf{Q}_j\right>, \\
\sigma_{n_j n_k} &= \frac{1}{2}\mathrm{Tr}\left(\Lambda_j \mathbf{M} \Lambda_k \mathbf{M}\right)
+ \left<\mathbf{Q}\right>\Lambda_j \mathbf{M} \Lambda_k\left<\mathbf{Q}\right>,
where :math:`T_j` and :math:`d_j` are the trace and the determinant of
:math:`2 \times 2` matrix :math:`\mathcal{M}_j` whose elements coincide
with the nonzero elements of matrix :math:`\mathbf{M}_j = \Lambda_j \mathbf{M} \Lambda_k`
while the two-vector :math:`\mathbf{Q}_j` has the components :math:`(q_j, p_j)`.
:math:`2N \times 2N` projector matrix :math:`\Lambda_j` has only two nonzero
elements: :math:`\left(\Lambda_j\right)_{jj} = \left(\Lambda_j\right)_{j+N,j+N} = 1`.
    Note that the convention for ``mu`` used here differs from the one used in Dodonov et al.;
    they both provide the same results in this particular case.
Also note that the original reference of Dodonov et al. has an incorrect prefactor of 1/2
in the last terms of the last equation above.
Args:
mu (array): vector of means of the Gaussian state using the ordering
:math:`[q_1, q_2, \dots, q_n, p_1, p_2, \dots, p_n]`
cov (array): the covariance matrix of the Gaussian state
j (int): the j :sup:`th` mode
k (int): the k :sup:`th` mode
hbar (float): the ``hbar`` convention used in the commutation
relation :math:`[q, p]=i\hbar`
Returns:
float: the covariance for the photon numbers at modes :math:`j` and :math:`k`.
"""
if j == k:
mu, cov = reduced_gaussian(mu, cov, [j])
term_1 = 0.5 * np.trace(cov) ** 2 - np.linalg.det(cov)
term_2 = mu @ cov @ mu
return ((term_1 + term_2) / hbar ** 2) - 0.25
mu, cov = reduced_gaussian(mu, cov, [j, k])
term_1 = cov[0, 1] ** 2 + cov[0, 3] ** 2 + cov[2, 1] ** 2 + cov[2, 3] ** 2
term_2 = (
cov[0, 1] * mu[0] * mu[1]
+ cov[2, 1] * mu[1] * mu[2]
+ cov[0, 3] * mu[0] * mu[3]
+ cov[2, 3] * mu[2] * mu[3]
)
return (term_1 + 2 * term_2) / (2 * hbar ** 2)
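# Hedged example for photon_number_covar: for the single-mode vacuum
# (mu = np.zeros(2), cov = np.identity(2), hbar=2) the j == k branch gives
# term_1 = 0.5 * 2**2 - 1 = 1 and term_2 = 0, so the photon-number variance is
# 1 / 4 - 0.25 = 0, as expected for a vacuum state.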
def photon_number_covmat(mu, cov, hbar=2):
r""" Calculate the covariance matrix of the photon number distribution of a
Gaussian state.
Args:
mu (array): vector of means of the Gaussian state using the ordering
:math:`[q_1, q_2, \dots, q_n, p_1, p_2, \dots, p_n]`
cov (array): the covariance matrix of the Gaussian state
hbar (float): the ``hbar`` convention used in the commutation
relation :math:`[q, p]=i\hbar`
Returns:
array: the covariance matrix of the photon number distribution
"""
N = len(mu) // 2
pnd_cov = np.zeros((N, N))
for i in range(N):
for j in range(i+1):
pnd_cov[i][j] = photon_number_covar(mu, cov, i, j, hbar=hbar)
pnd_cov[j][i] = pnd_cov[i][j]
return pnd_cov
def photon_number_expectation(mu, cov, modes, hbar=2):
r"""Calculates the expectation value of the product of the number operator of the modes in a Gaussian state.
Args:
mu (array): length-:math:`2N` means vector in xp-ordering.
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
modes (list): list of modes
hbar (float): value of hbar in the uncertainty relation.
Returns:
(float): expectation value of the product of the number operators of the modes.
"""
n, _ = cov.shape
n_modes = n // 2
rpt = np.zeros([n], dtype=int)
for i in modes:
rpt[i] = 1
rpt[i + n_modes] = 1
return normal_ordered_expectation(mu, cov, rpt, hbar=hbar)
def photon_number_squared_expectation(mu, cov, modes, hbar=2):
r"""Calculates the expectation value of the square of the product of the number operator of the modes in
a Gaussian state.
Args:
mu (array): length-:math:`2N` means vector in xp-ordering.
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
modes (list): list of modes
hbar (float): value of hbar in the uncertainty relation.
Returns:
(float): expectation value of the square of the product of the number operator of the modes.
"""
n_modes = len(modes)
mu_red, cov_red = reduced_gaussian(mu, cov, modes)
result = 0
for item in product([1, 2], repeat=n_modes):
rpt = item + item
term = normal_ordered_expectation(mu_red, cov_red, rpt, hbar=hbar)
result += term
return result
def normal_ordered_expectation(mu, cov, rpt, hbar=2):
r"""Calculates the expectation value of the normal ordered product
:math:`\prod_{i=0}^{N-1} a_i^{\dagger n_i} \prod_{j=0}^{N-1} a_j^{m_j}` with respect to an N-mode Gaussian state,
where :math:`\text{rpt}=(n_0, n_1, \ldots, n_{N-1}, m_0, m_1, \ldots, m_{N-1})`.
Args:
mu (array): length-:math:`2N` means vector in xp-ordering.
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
rpt (list): integers specifying the terms to calculate.
hbar (float): value of hbar in the uncertainty relation.
Returns:
(float): expectation value of the normal ordered product of operators
"""
return s_ordered_expectation(mu, cov, rpt, hbar, s=1)
def s_ordered_expectation(mu, cov, rpt, hbar=2, s=0):
r"""Calculates the expectation value of the s-ordered product
    obtained by taking derivatives of the characteristic function of a Gaussian state.
    Here, :math:`\text{rpt}=(n_0, n_1, \ldots, n_{N-1}, m_0, m_1, \ldots, m_{N-1})`
indicates how many derivatives are taken with respect to the complex argument and its
conjugate.
The values :math:`s=\{1,0,-1\}` correspond respectively to normal, symmetric and antinormal order.
Args:
mu (array): length-:math:`2N` means vector in xp-ordering.
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
rpt (list): integers specifying the terms to calculate.
hbar (float): value of hbar in the uncertainty relation.
        s (float): value setting the ordering; it must be between -1 and 1.
    Returns:
        (float): expectation value of the s-ordered product of operators
"""
# The following seven lines are written so that we remove from the calculation the
# modes k that we don't care about. These modes have rpt[k] = rpt[k+M] = 0
if np.allclose(rpt, 0):
return 1.0
M = len(cov) // 2
modes = np.where(np.array(rpt[0:M]) + np.array(rpt[M : 2 * M]) != 0)[0]
mu, cov = reduced_gaussian(mu, cov, list(modes))
ind = list(modes) + list(modes + M)
rpt = list(np.array(rpt)[np.array(ind)])
alpha = complex_to_real_displacements(mu, hbar=hbar)
n = len(cov)
V = (Qmat(cov, hbar=hbar) - 0.5 * (s + 1) * np.identity(n)) @ Xmat(n // 2)
A = reduction(V, rpt)
if np.allclose(mu, 0):
return hafnian(A)
np.fill_diagonal(A, reduction(np.conj(alpha), rpt))
return hafnian(A, loop=True)
def mean_clicks(cov, hbar=2):
r""" Calculates the total mean number of clicks when a zero-mean gaussian state
is measured using threshold detectors.
    Args:
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering
hbar (float): the value of :math:`\hbar` in the commutation relation :math:`[\x,\p]=i\hbar`
    Returns:
float: mean number of clicks
"""
n, _ = cov.shape
nmodes = n // 2
Q = Qmat(cov, hbar=hbar)
meanc = 1.0 * nmodes
for i in range(nmodes):
det_val = np.real(Q[i, i] * Q[i + nmodes, i + nmodes] - Q[i + nmodes, i] * Q[i, i + nmodes])
meanc -= 1.0 / np.sqrt(det_val)
return meanc
def variance_clicks(cov, hbar=2):
r""" Calculates the variance of the total number of clicks when a zero-mean gaussian state
is measured using threshold detectors.
    Args:
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering
hbar (float): the value of :math:`\hbar` in the commutation relation :math:`[\x,\p]=i\hbar`
    Returns:
float: variance in the total number of clicks
"""
n, _ = cov.shape
means = np.zeros([n])
nmodes = n // 2
Q = Qmat(cov, hbar=hbar)
vac_probs = np.array(
[
np.real(Q[i, i] * Q[i + nmodes, i + nmodes] - Q[i + nmodes, i] * Q[i, i + nmodes])
for i in range(nmodes)
]
)
vac_probs = np.sqrt(vac_probs)
vac_probs = 1 / vac_probs
term1 = np.sum(vac_probs * (1 - vac_probs))
term2 = 0
for i in range(nmodes):
for j in range(i):
_, Qij = reduced_gaussian(means, Q, [i, j])
prob_vac_ij = np.linalg.det(Qij).real
prob_vac_ij = 1.0 / np.sqrt(prob_vac_ij)
term2 += prob_vac_ij - vac_probs[i] * vac_probs[j]
return term1 + 2 * term2
def _coeff_normal_ordered(m, k):
r"""Returns the coefficients giving the expansion of a photon number power in terms of normal ordered power of creation
and annihilation operators. The coefficient is given by :math:`\sum_{\mu=0}^k \frac{(-1)^{k-\mu} \mu^m}{\mu!(k-\mu)!}`.
Args:
m (int): power of the photon number operator, :math:`(a^\dagger a)^m `.
k (int): power of the normal ordered term, :math:`a^{\dagger i} a^i`.
Returns:
(float): expansion coefficient
"""
return sum(
[
(1 / (factorial(mu) * factorial(k - mu)))
* ((-1) ** (k - mu) * (mu ** m))
for mu in range(0, k + 1)
]
)
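# Hedged example for _coeff_normal_ordered: with m = 2 the coefficients are
# _coeff_normal_ordered(2, 1) = 1 and _coeff_normal_ordered(2, 2) = 1, matching
# the normal-ordered identity (a^\dagger a)^2 = a^\dagger a + a^{\dagger 2} a^2.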
def photon_number_moment(mu, cov, indices, hbar=2):
r"""Calculates the expectation value of product of powers of photon number operators of a Gaussian state.
The powers are specified by a dictionary with modes as keys and powers as values.
The calculation is performed by first writing any power of the photon number as
:math:`(a^\dagger a)^m = \sum_{k=1}^m c_k a^{\dagger k} a^k`
where the coefficients :math:`c_i` are provided by the function `_coeff_normal_ordered`.
Args:
mu (array): length-:math:`2N` means vector in xp-ordering.
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
indices (dictionary): specification of the different modes and their power of their photon number
hbar (float): value of hbar in the uncertainty relation.
Returns:
float: the expectation value of the photon number powers.
"""
N = len(cov) // 2
list_indices = [indices[key] for key in indices]
modes = list(indices)
# Find the expansion coefficients of all the different powers
expansion_coeff = [
[_coeff_normal_ordered(indices[key], i) for i in range(1, 1 + indices[key])]
for key in indices
]
values = [list(range(i)) for i in list_indices]
net_sum = 0.0
# Construct the product of each possible term appearing in the normal ordered expansion
for item in product(*values):
rpt = [0] * N
for i, key in enumerate(modes):
rpt[key] = item[i] + 1
rpt = rpt + rpt
prod_coeff = np.prod([expansion_coeff[i][coeff] for i, coeff in enumerate(item)])
net_sum += prod_coeff * s_ordered_expectation(mu, cov, rpt, s=1, hbar=hbar)
return np.real_if_close(net_sum)
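# Hedged usage note for photon_number_moment: the ``indices`` argument maps modes to
# powers, e.g. photon_number_moment(mu, cov, {0: 1, 1: 2}) returns <n_0 n_1^2> for the
# Gaussian state described by (mu, cov); the variable names here are illustrative.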
def partition(collection):
"""Generate all set partitions of a collection.
Taken from: https://stackoverflow.com/a/30134039
Args:
collection (sequence): set to find partitions of
Yields:
list[list]: set partition of collection
"""
if len(collection) == 1:
yield [collection]
return
first = collection[0]
for smaller in partition(collection[1:]):
for n, subset in enumerate(smaller):
yield smaller[:n] + [[first] + subset] + smaller[n+1:]
yield [[first]] + smaller
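# Hedged example for partition: partition([1, 2, 3]) yields the 5 set partitions
# (the Bell number B_3), i.e. [[1, 2, 3]], [[1], [2, 3]], [[1, 2], [3]],
# [[2], [1, 3]] and [[1], [2], [3]], though the generation order is an
# implementation detail.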
def _list_to_freq_dict(words):
"""Convert between a list which of "words" and a dictionary
which shows how many times each word appears in word
Args:
words (list): list of words
Returns:
dict : how many times a word appears. key is word, value is multiplicity
"""
return {i : words.count(i) for i in set(words)}
def photon_number_cumulant(mu, cov, modes, hbar=2):
r"""Calculates the photon-number cumulant of the modes in the Gaussian state.
Args:
mu (array): length-:math:`2N` means vector in xp-ordering.
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
modes (list or array): list of modes. Note that it can have repetitions.
hbar (float): value of hbar in the uncertainty relation.
Returns:
(float): the cumulant
"""
modes = list(modes) # turns modes from array to list if passed in as array
kappa = 0
for pi in partition(modes):
size = len(pi)
term = factorial(size - 1) * (-1) ** (size - 1)
for B in pi:
indices = _list_to_freq_dict(B)
term *= photon_number_moment(mu, cov, indices, hbar=hbar)
kappa += term
return kappa
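# Hedged example for photon_number_cumulant: with modes=[0] the sum reduces to the
# mean photon number of mode 0, and with modes=[0, 0] it gives the photon-number
# variance, since the only partitions of [0, 0] are [[0, 0]] and [[0], [0]].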
def click_cumulant(mu, cov, modes, hbar=2):
r"""Calculates the click cumulant of the modes in the Gaussian state.
Args:
mu (array): length-:math:`2N` means vector in xp-ordering.
cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
modes (list or array): list of modes.
hbar (float): value of hbar in the uncertainty relation.
Returns:
(float): the cumulant
"""
modes = list(modes) # turns modes from array to list if passed in as array
kappa = 0
for pi in partition(modes):
size = len(pi)
term = factorial(size - 1) * (-1) ** (size - 1)
for B in pi:
B = list(set(B)) # remove repetitions
pattern = np.ones_like(B)
mu_red, cov_red = reduced_gaussian(mu, cov, B)
summand = threshold_detection_prob(mu_red, cov_red, pattern, hbar=hbar)
term *= summand
kappa += term
return kappa
|
the-stack_0_3365 | from __future__ import absolute_import
try:
import holoviews as hv
except ImportError:
hv = None
import pytest
from bokeh.plotting import figure
from panel.layout import Row
from panel.links import Link
from panel.pane import Bokeh, HoloViews
from panel.widgets import FloatSlider, RangeSlider, ColorPicker, TextInput, DatetimeInput
from panel.tests.util import hv_available
def test_widget_link_bidirectional():
t1 = TextInput()
t2 = TextInput()
t1.link(t2, value='value', bidirectional=True)
t1.value = 'ABC'
assert t1.value == 'ABC'
assert t2.value == 'ABC'
t2.value = 'DEF'
assert t1.value == 'DEF'
assert t2.value == 'DEF'
def test_widget_jslink_bidirectional(document, comm):
t1 = TextInput()
t2 = TextInput()
t1.jslink(t2, value='value', bidirectional=True)
row = Row(t1, t2)
model = row.get_root(document, comm)
tm1, tm2 = model.children
link1_customjs = tm1.js_property_callbacks['change:value'][-1]
link2_customjs = tm2.js_property_callbacks['change:value'][-1]
assert link1_customjs.args['source'] is tm1
assert link2_customjs.args['source'] is tm2
assert link1_customjs.args['target'] is tm2
assert link2_customjs.args['target'] is tm1
def test_widget_link_source_param_not_found():
t1 = TextInput()
t2 = TextInput()
with pytest.raises(ValueError) as excinfo:
t1.jslink(t2, value1='value')
assert "Could not jslink \'value1\' parameter" in str(excinfo)
def test_widget_link_target_param_not_found():
t1 = TextInput()
t2 = TextInput()
with pytest.raises(ValueError) as excinfo:
t1.jslink(t2, value='value1')
assert "Could not jslink \'value1\' parameter" in str(excinfo)
def test_widget_link_no_transform_error():
t1 = DatetimeInput()
t2 = TextInput()
with pytest.raises(ValueError) as excinfo:
t1.jslink(t2, value='value')
assert "Cannot jslink \'value\' parameter on DatetimeInput object" in str(excinfo)
def test_widget_link_no_target_transform_error():
t1 = DatetimeInput()
t2 = TextInput()
with pytest.raises(ValueError) as excinfo:
t2.jslink(t1, value='value')
assert ("Cannot jslink \'value\' parameter on TextInput object "
"to \'value\' parameter on DatetimeInput object") in str(excinfo)
@hv_available
def test_pnwidget_hvplot_links(document, comm):
size_widget = FloatSlider(value=5, start=1, end=10)
points1 = hv.Points([1, 2, 3])
size_widget.jslink(points1, value='glyph.size')
row = Row(points1, size_widget)
model = row.get_root(document, comm=comm)
hv_views = row.select(HoloViews)
widg_views = row.select(FloatSlider)
assert len(hv_views) == 1
assert len(widg_views) == 1
slider = widg_views[0]._models[model.ref['id']][0]
scatter = hv_views[0]._plots[model.ref['id']][0].handles['glyph']
link_customjs = slider.js_property_callbacks['change:value'][-1]
assert link_customjs.args['source'] is slider
assert link_customjs.args['target'] is scatter
code = """
var value = source['value'];
value = value;
value = value;
try {
var property = target.properties['size'];
if (property !== undefined) { property.validate(value); }
} catch(err) {
console.log('WARNING: Could not set size on target, raised error: ' + err);
return;
}
try {
target['size'] = value;
} catch(err) {
console.log(err)
}
"""
assert link_customjs.code == code
@hv_available
def test_bkwidget_hvplot_links(document, comm):
from bokeh.models import Slider
bokeh_widget = Slider(value=5, start=1, end=10, step=1e-1)
points1 = hv.Points([1, 2, 3])
Link(bokeh_widget, points1, properties={'value': 'glyph.size'})
row = Row(points1, bokeh_widget)
model = row.get_root(document, comm=comm)
hv_views = row.select(HoloViews)
assert len(hv_views) == 1
slider = bokeh_widget
scatter = hv_views[0]._plots[model.ref['id']][0].handles['glyph']
link_customjs = slider.js_property_callbacks['change:value'][-1]
assert link_customjs.args['source'] is slider
assert link_customjs.args['target'] is scatter
code = """
var value = source['value'];
value = value;
value = value;
try {
var property = target.properties['size'];
if (property !== undefined) { property.validate(value); }
} catch(err) {
console.log('WARNING: Could not set size on target, raised error: ' + err);
return;
}
try {
target['size'] = value;
} catch(err) {
console.log(err)
}
"""
assert link_customjs.code == code
def test_bkwidget_bkplot_links(document, comm):
from bokeh.models import Slider
bokeh_widget = Slider(value=5, start=1, end=10, step=1e-1)
bokeh_fig = figure()
scatter = bokeh_fig.scatter([1, 2, 3], [1, 2, 3])
Link(bokeh_widget, scatter, properties={'value': 'glyph.size'})
row = Row(bokeh_fig, bokeh_widget)
row.get_root(document, comm=comm)
slider = bokeh_widget
link_customjs = slider.js_property_callbacks['change:value'][-1]
assert link_customjs.args['source'] is slider
assert link_customjs.args['target'] is scatter.glyph
code = """
var value = source['value'];
value = value;
value = value;
try {
var property = target.properties['size'];
if (property !== undefined) { property.validate(value); }
} catch(err) {
console.log('WARNING: Could not set size on target, raised error: ' + err);
return;
}
try {
target['size'] = value;
} catch(err) {
console.log(err)
}
"""
assert link_customjs.code == code
def test_widget_bkplot_link(document, comm):
widget = ColorPicker(value='#ff00ff')
bokeh_fig = figure()
scatter = bokeh_fig.scatter([1, 2, 3], [1, 2, 3])
widget.jslink(scatter.glyph, value='fill_color')
row = Row(bokeh_fig, widget)
model = row.get_root(document, comm=comm)
link_customjs = model.children[1].js_property_callbacks['change:color'][-1]
assert link_customjs.args['source'] is model.children[1]
assert link_customjs.args['target'] is scatter.glyph
assert scatter.glyph.fill_color == '#ff00ff'
code = """
var value = source['color'];
value = value;
value = value;
try {
var property = target.properties['fill_color'];
if (property !== undefined) { property.validate(value); }
} catch(err) {
console.log('WARNING: Could not set fill_color on target, raised error: ' + err);
return;
}
try {
target['fill_color'] = value;
} catch(err) {
console.log(err)
}
"""
assert link_customjs.code == code
def test_bokeh_figure_jslink(document, comm):
fig = figure()
pane = Bokeh(fig)
t1 = TextInput()
pane.jslink(t1, **{'x_range.start': 'value'})
row = Row(pane, t1)
model = row.get_root(document, comm)
link_customjs = fig.x_range.js_property_callbacks['change:start'][-1]
assert link_customjs.args['source'] == fig.x_range
assert link_customjs.args['target'] == model.children[1]
assert link_customjs.code == """
var value = source['start'];
value = value;
value = value;
try {
var property = target.properties['value'];
if (property !== undefined) { property.validate(value); }
} catch(err) {
console.log('WARNING: Could not set value on target, raised error: ' + err);
return;
}
try {
target['value'] = value;
} catch(err) {
console.log(err)
}
"""
def test_widget_jscallback(document, comm):
widget = ColorPicker(value='#ff00ff')
widget.jscallback(value='some_code')
model = widget.get_root(document, comm=comm)
customjs = model.js_property_callbacks['change:color'][-1]
assert customjs.args['source'] is model
assert customjs.code == "try { some_code } catch(err) { console.log(err) }"
def test_widget_jscallback_args_scalar(document, comm):
widget = ColorPicker(value='#ff00ff')
widget.jscallback(value='some_code', args={'scalar': 1})
model = widget.get_root(document, comm=comm)
customjs = model.js_property_callbacks['change:color'][-1]
assert customjs.args['scalar'] == 1
def test_widget_jscallback_args_model(document, comm):
widget = ColorPicker(value='#ff00ff')
widget2 = ColorPicker(value='#ff00ff')
widget.jscallback(value='some_code', args={'widget': widget2})
model = Row(widget, widget2).get_root(document, comm=comm)
customjs = model.children[0].js_property_callbacks['change:color'][-1]
assert customjs.args['source'] is model.children[0]
assert customjs.args['widget'] is model.children[1]
assert customjs.code == "try { some_code } catch(err) { console.log(err) }"
@hv_available
def test_hvplot_jscallback(document, comm):
points1 = hv.Points([1, 2, 3])
hvplot = HoloViews(points1)
hvplot.jscallback(**{'x_range.start': "some_code"})
model = hvplot.get_root(document, comm=comm)
x_range = hvplot._plots[model.ref['id']][0].handles['x_range']
customjs = x_range.js_property_callbacks['change:start'][-1]
assert customjs.args['source'] is x_range
assert customjs.code == "try { some_code } catch(err) { console.log(err) }"
@hv_available
def test_link_with_customcode(document, comm):
range_widget = RangeSlider(start=0., end=1.)
curve = hv.Curve([])
code = """
x_range.start = source.value[0]
x_range.end = source.value[1]
"""
range_widget.jslink(curve, code={'value': code})
row = Row(curve, range_widget)
range_widget.value = (0.5, 0.7)
model = row.get_root(document, comm=comm)
hv_views = row.select(HoloViews)
widg_views = row.select(RangeSlider)
assert len(hv_views) == 1
assert len(widg_views) == 1
range_slider = widg_views[0]._models[model.ref['id']][0]
x_range = hv_views[0]._plots[model.ref['id']][0].handles['x_range']
link_customjs = range_slider.js_property_callbacks['change:value'][-1]
assert link_customjs.args['source'] is range_slider
assert link_customjs.args['x_range'] is x_range
assert link_customjs.code == "try { %s } catch(err) { console.log(err) }" % code
|
the-stack_0_3366 |
import uuid
from datetime import datetime
from flasgger import swag_from
from flask import Blueprint, jsonify, request
from cloudinary.uploader import upload
from src.models import Author, Book, UserProfile, db
from src.google import get_user_info
from src.constants.http_status_codes import HTTP_201_CREATED, HTTP_400_BAD_REQUEST, HTTP_500_INTERNAL_SERVER_ERROR, HTTP_404_NOT_FOUND
book_bp = Blueprint('book', __name__, url_prefix='/api')
@book_bp.post('/books')
@swag_from('../docs/book/create.yml')
def create_book():
file = request.files['image']
# Author
author_first_name = request.form['author_first_name']
author_last_name = request.form['author_last_name']
# Book
title = request.form['title']
isbn = request.form['isbn']
language = request.form['language']
year_of_publication = request.form['year_of_publication']
category = request.form['category']
owner_id = request.form['owner_id']
if not title:
return jsonify({
'error': "book title is required"
}), HTTP_400_BAD_REQUEST
if not language:
return jsonify({
'error': "book language is required"
}), HTTP_400_BAD_REQUEST
if not owner_id:
return jsonify({
'error': "book owner is required"
}), HTTP_400_BAD_REQUEST
owner = UserProfile.query.filter_by(id=owner_id).first()
if not owner:
return jsonify({
'error':f"user with id {owner_id} not found"
}), HTTP_404_NOT_FOUND
if not year_of_publication:
return jsonify({
'error': "year of publication is required"
}), HTTP_400_BAD_REQUEST
if not (author_first_name and author_last_name):
return jsonify({
'error': "author's first and last name is required"
}), HTTP_400_BAD_REQUEST
try:
# Upload image to cloudinary server
cloudinary_response = upload(file, folder="bookie-books")
except Exception as ex:
return({'error':"error uploading image to cloudinary"}, HTTP_500_INTERNAL_SERVER_ERROR)
if not cloudinary_response:
return jsonify({
'error': "error uploading image"
}), HTTP_400_BAD_REQUEST
author = Author(
id=uuid.uuid4(),
first_name=author_first_name,
last_name=author_last_name)
book = Book(
id=uuid.uuid4(),
name=title,
isbn=isbn,
language=language,
year_of_publication=year_of_publication,
category=category,
author_id=author.id,
owner_id=uuid.UUID(owner.id),
image_url = cloudinary_response['secure_url'], # from cloudinary response after successful upload
cld_asset_id=cloudinary_response['asset_id'],
cld_public_id=cloudinary_response['public_id'],
is_available=True,
created_at=datetime.now(),
borrowed=False # Not borrowed on creation
)
db.session.add(author)
db.session.add(book)
db.session.commit()
return {'message':"book created"}, HTTP_201_CREATED
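# Hedged client sketch (illustrative only: host, port, field values and the
# placeholder file name are assumptions, not part of this blueprint):
#
#   import requests
#   files = {'image': open('cover.jpg', 'rb')}
#   data = {
#       'author_first_name': 'Chinua', 'author_last_name': 'Achebe',
#       'title': 'Things Fall Apart', 'isbn': '9780385474542',
#       'language': 'English', 'year_of_publication': '1958',
#       'category': 'Fiction', 'owner_id': '<existing-user-id>',
#   }
#   requests.post('http://localhost:5000/api/books', files=files, data=data)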
|
the-stack_0_3368 | import argparse
import optparse
import sys
import turtle
from turtle import *
import numpy as np
parser = optparse.OptionParser(description='paint')
parser.add_option('--name', type=str, default='circle',
help='file name')
parser.add_option('--start_length', type=int, default=0, help='number of forwards')
parser.add_option('--end_length', type=int, default=120, help='number of forwards')
parser.add_option('--n_edges', type=int, default=6, help='number_of_edges')
parser.add_option('--pattern', type=int, default=-2, help='index of pattern, e.g. -2, -1, 0, 1, 2, ...')
parser.add_option('--color', type=str, default='monocolor', help='color, e.g. monocolor, red, ...')
parser.add_option('--n_circle', type=int, default=10, help='number of circle')
(opts, args) = parser.parse_args()
argvs = sys.argv
if opts.color == 'colorful':
colors = ['red', 'orange', 'yellow', 'green', 'blue', 'purple']
else:
colors = [opts.color] * 6
speed(0)
# bgcolor("Black")
def rotation(start_length, end_length, n_edges, pattern, color):
n = 0
c = 0
colormode(255)
for count in range(start_length, end_length):
# decide color
if color == 'monocolor':
c += int(255 / end_length)
# pencolor(np.uint8(-c), np.uint8(-c), np.uint8(-c))
pencolor(np.uint8(c), np.uint8(c), np.uint8(c))
else:
# you can change color as you like here
start = 255
c += int(start / end_length)
# pencolor(np.uint8(start - c), np.uint8(start - c), np.uint8(c))
# pencolor(np.uint8(c), np.uint8(start - c), np.uint8(start - c))
pencolor(np.uint8(c), np.uint8(0), np.uint8(0))
for i in range(n_edges):
if color == 'colorful':
pencolor(colors[i % 6])
forward(count)
left(int(360 / n_edges) + pattern)
n += 1
left(3)
print(count)
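# Example call (illustrative values): rotation(0, 120, 6, -2, 'colorful') draws 120
# hexagon-like passes of growing edge length; ``pattern`` offsets the 360/n_edges
# turn so the polygons do not close exactly, and the extra left(3) after each pass
# rotates the whole figure.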
window = turtle.Screen()
window.setup(width=600, height=600, startx=10, starty=0.5)
position_list = np.random.randint(-300, 300, size=(opts.n_circle, 2))
np.random.randint(10) * 10
for ii in range(opts.n_circle):
penup()
goto(position_list[ii, 0], position_list[ii, 1])
pendown()
rotation(np.random.randint(10) * 4, np.random.randint(10, 30) * 4, np.random.randint(4, 5), np.random.choice([-1, 1]) * np.random.randint(0, 3), opts.color)
hideturtle()
ts = getscreen()
bgcolor("Black")
ts.getcanvas().postscript(file=f"{opts.name}_{opts.end_length}_{opts.n_edges}_{opts.pattern}_{opts.color}.eps")
print('end')
exit()
|
the-stack_0_3369 | # Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ROS2 Tesla driver."""
import os
import cv2
import numpy as np
import rclpy
from sensor_msgs.msg import Image
from ackermann_msgs.msg import AckermannDrive
from rclpy.qos import qos_profile_sensor_data, QoSReliabilityPolicy
from rclpy.node import Node
CONTROL_COEFFICIENT = 0.0005
class LaneFollower(Node):
def __init__(self):
super().__init__('lane_follower')
# ROS interface
self.__ackermann_publisher = self.create_publisher(AckermannDrive, 'cmd_ackermann', 1)
qos_camera_data = qos_profile_sensor_data
# In case ROS_DISTRO is Rolling or Galactic the QoSReliabilityPolicy is strict.
if ('ROS_DISTRO' in os.environ and (os.environ['ROS_DISTRO'] == 'rolling' or os.environ['ROS_DISTRO'] == 'galactic')):
qos_camera_data.reliability = QoSReliabilityPolicy.RELIABLE
self.create_subscription(Image, 'vehicle/camera', self.__on_camera_image, qos_camera_data)
def __on_camera_image(self, message):
img = message.data
img = np.frombuffer(img, dtype=np.uint8).reshape((message.height, message.width, 4))
img = img[380:420, :]
# Segment the image by color in HSV color space
img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(img, np.array([50, 110, 150]), np.array([120, 255, 255]))
# Find the largest segmented contour
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
command_message = AckermannDrive()
command_message.speed = 50.0
command_message.steering_angle = 0.0
if contours:
largest_contour = max(contours, key=cv2.contourArea)
largest_contour_center = cv2.moments(largest_contour)
if largest_contour_center['m00'] != 0:
center_x = int(largest_contour_center['m10'] / largest_contour_center['m00'])
# Find error (the lane distance from the target distance)
error = center_x - 190
command_message.steering_angle = error*CONTROL_COEFFICIENT
self.__ackermann_publisher.publish(command_message)
def main(args=None):
rclpy.init(args=args)
follower = LaneFollower()
rclpy.spin(follower)
rclpy.shutdown()
if __name__ == '__main__':
main()
|
the-stack_0_3371 | from swsscommon import swsscommon
import time
import json
import random
import time
from pprint import pprint
def create_entry(tbl, key, pairs):
fvs = swsscommon.FieldValuePairs(pairs)
tbl.set(key, fvs)
time.sleep(1)
def create_entry_tbl(db, table, separator, key, pairs):
tbl = swsscommon.Table(db, table)
create_entry(tbl, key, pairs)
def create_entry_pst(db, table, separator, key, pairs):
tbl = swsscommon.ProducerStateTable(db, table)
create_entry(tbl, key, pairs)
def how_many_entries_exist(db, table):
tbl = swsscommon.Table(db, table)
return len(tbl.getKeys())
def entries(db, table):
tbl = swsscommon.Table(db, table)
return set(tbl.getKeys())
def get_exist_entries(dvs, table):
db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
tbl = swsscommon.Table(db, table)
return set(tbl.getKeys())
def get_created_entry(db, table, existed_entries):
tbl = swsscommon.Table(db, table)
entries = set(tbl.getKeys())
new_entries = list(entries - existed_entries)
assert len(new_entries) == 1, "Wrong number of created entries."
return new_entries[0]
def get_created_entries(db, table, existed_entries, count):
tbl = swsscommon.Table(db, table)
entries = set(tbl.getKeys())
new_entries = list(entries - existed_entries)
assert len(new_entries) == count, "Wrong number of created entries."
new_entries.sort()
return new_entries
def get_default_vr_id(dvs):
db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
table = 'ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER'
tbl = swsscommon.Table(db, table)
keys = tbl.getKeys()
assert len(keys) == 1, "Wrong number of virtual routers found"
return keys[0]
def check_object(db, table, key, expected_attributes):
tbl = swsscommon.Table(db, table)
keys = tbl.getKeys()
assert key in keys, "The desired key is not presented"
status, fvs = tbl.get(key)
assert status, "Got an error when get a key"
assert len(fvs) >= len(expected_attributes), "Incorrect attributes"
attr_keys = {entry[0] for entry in fvs}
for name, value in fvs:
if name in expected_attributes:
assert expected_attributes[name] == value, "Wrong value %s for the attribute %s = %s" % \
(value, name, expected_attributes[name])
def create_vnet_local_routes(dvs, prefix, vnet_name, ifname):
app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
create_entry_pst(
app_db,
"VNET_ROUTE_TABLE", ':', "%s:%s" % (vnet_name, prefix),
[
("ifname", ifname),
]
)
time.sleep(2)
def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0):
app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
attrs = [
("endpoint", endpoint),
]
if vni:
attrs.append(('vni', vni))
if mac:
attrs.append(('mac_address', mac))
create_entry_pst(
app_db,
"VNET_ROUTE_TUNNEL_TABLE", ':', "%s:%s" % (vnet_name, prefix),
attrs,
)
time.sleep(2)
def create_vlan(dvs, vlan_name, vlan_ids):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)
vlan_id = vlan_name[4:]
# create vlan
create_entry_tbl(
conf_db,
"VLAN", '|', vlan_name,
[
("vlanid", vlan_id),
],
)
time.sleep(1)
vlan_oid = get_created_entry(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN", vlan_ids)
check_object(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN", vlan_oid,
{
"SAI_VLAN_ATTR_VLAN_ID": vlan_id,
}
)
return vlan_oid
def create_vlan_interface(dvs, vlan_name, ifname, vnet_name, ipaddr):
conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)
vlan_ids = get_exist_entries(dvs, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
vlan_oid = create_vlan (dvs, vlan_name, vlan_ids)
# create a vlan member in config db
create_entry_tbl(
conf_db,
"VLAN_MEMBER", '|', "%s|%s" % (vlan_name, ifname),
[
("tagging_mode", "untagged"),
],
)
time.sleep(1)
# create vlan interface in config db
create_entry_tbl(
conf_db,
"VLAN_INTERFACE", '|', vlan_name,
[
("vnet_name", vnet_name),
],
)
#FIXME - This is created by IntfMgr
app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
create_entry_pst(
app_db,
"INTF_TABLE", ':', vlan_name,
[
("vnet_name", vnet_name),
],
)
time.sleep(2)
create_entry_tbl(
conf_db,
"VLAN_INTERFACE", '|', "%s|%s" % (vlan_name, ipaddr),
[
("family", "IPv4"),
],
)
time.sleep(2)
return vlan_oid
def create_phy_interface(dvs, ifname, vnet_name, ipaddr):
conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)
exist_rifs = get_exist_entries(dvs, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE")
# create vlan interface in config db
create_entry_tbl(
conf_db,
"INTERFACE", '|', ifname,
[
("vnet_name", vnet_name),
],
)
#FIXME - This is created by IntfMgr
app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
create_entry_pst(
app_db,
"INTF_TABLE", ':', ifname,
[
("vnet_name", vnet_name),
],
)
time.sleep(2)
create_entry_tbl(
conf_db,
"INTERFACE", '|', "%s|%s" % (ifname, ipaddr),
[
("family", "IPv4"),
],
)
def create_vnet_entry(dvs, name, tunnel, vni, peer_list):
conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
attrs = [
("vxlan_tunnel", tunnel),
("vni", vni),
("peer_list", peer_list),
]
# create the VXLAN tunnel Term entry in Config DB
create_entry_tbl(
conf_db,
"VNET", '|', name,
attrs,
)
time.sleep(2)
def create_vxlan_tunnel(dvs, name, src_ip):
conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)
attrs = [
("src_ip", src_ip),
]
# create the VXLAN tunnel Term entry in Config DB
create_entry_tbl(
conf_db,
"VXLAN_TUNNEL", '|', name,
attrs,
)
def get_lo(dvs):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
vr_id = get_default_vr_id(dvs)
tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE')
entries = tbl.getKeys()
lo_id = None
for entry in entries:
status, fvs = tbl.get(entry)
assert status, "Got an error when get a key"
for key, value in fvs:
if key == 'SAI_ROUTER_INTERFACE_ATTR_TYPE' and value == 'SAI_ROUTER_INTERFACE_TYPE_LOOPBACK':
lo_id = entry
break
else:
assert False, 'Don\'t found loopback id'
return lo_id
def get_switch_mac(dvs):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_SWITCH')
entries = tbl.getKeys()
mac = None
for entry in entries:
status, fvs = tbl.get(entry)
assert status, "Got an error when get a key"
for key, value in fvs:
if key == 'SAI_SWITCH_ATTR_SRC_MAC_ADDRESS':
mac = value
break
else:
assert False, 'Don\'t found switch mac'
return mac
loopback_id = 0
def_vr_id = 0
switch_mac = None
class VnetVxlanVrfTunnel(object):
ASIC_TUNNEL_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL"
ASIC_TUNNEL_MAP = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP"
ASIC_TUNNEL_MAP_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY"
ASIC_TUNNEL_TERM_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY"
ASIC_RIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE"
ASIC_VRF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER"
ASIC_ROUTE_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY"
ASIC_NEXT_HOP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP"
tunnel_map_ids = set()
tunnel_map_entry_ids = set()
tunnel_ids = set()
tunnel_term_ids = set()
tunnel_map_map = {}
tunnel = {}
vnet_vr_ids = set()
vr_map = {}
nh_ids = {}
def fetch_exist_entries(self, dvs):
self.vnet_vr_ids = get_exist_entries(dvs, self.ASIC_VRF_TABLE)
self.tunnel_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_TABLE)
self.tunnel_map_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_MAP)
self.tunnel_map_entry_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_MAP_ENTRY)
self.tunnel_term_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_TERM_ENTRY)
self.rifs = get_exist_entries(dvs, self.ASIC_RIF_TABLE)
self.routes = get_exist_entries(dvs, self.ASIC_ROUTE_ENTRY)
self.nhops = get_exist_entries(dvs, self.ASIC_NEXT_HOP)
global loopback_id, def_vr_id, switch_mac
if not loopback_id:
loopback_id = get_lo(dvs)
if not def_vr_id:
def_vr_id = get_default_vr_id(dvs)
if switch_mac is None:
switch_mac = get_switch_mac(dvs)
def check_vxlan_tunnel(self, dvs, tunnel_name, src_ip):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
global loopback_id, def_vr_id
tunnel_map_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 2)
tunnel_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids)
tunnel_term_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids)
# check that the vxlan tunnel termination are there
assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP) == (len(self.tunnel_map_ids) + 2), "The TUNNEL_MAP wasn't created"
assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == len(self.tunnel_map_entry_ids), "The TUNNEL_MAP_ENTRY is created"
assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_TABLE) == (len(self.tunnel_ids) + 1), "The TUNNEL wasn't created"
        assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_TERM_ENTRY) == (len(self.tunnel_term_ids) + 1), "The TUNNEL_TERM_TABLE_ENTRY wasn't created"
check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[0],
{
'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID',
}
)
check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[1],
{
'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI',
}
)
check_object(asic_db, self.ASIC_TUNNEL_TABLE, tunnel_id,
{
'SAI_TUNNEL_ATTR_TYPE': 'SAI_TUNNEL_TYPE_VXLAN',
'SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE': loopback_id,
'SAI_TUNNEL_ATTR_DECAP_MAPPERS': '1:%s' % tunnel_map_id[0],
'SAI_TUNNEL_ATTR_ENCAP_MAPPERS': '1:%s' % tunnel_map_id[1],
'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip,
}
)
expected_attributes = {
'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE': 'SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP',
'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID': def_vr_id,
'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP': src_ip,
'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE': 'SAI_TUNNEL_TYPE_VXLAN',
'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID': tunnel_id,
}
check_object(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, tunnel_term_id, expected_attributes)
self.tunnel_map_ids.update(tunnel_map_id)
self.tunnel_ids.add(tunnel_id)
self.tunnel_term_ids.add(tunnel_term_id)
self.tunnel_map_map[tunnel_name] = tunnel_map_id
self.tunnel[tunnel_name] = tunnel_id
def check_vxlan_tunnel_entry(self, dvs, tunnel_name, vnet_name, vni_id):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
time.sleep(2)
if (self.tunnel_map_map.get(tunnel_name) is None):
tunnel_map_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 2)
else:
tunnel_map_id = self.tunnel_map_map[tunnel_name]
tunnel_map_entry_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, self.tunnel_map_entry_ids, 2)
# check that the vxlan tunnel termination are there
assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 2), "The TUNNEL_MAP_ENTRY is created too early"
check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[0],
{
'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI',
'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[1],
'SAI_TUNNEL_MAP_ENTRY_ATTR_VIRTUAL_ROUTER_ID_KEY': self.vr_map[vnet_name].get('ing'),
'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_VALUE': vni_id,
}
)
check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[1],
{
'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID',
'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[0],
'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_KEY': vni_id,
'SAI_TUNNEL_MAP_ENTRY_ATTR_VIRTUAL_ROUTER_ID_VALUE': self.vr_map[vnet_name].get('egr'),
}
)
self.tunnel_map_entry_ids.update(tunnel_map_entry_id)
def check_vnet_entry(self, dvs, name, peer_list=[]):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
#Check virtual router objects
assert how_many_entries_exist(asic_db, self.ASIC_VRF_TABLE) == (len(self.vnet_vr_ids) + 2),\
"The VR objects are not created"
new_vr_ids = get_created_entries(asic_db, self.ASIC_VRF_TABLE, self.vnet_vr_ids, 2)
self.vnet_vr_ids.update(new_vr_ids)
self.vr_map[name] = { 'ing':new_vr_ids[0], 'egr':new_vr_ids[1], 'peer':peer_list }
def vnet_route_ids(self, dvs, name, local=False):
vr_set = set()
if local:
vr_set.add(self.vr_map[name].get('egr'))
else:
vr_set.add(self.vr_map[name].get('ing'))
try:
for peer in self.vr_map[name].get('peer'):
vr_set.add(self.vr_map[peer].get('ing'))
except IndexError:
pass
return vr_set
def check_router_interface(self, dvs, name, vlan_oid=0):
# Check RIF in ingress VRF
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
global switch_mac
expected_attr = {
"SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": self.vr_map[name].get('ing'),
"SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS": switch_mac,
"SAI_ROUTER_INTERFACE_ATTR_MTU": "9100",
}
if vlan_oid:
expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_TYPE': 'SAI_ROUTER_INTERFACE_TYPE_VLAN'})
expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_VLAN_ID': vlan_oid})
else:
expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_TYPE': 'SAI_ROUTER_INTERFACE_TYPE_PORT'})
new_rif = get_created_entry(asic_db, self.ASIC_RIF_TABLE, self.rifs)
check_object(asic_db, self.ASIC_RIF_TABLE, new_rif, expected_attr)
#IP2ME and subnet routes will be created with every router interface
new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, 2)
self.rifs.add(new_rif)
self.routes.update(new_route)
def check_vnet_local_routes(self, dvs, name):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
vr_ids = self.vnet_route_ids(dvs, name, True)
count = len(vr_ids)
new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count)
#Check if the route is duplicated to egress VRF
asic_vrs = set()
for idx in range(count):
rt_key = json.loads(new_route[idx])
asic_vrs.add(rt_key['vr'])
assert asic_vrs == vr_ids
self.routes.update(new_route)
def check_vnet_routes(self, dvs, name, endpoint, tunnel, mac="", vni=0):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
vr_ids = self.vnet_route_ids(dvs, name)
count = len(vr_ids)
# Check routes in ingress VRF
expected_attr = {
"SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP",
"SAI_NEXT_HOP_ATTR_IP": endpoint,
"SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel],
}
if vni:
expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni})
if mac:
expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac})
if endpoint in self.nh_ids:
new_nh = self.nh_ids[endpoint]
else:
new_nh = get_created_entry(asic_db, self.ASIC_NEXT_HOP, self.nhops)
self.nh_ids[endpoint] = new_nh
self.nhops.add(new_nh)
check_object(asic_db, self.ASIC_NEXT_HOP, new_nh, expected_attr)
new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count)
#Check if the route is in expected VRF
asic_vrs = set()
for idx in range(count):
check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx],
{
"SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": new_nh,
}
)
rt_key = json.loads(new_route[idx])
asic_vrs.add(rt_key['vr'])
assert asic_vrs == vr_ids
self.routes.update(new_route)
class TestVnetOrch(object):
'''
Test 1 - Create Vlan Interface, Tunnel and Vnet
'''
def test_vnet_orch_1(self, dvs, testlog):
vnet_obj = VnetVxlanVrfTunnel()
tunnel_name = 'tunnel_1'
vnet_obj.fetch_exist_entries(dvs)
create_vxlan_tunnel(dvs, tunnel_name, '10.10.10.10')
create_vnet_entry(dvs, 'Vnet_2000', tunnel_name, '2000', "")
vnet_obj.check_vnet_entry(dvs, 'Vnet_2000')
vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_2000', '2000')
vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '10.10.10.10')
vid = create_vlan_interface(dvs, "Vlan100", "Ethernet24", "Vnet_2000", "100.100.3.1/24")
vnet_obj.check_router_interface(dvs, 'Vnet_2000', vid)
vid = create_vlan_interface(dvs, "Vlan101", "Ethernet28", "Vnet_2000", "100.100.4.1/24")
vnet_obj.check_router_interface(dvs, 'Vnet_2000', vid)
create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet_2000', '10.10.10.1')
vnet_obj.check_vnet_routes(dvs, 'Vnet_2000', '10.10.10.1', tunnel_name)
create_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet_2000', 'Vlan100')
vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2000')
create_vnet_local_routes(dvs, "100.100.4.0/24", 'Vnet_2000', 'Vlan101')
vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2000')
#Create Physical Interface in another Vnet
create_vnet_entry(dvs, 'Vnet_2001', tunnel_name, '2001', "")
vnet_obj.check_vnet_entry(dvs, 'Vnet_2001')
vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_2001', '2001')
create_phy_interface(dvs, "Ethernet4", "Vnet_2001", "100.102.1.1/24")
vnet_obj.check_router_interface(dvs, 'Vnet_2001')
create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet_2001', '10.10.10.2', "00:12:34:56:78:9A")
vnet_obj.check_vnet_routes(dvs, 'Vnet_2001', '10.10.10.2', tunnel_name, "00:12:34:56:78:9A")
create_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet_2001', 'Ethernet4')
vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2001')
'''
Test 2 - Two VNets, One HSMs per VNet
'''
def test_vnet_orch_2(self, dvs, testlog):
vnet_obj = VnetVxlanVrfTunnel()
tunnel_name = 'tunnel_2'
vnet_obj.fetch_exist_entries(dvs)
create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6')
create_vnet_entry(dvs, 'Vnet_1', tunnel_name, '1111', "")
vnet_obj.check_vnet_entry(dvs, 'Vnet_1')
vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_1', '1111')
tun_id = vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6')
vid = create_vlan_interface(dvs, "Vlan1001", "Ethernet0", "Vnet_1", "1.1.10.1/24")
vnet_obj.check_router_interface(dvs, 'Vnet_1', vid)
create_vnet_routes(dvs, "1.1.1.10/32", 'Vnet_1', '100.1.1.10')
vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '100.1.1.10', tunnel_name)
create_vnet_routes(dvs, "1.1.1.11/32", 'Vnet_1', '100.1.1.10')
vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '100.1.1.10', tunnel_name)
create_vnet_routes(dvs, "1.1.1.12/32", 'Vnet_1', '200.200.1.200')
vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '200.200.1.200', tunnel_name)
create_vnet_routes(dvs, "1.1.1.14/32", 'Vnet_1', '200.200.1.201')
vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '200.200.1.201', tunnel_name)
create_vnet_local_routes(dvs, "1.1.10.0/24", 'Vnet_1', 'Vlan1001')
vnet_obj.check_vnet_local_routes(dvs, 'Vnet_1')
create_vnet_entry(dvs, 'Vnet_2', tunnel_name, '2222', "")
vnet_obj.check_vnet_entry(dvs, 'Vnet_2')
vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_2', '2222')
vid = create_vlan_interface(dvs, "Vlan1002", "Ethernet4", "Vnet_2", "2.2.10.1/24")
vnet_obj.check_router_interface(dvs, 'Vnet_2', vid)
create_vnet_routes(dvs, "2.2.2.10/32", 'Vnet_2', '100.1.1.20')
vnet_obj.check_vnet_routes(dvs, 'Vnet_2', '100.1.1.20', tunnel_name)
create_vnet_routes(dvs, "2.2.2.11/32", 'Vnet_2', '100.1.1.20')
vnet_obj.check_vnet_routes(dvs, 'Vnet_2', '100.1.1.20', tunnel_name)
create_vnet_local_routes(dvs, "2.2.10.0/24", 'Vnet_2', 'Vlan1002')
vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2')
'''
Test 3 - Two VNets, One HSMs per VNet, Peering
'''
def test_vnet_orch_3(self, dvs, testlog):
vnet_obj = VnetVxlanVrfTunnel()
tunnel_name = 'tunnel_3'
vnet_obj.fetch_exist_entries(dvs)
create_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7')
create_vnet_entry(dvs, 'Vnet_10', tunnel_name, '1111', "Vnet_20")
vnet_obj.check_vnet_entry(dvs, 'Vnet_10', ['Vnet_20'])
vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_10', '1111')
create_vnet_entry(dvs, 'Vnet_20', tunnel_name, '2222', "Vnet_10")
vnet_obj.check_vnet_entry(dvs, 'Vnet_20', ['Vnet_10'])
vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_20', '2222')
tun_id = vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7')
vid = create_vlan_interface(dvs, "Vlan2001", "Ethernet8", "Vnet_10", "5.5.10.1/24")
vnet_obj.check_router_interface(dvs, 'Vnet_10', vid)
vid = create_vlan_interface(dvs, "Vlan2002", "Ethernet12", "Vnet_20", "8.8.10.1/24")
vnet_obj.check_router_interface(dvs, 'Vnet_20', vid)
create_vnet_routes(dvs, "5.5.5.10/32", 'Vnet_10', '50.1.1.10')
vnet_obj.check_vnet_routes(dvs, 'Vnet_10', '50.1.1.10', tunnel_name)
create_vnet_routes(dvs, "8.8.8.10/32", 'Vnet_20', '80.1.1.20')
vnet_obj.check_vnet_routes(dvs, 'Vnet_10', '80.1.1.20', tunnel_name)
create_vnet_local_routes(dvs, "5.5.10.0/24", 'Vnet_10', 'Vlan2001')
vnet_obj.check_vnet_local_routes(dvs, 'Vnet_10')
create_vnet_local_routes(dvs, "8.8.10.0/24", 'Vnet_20', 'Vlan2002')
vnet_obj.check_vnet_local_routes(dvs, 'Vnet_20')
|
the-stack_0_3373 | def get_parent_index(h, idx):
## calculate the maximum index first
## if the input is too large, return a negative 1
max_idx = 2**h - 1
if max_idx < idx:
return -1
# otherwise, carry on
else:
node_offset = 0
continue_flag = True
subtree_size = max_idx
result = -1 # default result
while continue_flag:
if subtree_size == 0:
continue_flag = False
# right shift is equivalent to dividing by 2 and discarding the remainder.
subtree_size = subtree_size >> 1
# predict the left node
left_node = node_offset + subtree_size
# predict the right node
right_node = left_node + subtree_size
# calculate my node value
my_node = right_node + 1
# if either child is a match, return my node value
if (left_node == idx) or (right_node == idx):
result = my_node
continue_flag = False
# Make the current left child the offset if the index is greater than the left.
# This effectively searches down the right subtree.
if (idx > left_node):
node_offset = left_node
return result
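# A small worked example (editorial addition, hedged): with h = 3 the seven nodes
# are labelled in post-order, so the leaves are 1, 2, 4, 5, the internal nodes are
# 3 and 6, and the root is 7. Under that numbering:
#   get_parent_index(3, 1) == 3
#   get_parent_index(3, 5) == 6
#   get_parent_index(3, 7) == -1   # the root has no parent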
def solution(h, q):
return [ get_parent_index(h, x) for x in q ] |
the-stack_0_3375 | from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import pandas as pd
import os
import argparse
def create_folder(parent_path, folder):
if not parent_path.endswith('/'):
parent_path += '/'
folder_path = parent_path + folder
if not os.path.exists(folder_path):
os.makedirs(folder_path)
return folder_path
def shuffle_stays(stays, seed=9):
return shuffle(stays, random_state=seed)
def process_table(table_name, table, stays, folder_path):
table = table.loc[stays].copy()
table.to_csv('{}/{}.csv'.format(folder_path, table_name))
return
def split_train_test(path, is_test=True, seed=9, cleanup=True, MIMIC=False):
labels = pd.read_csv(path + 'preprocessed_labels.csv')
labels.set_index('patient', inplace=True)
# we split by unique patient identifier to make sure there are no patients
# that cross into both the train and the test sets
patients = labels.uniquepid.unique()
train, test = train_test_split(patients, test_size=0.2, random_state=seed)
train, val = train_test_split(train, test_size=0.1/0.8, random_state=seed)
print('==> Loading data for splitting...')
if is_test:
timeseries = pd.read_csv(
path + 'preprocessed_timeseries.csv', nrows=999999)
else:
timeseries = pd.read_csv(path + 'preprocessed_timeseries.csv')
timeseries.set_index('patient', inplace=True)
if not MIMIC:
diagnoses = pd.read_csv(path + 'preprocessed_diagnoses.csv')
diagnoses.set_index('patient', inplace=True)
flat_features = pd.read_csv(path + 'preprocessed_flat.csv')
flat_features.set_index('patient', inplace=True)
# delete the source files, as they won't be needed anymore
if is_test is False and cleanup:
print('==> Removing the unsorted data...')
os.remove(path + 'preprocessed_timeseries.csv')
if not MIMIC:
os.remove(path + 'preprocessed_diagnoses.csv')
os.remove(path + 'preprocessed_labels.csv')
os.remove(path + 'preprocessed_flat.csv')
for partition_name, partition in zip(['train', 'val', 'test'], [train, val, test]):
print('==> Preparing {} data...'.format(partition_name))
stays = labels.loc[labels['uniquepid'].isin(partition)].index
folder_path = create_folder(path, partition_name)
with open(folder_path + '/stays.txt', 'w') as f:
for stay in stays:
f.write("%s\n" % stay)
stays = shuffle_stays(stays, seed=9)
if MIMIC:
for table_name, table in zip(['labels', 'flat', 'timeseries'],
[labels, flat_features, timeseries]):
process_table(table_name, table, stays, folder_path)
else:
for table_name, table in zip(['labels', 'flat', 'diagnoses', 'timeseries'],
[labels, flat_features, diagnoses, timeseries]):
process_table(table_name, table, stays, folder_path)
return
if __name__ == '__main__':
from eICU_preprocessing.run_all_preprocessing import eICU_path
parser = argparse.ArgumentParser()
parser.add_argument('--cleanup', action='store_true')
args = parser.parse_args()
split_train_test(eICU_path, is_test=False, cleanup=args.cleanup)
|
the-stack_0_3376 | """
Support for HomematicIP sensors.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/sensor.homematicip_cloud/
"""
import logging
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.components.homematicip_cloud import (
HomematicipGenericDevice, DOMAIN, EVENT_HOME_CHANGED,
ATTR_HOME_LABEL, ATTR_HOME_ID, ATTR_LOW_BATTERY, ATTR_RSSI)
from homeassistant.const import TEMP_CELSIUS, STATE_OK
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['homematicip_cloud']
ATTR_VALVE_STATE = 'valve_state'
ATTR_VALVE_POSITION = 'valve_position'
ATTR_TEMPERATURE_OFFSET = 'temperature_offset'
HMIP_UPTODATE = 'up_to_date'
HMIP_VALVE_DONE = 'adaption_done'
HMIP_SABOTAGE = 'sabotage'
STATE_LOW_BATTERY = 'low_battery'
STATE_SABOTAGE = 'sabotage'
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the HomematicIP sensors devices."""
# pylint: disable=import-error, no-name-in-module
from homematicip.device import (
HeatingThermostat, TemperatureHumiditySensorWithoutDisplay,
TemperatureHumiditySensorDisplay)
homeid = discovery_info['homeid']
home = hass.data[DOMAIN][homeid]
devices = [HomematicipAccesspoint(home)]
for device in home.devices:
devices.append(HomematicipDeviceStatus(home, device))
if isinstance(device, HeatingThermostat):
devices.append(HomematicipHeatingThermostat(home, device))
if isinstance(device, TemperatureHumiditySensorWithoutDisplay):
devices.append(HomematicipSensorThermometer(home, device))
devices.append(HomematicipSensorHumidity(home, device))
if isinstance(device, TemperatureHumiditySensorDisplay):
devices.append(HomematicipSensorThermometer(home, device))
devices.append(HomematicipSensorHumidity(home, device))
if home.devices:
add_devices(devices)
class HomematicipAccesspoint(Entity):
"""Representation of an HomeMaticIP access point."""
def __init__(self, home):
"""Initialize the access point sensor."""
self._home = home
_LOGGER.debug('Setting up access point %s', home.label)
async def async_added_to_hass(self):
"""Register callbacks."""
async_dispatcher_connect(
self.hass, EVENT_HOME_CHANGED, self._home_changed)
@callback
def _home_changed(self, deviceid):
"""Handle device state changes."""
if deviceid is None or deviceid == self._home.id:
_LOGGER.debug('Event home %s', self._home.label)
self.async_schedule_update_ha_state()
@property
def name(self):
"""Return the name of the access point device."""
if self._home.label == '':
return 'Access Point Status'
return '{} Access Point Status'.format(self._home.label)
@property
def icon(self):
"""Return the icon of the access point device."""
return 'mdi:access-point-network'
@property
def state(self):
"""Return the state of the access point."""
return self._home.dutyCycle
@property
def available(self):
"""Device available."""
return self._home.connected
@property
def device_state_attributes(self):
"""Return the state attributes of the access point."""
return {
ATTR_HOME_LABEL: self._home.label,
ATTR_HOME_ID: self._home.id,
}
class HomematicipDeviceStatus(HomematicipGenericDevice):
"""Representation of an HomematicIP device status."""
def __init__(self, home, device):
"""Initialize the device."""
super().__init__(home, device)
_LOGGER.debug('Setting up sensor device status: %s', device.label)
@property
def name(self):
"""Return the name of the device."""
return self._name('Status')
@property
def icon(self):
"""Return the icon of the status device."""
if (hasattr(self._device, 'sabotage') and
self._device.sabotage == HMIP_SABOTAGE):
return 'mdi:alert'
elif self._device.lowBat:
return 'mdi:battery-outline'
elif self._device.updateState.lower() != HMIP_UPTODATE:
return 'mdi:refresh'
return 'mdi:check'
@property
def state(self):
"""Return the state of the generic device."""
if (hasattr(self._device, 'sabotage') and
self._device.sabotage == HMIP_SABOTAGE):
return STATE_SABOTAGE
elif self._device.lowBat:
return STATE_LOW_BATTERY
elif self._device.updateState.lower() != HMIP_UPTODATE:
return self._device.updateState.lower()
return STATE_OK
class HomematicipHeatingThermostat(HomematicipGenericDevice):
"""MomematicIP heating thermostat representation."""
def __init__(self, home, device):
""""Initialize heating thermostat."""
super().__init__(home, device)
_LOGGER.debug('Setting up heating thermostat device: %s', device.label)
@property
def icon(self):
"""Return the icon."""
if self._device.valveState.lower() != HMIP_VALVE_DONE:
return 'mdi:alert'
return 'mdi:radiator'
@property
def state(self):
"""Return the state of the radiator valve."""
if self._device.valveState.lower() != HMIP_VALVE_DONE:
return self._device.valveState.lower()
return round(self._device.valvePosition*100)
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return '%'
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_VALVE_STATE: self._device.valveState.lower(),
ATTR_TEMPERATURE_OFFSET: self._device.temperatureOffset,
ATTR_LOW_BATTERY: self._device.lowBat,
ATTR_RSSI: self._device.rssiDeviceValue
}
class HomematicipSensorHumidity(HomematicipGenericDevice):
"""MomematicIP thermometer device."""
def __init__(self, home, device):
""""Initialize the thermometer device."""
super().__init__(home, device)
_LOGGER.debug('Setting up humidity device: %s', device.label)
@property
def name(self):
"""Return the name of the device."""
return self._name('Humidity')
@property
def icon(self):
"""Return the icon."""
return 'mdi:water'
@property
def state(self):
"""Return the state."""
return self._device.humidity
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return '%'
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_LOW_BATTERY: self._device.lowBat,
ATTR_RSSI: self._device.rssiDeviceValue,
}
class HomematicipSensorThermometer(HomematicipGenericDevice):
"""MomematicIP thermometer device."""
def __init__(self, home, device):
""""Initialize the thermometer device."""
super().__init__(home, device)
_LOGGER.debug('Setting up thermometer device: %s', device.label)
@property
def name(self):
"""Return the name of the device."""
return self._name('Temperature')
@property
def icon(self):
"""Return the icon."""
return 'mdi:thermometer'
@property
def state(self):
"""Return the state."""
return self._device.actualTemperature
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return TEMP_CELSIUS
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_TEMPERATURE_OFFSET: self._device.temperatureOffset,
ATTR_LOW_BATTERY: self._device.lowBat,
ATTR_RSSI: self._device.rssiDeviceValue,
}
|
the-stack_0_3378 | from devito.ir.clusters.queue import QueueStateful
from devito.ir.support import (SEQUENTIAL, PARALLEL, PARALLEL_INDEP, PARALLEL_IF_ATOMIC,
AFFINE, ROUNDABLE, TILABLE, Forward)
from devito.tools import as_tuple, flatten, timed_pass
__all__ = ['analyze']
@timed_pass()
def analyze(clusters):
state = QueueStateful.State()
# Collect properties
clusters = Parallelism(state).process(clusters)
clusters = Affiness(state).process(clusters)
clusters = Tiling(state).process(clusters)
clusters = Rounding(state).process(clusters)
# Reconstruct Clusters attaching the discovered properties
processed = [c.rebuild(properties=state.properties.get(c)) for c in clusters]
return processed
class Detector(QueueStateful):
def process(self, elements):
return self._process_fatd(elements, 1)
def callback(self, clusters, prefix):
if not prefix:
return clusters
# The analyzed Dimension
d = prefix[-1].dim
# Apply the actual callback
retval = self._callback(clusters, d, prefix)
# Normalize retval
retval = set(as_tuple(retval))
# Update `self.state`
if retval:
for c in clusters:
properties = self.state.properties.setdefault(c, {})
properties.setdefault(d, set()).update(retval)
return clusters
class Parallelism(Detector):
"""
Detect SEQUENTIAL, PARALLEL, PARALLEL_INDEP and PARALLEL_IF_ATOMIC Dimensions.
Consider an IterationSpace over `n` Dimensions. Let `(d_1, ..., d_n)` be the
distance vector of a dependence. Let `i` be the `i-th` Dimension of the
IterationSpace. Then:
* `i` is PARALLEL_INDEP if all dependences have distance vectors:
(d_1, ..., d_i) = 0
* `i` is PARALLEL if all dependences have distance vectors:
(d_1, ..., d_i) = 0, OR
(d_1, ..., d_{i-1}) > 0
* `i` is PARALLEL_IF_ATOMIC if all dependences have distance vectors:
(d_1, ..., d_i) = 0, OR
(d_1, ..., d_{i-1}) > 0, OR
the 'write' is known to be an associative and commutative increment
"""
def _callback(self, clusters, d, prefix):
        # Rule out Dimensions with a non-unitary increment (e.g., `t0=(time+1)%2`)
if any(c.sub_iterators.get(d) for c in clusters):
return SEQUENTIAL
# All Dimensions up to and including `i-1`
prev = flatten(i.dim._defines for i in prefix[:-1])
is_parallel_indep = True
is_parallel_atomic = False
scope = self._fetch_scope(clusters)
for dep in scope.d_all_gen():
test00 = dep.is_indep(d) and not dep.is_storage_related(d)
test01 = all(dep.is_reduce_atmost(i) for i in prev)
if test00 and test01:
continue
test1 = len(prev) > 0 and any(dep.is_carried(i) for i in prev)
if test1:
is_parallel_indep &= (dep.distance_mapper.get(d.root) == 0)
continue
if dep.function in scope.initialized:
# False alarm, the dependence is over a locally-defined symbol
continue
if dep.is_increment:
is_parallel_atomic = True
continue
return SEQUENTIAL
if is_parallel_atomic:
return PARALLEL_IF_ATOMIC
elif is_parallel_indep:
return {PARALLEL, PARALLEL_INDEP}
else:
return PARALLEL
class Rounding(Detector):
def _callback(self, clusters, d, prefix):
itinterval = prefix[-1]
# The iteration direction must be Forward -- ROUNDABLE is for rounding *up*
if itinterval.direction is not Forward:
return
properties = self._fetch_properties(clusters, prefix)
if PARALLEL not in properties[d]:
return
scope = self._fetch_scope(clusters)
# All accessed Functions must have enough room in the PADDING region
# so that `i`'s trip count can safely be rounded up
# Note: autopadding guarantees that the padding size along the
# Fastest Varying Dimension is a multiple of the SIMD vector length
functions = [f for f in scope.functions if f.is_Tensor]
if any(not f._honors_autopadding for f in functions):
return
# Mixed data types (e.g., float and double) is unsupported
if len({f.dtype for f in functions}) > 1:
return
return ROUNDABLE
class Affiness(Detector):
"""
Detect the AFFINE Dimensions.
"""
def _callback(self, clusters, d, prefix):
scope = self._fetch_scope(clusters)
accesses = [a for a in scope.accesses if not a.is_scalar]
if all(a.is_regular and a.affine_if_present(d._defines) for a in accesses):
return AFFINE
class Tiling(Detector):
"""
Detect the TILABLE Dimensions.
"""
def process(self, elements):
return self._process_fdta(elements, 1)
def _callback(self, clusters, d, prefix):
# A Dimension is TILABLE only if it's PARALLEL and AFFINE
properties = self._fetch_properties(clusters, prefix)
if not {PARALLEL, AFFINE} <= properties[d]:
return
# In addition, we use the heuristic that we do not consider
# TILABLE a Dimension that is not embedded in at least one
# SEQUENTIAL Dimension. This is to rule out tiling when the
# computation is not expected to be expensive
if not any(SEQUENTIAL in properties[i.dim] for i in prefix[:-1]):
return
# Likewise, it won't be marked TILABLE if there's at least one
# local SubDimension in all Clusters
if all(any(i.dim.is_Sub and i.dim.local for i in c.itintervals)
for c in clusters):
return
# If it induces dynamic bounds, then it's ruled out too
scope = self._fetch_scope(clusters)
if any(i.is_lex_non_stmt for i in scope.d_all_gen()):
return
return TILABLE
|
the-stack_0_3380 | """
Functions connecting the whole process. Run 'visual_from_signal' to visualize a signal identified by its ID,
and 'visual_from_data' to visualize a dome at arbitrary coordinates on Earth.
Miha Lotric, April 2020
"""
import io
from signal_visualizer import getters as gt
def visual_from_signal(signal_id, show=0, save_as=None, return_bytes=0):
"""Save/Show static Mapbox map with dome representing reach and position of the signal.
Args:
signal_id [int]: Unique identifier of a signal.
        show [bool]: If True, the final image is shown in the default OS image viewer.
        save_as [None/str]: Path to the location where the image is stored. If left None, the image is not stored.
        return_bytes [bool]: If True, the image is returned as bytes.
Return:
BytesIO: Bytes image of Mapbox static map with dome on it.
"""
signal_info = gt.get_signal_info(signal_id)
coordinates = float(signal_info['coordinates'][0]), float(signal_info['coordinates'][1])
radius_meters = signal_info['radius']
map_bytes = visual_from_data(coordinates, radius_meters, save_as=save_as, show=show, return_bytes=return_bytes)
return map_bytes if return_bytes else None
def visual_from_data(coordinates, radius_meters, show=1, save_as=None, return_bytes=0):
"""Save/Show static Mapbox map with dome representing specified radius and coordinates.
Args:
coordinates [tuple]: Coordinates of the signal position - (latitude,longitude).
radius_meters [float]: Radius of the dome in meters.
        show [bool]: If True, the final image is shown in the default OS image viewer.
        save_as [None/str]: Path to the location where the image is stored. If left None, the image is not stored.
        return_bytes [bool]: If True, the image is returned as bytes.
Return:
BytesIO: Bytes image of Mapbox static map with dome on it.
"""
radius_px = gt.get_radius_px(radius_meters)
zoom = gt.get_zoom(coordinates[0], radius_px, radius_meters)
map_img = gt.get_map(radius_px, coordinates, zoom)
if show: map_img.show()
if save_as: map_img.save(save_as)
if return_bytes:
map_bytes = io.BytesIO()
map_img.save(map_bytes, format='PNG')
return io.BytesIO(map_bytes.getvalue())
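# Example usage (editorial sketch; the coordinates and radius below are made up):
#   visual_from_data((46.05, 14.51), 500, show=0, save_as='dome.png')
# would render a static map with a ~500 m dome centred on the given point and
# save it to 'dome.png'.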
|
the-stack_0_3383 | from srcs.interpretator.interpretator_callback import InterpretatorCallback
from srcs.interpretator.context import Context
class Interpretator:
def __init__(self, runner, meta=None):
self.runner = runner
self.meta = meta
self.callbacks = []
self.context = Context()
self.current_string_index = 0
self.strings = []
def __call__(self, filter=None):
def decorator(function):
self.callbacks.append(InterpretatorCallback(function, filter))
return function
return decorator
def execute(self, string):
result = []
for callback in self.callbacks:
single_output = callback(string, self.current_string_index, self.context)
if single_output is not None:
result.append(single_output)
self.current_string_index += 1
self.strings.append(string)
return '\n'.join(result)
def run(self):
self.runner(self)
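# Minimal usage sketch (editorial, hedged): assuming InterpretatorCallback simply
# forwards (string, index, context) to the wrapped function, something like the
# following would register a callback and feed it one line of input:
#
#   interp = Interpretator(runner=lambda it: None)
#
#   @interp()
#   def echo(string, index, context):
#       return '{}: {}'.format(index, string)
#
#   interp.execute('hello')   # -> '0: hello'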
|
the-stack_0_3384 | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from structlog import get_logger
from covidfaq import config, routers
# from covidfaq.evaluating.model.bert_plus_ood import BertPlusOODEn, BertPlusOODFr
# from covidfaq.scrape.scrape import (
# load_latest_source_data,
# download_OOD_model,
# download_cached_embeddings,
# )
app = FastAPI()
app.include_router(routers.health.router)
app.include_router(routers.answers.router)
app.add_middleware(
CORSMiddleware, allow_origins=["*"], allow_methods=["*"], allow_headers=["*"]
)
@app.on_event("startup")
def on_startup():
conf = config.get()
log = get_logger()
log.info("launching", **conf.dict())
# load_latest_source_data()
# download_OOD_model()
# download_cached_embeddings()
# BertPlusOODEn()
# BertPlusOODFr()
|
the-stack_0_3386 | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E07000086"
addresses_name = "parl.2019-12-12/Version 1/Parliamentary Election - Democracy_Club__12December2019east.tsv"
stations_name = "parl.2019-12-12/Version 1/Parliamentary Election - Democracy_Club__12December2019east.tsv"
elections = ["parl.2019-12-12"]
csv_delimiter = "\t"
allow_station_point_from_postcode = False
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
rec = super().address_record_to_dict(record)
# Implausible looking UPRN geocodes. Throw away rather than investigate.
if uprn in [
"10091136033",
"100060322293",
"10091136173",
"10009593531",
"10012197923",
]:
return None
if uprn in [
"100060294726", # SO533DA -> SO533HB : 242 Bournemouth Road, Chandler`s Ford, Eastleigh
"10009593644", # SO533DA -> SO533AD : Dovecote, Howard Close, Chandler`s Ford, Eastleigh
"100060302836", # SO532FG -> SO535BZ : 138 Kingsway, Chandler`s Ford, Eastleigh
"10009589400", # SO507DE -> SO507DF : The Chalet, Moon River Pines, Fir Tree Lane, Horton Heath, Eastleigh
"10091136799", # SO303FA -> SO532HL : 8a Clanfield Close, Chandler`s Ford, Eastleigh
]:
rec["accept_suggestion"] = True
return rec
def station_record_to_dict(self, record):
if record.polling_place_id == "4629": # Abbey Hall, Victoria Road.
record = record._replace(polling_place_easting="445232")
record = record._replace(polling_place_northing="108734")
if (
record.polling_place_uprn == "100062644887"
): # Chandler's Ford Community Centre
record = record._replace(polling_place_postcode="SO53 2FT")
return super().station_record_to_dict(record)
|
the-stack_0_3387 | # Author: Zheng Hao Tan
# Email: [email protected]
import sys
import SMS
if len(sys.argv) != 6:
sys.exit('Invalid arguments. Please rerun the script')
accountSID = sys.argv[1]
authToken = sys.argv[2]
from_ = sys.argv[3]
to = sys.argv[4]
smsBody = sys.argv[5]
print('Setting up phone numbers and logging in...')
sms = SMS.SMS(accountSID, authToken)
print('Sending SMS...')
sms.send(from_, to, smsBody)
print('SMS sent!')
|
the-stack_0_3389 | from django.conf.urls import url, include
from django.contrib import admin
from . import views
from django.conf import settings
from django.conf.urls.static import static
from login.views import *
app_name='home'
urlpatterns = [
#Home
url(r'^$', index, name='index'),
#semantic
url(r'^varta/', video_chat_view, name='varta_chat'),
#temp
url(r'^temp/', temp_view, name="temp_view"),
#get if user exists with username or not
url(r'^getifuser/', get_if_user, name="get_if_user"),
#general profile settings
url(r'^settings/general/', general_info, name="general_info" ),
#all auth urls
url(r'^accounts/', include('allauth.urls')),
#Topic Autocomplete
url(r'^topic-autocomplete/$',
TopicAutocomplete.as_view(model = Topic, create_field=''),
name='topic-autocomplete'
),
#login
url(r'^login/', login_view, name='login'),
#report lost and found
url(r'^report/', item_create, name='report'),
#lost found view
url(r'^lost/(?P<id>\d+)/$', lost_view, name='lost_view'),
#follow users
url(r'^follow/request/', follow_request, name='follow_request'),
#edit profile
url(r'^fillup/', person_view, name='fillup'),
#markdown drag and drop markdown editor
url(r'^markdownx/', include('markdownx.urls')),
#post create
url(r'^post/create/', post_create, name = 'create_post'),
#upvote a post
url(r'^post/upvote/', post_upvote, name = 'upvote_post'),
#upvote a post
url(r'^answer/upvote/', answer_upvote, name = 'upvote_answer'),
#update post
url(r'^post/update/(?P<id>\d+)/$', post_update, name = 'update_post'),
#view individual post
url(r'^post/view/(?P<id>\d+)/$', post_view, name='view_post'),
#delete post
url(r'^post/delete/', post_delete, name = 'delete_post'),
#post feed
url(r'^post/', post_details, name = 'details_post'),
#question create
url(r'^question/create/', question_create, name = 'create_question'),
#update question
url(r'^question/update/(?P<id>\d+)/$', question_update, name = 'update_question'),
#view individual question
url(r'^question/view/(?P<id>\d+)/$', question_view, name='view_question'),
#delete question
url(r'^question/delete/', question_delete, name = 'delete_question'),
#answer create
url(r'^answer/create/(?P<id>\d+)/$', answer_create, name = 'create_answer'),
#update answer
url(r'^answer/update/(?P<id>\d+)/$', answer_update, name = 'update_answer'),
#view individual answer
url(r'^answer/view/(?P<id>\d+)/$', answer_view, name='view_answer'),
#delete answer
url(r'^answer/delete/', answer_delete, name = 'delete_answer'),
#create a comment
url(r'^comment/create/$', CommentCreateView.as_view(), name='comment-create'),
#update a comment
url(r'comment/update/(?P<pk>[0-9]+)/$',CommentUpdateView.as_view(), name='comment-update'),
#delete a comment
url(r'^comment/delete/(?P<pk>[-\w]+)$', CommentDeleteView.as_view(), name='comment-delete'),
#like a comment
url(r'^comments/like/$', LikeComment.as_view(), name='comment-like'),
#unlike a comment
url(r'^comments/unlike/$', UnlikeComment.as_view(), name='comment-unlike'),
#simply logout or GTFO
url(r'^logout/', logout_view, name='logout'),
#draceditor urls
url(r'^draceditor/', include('draceditor.urls')),
#Searching Users
url(r'^search/user/profiles/', profile_search, name='profile_search'),
#view individuals profile
url(r'^view/profile/(?P<id>\d+)/$', profile_view, name='profile_view'),
#admin/
url(r'^admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
the-stack_0_3390 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Source-based register."""
import collections
import datetime
import hashlib
import json
import tempfile
from typing import Any, List, Optional, Type
from absl import logging
import dataclasses
from tensorflow_datasets.core import dataset_builder
from tensorflow_datasets.core import registered
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.community import cache
from tensorflow_datasets.core.community import dataset_sources as dataset_sources_lib
from tensorflow_datasets.core.community import load
from tensorflow_datasets.core.community import register_base
from tensorflow_datasets.core.download import checksums
from tensorflow_datasets.core.utils import gcs_utils
# Datasets are installed as `import tfds_community.<ns>.<ds>.<hash>`
_IMPORT_MODULE_NAME = 'tfds_community'
_METADATA_FILENAME = 'installation.json'
@dataclasses.dataclass(frozen=True, eq=True)
class _DatasetPackage:
"""Dataset metadata (before installation), of a single dataset package.
Contains the information required to fetch the dataset package.
Attributes:
name: Dataset name
source: Source to locate of the source code (e.g. `github://...`)
"""
name: utils.DatasetName
source: dataset_sources_lib.DatasetSource
# Ideally, we should also save the version so `tfds.load('ns:ds/1.0.0')`
# fetch a specific version (e.g. at an older commit).
@classmethod
def from_json(cls, data: utils.Json) -> '_DatasetPackage':
"""Factory which creates the cls from json."""
return cls(
name=utils.DatasetName(data['name']),
source=dataset_sources_lib.DatasetSource.from_json(data['source']),
)
def to_json(self) -> utils.Json:
"""Exports the cls as json."""
return {
'name': str(self.name),
'source': self.source.to_json(),
}
@dataclasses.dataclass(frozen=True, eq=True)
class _InstalledPackage:
"""Dataset metadata (after installation), of a single dataset package.
  Contains the local information about the installed dataset package. This is
  specific to the user.
Attributes:
package: Source of the dataset package.
instalation_date: Date of installation of the package
    hash: sha256 hex digest of the installed files
"""
package: _DatasetPackage
instalation_date: datetime.datetime
hash: str
@property
def module_name(self) -> str:
"""Module name to import this dataset."""
name = self.package.name
return f'{_IMPORT_MODULE_NAME}.{name.namespace}.{name.name}.{self.hash}.{name.name}'
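    # For example (illustrative values only): namespace 'kaggle', dataset 'ds0'
    # and hash 'deadbeef' would yield 'tfds_community.kaggle.ds0.deadbeef.ds0'.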
@property
def installation_path(self) -> utils.ReadWritePath:
"""Local path of the package."""
name = self.package.name
sub_dir = f'{_IMPORT_MODULE_NAME}/{name.namespace}/{name.name}/{self.hash}'
return cache.module_path() / sub_dir
@classmethod
def from_json(cls, data: utils.Json) -> '_InstalledPackage':
"""Factory which creates the cls from json."""
return cls(
package=_DatasetPackage.from_json(data['package']),
# TODO(py3.7): Should use `datetime.fromisoformat`
instalation_date=datetime.datetime.strptime(
data['instalation_date'], '%Y-%m-%dT%H:%M:%S.%f'
),
hash=data['hash'],
)
def to_json(self) -> utils.Json:
"""Exports the cls as json."""
return {
'package': self.package.to_json(),
'instalation_date': self.instalation_date.isoformat(),
'hash': self.hash,
}
# TODO(py3.9): Should be `UserDict[utils.DatasetName, _DatasetPackage]`
class _PackageIndex(collections.UserDict):
"""Package index.
Package index is a `Dict[DatasetName, _DatasetPackage]` loaded from cache.
It has an additional `.refresh()` method to update the local cache by
querying the remote index (stored in `gs://tfds-data`).
On disk, the package index is a simple list of datasets with their
associated source:
```jsonl
{"name": "kaggle:ds0", "source": "github://..."}
{"name": "kaggle:ds1", "source": "github://..."}
{"name": "tensorflow_graphics:shapenet", "source": "github://..."}
[...]
```
"""
def __init__(self, path: utils.PathLike):
"""Contructor.
Args:
path: Remote location of the package index (file containing the list of
dataset packages)
"""
super().__init__()
self._remote_path: utils.ReadOnlyPath = utils.as_path(path)
self._cached_path: utils.ReadOnlyPath = (
cache.cache_path() / 'community-datasets-list.jsonl'
)
# Pre-load the index from the cache
if self._cached_path.exists():
self._refresh_from_content(self._cached_path.read_text())
def _refresh_from_content(self, content: str) -> None:
"""Update the index from the given `jsonl` content."""
dataset_packages = [
_DatasetPackage.from_json(json.loads(line))
for line in content.splitlines() if line.strip()
]
self.clear()
self.update({src.name: src for src in dataset_packages})
def refresh(self) -> None:
"""Update the cache."""
    # Should have a timer to avoid refreshing the cache immediately
    # (and a force=True option to ignore this)
# e.g. with os.path.getmtime(cached_path) - time.gmtime()
try:
content = self._remote_path.read_text()
except gcs_utils.GCS_UNAVAILABLE_EXCEPTIONS as e:
# Do not crash if GCS access not available, but instead silently reuse
# the cache.
logging.info(
'Could not refresh the package index (GCS unavailable): %s', e
)
return
    # If the read was successful, update the cache with the new dataset list
self._cached_path.write_text(content)
self._refresh_from_content(content)
class PackageRegister(register_base.BaseRegister):
"""Dataset register based on a list of remotely stored datasets definitions.
Package register is similar to a dataset package manager. It contains a
package index containing the list of all registered datasets with their
associated location.
When a specific dataset is requested, `PackageRegister` will download
and cache the original source code locally.
Usage:
```python
register = PackageRegister(path='/path/to/datasets-source-list.jsonl')
# List all registered datasets: ['kaggle:ds0', 'kaggle:ds1',...]
register.list_builders()
# Load a specific dataset
builder = register.builder('tensorflow_graphics:shapenet')
```
"""
def __init__(self, path: utils.PathLike):
"""Contructor.
Args:
path: Path to the register files containing the list of dataset sources,
forwarded to `_PackageIndex`
"""
self._path = utils.as_path(path)
@utils.memoized_property
def _package_index(self) -> _PackageIndex:
"""`Dict[DatasetName, _DatasetPackage]` containg the community datasets."""
# Use property to lazy-initialize the cache (and create the tmp dir) only
# if it is used.
return _PackageIndex(self._path)
def list_builders(self) -> List[str]:
"""Returns the list of registered builders."""
if not self._package_index: # Package index not loaded nor cached
self._package_index.refresh() # Try updating the index
return sorted(str(name) for name in self._package_index) # pylint: disable=not-an-iterable
def builder_cls(
self, name: utils.DatasetName,
) -> Type[dataset_builder.DatasetBuilder]:
"""Returns the builder class."""
# Download the dataset generation code, or reuse the cache
# TODO(tfds): Should add the option to request a specific code version
installed_dataset = _download_or_reuse_cache(
name=name,
package_index=self._package_index,
)
# Load the dataset from the module
return load.builder_cls_from_module(installed_dataset.module_name)
def builder(
self, name: utils.DatasetName, **builder_kwargs: Any,
) -> dataset_builder.DatasetBuilder:
"""Returns the dataset builder."""
return self.builder_cls(name)(**builder_kwargs) # pytype: disable=not-instantiable
def _download_or_reuse_cache(
name: utils.DatasetName,
package_index: _PackageIndex,
) -> _InstalledPackage:
"""Downloads the dataset generation source code.
Search the dataset in the cache, or download it from the package index
otherwise.
Args:
name: Dataset name to load.
package_index: Index of all community datasets. Might be updated.
Returns:
The installed dataset information.
Raises:
DatasetNotFoundError: If the dataset can't be loaded.
"""
# Dataset can be:
# * Installed locally (in the cache) -> reuse
# * Not installed but present in the package index -> install
# * Not present in the package index -> raise error
# Check if the file is already downloaded/cached
# TODO(tfds): To force a download even if file already present, we
# should add a `ignore_cache=True` option in `tfds.load`. Or should always
# try to download the file ?
last_installed_version = _get_last_installed_version(name)
if last_installed_version:
return last_installed_version
# If file isn't cached yet, we need to download it.
# First need to find it's location.
if name not in package_index:
# If not, we need to update the package index cache
package_index.refresh()
# If the dataset is present in the package index cache, use this
package = package_index.get(name)
if not package:
# If still not found, raise an DatasetNotFoundError
raise registered.DatasetNotFoundError(
f'Could not find dataset {name}: Dataset not found among the '
f'{len(package_index)} datasets of the community index.'
)
# If package was found, download it.
installed_package = _download_and_cache(package)
return installed_package
def _get_last_installed_version(
name: utils.DatasetName,
) -> Optional[_InstalledPackage]:
"""Checks whether the datasets is installed locally and returns it."""
root_dir = (
cache.module_path() / _IMPORT_MODULE_NAME / name.namespace / name.name
)
if not root_dir.exists(): # Dataset not found
return None
all_installed_package_metadatas = [
package / _METADATA_FILENAME for package in root_dir.iterdir()
]
all_installed_packages = [
_InstalledPackage.from_json(json.loads(metadata.read_text()))
for metadata in all_installed_package_metadatas
if metadata.exists()
]
all_installed_packages = sorted(
all_installed_packages, key=lambda p: p.instalation_date
)
if not all_installed_packages: # No valid package found
return None
else:
return all_installed_packages[-1] # Most recently installed package
def _download_and_cache(package: _DatasetPackage) -> _InstalledPackage:
"""Downloads and installs locally the dataset source.
  This function installs the dataset package in:
`<module_path>/<namespace>/<ds_name>/<hash>/...`.
Args:
package: Package to install.
Returns:
installed_dataset: The installed dataset package.
"""
tmp_dir = utils.as_path(tempfile.mkdtemp())
try:
# Download the package in a tmp directory
dataset_sources_lib.download_from_source(
package.source,
tmp_dir,
)
# Compute the package hash (to install the dataset in a unique dir)
package_hash = _compute_dir_hash(tmp_dir)
# Add package metadata
installed_package = _InstalledPackage(
package=package,
instalation_date=datetime.datetime.now(),
hash=package_hash,
)
package_metadata = json.dumps(installed_package.to_json())
(tmp_dir / _METADATA_FILENAME).write_text(package_metadata)
# Rename the package to it's final destination
installation_path = installed_package.installation_path
if installation_path.exists(): # Package already exists (with same hash)
# In the future, we should be smarter to allow overwrite.
raise ValueError(
f'Package {package} already installed in {installation_path}.'
)
installation_path.parent.mkdir(parents=True, exist_ok=True)
tmp_dir.rename(installation_path)
finally:
# Cleanup the tmp directory if it still exists.
if tmp_dir.exists():
tmp_dir.rmtree()
return installed_package
def _compute_dir_hash(path: utils.ReadOnlyPath) -> str:
"""Computes the checksums of the given directory deterministically."""
all_files = sorted(path.iterdir())
if any(f.is_dir() for f in all_files):
raise ValueError('Installed package should only contains files.')
# Concatenate the filenames and files content to create the directory hash
all_checksums = [f.name for f in all_files]
all_checksums += [checksums.compute_url_info(f).checksum for f in all_files]
return hashlib.sha256(''.join(all_checksums).encode()).hexdigest()
# Register pointing to the GCS community list.
community_register = PackageRegister(path=gcs_utils.GCS_COMMUNITY_INDEX_PATH)
|
the-stack_0_3392 | ## \file Projectile.py
# \author Samuel J. Crawford, Brooks MacLachlan, and W. Spencer Smith
# \brief Contains the entire Projectile program
import math
import sys
## \brief Calculates flight duration: the time when the projectile lands (s)
# \param v_launch launch speed: the initial speed of the projectile when launched (m/s)
# \param theta launch angle: the angle between the launcher and a straight line from the launcher to the target (rad)
# \param g_vect gravitational acceleration (m/s^2)
# \return flight duration: the time when the projectile lands (s)
def func_t_flight(v_launch, theta, g_vect):
return 2 * v_launch * math.sin(theta) / g_vect
## \brief Calculates landing position: the distance from the launcher to the final position of the projectile (m)
# \param v_launch launch speed: the initial speed of the projectile when launched (m/s)
# \param theta launch angle: the angle between the launcher and a straight line from the launcher to the target (rad)
# \param g_vect gravitational acceleration (m/s^2)
# \return landing position: the distance from the launcher to the final position of the projectile (m)
def func_p_land(v_launch, theta, g_vect):
return 2 * v_launch ** 2 * math.sin(theta) * math.cos(theta) / g_vect
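# Editorial note: by the double-angle identity, 2 * sin(theta) * cos(theta) equals
# sin(2 * theta), so this is the standard range formula v_launch**2 * sin(2*theta) / g_vect.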
## \brief Calculates distance between the target position and the landing position: the offset between the target position and the landing position (m)
# \param p_target target position: the distance from the launcher to the target (m)
# \param p_land landing position: the distance from the launcher to the final position of the projectile (m)
# \return distance between the target position and the landing position: the offset between the target position and the landing position (m)
def func_d_offset(p_target, p_land):
return p_land - p_target
## \brief Calculates output message as a string
# \param p_target target position: the distance from the launcher to the target (m)
# \param epsilon hit tolerance
# \param d_offset distance between the target position and the landing position: the offset between the target position and the landing position (m)
# \return output message as a string
def func_s(p_target, epsilon, d_offset):
if (math.fabs(d_offset / p_target) < epsilon) :
return "The target was hit."
elif (d_offset < 0) :
return "The projectile fell short."
else :
return "The projectile went long."
## \brief Reads input from a file with the given file name
# \param filename name of the input file
# \return launch speed: the initial speed of the projectile when launched (m/s)
# \return launch angle: the angle between the launcher and a straight line from the launcher to the target (rad)
# \return target position: the distance from the launcher to the target (m)
def get_input(filename):
infile = open(filename, "r")
infile.readline()
v_launch = float(infile.readline())
infile.readline()
theta = float(infile.readline())
infile.readline()
p_target = float(infile.readline())
infile.close()
return v_launch, theta, p_target
## \brief Verifies that input values satisfy the physical constraints
# \param v_launch launch speed: the initial speed of the projectile when launched (m/s)
# \param theta launch angle: the angle between the launcher and a straight line from the launcher to the target (rad)
# \param p_target target position: the distance from the launcher to the target (m)
def input_constraints(v_launch, theta, p_target):
if (not(v_launch > 0)) :
print("Warning: ", end="")
print("v_launch has value ", end="")
print(v_launch, end="")
print(", but is suggested to be ", end="")
print("above ", end="")
print(0, end="")
print(".")
if (not(0 < theta and theta < math.pi / 2)) :
print("Warning: ", end="")
print("theta has value ", end="")
print(theta, end="")
print(", but is suggested to be ", end="")
print("between ", end="")
print(0, end="")
print(" and ", end="")
print(math.pi / 2, end="")
print(" ((pi)/(2))", end="")
print(".")
if (not(p_target > 0)) :
print("Warning: ", end="")
print("p_target has value ", end="")
print(p_target, end="")
print(", but is suggested to be ", end="")
print("above ", end="")
print(0, end="")
print(".")
## \brief Writes the output values to output.txt
# \param s output message as a string
# \param d_offset distance between the target position and the landing position: the offset between the target position and the landing position (m)
def write_output(s, d_offset):
outputfile = open("output.txt", "w")
print("s = ", end="", file=outputfile)
print(s, file=outputfile)
print("d_offset = ", end="", file=outputfile)
print(d_offset, file=outputfile)
outputfile.close()
filename = sys.argv[1]
g_vect = 9.8
epsilon = 2.0e-2
v_launch, theta, p_target = get_input(filename)
input_constraints(v_launch, theta, p_target)
t_flight = func_t_flight(v_launch, theta, g_vect)
p_land = func_p_land(v_launch, theta, g_vect)
d_offset = func_d_offset(p_target, p_land)
s = func_s(p_target, epsilon, d_offset)
write_output(s, d_offset)
|
the-stack_0_3393 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test longpolling with getblocktemplate."""
from decimal import Decimal
from test_framework.test_framework import DollarTestFramework
from test_framework.util import get_rpc_proxy, random_transaction
import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
templat = node.getblocktemplate()
self.longpollid = templat['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
def run(self):
self.node.getblocktemplate({'longpollid':self.longpollid})
class GetBlockTemplateLPTest(DollarTestFramework):
def set_test_params(self):
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
self.nodes[0].generate(10)
templat = self.nodes[0].getblocktemplate()
longpollid = templat['longpollid']
# longpollid should not change between successive invocations if nothing else happens
templat2 = self.nodes[0].getblocktemplate()
assert(templat2['longpollid'] == longpollid)
# Test 1: test that the longpolling wait if we do nothing
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert(thr.is_alive())
# Test 2: test that longpoll will terminate if another node generates a block
self.nodes[1].generate(1) # generate a block on another node
# check that thread will exit now that new transaction entered mempool
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 3: test that longpoll will terminate if we generate a block ourselves
thr = LongpollThread(self.nodes[0])
thr.start()
        self.nodes[0].generate(1) # generate a block ourselves
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
# min_relay_fee is fee per 1000 bytes, which should be more than enough.
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert(not thr.is_alive())
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
|
the-stack_0_3394 | '''
实验名称:大气压强传感器BMP280
版本:v1.0
日期:2020.5
作者:01Studio(www.01studio.org)
'''
#导入相关模块
import time,board,busio
from analogio import AnalogIn
import adafruit_ssd1306,adafruit_hcsr04
#Create the I2C bus object
i2c = busio.I2C(board.SCK, board.MOSI)
#Create the OLED object; the OLED bundled with 01Studio uses I2C address 0x3C
display = adafruit_ssd1306.SSD1306_I2C(128, 64, i2c, addr=0x3C)
#Clear the screen
display.fill(0)
display.show()
#Create the ultrasonic (HC-SR04) sensor object
sonar = adafruit_hcsr04.HCSR04(trigger_pin=board.TX, echo_pin=board.RX)
while True:
    #Read the distance value
distance=sonar.distance
    #Display basic information
    display.fill(0) #clear the screen
display.text('01Studio', 0,0, 1,font_name='font5x8.bin')
display.text('Distance Test', 0,15, 1,font_name='font5x8.bin')
    #Display the distance reading
display.text(str(round(distance,1))+' CM', 0,40, 1,font_name='font5x8.bin')
display.show()
print(str(round(distance,1))+' CM')
    time.sleep(0.2) #0.2 second sampling period
|
the-stack_0_3398 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 7 10:13:43 2018
@author: Stamatis Lefkimmiatis
@email : [email protected]
"""
import argparse
import os.path
import torch as th
from pydl.networks.ResDNet.net import ResDNet_denoise
from pydl.utils import psnr
from pydl.datasets.BSDS import BSDS68
from torch.utils.data import DataLoader
def tupleOfData(s,dtype):
if s.find('(',0,1) > -1: # If the first character of the string is '(' then
# this is a tuple and we keep only the substring with the values
# separated by commas, i.e., s[1:-1]. Then we create a list that holds
# the characters which corresponds to the entries of the tuple by using
# s[1:-1].split(',')
s = tuple(dtype(i) for i in s[1:-1].replace(" ","").split(',') if i!="")
else:
s = dtype(s)
return s
tupleOfInts = lambda s: tupleOfData(s,int)
tupleOfFloats = lambda s: tupleOfData(s,float)
parser = argparse.ArgumentParser(description='Validation of ResDNet in BSDS68')
parser.add_argument('--stdn', type = tupleOfFloats, default='(5,10,15,20,25,30,35,40,45,50,55)', help=" Number of noise levels (standard deviation) for which the network will be validated.")
parser.add_argument('--color', action='store_true', help="Type of images used to validate the network.")
parser.add_argument('--seed', type = int, default = 20151909, help='random seed to use for generating the noisy images.')
parser.add_argument('--batchSize', type = int, default = 64, help='validation batch size.')
parser.add_argument('--threads', type = int, default = 4, help='number of threads for data loader to use.')
parser.add_argument('--cuda', action='store_true', help='use cuda?')
parser.add_argument('--gpu_device', type = int, default = 0, help='which gpu to use?')
opt = parser.parse_args()
print('========= Selected validation parameters =============')
print(opt)
print('======================================================')
print('\n')
if opt.cuda:
if opt.gpu_device != th.cuda.current_device()\
and (opt.gpu_device >= 0 and opt.gpu_device < th.cuda.device_count()):
print("===> Setting GPU device {}".format(opt.gpu_device))
th.cuda.set_device(opt.gpu_device)
val_tall_set = BSDS68(opt.stdn,random_seed=opt.seed,tall=True,color=opt.color)
val_wide_set = BSDS68(opt.stdn,random_seed=opt.seed,tall=False,color=opt.color)
Nstdn = len(opt.stdn)
Ntall = len(val_tall_set.img_gt)
Nwide = len(val_wide_set.img_gt)
dataLoader_tall = DataLoader(dataset=val_tall_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=False)
dataLoader_wide = DataLoader(dataset=val_wide_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=False)
ptable_tall = th.ones(Ntall*Nstdn,2)
ptable_wide = th.ones(Nwide*Nstdn,2)
for i, batch in enumerate(dataLoader_tall, 0):
input, target, sigma = batch[0], batch[1], batch[2]
start = i*opt.batchSize
end = min((i+1)*opt.batchSize,Ntall*Nstdn)
if opt.cuda:
input = input.cuda()
target = target.cuda()
sigma = sigma.cuda()
out = ResDNet_denoise(input,sigma)
ptable_tall[start:end:1,0]= psnr(input,target)
ptable_tall[start:end:1,1]= psnr(out,target)
del out,input,target,sigma
ptable_tall = ptable_tall.t().contiguous().view(2,Nstdn,Ntall).permute(2,1,0)
for i, batch in enumerate(dataLoader_wide, 0):
input, target, sigma = batch[0], batch[1], batch[2]
start = i*opt.batchSize
end = min((i+1)*opt.batchSize,Nwide*Nstdn)
if opt.cuda:
input = input.cuda()
target = target.cuda()
sigma = sigma.cuda()
out = ResDNet_denoise(input,sigma)
ptable_wide[start:end:1,0]= psnr(input,target)
ptable_wide[start:end:1,1]= psnr(out,target)
del out,input,target,sigma
ptable_wide = ptable_wide.t().contiguous().view(2,Nstdn,Nwide).permute(2,1,0)
ptable = th.cat((ptable_tall,ptable_wide),dim=0)
del ptable_tall,ptable_wide
results = dict.fromkeys(opt.stdn)
for i,stdn in enumerate(opt.stdn,0):
results[stdn] = {"noisy":ptable[:,i,0],"denoised":ptable[:,i,1]}
del ptable
cstr = "color_BSDS68_std:" if opt.color else "gray_BSDS68_std:"
cstr += str(opt.stdn) + ".pth"
currentPath = os.path.dirname(os.path.realpath(__file__))
dirPath = os.path.join(currentPath,'Results')
os.makedirs(dirPath,exist_ok = True)
th.save(results,os.path.join(dirPath,cstr)) |
the-stack_0_3399 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import argparse
from azure.cli.core.commands import ExtensionCommandSource
from knack.help import (HelpFile as KnackHelpFile, CommandHelpFile as KnackCommandHelpFile,
GroupHelpFile as KnackGroupHelpFile, ArgumentGroupRegistry as KnackArgumentGroupRegistry,
HelpExample as KnackHelpExample, HelpParameter as KnackHelpParameter,
_print_indent, CLIHelp, HelpAuthoringException)
from knack.log import get_logger
from knack.util import CLIError
logger = get_logger(__name__)
PRIVACY_STATEMENT = """
Welcome to Azure CLI!
---------------------
Use `az -h` to see available commands or go to https://aka.ms/cli.
Telemetry
---------
The Azure CLI collects usage data in order to improve your experience.
The data is anonymous and does not include commandline argument values.
The data is collected by Microsoft.
You can change your telemetry settings with `az configure`.
"""
WELCOME_MESSAGE = r"""
/\
/ \ _____ _ _ ___ _
/ /\ \ |_ / | | | \'__/ _\
/ ____ \ / /| |_| | | | __/
/_/ \_\/___|\__,_|_| \___|
Welcome to the cool new Azure CLI!
Use `az --version` to display the current version.
Here are the base commands:
"""
# PrintMixin class to decouple printing functionality from AZCLIHelp class.
# Most of these methods override print methods in CLIHelp
class CLIPrintMixin(CLIHelp):
def _print_header(self, cli_name, help_file):
super(CLIPrintMixin, self)._print_header(cli_name, help_file)
links = help_file.links
if links:
link_text = "{} and {}".format(", ".join([link["url"] for link in links[0:-1]]),
links[-1]["url"]) if len(links) > 1 else links[0]["url"]
link_text = "For more information, see: {}\n".format(link_text)
_print_indent(link_text, 2, width=self.textwrap_width)
def _print_detailed_help(self, cli_name, help_file):
CLIPrintMixin._print_extensions_msg(help_file)
super(CLIPrintMixin, self)._print_detailed_help(cli_name, help_file)
self._print_az_find_message(help_file.command, self.cli_ctx.enable_color)
@staticmethod
def _get_choices_defaults_sources_str(p):
choice_str = ' Allowed values: {}.'.format(', '.join(sorted([str(x) for x in p.choices]))) \
if p.choices else ''
default_value_source = p.default_value_source if p.default_value_source else 'Default'
default_str = ' {}: {}.'.format(default_value_source, p.default) \
if p.default and p.default != argparse.SUPPRESS else ''
value_sources_str = CLIPrintMixin._process_value_sources(p) if p.value_sources else ''
return '{}{}{}'.format(choice_str, default_str, value_sources_str)
@staticmethod
def _print_examples(help_file):
indent = 0
_print_indent('Examples', indent)
for e in help_file.examples:
indent = 1
_print_indent('{0}'.format(e.short_summary), indent)
indent = 2
if e.long_summary:
_print_indent('{0}'.format(e.long_summary), indent)
_print_indent('{0}'.format(e.command), indent)
print('')
@staticmethod
def _print_az_find_message(command, enable_color):
from colorama import Style
indent = 0
message = 'To search AI knowledge base for examples, use: az find "az {}"'.format(command)
if enable_color:
message = Style.BRIGHT + message + Style.RESET_ALL
_print_indent(message + '\n', indent)
@staticmethod
def _process_value_sources(p):
commands, strings, urls = [], [], []
for item in p.value_sources:
if "string" in item:
strings.append(item["string"])
elif "link" in item and "command" in item["link"]:
commands.append(item["link"]["command"])
elif "link" in item and "url" in item["link"]:
urls.append(item["link"]["url"])
command_str = ' Values from: {}.'.format(", ".join(commands)) if commands else ''
string_str = ' {}'.format(", ".join(strings)) if strings else ''
string_str = string_str + "." if string_str and not string_str.endswith(".") else string_str
urls_str = ' For more info, go to: {}.'.format(", ".join(urls)) if urls else ''
return '{}{}{}'.format(command_str, string_str, urls_str)
@staticmethod
def _print_extensions_msg(help_file):
if help_file.type != 'command':
return
if isinstance(help_file.command_source, ExtensionCommandSource):
logger.warning(help_file.command_source.get_command_warn_msg())
# Extension preview/experimental warning is disabled because it can be confusing when displayed together
# with command or command group preview/experimental warning. See #12556
# # If experimental is true, it overrides preview
# if help_file.command_source.experimental:
# logger.warning(help_file.command_source.get_experimental_warn_msg())
# elif help_file.command_source.preview:
# logger.warning(help_file.command_source.get_preview_warn_msg())
class AzCliHelp(CLIPrintMixin, CLIHelp):
def __init__(self, cli_ctx):
super(AzCliHelp, self).__init__(cli_ctx,
privacy_statement=PRIVACY_STATEMENT,
welcome_message=WELCOME_MESSAGE,
command_help_cls=CliCommandHelpFile,
group_help_cls=CliGroupHelpFile,
help_cls=CliHelpFile)
from knack.help import HelpObject
# TODO: This workaround is used to avoid a bizarre bug in Python 2.7. It
# essentially reassigns Knack's HelpObject._normalize_text implementation
        # with an identical implementation in Az. For whatever reason, this fixes
# the bug in Python 2.7.
@staticmethod
def new_normalize_text(s):
if not s or len(s) < 2:
return s or ''
s = s.strip()
initial_upper = s[0].upper() + s[1:]
trailing_period = '' if s[-1] in '.!?' else '.'
return initial_upper + trailing_period
HelpObject._normalize_text = new_normalize_text # pylint: disable=protected-access
self._register_help_loaders()
self._name_to_content = {}
def show_help(self, cli_name, nouns, parser, is_group):
self.update_loaders_with_help_file_contents(nouns)
delimiters = ' '.join(nouns)
help_file = self.command_help_cls(self, delimiters, parser) if not is_group \
else self.group_help_cls(self, delimiters, parser)
help_file.load(parser)
if not nouns:
help_file.command = ''
else:
AzCliHelp.update_examples(help_file)
self._print_detailed_help(cli_name, help_file)
from azure.cli.core.util import show_updates_available
show_updates_available(new_line_after=True)
show_link = self.cli_ctx.config.getboolean('output', 'show_survey_link', True)
from azure.cli.core.commands.constants import (SURVEY_PROMPT_STYLED, UX_SURVEY_PROMPT_STYLED)
from azure.cli.core.style import print_styled_text
if show_link:
print_styled_text(SURVEY_PROMPT_STYLED)
if not nouns:
print_styled_text(UX_SURVEY_PROMPT_STYLED)
def get_examples(self, command, parser, is_group):
"""Get examples of a certain command from the help file.
Get the text of the example, strip the newline character and
return a list of commands which start with the given command name.
"""
nouns = command.split(' ')[1:]
self.update_loaders_with_help_file_contents(nouns)
delimiters = ' '.join(nouns)
help_file = self.command_help_cls(self, delimiters, parser) if not is_group \
else self.group_help_cls(self, delimiters, parser)
help_file.load(parser)
def strip_command(command):
command = command.replace('\\\n', '')
contents = [item for item in command.split(' ') if item]
return ' '.join(contents).strip()
examples = []
for example in help_file.examples:
if example.command and example.name:
examples.append({
'command': strip_command(example.command),
'description': example.name
})
return examples
def _register_help_loaders(self):
import azure.cli.core._help_loaders as help_loaders
import inspect
def is_loader_cls(cls):
return inspect.isclass(cls) and cls.__name__ != 'BaseHelpLoader' and issubclass(cls, help_loaders.BaseHelpLoader) # pylint: disable=line-too-long
versioned_loaders = {}
for cls_name, loader_cls in inspect.getmembers(help_loaders, is_loader_cls):
loader = loader_cls(self)
versioned_loaders[cls_name] = loader
if len(versioned_loaders) != len({ldr.version for ldr in versioned_loaders.values()}):
ldrs_str = " ".join("{}-version:{}".format(cls_name, ldr.version) for cls_name, ldr in versioned_loaders.items()) # pylint: disable=line-too-long
raise CLIError("Two loaders have the same version. Loaders:\n\t{}".format(ldrs_str))
self.versioned_loaders = versioned_loaders
def update_loaders_with_help_file_contents(self, nouns):
loader_file_names_dict = {}
file_name_set = set()
for ldr_cls_name, loader in self.versioned_loaders.items():
new_file_names = loader.get_noun_help_file_names(nouns) or []
loader_file_names_dict[ldr_cls_name] = new_file_names
file_name_set.update(new_file_names)
for file_name in file_name_set:
if file_name not in self._name_to_content:
with open(file_name, 'r') as f:
self._name_to_content[file_name] = f.read()
for ldr_cls_name, file_names in loader_file_names_dict.items():
file_contents = {}
for name in file_names:
file_contents[name] = self._name_to_content[name]
self.versioned_loaders[ldr_cls_name].update_file_contents(file_contents)
# This method is meant to be a hook that can be overridden by an extension or module.
@staticmethod
def update_examples(help_file):
pass
class CliHelpFile(KnackHelpFile):
def __init__(self, help_ctx, delimiters):
# Each help file (for a command or group) has a version denoting the source of its data.
super(CliHelpFile, self).__init__(help_ctx, delimiters)
self.links = []
def _should_include_example(self, ex):
supported_profiles = ex.get('supported-profiles')
unsupported_profiles = ex.get('unsupported-profiles')
if all((supported_profiles, unsupported_profiles)):
raise HelpAuthoringException("An example cannot have both supported-profiles and unsupported-profiles.")
if supported_profiles:
supported_profiles = [profile.strip() for profile in supported_profiles.split(',')]
return self.help_ctx.cli_ctx.cloud.profile in supported_profiles
if unsupported_profiles:
unsupported_profiles = [profile.strip() for profile in unsupported_profiles.split(',')]
return self.help_ctx.cli_ctx.cloud.profile not in unsupported_profiles
return True
# Needs to override base implementation to exclude unsupported examples.
def _load_from_data(self, data):
if not data:
return
if isinstance(data, str):
self.long_summary = data
return
if 'type' in data:
self.type = data['type']
if 'short-summary' in data:
self.short_summary = data['short-summary']
self.long_summary = data.get('long-summary')
if 'examples' in data:
self.examples = []
for d in data['examples']:
if self._should_include_example(d):
self.examples.append(HelpExample(**d))
def load(self, options):
ordered_loaders = sorted(self.help_ctx.versioned_loaders.values(), key=lambda ldr: ldr.version)
for loader in ordered_loaders:
loader.versioned_load(self, options)
class CliGroupHelpFile(KnackGroupHelpFile, CliHelpFile):
def load(self, options):
# forces class to use this load method even if KnackGroupHelpFile overrides CliHelpFile's method.
CliHelpFile.load(self, options)
class CliCommandHelpFile(KnackCommandHelpFile, CliHelpFile):
def __init__(self, help_ctx, delimiters, parser):
super(CliCommandHelpFile, self).__init__(help_ctx, delimiters, parser)
self.type = 'command'
self.command_source = getattr(parser, 'command_source', None)
self.parameters = []
for action in [a for a in parser._actions if a.help != argparse.SUPPRESS]: # pylint: disable=protected-access
if action.option_strings:
self._add_parameter_help(action)
else:
# use metavar for positional parameters
param_kwargs = {
'name_source': [action.metavar or action.dest],
'deprecate_info': getattr(action, 'deprecate_info', None),
'preview_info': getattr(action, 'preview_info', None),
'experimental_info': getattr(action, 'experimental_info', None),
'default_value_source': getattr(action, 'default_value_source', None),
'description': action.help,
'choices': action.choices,
'required': False,
'default': None,
'group_name': 'Positional'
}
self.parameters.append(HelpParameter(**param_kwargs))
help_param = next(p for p in self.parameters if p.name == '--help -h')
help_param.group_name = 'Global Arguments'
# update parameter type so we can use overriden update_from_data method to update value sources.
for param in self.parameters:
param.__class__ = HelpParameter
def _load_from_data(self, data):
super(CliCommandHelpFile, self)._load_from_data(data)
if isinstance(data, str) or not self.parameters or not data.get('parameters'):
return
loaded_params = []
loaded_param = {}
for param in self.parameters:
loaded_param = next((n for n in data['parameters'] if n['name'] == param.name), None)
if loaded_param:
param.update_from_data(loaded_param)
loaded_params.append(param)
self.parameters = loaded_params
def load(self, options):
# forces class to use this load method even if KnackCommandHelpFile overrides CliHelpFile's method.
CliHelpFile.load(self, options)
class ArgumentGroupRegistry(KnackArgumentGroupRegistry): # pylint: disable=too-few-public-methods
def __init__(self, group_list):
super(ArgumentGroupRegistry, self).__init__(group_list)
self.priorities = {
None: 0,
'Resource Id Arguments': 1,
'Generic Update Arguments': 998,
'Global Arguments': 1000,
}
priority = 2
# any groups not already in the static dictionary should be prioritized alphabetically
other_groups = [g for g in sorted(list(set(group_list))) if g not in self.priorities]
for group in other_groups:
self.priorities[group] = priority
priority += 1
class HelpExample(KnackHelpExample): # pylint: disable=too-few-public-methods
def __init__(self, **_data):
# Old attributes
_data['name'] = _data.get('name', '')
_data['text'] = _data.get('text', '')
super(HelpExample, self).__init__(_data)
self.name = _data.get('summary', '') if _data.get('summary', '') else self.name
self.text = _data.get('command', '') if _data.get('command', '') else self.text
self.long_summary = _data.get('description', '')
self.supported_profiles = _data.get('supported-profiles', None)
self.unsupported_profiles = _data.get('unsupported-profiles', None)
# alias old params with new
@property
def short_summary(self):
return self.name
@short_summary.setter
def short_summary(self, value):
self.name = value
@property
def command(self):
return self.text
@command.setter
def command(self, value):
self.text = value
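# Illustrative sketch (not executed by the CLI): a HelpExample built from the newer YAML keys can
# still be read through the legacy attribute names via the aliases above, e.g.
#   ex = HelpExample(**{"summary": "Create a VM.", "command": "az vm create -n MyVm"})
#   ex.short_summary  -> "Create a VM."
#   ex.command        -> "az vm create -n MyVm"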
class HelpParameter(KnackHelpParameter): # pylint: disable=too-many-instance-attributes
def __init__(self, **kwargs):
super(HelpParameter, self).__init__(**kwargs)
def update_from_data(self, data):
super(HelpParameter, self).update_from_data(data)
# original help.py value_sources are strings, update command strings to value-source dict
if self.value_sources:
self.value_sources = [str_or_dict if isinstance(str_or_dict, dict) else {"link": {"command": str_or_dict}}
for str_or_dict in self.value_sources]
|
the-stack_0_3400 | # -*- coding: UTF-8 -*-
import os
from setuptools import setup
from typing import List
def _get_relative_path(file_path: str) -> str:
return os.path.join(os.path.dirname(__file__), file_path)
def load_requirements() -> List[str]:
# Load requirements
requirements = [] # type: List[str]
with open(_get_relative_path("requirements.txt"), "r") as req_file:
lines = [line.rstrip("\n") for line in req_file]
lines = list(filter(lambda line: line != "" and line[0] != "#", lines))
for line in lines:
hash_pos = line.find("#")
if hash_pos != -1:
requirements.append(line[:hash_pos].strip())
else:
requirements.append(line)
return requirements
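# Illustrative behaviour (hypothetical requirements.txt): a file containing
#     requests>=2.0  # http client
#     # build tooling
#     wheel
# would be returned as ["requests>=2.0", "wheel"]: blank and comment-only lines are dropped
# and trailing comments are stripped.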
def main():
drive_ns = {}
with open(_get_relative_path("drive/__version__.py")) as f:
exec(f.read(), drive_ns)
setup(
name='drive',
version=drive_ns["__version__"],
author='Baptiste Fontaine',
author_email='[email protected]',
packages=['drive', 'drive.cli'],
url='https://github.com/NoName115/drive',
license='MIT License',
description='Google Drive client',
long_description=open('README.md', encoding='utf-8').read(),
long_description_content_type='text/markdown',
install_requires=load_requirements(),
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
entry_points={
"console_scripts": [
"gd-upload=drive.cli.upload:main",
"gd-download=drive.cli.download:main",
]
}
)
if __name__ == "__main__":
main()
|
the-stack_0_3403 | """
/*********************************************************************************/
* The MIT License (MIT) *
* *
* Copyright (c) 2014 EOX IT Services GmbH *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in *
* all copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
* *
*********************************************************************************/
"""
import os
import logging
import pprint
from datetime import datetime, timedelta
from airflow import DAG
from airflow.models import XCOM_RETURN_KEY
from airflow.operators import PythonOperator
from airflow.operators import RSYNCOperator
from airflow.operators import DHUSSearchOperator
from airflow.operators import DHUSDownloadOperator
from airflow.operators import ZipInspector
from airflow.operators import S1MetadataOperator
from airflow.operators import GDALWarpOperator
from airflow.operators import GDALAddoOperator
from airflow.utils.trigger_rule import TriggerRule
from geoserver_plugin import publish_product
import config as CFG
import config.s1_grd_1sdv as S1GRD1SDV
log = logging.getLogger(__name__)
# Settings
default_args = {
##################################################
# General configuration
#
'start_date': datetime.now() - timedelta(hours=1),
'owner': 'airflow',
'depends_on_past': False,
'provide_context': True,
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'max_threads': 1,
'max_active_runs': 1,
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
#
}
print("#######################")
print("Interval: ".format(S1GRD1SDV.dag_schedule_interval))
print("ID: {}".format(S1GRD1SDV.id))
print("DHUS: {} @ {}, Region: {}".format(CFG.dhus_username, CFG.dhus_url, S1GRD1SDV.dhus_search_bbox) )
print("GeoServer: {} @ {}".format(CFG.geoserver_username, CFG.geoserver_rest_url) )
print("RSYNC: {} @ {} using {}".format(CFG.rsync_username, CFG.rsync_hostname, CFG.rsync_ssh_key))
print("Date: {} / {}".format(S1GRD1SDV.dhus_search_startdate, S1GRD1SDV.dhus_search_enddate))
print("Search: max={}, order_by={}, keywords={}".format(S1GRD1SDV.dhus_filter_max, S1GRD1SDV.dhus_search_orderby,S1GRD1SDV.dhus_search_keywords))
print("Paths:\n collection_dir={}\n download_dir={}\n process_dir={}\n original_package_upload_dir={}\n repository_dir={}".format(S1GRD1SDV.collection_dir, S1GRD1SDV.download_dir, S1GRD1SDV.process_dir, S1GRD1SDV.original_package_upload_dir, S1GRD1SDV.repository_dir))
print("Collection:\n workspace={}\n layer={}".format(S1GRD1SDV.geoserver_workspace, S1GRD1SDV.geoserver_layer))
print("#######################")
TARGET_SRS = 'EPSG:4326'
TILE_SIZE = 512
OVERWRITE = True
RESAMPLING_METHOD = 'average'
MAX_OVERVIEW_LEVEL = 512
def prepare_band_paths(get_inputs_from, *args, **kwargs):
"""Get Product / Band files path Dictionary from ZipInspector and extract the list of band files """
task_instance = kwargs['ti']
# band number from task name
task_id = task_instance.task_id
band_number = int(task_id.split('_')[-1])
log.info("Getting inputs from: " + get_inputs_from)
product_bands_dict = task_instance.xcom_pull(task_ids=get_inputs_from, key=XCOM_RETURN_KEY)
if product_bands_dict is None:
log.info("No input from ZipInspector. Nothing to do")
return None
log.info("Product Band Dictionary: {}".format(pprint.pformat(product_bands_dict)))
files_path=[]
for k in product_bands_dict:
files_path += product_bands_dict[k]
# Push one of the band paths to XCom
file_path = files_path[band_number - 1]
return [file_path]
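# Illustrative XCom payload (assumed shape, inferred from how it is consumed above): ZipInspector
# is expected to push something like
#     {"S1A_..._GRDH.SAFE": ["/processing/band_vv.tiff", "/processing/band_vh.tiff"]}
# in which case the task get_band_paths_2 would return ["/processing/band_vh.tiff"].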
# DAG definition
dag = DAG(S1GRD1SDV.id,
description='DAG for searching, filtering and downloading Sentinel 1 data from DHUS server',
schedule_interval=S1GRD1SDV.dag_schedule_interval,
catchup=False,
default_args=default_args
)
# DHUS Search Task Operator
search_task = DHUSSearchOperator(task_id='search_product_task',
dhus_url=CFG.dhus_url,
dhus_user=CFG.dhus_username,
dhus_pass=CFG.dhus_password,
geojson_bbox=S1GRD1SDV.dhus_search_bbox,
startdate=S1GRD1SDV.dhus_search_startdate,
enddate=S1GRD1SDV.dhus_search_enddate,
filter_max=S1GRD1SDV.dhus_filter_max,
order_by=S1GRD1SDV.dhus_search_orderby,
keywords=S1GRD1SDV.dhus_search_keywords,
dag=dag)
# DHUS Download Task Operator
download_task = DHUSDownloadOperator(task_id='download_product_task',
dhus_url=CFG.dhus_url,
dhus_user=CFG.dhus_username,
dhus_pass=CFG.dhus_password,
download_max=S1GRD1SDV.dhus_download_max,
download_dir=S1GRD1SDV.download_dir,
get_inputs_from=search_task.task_id,
download_timeout=timedelta(hours=12),
dag=dag)
# Rsync Archive Task for Products
archive_task = RSYNCOperator(task_id="upload_original_package",
host = CFG.rsync_hostname,
remote_usr = CFG.rsync_username,
ssh_key_file = CFG.rsync_ssh_key,
remote_dir = S1GRD1SDV.original_package_upload_dir,
get_inputs_from=download_task.task_id,
dag=dag)
# Zip Inspector and Extractor Task
zip_task = ZipInspector(task_id='zip_inspector',
extension_to_search='tiff',
get_inputs_from=download_task.task_id,
dag=dag)
warp_tasks = []
addo_tasks = []
upload_tasks = []
band_paths_tasks = []
for i in range(1, 3):
band_paths = PythonOperator(task_id="get_band_paths_" + str(i),
python_callable=prepare_band_paths,
op_kwargs={
'get_inputs_from': zip_task.task_id
},
dag=dag)
band_paths_tasks.append(band_paths)
warp = GDALWarpOperator(
task_id='gdalwarp_' + str(i),
target_srs=TARGET_SRS,
tile_size=TILE_SIZE,
overwrite=OVERWRITE,
dstdir=S1GRD1SDV.process_dir,
get_inputs_from=band_paths.task_id,
dag=dag
)
warp_tasks.append(warp)
addo = GDALAddoOperator(
trigger_rule=TriggerRule.ALL_SUCCESS,
resampling_method=RESAMPLING_METHOD,
max_overview_level=MAX_OVERVIEW_LEVEL,
task_id='gdal_addo_' + str(i),
get_inputs_from=warp.task_id,
dag=dag
)
addo_tasks.append(addo)
upload = RSYNCOperator(task_id="upload_granule_{}_task".format(str(i)),
host=CFG.rsync_hostname,
remote_usr=CFG.rsync_username,
ssh_key_file=CFG.rsync_ssh_key,
remote_dir=S1GRD1SDV.repository_dir,
get_inputs_from=addo.task_id,
dag=dag)
upload_tasks.append(upload)
band_paths.set_upstream(zip_task)
warp.set_upstream(band_paths)
addo.set_upstream(warp)
upload.set_upstream(addo)
# Metadata Extraction task
addo_task_ids = ( task.task_id for task in addo_tasks )
upload_task_ids = ( task.task_id for task in upload_tasks )
metadata_task = S1MetadataOperator(task_id="extract_metadata_task",
product_safe_path=None,
granules_paths=None,
granules_upload_dir=S1GRD1SDV.repository_dir,
processing_dir=S1GRD1SDV.process_dir,
original_package_download_base_url=S1GRD1SDV.original_package_download_base_url,
gs_workspace=S1GRD1SDV.geoserver_workspace,
bands_dict = S1GRD1SDV.bands_dict,
gs_wms_layer=S1GRD1SDV.geoserver_layer,
gs_wfs_featuretype=S1GRD1SDV.geoserver_featuretype,
gs_wfs_format=S1GRD1SDV.geoserver_oseo_wfs_format,
gs_wfs_version=S1GRD1SDV.geoserver_oseo_wfs_version,
gs_wms_width=S1GRD1SDV.geoserver_oseo_wms_width,
gs_wms_height=S1GRD1SDV.geoserver_oseo_wms_height,
gs_wms_format=S1GRD1SDV.geoserver_oseo_wms_format,
gs_wms_version=S1GRD1SDV.geoserver_oseo_wms_version,
gs_wcs_coverage_id=S1GRD1SDV.geoserver_coverage,
gs_wcs_scale_i=S1GRD1SDV.geoserver_oseo_wcs_scale_i,
gs_wcs_scale_j=S1GRD1SDV.geoserver_oseo_wcs_scale_j,
gs_wcs_format=S1GRD1SDV.geoserver_oseo_wcs_format,
gs_wcs_version=S1GRD1SDV.geoserver_oseo_wcs_version,
get_inputs_from = {
'download_task_id': download_task.task_id,
'addo_task_ids': addo_task_ids,
'upload_task_ids': upload_task_ids,
'archive_product_task_id' : archive_task.task_id,
},
dag=dag)
# Publish product.zip to GeoServer
publish_task = PythonOperator(task_id="publish_product_task",
python_callable=publish_product,
op_kwargs={
'geoserver_username': CFG.geoserver_username,
'geoserver_password': CFG.geoserver_password,
'geoserver_rest_endpoint': '{}/oseo/collections/{}/products'.format(CFG.geoserver_rest_url, S1GRD1SDV.geoserver_oseo_collection), 'get_inputs_from': metadata_task.task_id,
},
dag = dag)
if CFG.eoxserver_rest_url:
publish_eox_task = PythonOperator(task_id="publish_product_eox_task",
python_callable=publish_product,
op_kwargs={
'geoserver_username': CFG.eoxserver_username,
'geoserver_password': CFG.eoxserver_password,
'geoserver_rest_endpoint': CFG.eoxserver_rest_url,
'get_inputs_from': metadata_task.task_id,
},
dag = dag)
download_task.set_upstream(search_task)
archive_task.set_upstream(download_task)
zip_task.set_upstream(download_task)
metadata_task.set_upstream(download_task)
metadata_task.set_upstream(archive_task)
for task in upload_tasks:
metadata_task.set_upstream(task)
publish_task.set_upstream(metadata_task)
if CFG.eoxserver_rest_url:
publish_eox_task.set_upstream(metadata_task)
|
the-stack_0_3404 | # -*- coding: utf-8 -*-
import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import adam_v2
EPISODES = 1000
class DQNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=2000)
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.learning_rate = 0.001
self.model = self._build_model()
def _build_model(self):
# Neural Net for Deep-Q learning Model
model = Sequential()
model.add(Dense(24, input_dim=self.state_size, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(self.action_size, activation='linear'))
model.compile(loss='mse',
optimizer=adam_v2.Adam(lr=self.learning_rate))
return model
def memorize(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
if np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action
def replay(self, batch_size):
minibatch = random.sample(self.memory, batch_size)
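        # For each sampled transition, move Q(s, a) toward the Bellman target
        # r + gamma * max_a' Q(s', a'); when the episode terminated, the target is just r.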
for state, action, reward, next_state, done in minibatch:
target = reward
if not done:
target = (reward + self.gamma *
np.amax(self.model.predict(next_state)[0]))
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
def load(self, name):
self.model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
if __name__ == "__main__":
env = gym.make('CartPole-v1')
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
agent = DQNAgent(state_size, action_size)
# agent.load("./save/cartpole-dqn.h5")
done = False
batch_size = 4
for e in range(EPISODES):
state = env.reset()
state = np.reshape(state, [1, state_size])
for time in range(500):
# env.render()
action = agent.act(state)
next_state, reward, done, _ = env.step(action)
reward = reward if not done else -10
next_state = np.reshape(next_state, [1, state_size])
agent.memorize(state, action, reward, next_state, done)
state = next_state
if done:
print("episode: {}/{}, score: {}, e: {:.2}"
.format(e, EPISODES, time, agent.epsilon))
break
if len(agent.memory) > batch_size:
agent.replay(batch_size)
if e % 10 == 0:
agent.save("./save/cartpole-dqn.h5") |
the-stack_0_3407 | import logging
import time
import traceback
from pathlib import Path
from secrets import token_bytes
from typing import Any, Dict, List, Optional, Tuple
from blspy import AugSchemeMPL
from dogechia.types.blockchain_format.coin import Coin
from dogechia.types.blockchain_format.program import Program
from dogechia.types.blockchain_format.sized_bytes import bytes32
from dogechia.types.spend_bundle import SpendBundle
from dogechia.types.coin_solution import CoinSolution
from dogechia.util.byte_types import hexstr_to_bytes
from dogechia.util.db_wrapper import DBWrapper
from dogechia.util.hash import std_hash
from dogechia.util.ints import uint32, uint64
from dogechia.wallet.cc_wallet import cc_utils
from dogechia.wallet.cc_wallet.cc_utils import CC_MOD, SpendableCC, spend_bundle_for_spendable_ccs, uncurry_cc
from dogechia.wallet.cc_wallet.cc_wallet import CCWallet
from dogechia.wallet.puzzles.genesis_by_coin_id_with_0 import genesis_coin_id_for_genesis_coin_checker
from dogechia.wallet.trade_record import TradeRecord
from dogechia.wallet.trading.trade_status import TradeStatus
from dogechia.wallet.trading.trade_store import TradeStore
from dogechia.wallet.transaction_record import TransactionRecord
from dogechia.wallet.util.trade_utils import (
get_discrepancies_for_spend_bundle,
get_output_amount_for_puzzle_and_solution,
get_output_discrepancy_for_puzzle_and_solution,
)
from dogechia.wallet.util.transaction_type import TransactionType
from dogechia.wallet.util.wallet_types import WalletType
from dogechia.wallet.wallet import Wallet
from dogechia.wallet.wallet_coin_record import WalletCoinRecord
class TradeManager:
wallet_state_manager: Any
log: logging.Logger
trade_store: TradeStore
@staticmethod
async def create(
wallet_state_manager: Any,
db_wrapper: DBWrapper,
name: str = None,
):
self = TradeManager()
if name:
self.log = logging.getLogger(name)
else:
self.log = logging.getLogger(__name__)
self.wallet_state_manager = wallet_state_manager
self.trade_store = await TradeStore.create(db_wrapper)
return self
async def get_offers_with_status(self, status: TradeStatus) -> List[TradeRecord]:
records = await self.trade_store.get_trade_record_with_status(status)
return records
async def get_coins_of_interest(
self,
) -> Tuple[Dict[bytes32, Coin], Dict[bytes32, Coin]]:
"""
        Returns the coins we want to check for inclusion in a filter:
        both coins that belong to us and coins on the other side of the trade.
"""
all_pending = []
pending_accept = await self.get_offers_with_status(TradeStatus.PENDING_ACCEPT)
pending_confirm = await self.get_offers_with_status(TradeStatus.PENDING_CONFIRM)
pending_cancel = await self.get_offers_with_status(TradeStatus.PENDING_CANCEL)
all_pending.extend(pending_accept)
all_pending.extend(pending_confirm)
all_pending.extend(pending_cancel)
removals = {}
additions = {}
for trade in all_pending:
for coin in trade.removals:
removals[coin.name()] = coin
for coin in trade.additions:
additions[coin.name()] = coin
return removals, additions
async def get_trade_by_coin(self, coin: Coin) -> Optional[TradeRecord]:
all_trades = await self.get_all_trades()
for trade in all_trades:
if trade.status == TradeStatus.CANCELED.value:
continue
if coin in trade.removals:
return trade
if coin in trade.additions:
return trade
return None
async def coins_of_interest_farmed(self, removals: List[Coin], additions: List[Coin], height: uint32):
"""
        If both our coins and the other party's coins in a trade were removed, the trade executed
        successfully. If the coins from the other side were farmed without ours, the trade failed:
        either someone else completed it, or the other side canceled it by spending their inputs.
        If our coins were farmed but the other side's were not, we successfully canceled the trade
        by spending our inputs.
"""
removal_dict = {}
addition_dict = {}
checked: Dict[bytes32, Coin] = {}
for coin in removals:
removal_dict[coin.name()] = coin
for coin in additions:
addition_dict[coin.name()] = coin
all_coins = []
all_coins.extend(removals)
all_coins.extend(additions)
for coin in all_coins:
if coin.name() in checked:
continue
trade = await self.get_trade_by_coin(coin)
if trade is None:
self.log.error(f"Coin: {Coin}, not in any trade")
continue
# Check if all coins that are part of the trade got farmed
# If coin is missing, trade failed
failed = False
for removed_coin in trade.removals:
if removed_coin.name() not in removal_dict:
self.log.error(f"{removed_coin} from trade not removed")
failed = True
checked[removed_coin.name()] = removed_coin
for added_coin in trade.additions:
if added_coin.name() not in addition_dict:
self.log.error(f"{added_coin} from trade not added")
failed = True
                checked[added_coin.name()] = added_coin
if failed is False:
# Mark this trade as successful
await self.trade_store.set_status(trade.trade_id, TradeStatus.CONFIRMED, True, height)
self.log.info(f"Trade with id: {trade.trade_id} confirmed at height: {height}")
else:
# Either we canceled this trade or this trade failed
if trade.status == TradeStatus.PENDING_CANCEL.value:
await self.trade_store.set_status(trade.trade_id, TradeStatus.CANCELED, True)
self.log.info(f"Trade with id: {trade.trade_id} canceled at height: {height}")
elif trade.status == TradeStatus.PENDING_CONFIRM.value:
await self.trade_store.set_status(trade.trade_id, TradeStatus.FAILED, True)
self.log.warning(f"Trade with id: {trade.trade_id} failed at height: {height}")
async def get_locked_coins(self, wallet_id: int = None) -> Dict[bytes32, WalletCoinRecord]:
"""Returns a dictionary of confirmed coins that are locked by a trade."""
all_pending = []
pending_accept = await self.get_offers_with_status(TradeStatus.PENDING_ACCEPT)
pending_confirm = await self.get_offers_with_status(TradeStatus.PENDING_CONFIRM)
pending_cancel = await self.get_offers_with_status(TradeStatus.PENDING_CANCEL)
all_pending.extend(pending_accept)
all_pending.extend(pending_confirm)
all_pending.extend(pending_cancel)
if len(all_pending) == 0:
return {}
result = {}
for trade_offer in all_pending:
if trade_offer.tx_spend_bundle is None:
locked = await self.get_locked_coins_in_spend_bundle(trade_offer.spend_bundle)
else:
locked = await self.get_locked_coins_in_spend_bundle(trade_offer.tx_spend_bundle)
for name, record in locked.items():
if wallet_id is None or record.wallet_id == wallet_id:
result[name] = record
return result
async def get_all_trades(self):
all: List[TradeRecord] = await self.trade_store.get_all_trades()
return all
async def get_trade_by_id(self, trade_id: bytes) -> Optional[TradeRecord]:
record = await self.trade_store.get_trade_record(trade_id)
return record
async def get_locked_coins_in_spend_bundle(self, bundle: SpendBundle) -> Dict[bytes32, WalletCoinRecord]:
"""Returns a list of coin records that are used in this SpendBundle"""
result = {}
removals = bundle.removals()
for coin in removals:
coin_record = await self.wallet_state_manager.coin_store.get_coin_record(coin.name())
if coin_record is None:
continue
result[coin_record.name()] = coin_record
return result
async def cancel_pending_offer(self, trade_id: bytes32):
await self.trade_store.set_status(trade_id, TradeStatus.CANCELED, False)
async def cancel_pending_offer_safely(self, trade_id: bytes32):
"""This will create a transaction that includes coins that were offered"""
self.log.info(f"Secure-Cancel pending offer with id trade_id {trade_id.hex()}")
trade = await self.trade_store.get_trade_record(trade_id)
if trade is None:
return None
all_coins = trade.removals
for coin in all_coins:
wallet = await self.wallet_state_manager.get_wallet_for_coin(coin.name())
if wallet is None:
continue
new_ph = await wallet.get_new_puzzlehash()
if wallet.type() == WalletType.COLOURED_COIN.value:
tx = await wallet.generate_signed_transaction(
[coin.amount], [new_ph], 0, coins={coin}, ignore_max_send_amount=True
)
else:
tx = await wallet.generate_signed_transaction(
coin.amount, new_ph, 0, coins={coin}, ignore_max_send_amount=True
)
await self.wallet_state_manager.add_pending_transaction(tx_record=tx)
await self.trade_store.set_status(trade_id, TradeStatus.PENDING_CANCEL, False)
return None
async def save_trade(self, trade: TradeRecord):
await self.trade_store.add_trade_record(trade, False)
async def create_offer_for_ids(
self, offer: Dict[int, int], file_name: str
) -> Tuple[bool, Optional[TradeRecord], Optional[str]]:
success, trade_offer, error = await self._create_offer_for_ids(offer)
if success is True and trade_offer is not None:
self.write_offer_to_disk(Path(file_name), trade_offer)
await self.save_trade(trade_offer)
return success, trade_offer, error
async def _create_offer_for_ids(self, offer: Dict[int, int]) -> Tuple[bool, Optional[TradeRecord], Optional[str]]:
"""
        Offer is a dictionary mapping wallet ids to amounts.
"""
spend_bundle = None
try:
for id in offer.keys():
amount = offer[id]
wallet_id = uint32(int(id))
wallet = self.wallet_state_manager.wallets[wallet_id]
if isinstance(wallet, CCWallet):
balance = await wallet.get_confirmed_balance()
if balance < abs(amount) and amount < 0:
raise Exception(f"insufficient funds in wallet {wallet_id}")
if amount > 0:
if spend_bundle is None:
to_exclude: List[Coin] = []
else:
to_exclude = spend_bundle.removals()
zero_spend_bundle: SpendBundle = await wallet.generate_zero_val_coin(False, to_exclude)
if spend_bundle is None:
spend_bundle = zero_spend_bundle
else:
spend_bundle = SpendBundle.aggregate([spend_bundle, zero_spend_bundle])
additions = zero_spend_bundle.additions()
removals = zero_spend_bundle.removals()
zero_val_coin: Optional[Coin] = None
for add in additions:
if add not in removals and add.amount == 0:
zero_val_coin = add
new_spend_bundle = await wallet.create_spend_bundle_relative_amount(amount, zero_val_coin)
else:
new_spend_bundle = await wallet.create_spend_bundle_relative_amount(amount)
elif isinstance(wallet, Wallet):
if spend_bundle is None:
to_exclude = []
else:
to_exclude = spend_bundle.removals()
new_spend_bundle = await wallet.create_spend_bundle_relative_dogechia(amount, to_exclude)
else:
return False, None, "unsupported wallet type"
if new_spend_bundle is None or new_spend_bundle.removals() == []:
raise Exception(f"Wallet {id} was unable to create offer.")
if spend_bundle is None:
spend_bundle = new_spend_bundle
else:
spend_bundle = SpendBundle.aggregate([spend_bundle, new_spend_bundle])
if spend_bundle is None:
return False, None, None
now = uint64(int(time.time()))
trade_offer: TradeRecord = TradeRecord(
confirmed_at_index=uint32(0),
accepted_at_time=None,
created_at_time=now,
my_offer=True,
sent=uint32(0),
spend_bundle=spend_bundle,
tx_spend_bundle=None,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
trade_id=std_hash(spend_bundle.name() + bytes(now)),
status=uint32(TradeStatus.PENDING_ACCEPT.value),
sent_to=[],
)
return True, trade_offer, None
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Error with creating trade offer: {type(e)}{tb}")
return False, None, str(e)
def write_offer_to_disk(self, file_path: Path, offer: TradeRecord):
if offer is not None:
file_path.write_text(bytes(offer).hex())
async def get_discrepancies_for_offer(self, file_path: Path) -> Tuple[bool, Optional[Dict], Optional[Exception]]:
self.log.info(f"trade offer: {file_path}")
trade_offer_hex = file_path.read_text()
trade_offer = TradeRecord.from_bytes(bytes.fromhex(trade_offer_hex))
return get_discrepancies_for_spend_bundle(trade_offer.spend_bundle)
async def get_inner_puzzle_for_puzzle_hash(self, puzzle_hash) -> Program:
info = await self.wallet_state_manager.puzzle_store.get_derivation_record_for_puzzle_hash(puzzle_hash.hex())
assert info is not None
puzzle = self.wallet_state_manager.main_wallet.puzzle_for_pk(bytes(info.pubkey))
return puzzle
async def maybe_create_wallets_for_offer(self, file_path: Path) -> bool:
success, result, error = await self.get_discrepancies_for_offer(file_path)
if not success or result is None:
return False
for key, value in result.items():
wsm = self.wallet_state_manager
wallet: Wallet = wsm.main_wallet
if key == "dogechia":
continue
self.log.info(f"value is {key}")
exists = await wsm.get_wallet_for_colour(key)
if exists is not None:
continue
await CCWallet.create_wallet_for_cc(wsm, wallet, key)
return True
async def respond_to_offer(self, file_path: Path) -> Tuple[bool, Optional[TradeRecord], Optional[str]]:
has_wallets = await self.maybe_create_wallets_for_offer(file_path)
if not has_wallets:
return False, None, "Unknown Error"
trade_offer = None
try:
trade_offer_hex = file_path.read_text()
trade_offer = TradeRecord.from_bytes(hexstr_to_bytes(trade_offer_hex))
except Exception as e:
return False, None, f"Error: {e}"
if trade_offer is not None:
offer_spend_bundle: SpendBundle = trade_offer.spend_bundle
coinsols: List[CoinSolution] = [] # [] of CoinSolutions
cc_coinsol_outamounts: Dict[bytes32, List[Tuple[CoinSolution, int]]] = dict()
aggsig = offer_spend_bundle.aggregated_signature
cc_discrepancies: Dict[bytes32, int] = dict()
dogechia_discrepancy = None
wallets: Dict[bytes32, Any] = dict() # colour to wallet dict
for coinsol in offer_spend_bundle.coin_solutions:
puzzle: Program = Program.from_bytes(bytes(coinsol.puzzle_reveal))
solution: Program = Program.from_bytes(bytes(coinsol.solution))
# work out the deficits between coin amount and expected output for each
r = cc_utils.uncurry_cc(puzzle)
if r:
# Calculate output amounts
mod_hash, genesis_checker, inner_puzzle = r
colour = bytes(genesis_checker).hex()
if colour not in wallets:
wallets[colour] = await self.wallet_state_manager.get_wallet_for_colour(colour)
unspent = await self.wallet_state_manager.get_spendable_coins_for_wallet(wallets[colour].id())
if coinsol.coin in [record.coin for record in unspent]:
return False, None, "can't respond to own offer"
innersol = solution.first()
total = get_output_amount_for_puzzle_and_solution(inner_puzzle, innersol)
if colour in cc_discrepancies:
cc_discrepancies[colour] += coinsol.coin.amount - total
else:
cc_discrepancies[colour] = coinsol.coin.amount - total
# Store coinsol and output amount for later
if colour in cc_coinsol_outamounts:
cc_coinsol_outamounts[colour].append((coinsol, total))
else:
cc_coinsol_outamounts[colour] = [(coinsol, total)]
else:
# standard dogechia coin
unspent = await self.wallet_state_manager.get_spendable_coins_for_wallet(1)
if coinsol.coin in [record.coin for record in unspent]:
return False, None, "can't respond to own offer"
if dogechia_discrepancy is None:
dogechia_discrepancy = get_output_discrepancy_for_puzzle_and_solution(coinsol.coin, puzzle, solution)
else:
dogechia_discrepancy += get_output_discrepancy_for_puzzle_and_solution(coinsol.coin, puzzle, solution)
coinsols.append(coinsol)
dogechia_spend_bundle: Optional[SpendBundle] = None
if dogechia_discrepancy is not None:
dogechia_spend_bundle = await self.wallet_state_manager.main_wallet.create_spend_bundle_relative_dogechia(
dogechia_discrepancy, []
)
if dogechia_spend_bundle is not None:
for coinsol in coinsols:
dogechia_spend_bundle.coin_solutions.append(coinsol)
zero_spend_list: List[SpendBundle] = []
spend_bundle = None
# create coloured coin
self.log.info(cc_discrepancies)
for colour in cc_discrepancies.keys():
if cc_discrepancies[colour] < 0:
my_cc_spends = await wallets[colour].select_coins(abs(cc_discrepancies[colour]))
else:
if dogechia_spend_bundle is None:
to_exclude: List = []
else:
to_exclude = dogechia_spend_bundle.removals()
my_cc_spends = await wallets[colour].select_coins(0)
if my_cc_spends is None or my_cc_spends == set():
zero_spend_bundle: SpendBundle = await wallets[colour].generate_zero_val_coin(False, to_exclude)
if zero_spend_bundle is None:
return (
False,
None,
"Unable to generate zero value coin. Confirm that you have dogechia available",
)
zero_spend_list.append(zero_spend_bundle)
additions = zero_spend_bundle.additions()
removals = zero_spend_bundle.removals()
my_cc_spends = set()
for add in additions:
if add not in removals and add.amount == 0:
my_cc_spends.add(add)
if my_cc_spends == set() or my_cc_spends is None:
return False, None, "insufficient funds"
# Create SpendableCC list and innersol_list with both my coins and the offered coins
# Firstly get the output coin
my_output_coin = my_cc_spends.pop()
spendable_cc_list = []
innersol_list = []
genesis_id = genesis_coin_id_for_genesis_coin_checker(Program.from_bytes(bytes.fromhex(colour)))
# Make the rest of the coins assert the output coin is consumed
for coloured_coin in my_cc_spends:
inner_solution = self.wallet_state_manager.main_wallet.make_solution(consumed=[my_output_coin.name()])
inner_puzzle = await self.get_inner_puzzle_for_puzzle_hash(coloured_coin.puzzle_hash)
assert inner_puzzle is not None
sigs = await wallets[colour].get_sigs(inner_puzzle, inner_solution, coloured_coin.name())
sigs.append(aggsig)
aggsig = AugSchemeMPL.aggregate(sigs)
lineage_proof = await wallets[colour].get_lineage_proof_for_coin(coloured_coin)
spendable_cc_list.append(SpendableCC(coloured_coin, genesis_id, inner_puzzle, lineage_proof))
innersol_list.append(inner_solution)
# Create SpendableCC for each of the coloured coins received
for cc_coinsol_out in cc_coinsol_outamounts[colour]:
cc_coinsol = cc_coinsol_out[0]
puzzle = Program.from_bytes(bytes(cc_coinsol.puzzle_reveal))
solution = Program.from_bytes(bytes(cc_coinsol.solution))
r = uncurry_cc(puzzle)
if r:
mod_hash, genesis_coin_checker, inner_puzzle = r
inner_solution = solution.first()
lineage_proof = solution.rest().rest().first()
spendable_cc_list.append(SpendableCC(cc_coinsol.coin, genesis_id, inner_puzzle, lineage_proof))
innersol_list.append(inner_solution)
# Finish the output coin SpendableCC with new information
newinnerpuzhash = await wallets[colour].get_new_inner_hash()
outputamount = sum([c.amount for c in my_cc_spends]) + cc_discrepancies[colour] + my_output_coin.amount
inner_solution = self.wallet_state_manager.main_wallet.make_solution(
primaries=[{"puzzlehash": newinnerpuzhash, "amount": outputamount}]
)
inner_puzzle = await self.get_inner_puzzle_for_puzzle_hash(my_output_coin.puzzle_hash)
assert inner_puzzle is not None
lineage_proof = await wallets[colour].get_lineage_proof_for_coin(my_output_coin)
spendable_cc_list.append(SpendableCC(my_output_coin, genesis_id, inner_puzzle, lineage_proof))
innersol_list.append(inner_solution)
sigs = await wallets[colour].get_sigs(inner_puzzle, inner_solution, my_output_coin.name())
sigs.append(aggsig)
aggsig = AugSchemeMPL.aggregate(sigs)
if spend_bundle is None:
spend_bundle = spend_bundle_for_spendable_ccs(
CC_MOD,
Program.from_bytes(bytes.fromhex(colour)),
spendable_cc_list,
innersol_list,
[aggsig],
)
else:
new_spend_bundle = spend_bundle_for_spendable_ccs(
CC_MOD,
Program.from_bytes(bytes.fromhex(colour)),
spendable_cc_list,
innersol_list,
[aggsig],
)
spend_bundle = SpendBundle.aggregate([spend_bundle, new_spend_bundle])
# reset sigs and aggsig so that they aren't included next time around
sigs = []
aggsig = AugSchemeMPL.aggregate(sigs)
my_tx_records = []
if zero_spend_list is not None and spend_bundle is not None:
zero_spend_list.append(spend_bundle)
spend_bundle = SpendBundle.aggregate(zero_spend_list)
if spend_bundle is None:
return False, None, "spend_bundle missing"
# Add transaction history for this trade
now = uint64(int(time.time()))
if dogechia_spend_bundle is not None:
spend_bundle = SpendBundle.aggregate([spend_bundle, dogechia_spend_bundle])
if dogechia_discrepancy < 0:
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=now,
to_puzzle_hash=token_bytes(),
amount=uint64(abs(dogechia_discrepancy)),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(10),
spend_bundle=dogechia_spend_bundle,
additions=dogechia_spend_bundle.additions(),
removals=dogechia_spend_bundle.removals(),
wallet_id=uint32(1),
sent_to=[],
trade_id=std_hash(spend_bundle.name() + bytes(now)),
type=uint32(TransactionType.OUTGOING_TRADE.value),
name=dogechia_spend_bundle.name(),
)
else:
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=token_bytes(),
amount=uint64(abs(dogechia_discrepancy)),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(10),
spend_bundle=dogechia_spend_bundle,
additions=dogechia_spend_bundle.additions(),
removals=dogechia_spend_bundle.removals(),
wallet_id=uint32(1),
sent_to=[],
trade_id=std_hash(spend_bundle.name() + bytes(now)),
type=uint32(TransactionType.INCOMING_TRADE.value),
name=dogechia_spend_bundle.name(),
)
my_tx_records.append(tx_record)
for colour, amount in cc_discrepancies.items():
wallet = wallets[colour]
if dogechia_discrepancy > 0:
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=token_bytes(),
amount=uint64(abs(amount)),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(10),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=wallet.id(),
sent_to=[],
trade_id=std_hash(spend_bundle.name() + bytes(now)),
type=uint32(TransactionType.OUTGOING_TRADE.value),
name=spend_bundle.name(),
)
else:
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=token_bytes(),
amount=uint64(abs(amount)),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(10),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=wallet.id(),
sent_to=[],
trade_id=std_hash(spend_bundle.name() + bytes(now)),
type=uint32(TransactionType.INCOMING_TRADE.value),
name=token_bytes(),
)
my_tx_records.append(tx_record)
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=token_bytes(),
amount=uint64(0),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=uint32(0),
sent_to=[],
trade_id=std_hash(spend_bundle.name() + bytes(now)),
type=uint32(TransactionType.OUTGOING_TRADE.value),
name=spend_bundle.name(),
)
now = uint64(int(time.time()))
trade_record: TradeRecord = TradeRecord(
confirmed_at_index=uint32(0),
accepted_at_time=now,
created_at_time=now,
my_offer=False,
sent=uint32(0),
spend_bundle=offer_spend_bundle,
tx_spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
trade_id=std_hash(spend_bundle.name() + bytes(now)),
status=uint32(TradeStatus.PENDING_CONFIRM.value),
sent_to=[],
)
await self.save_trade(trade_record)
await self.wallet_state_manager.add_pending_transaction(tx_record)
for tx in my_tx_records:
await self.wallet_state_manager.add_transaction(tx)
return True, trade_record, None
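# Rough usage sketch (illustrative only; assumes a wallet_state_manager and db_wrapper already
# exist, and the wallet ids / file names below are placeholders):
#   manager = await TradeManager.create(wallet_state_manager, db_wrapper)
#   ok, offer, err = await manager.create_offer_for_ids({1: -100, 2: 100}, "my_offer.trade")
#   ok, record, err = await manager.respond_to_offer(Path("their_offer.trade"))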
|
the-stack_0_3408 | def buddy(start, limit):
for i in range(start, limit):
res=prime(i)
if res>i:
res2=prime(res-1)
if (res2-i)==1:
return [i,res-1]
return "Nothing"
def prime(n):
    # Despite its name, this helper returns the aliquot sum of n: the sum of n's
    # proper divisors (1 plus every divisor pair found below sqrt(n)).
    total=1
for i in range(2, int(n**0.5)+1):
if n%i==0:
total+=(i)
if i==n//i: continue
total+=(n//i)
return total |
the-stack_0_3409 | from typing import Any
import torch
from torch import fx
class NodeProfiler(fx.Interpreter):
"""
This is basically a variant of shape prop in
https://github.com/pytorch/pytorch/blob/74849d9188de30d93f7c523d4eeceeef044147a9/torch/fx/passes/shape_prop.py#L65.
Instead of propagating just the shape, we record all the intermediate node Tensor values.
This is useful to debug some of lowering pass issue where we want to check a specific
tensor value. Note that output value can be tuple(Tensor) as well as Tensor.
"""
def __init__(self, module: fx.GraphModule):
super().__init__(module)
self.execution_time = {}
self.node_map = {}
self.iter = 100
def run_node(self, n: fx.Node) -> Any:
result = super().run_node(n)
if n.op not in {"call_function", "call_method", "call_module"}:
return result
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for _ in range(self.iter):
result = super().run_node(n)
end_event.record()
torch.cuda.synchronize()
self.execution_time[f"{n.name}"] = (
start_event.elapsed_time(end_event) / self.iter
)
self.node_map[n.name] = n
return result
def propagate(self, *args):
"""
        Run `module` via interpretation and return the result, recording
        the average execution time of each call node along the way.
Args:
*args (Tensor): the sample input.
Returns:
Any: The value returned from executing the Module
"""
return super().run(*args)
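# Minimal usage sketch (assumes a CUDA device is available, since run_node relies on CUDA events;
# `my_module` and `sample_input` are placeholders, not part of this file):
#   gm = fx.symbolic_trace(my_module)
#   profiler = NodeProfiler(gm)
#   profiler.propagate(sample_input)
#   for node_name, avg_ms in profiler.execution_time.items():
#       print(node_name, avg_ms)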
|
the-stack_0_3412 | import unicodedata
from .add_whitespace_around_character import AddWhitespaceAroundCharacter
class AddWhitespaceAroundPunctuation(AddWhitespaceAroundCharacter):
"""
Recognize punctuation characters and add whitespace around each punctuation character
E.g.
>>> from uttut.pipeline.ops.add_whitespace_around_punctuation import (
AddWhitespaceAroundPunctuation)
>>> op = AddWhitespaceAroundPunctuation()
>>> output_seq, label_aligner = op.transform("GB,薄餡亂入")
>>> output_labels = label_aligner.transform([1, 1, 2, 3, 3, 4, 5])
>>> output_seq
"GB , 薄餡亂入"
>>> output_labels
[1, 1, 0, 2, 0, 3, 3, 4, 5]
>>> label_aligner.inverse_transform(output_labels)
[1, 1, 2, 3, 3, 4, 5]
"""
def _is_valid_char(self, char: str) -> bool:
return is_punctuation(char)
def is_punctuation(char: str) -> bool:
"""Check whether char is a punctuation character.
This code is copied from Bert `tokenization.py`.
We treat all non-letter/number ASCII as punctuation.
Characters such as "^", "$", and "`" are not in the Unicode
Punctuation class but we treat them as punctuation anyways, for
consistency.
"""
code_point = ord(char)
if ((
code_point >= 33 and code_point <= 47) or (
code_point >= 58 and code_point <= 64) or (
code_point >= 91 and code_point <= 96) or (
code_point >= 123 and code_point <= 126)
):
return True
cat = unicodedata.category(char)
# For more details, please take a look at
# https://www.fileformat.info/info/unicode/category/index.htm
if cat.startswith("P"):
return True
return False
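# Quick sanity examples (following the rules above): is_punctuation(",") and is_punctuation("^")
# return True, while is_punctuation("a"), is_punctuation("7") and is_punctuation(" ") return False.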
|
the-stack_0_3413 | """
Unit tests for Unified/Monitor.py module
Author: Valentin Kuznetsov <vkuznet [AT] gmail [DOT] com>
"""
from __future__ import division, print_function
import time
# system modules
import unittest
from copy import deepcopy
# WMCore modules
from WMCore.MicroService.Unified.MSMonitor import MSMonitor
from WMQuality.Emulators.EmulatedUnitTestCase import EmulatedUnitTestCase
from WMQuality.Emulators.ReqMgrAux.MockReqMgrAux import MockReqMgrAux
class MSMonitorTest(EmulatedUnitTestCase):
"Unit test for Monitor module"
def setUp(self):
"init test class"
self.msConfig = {'verbose': False,
'group': 'DataOps',
'interval': 1 * 60,
'updateInterval': 0,
'enableStatusTransition': True,
'reqmgr2Url': 'https://cmsweb-testbed.cern.ch/reqmgr2',
'reqmgrCacheUrl': 'https://cmsweb-testbed.cern.ch/couchdb/reqmgr_workload_cache',
'phedexUrl': 'https://cmsweb-testbed.cern.ch/phedex/datasvc/json/prod',
'dbsUrl': 'https://cmsweb-testbed.cern.ch/dbs/int/global/DBSReader'}
self.ms = MSMonitor(self.msConfig)
self.ms.reqmgrAux = MockReqMgrAux()
super(MSMonitorTest, self).setUp()
def testUpdateCaches(self):
"""
Test the updateCaches method
"""
campaigns, transfersDocs = self.ms.updateCaches()
self.assertNotEqual(transfersDocs, [])
self.assertEqual(len(transfersDocs[0]['transfers']), 1)
self.assertTrue(time.time() > transfersDocs[0]['lastUpdate'], 1)
self.assertNotEqual(campaigns, [])
for cname, cdict in campaigns.items():
self.assertEqual(cname, cdict['CampaignName'])
self.assertEqual(isinstance(cdict, dict), True)
self.assertNotEqual(cdict.get('CampaignName', {}), {})
def testGetTransferInfo(self):
"""
Test the getTransferInfo method
"""
_, transfersDocs = self.ms.updateCaches()
transfersDocs[0]['transfers'] = []
originalTransfers = deepcopy(transfersDocs)
self.ms.getTransferInfo(transfersDocs)
self.assertNotEqual(transfersDocs, [])
self.assertEqual(len(transfersDocs), len(originalTransfers))
for rec in transfersDocs:
self.assertEqual(isinstance(rec, dict), True)
keys = sorted(['workflowName', 'lastUpdate', 'transfers'])
self.assertEqual(keys, sorted(rec.keys()))
self.assertTrue(time.time() >= rec['lastUpdate'])
def testCompletion(self):
"""
Test the completion method
"""
campaigns, transfersDocs = self.ms.updateCaches()
transfersDocs.append(deepcopy(transfersDocs[0]))
transfersDocs.append(deepcopy(transfersDocs[0]))
transfersDocs[0]['transfers'] = []
transfersDocs[0]['workflowName'] = 'workflow_0'
transfersDocs[1]['transfers'][0]['completion'].append(100)
transfersDocs[1]['workflowName'] = 'workflow_1'
transfersDocs[2]['workflowName'] = 'workflow_2'
self.assertEqual(len(transfersDocs), 3)
completedWfs = self.ms.getCompletedWorkflows(transfersDocs, campaigns)
self.assertEqual(len(completedWfs), 2)
def testUpdateTransferInfo(self):
"""
Test the updateTransferInfo method
"""
_, transferRecords = self.ms.updateCaches()
failed = self.ms.updateTransferDocs(transferRecords)
self.assertEqual(len(failed), len(transferRecords))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_3415 | # Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path, subprocess
from ..mesonlib import (
EnvironmentException, MachineChoice, version_compare, is_windows, is_osx
)
from .compilers import (
CompilerType,
d_dmd_buildtype_args,
d_gdc_buildtype_args,
d_ldc_buildtype_args,
clike_debug_args,
Compiler,
CompilerArgs,
)
from .mixins.gnu import get_gcc_soname_args, gnu_color_args, gnu_optimization_args
d_feature_args = {'gcc': {'unittest': '-funittest',
'debug': '-fdebug',
'version': '-fversion',
'import_dir': '-J'
},
'llvm': {'unittest': '-unittest',
'debug': '-d-debug',
'version': '-d-version',
'import_dir': '-J'
},
'dmd': {'unittest': '-unittest',
'debug': '-debug',
'version': '-version',
'import_dir': '-J'
}
}
ldc_optimization_args = {'0': [],
'g': [],
'1': ['-O1'],
'2': ['-O2'],
'3': ['-O3'],
's': ['-Os'],
}
dmd_optimization_args = {'0': [],
'g': [],
'1': ['-O'],
'2': ['-O'],
'3': ['-O'],
's': ['-O'],
}
class DCompiler(Compiler):
mscrt_args = {
'none': ['-mscrtlib='],
'md': ['-mscrtlib=msvcrt'],
'mdd': ['-mscrtlib=msvcrtd'],
'mt': ['-mscrtlib=libcmt'],
'mtd': ['-mscrtlib=libcmtd'],
}
def __init__(self, exelist, version, for_machine: MachineChoice, arch, **kwargs):
self.language = 'd'
super().__init__(exelist, version, for_machine, **kwargs)
self.id = 'unknown'
self.arch = arch
def sanity_check(self, work_dir, environment):
source_name = os.path.join(work_dir, 'sanity.d')
output_name = os.path.join(work_dir, 'dtest')
with open(source_name, 'w') as ofile:
ofile.write('''void main() { }''')
pc = subprocess.Popen(self.exelist + self.get_output_args(output_name) + self.get_target_arch_args() + [source_name], cwd=work_dir)
pc.wait()
if pc.returncode != 0:
raise EnvironmentException('D compiler %s can not compile programs.' % self.name_string())
if subprocess.call(output_name) != 0:
raise EnvironmentException('Executables created by D compiler %s are not runnable.' % self.name_string())
def needs_static_linker(self):
return True
def name_string(self):
return ' '.join(self.exelist)
def get_exelist(self):
return self.exelist
def get_linker_exelist(self):
return self.exelist[:]
def get_output_args(self, target):
return ['-of=' + target]
def get_linker_output_args(self, target):
return ['-of=' + target]
def get_include_args(self, path, is_system):
return ['-I=' + path]
def compute_parameters_with_absolute_paths(self, parameter_list, build_dir):
for idx, i in enumerate(parameter_list):
if i[:3] == '-I=':
parameter_list[idx] = i[:3] + os.path.normpath(os.path.join(build_dir, i[3:]))
if i[:4] == '-L-L':
parameter_list[idx] = i[:4] + os.path.normpath(os.path.join(build_dir, i[4:]))
if i[:5] == '-L=-L':
parameter_list[idx] = i[:5] + os.path.normpath(os.path.join(build_dir, i[5:]))
if i[:6] == '-Wl,-L':
parameter_list[idx] = i[:6] + os.path.normpath(os.path.join(build_dir, i[6:]))
return parameter_list
def get_warn_args(self, level):
return ['-wi']
def get_werror_args(self):
return ['-w']
def get_dependency_gen_args(self, outtarget, outfile):
# DMD and LDC do not currently return Makefile-compatible dependency info.
return []
def get_linker_search_args(self, dirname):
# -L is recognized as "add this to the search path" by the linker,
# while the compiler recognizes it as "pass to linker".
return ['-Wl,-L' + dirname]
def get_coverage_args(self):
return ['-cov']
def get_preprocess_only_args(self):
return ['-E']
def get_compile_only_args(self):
return ['-c']
def depfile_for_object(self, objfile):
return objfile + '.' + self.get_depfile_suffix()
def get_depfile_suffix(self):
return 'deps'
def get_pic_args(self):
if is_windows():
return []
return ['-fPIC']
def get_std_shared_lib_link_args(self):
return ['-shared']
def get_soname_args(self, *args):
# FIXME: Make this work for cross-compiling
if is_windows():
return []
elif is_osx():
soname_args = get_gcc_soname_args(CompilerType.GCC_OSX, *args)
if soname_args:
return ['-Wl,' + ','.join(soname_args)]
return []
return get_gcc_soname_args(CompilerType.GCC_STANDARD, *args)
def get_feature_args(self, kwargs, build_to_src):
res = []
if 'unittest' in kwargs:
unittest = kwargs.pop('unittest')
unittest_arg = d_feature_args[self.id]['unittest']
if not unittest_arg:
raise EnvironmentException('D compiler %s does not support the "unittest" feature.' % self.name_string())
if unittest:
res.append(unittest_arg)
if 'debug' in kwargs:
debug_level = -1
debugs = kwargs.pop('debug')
if not isinstance(debugs, list):
debugs = [debugs]
debug_arg = d_feature_args[self.id]['debug']
if not debug_arg:
raise EnvironmentException('D compiler %s does not support conditional debug identifiers.' % self.name_string())
# Parse all debug identifiers and the largest debug level identifier
for d in debugs:
if isinstance(d, int):
if d > debug_level:
debug_level = d
elif isinstance(d, str) and d.isdigit():
if int(d) > debug_level:
debug_level = int(d)
else:
res.append('{0}={1}'.format(debug_arg, d))
if debug_level >= 0:
res.append('{0}={1}'.format(debug_arg, debug_level))
if 'versions' in kwargs:
version_level = -1
versions = kwargs.pop('versions')
if not isinstance(versions, list):
versions = [versions]
version_arg = d_feature_args[self.id]['version']
if not version_arg:
raise EnvironmentException('D compiler %s does not support conditional version identifiers.' % self.name_string())
# Parse all version identifiers and the largest version level identifier
for v in versions:
if isinstance(v, int):
if v > version_level:
version_level = v
elif isinstance(v, str) and v.isdigit():
if int(v) > version_level:
version_level = int(v)
else:
res.append('{0}={1}'.format(version_arg, v))
if version_level >= 0:
res.append('{0}={1}'.format(version_arg, version_level))
if 'import_dirs' in kwargs:
import_dirs = kwargs.pop('import_dirs')
if not isinstance(import_dirs, list):
import_dirs = [import_dirs]
import_dir_arg = d_feature_args[self.id]['import_dir']
if not import_dir_arg:
raise EnvironmentException('D compiler %s does not support the "string import directories" feature.' % self.name_string())
for idir_obj in import_dirs:
basedir = idir_obj.get_curdir()
for idir in idir_obj.get_incdirs():
# Avoid superfluous '/.' at the end of paths when d is '.'
if idir not in ('', '.'):
expdir = os.path.join(basedir, idir)
else:
expdir = basedir
srctreedir = os.path.join(build_to_src, expdir)
res.append('{0}{1}'.format(import_dir_arg, srctreedir))
if kwargs:
raise EnvironmentException('Unknown D compiler feature(s) selected: %s' % ', '.join(kwargs.keys()))
return res
def get_buildtype_linker_args(self, buildtype):
if buildtype != 'plain':
return self.get_target_arch_args()
return []
def get_std_exe_link_args(self):
return []
def gen_import_library_args(self, implibname):
return ['-Wl,--out-implib=' + implibname]
def build_rpath_args(self, build_dir, from_dir, rpath_paths, build_rpath, install_rpath):
if is_windows():
return []
# This method is to be used by LDC and DMD.
# GDC can deal with the verbatim flags.
if not rpath_paths and not install_rpath:
return []
paths = ':'.join([os.path.join(build_dir, p) for p in rpath_paths])
if build_rpath != '':
paths += ':' + build_rpath
if len(paths) < len(install_rpath):
padding = 'X' * (len(install_rpath) - len(paths))
if not paths:
paths = padding
else:
paths = paths + ':' + padding
return ['-Wl,-rpath,{}'.format(paths)]
def _get_compiler_check_args(self, env, extra_args, dependencies, mode='compile'):
if callable(extra_args):
extra_args = extra_args(mode)
if extra_args is None:
extra_args = []
elif isinstance(extra_args, str):
extra_args = [extra_args]
if dependencies is None:
dependencies = []
elif not isinstance(dependencies, list):
dependencies = [dependencies]
# Collect compiler arguments
args = CompilerArgs(self)
for d in dependencies:
# Add compile flags needed by dependencies
args += d.get_compile_args()
if mode == 'link':
# Add link flags needed to find dependencies
args += d.get_link_args()
if mode == 'compile':
# Add DFLAGS from the env
args += env.coredata.get_external_args(self.for_machine, self.language)
elif mode == 'link':
# Add LDFLAGS from the env
args += env.coredata.get_external_link_args(self.for_machine, self.language)
# extra_args must override all other arguments, so we add them last
args += extra_args
return args
def compiles(self, code, env, *, extra_args=None, dependencies=None, mode='compile'):
args = self._get_compiler_check_args(env, extra_args, dependencies, mode)
with self.cached_compile(code, env.coredata, extra_args=args, mode=mode) as p:
return p.returncode == 0, p.cached
def has_multi_arguments(self, args, env):
return self.compiles('int i;\n', env, extra_args=args)
def get_target_arch_args(self):
# LDC2 on Windows targets the current OS architecture, but
# it should follow the target specified by the MSVC toolchain.
if is_windows():
if self.arch == 'x86_64':
return ['-m64']
return ['-m32']
return []
@classmethod
def translate_args_to_nongnu(cls, args):
dcargs = []
# Translate common arguments to flags the LDC/DMD compilers
# can understand.
# The flags might have been added by pkg-config files,
# and are therefore out of the user's control.
for arg in args:
# Translate OS specific arguments first.
osargs = []
if is_windows():
osargs = cls.translate_arg_to_windows(arg)
elif is_osx():
osargs = cls.translate_arg_to_osx(arg)
if osargs:
dcargs.extend(osargs)
continue
# Translate common D arguments here.
if arg == '-pthread':
continue
if arg.startswith('-fstack-protector'):
continue
if arg.startswith('-D'):
continue
if arg.startswith('-Wl,'):
# Translate linker arguments here.
linkargs = arg[arg.index(',') + 1:].split(',')
for la in linkargs:
dcargs.append('-L=' + la.strip())
continue
elif arg.startswith(('-link-defaultlib', '-linker', '-link-internally', '-linkonce-templates', '-lib')):
# these are special arguments to the LDC linker call,
# arguments like "-link-defaultlib-shared" do *not*
# denote a library to be linked, but change the default
# Phobos/DRuntime linking behavior, while "-linker" sets the
# default linker.
dcargs.append(arg)
continue
elif arg.startswith('-l'):
# translate library link flag
dcargs.append('-L=' + arg)
continue
elif arg.startswith('-isystem'):
# translate -isystem system include path
# this flag might sometimes be added by C library Cflags via
# pkg-config.
# NOTE: -isystem and -I are not 100% equivalent, so this is just
# a workaround for the most common cases.
if arg.startswith('-isystem='):
dcargs.append('-I=' + arg[9:])
else:
dcargs.append('-I' + arg[8:])
continue
elif arg.startswith('-L/') or arg.startswith('-L./'):
# We need to handle cases where -L is set by e.g. a pkg-config file
# to select a linker search path. However, we cannot unconditionally
# prefix '-L' with '-L=' because the user might have passed the flag
# deliberately in the form this compiler expects (i.e. to pass it
# through to the linker).
# Hence, we guess here whether the flag was intended to select a
# linker search path.
# Make sure static library files are passed properly to the linker.
if arg.endswith('.a') or arg.endswith('.lib'):
if arg.startswith('-L='):
farg = arg[3:]
else:
farg = arg[2:]
if len(farg) > 0 and not farg.startswith('-'):
dcargs.append('-L=' + farg)
continue
dcargs.append('-L=' + arg)
continue
elif not arg.startswith('-') and arg.endswith(('.a', '.lib')):
# ensure static libraries are passed through to the linker
dcargs.append('-L=' + arg)
continue
else:
dcargs.append(arg)
return dcargs
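# Illustrative examples (not from the Meson sources) of what the
# translation above produces on a non-Windows, non-macOS host:
#   '-lfoo'                 -> '-L=-lfoo'
#   '-L/usr/lib'            -> '-L=-L/usr/lib'   (guessed linker search path)
#   '-Wl,-rpath,/opt/lib'   -> '-L=-rpath', '-L=/opt/lib'
#   '-isystem=/usr/include' -> '-I=/usr/include'
#   '-pthread'              -> dropped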
@classmethod
def translate_arg_to_windows(cls, arg):
args = []
if arg.startswith('-Wl,'):
# Translate linker arguments here.
linkargs = arg[arg.index(',') + 1:].split(',')
for la in linkargs:
if la.startswith('--out-implib='):
# Import library name
args.append('-L=/IMPLIB:' + la[13:].strip())
elif arg.startswith('-mscrtlib='):
args.append(arg)
mscrtlib = arg[10:].lower()
if cls is LLVMDCompiler:
# Default crt libraries for LDC2 must be excluded for other
# selected crt options.
if mscrtlib != 'libcmt':
args.append('-L=/NODEFAULTLIB:libcmt')
args.append('-L=/NODEFAULTLIB:libvcruntime')
# Fixes missing definitions for printf-functions in VS2017
if mscrtlib.startswith('msvcrt'):
args.append('-L=/DEFAULTLIB:legacy_stdio_definitions.lib')
return args
@classmethod
def translate_arg_to_osx(cls, arg):
args = []
if arg.startswith('-install_name'):
args.append('-L=' + arg)
return args
def get_debug_args(self, is_debug):
ddebug_args = []
if is_debug:
ddebug_args = [d_feature_args[self.id]['debug']]
return clike_debug_args[is_debug] + ddebug_args
def get_crt_args(self, crt_val, buildtype):
if not is_windows():
return []
if crt_val in self.mscrt_args:
return self.mscrt_args[crt_val]
assert(crt_val == 'from_buildtype')
# Match what build type flags used to do.
if buildtype == 'plain':
return []
elif buildtype == 'debug':
return self.mscrt_args['mdd']
elif buildtype == 'debugoptimized':
return self.mscrt_args['md']
elif buildtype == 'release':
return self.mscrt_args['md']
elif buildtype == 'minsize':
return self.mscrt_args['md']
else:
assert(buildtype == 'custom')
raise EnvironmentException('Requested C runtime based on buildtype, but buildtype is "custom".')
def get_crt_compile_args(self, crt_val, buildtype):
return []
def get_crt_link_args(self, crt_val, buildtype):
return []
def thread_link_flags(self, env):
return ['-pthread']
class GnuDCompiler(DCompiler):
def __init__(self, exelist, version, for_machine: MachineChoice, arch, **kwargs):
DCompiler.__init__(self, exelist, version, for_machine, arch, **kwargs)
self.id = 'gcc'
default_warn_args = ['-Wall', '-Wdeprecated']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-Wpedantic']}
self.base_options = ['b_colorout', 'b_sanitize', 'b_staticpic', 'b_vscrt']
self._has_color_support = version_compare(self.version, '>=4.9')
# dependencies were implemented before, but broken - support was fixed in GCC 7.1+
# (and some backported versions)
self._has_deps_support = version_compare(self.version, '>=7.1')
def get_colorout_args(self, colortype):
if self._has_color_support:
return gnu_color_args[colortype][:]
return []
def get_dependency_gen_args(self, outtarget, outfile):
if not self._has_deps_support:
return []
return ['-MD', '-MQ', outtarget, '-MF', outfile]
def get_output_args(self, target):
return ['-o', target]
def get_linker_output_args(self, target):
return ['-o', target]
def get_include_args(self, path, is_system):
return ['-I' + path]
def get_warn_args(self, level):
return self.warn_args[level]
def get_werror_args(self):
return ['-Werror']
def get_linker_search_args(self, dirname):
return ['-L' + dirname]
def get_coverage_args(self):
return []
def get_buildtype_args(self, buildtype):
return d_gdc_buildtype_args[buildtype]
def compute_parameters_with_absolute_paths(self, parameter_list, build_dir):
for idx, i in enumerate(parameter_list):
if i[:2] == '-I' or i[:2] == '-L':
parameter_list[idx] = i[:2] + os.path.normpath(os.path.join(build_dir, i[2:]))
return parameter_list
def build_rpath_args(self, build_dir, from_dir, rpath_paths, build_rpath, install_rpath):
return self.build_unix_rpath_args(build_dir, from_dir, rpath_paths, build_rpath, install_rpath)
def get_optimization_args(self, optimization_level):
return gnu_optimization_args[optimization_level]
class LLVMDCompiler(DCompiler):
def __init__(self, exelist, version, for_machine: MachineChoice, arch, **kwargs):
DCompiler.__init__(self, exelist, version, for_machine, arch, **kwargs)
self.id = 'llvm'
self.base_options = ['b_coverage', 'b_colorout', 'b_vscrt']
def get_colorout_args(self, colortype):
if colortype == 'always':
return ['-enable-color']
return []
def get_warn_args(self, level):
if level == '2' or level == '3':
return ['-wi', '-dw']
elif level == '1':
return ['-wi']
else:
return []
def get_buildtype_args(self, buildtype):
if buildtype != 'plain':
return self.get_target_arch_args() + d_ldc_buildtype_args[buildtype]
return d_ldc_buildtype_args[buildtype]
def get_pic_args(self):
return ['-relocation-model=pic']
def get_crt_link_args(self, crt_val, buildtype):
return self.get_crt_args(crt_val, buildtype)
@classmethod
def unix_args_to_native(cls, args):
return cls.translate_args_to_nongnu(args)
def get_optimization_args(self, optimization_level):
return ldc_optimization_args[optimization_level]
class DmdDCompiler(DCompiler):
def __init__(self, exelist, version, for_machine: MachineChoice, arch, **kwargs):
DCompiler.__init__(self, exelist, version, for_machine, arch, **kwargs)
self.id = 'dmd'
self.base_options = ['b_coverage', 'b_colorout', 'b_vscrt']
def get_colorout_args(self, colortype):
if colortype == 'always':
return ['-color=on']
return []
def get_buildtype_args(self, buildtype):
if buildtype != 'plain':
return self.get_target_arch_args() + d_dmd_buildtype_args[buildtype]
return d_dmd_buildtype_args[buildtype]
def get_std_exe_link_args(self):
if is_windows():
# DMD links against D runtime only when main symbol is found,
# so these need to be inserted when linking static D libraries.
if self.arch == 'x86_64':
return ['phobos64.lib']
elif self.arch == 'x86_mscoff':
return ['phobos32mscoff.lib']
return ['phobos.lib']
return []
def get_std_shared_lib_link_args(self):
libname = 'libphobos2.so'
if is_windows():
if self.arch == 'x86_64':
libname = 'phobos64.lib'
elif self.arch == 'x86_mscoff':
libname = 'phobos32mscoff.lib'
else:
libname = 'phobos.lib'
return ['-shared', '-defaultlib=' + libname]
def get_target_arch_args(self):
# DMD32 and DMD64 on 64-bit Windows default to 32-bit (OMF).
# Force the target to 64-bit in order to stay consistent
# across the different platforms.
if is_windows():
if self.arch == 'x86_64':
return ['-m64']
elif self.arch == 'x86_mscoff':
return ['-m32mscoff']
return ['-m32']
return []
def get_crt_compile_args(self, crt_val, buildtype):
return self.get_crt_args(crt_val, buildtype)
@classmethod
def unix_args_to_native(cls, args):
return cls.translate_args_to_nongnu(args)
def get_optimization_args(self, optimization_level):
return dmd_optimization_args[optimization_level]
|
the-stack_0_3416 | from django.apps.registry import Apps
from django.db import DatabaseError, models
from django.utils.functional import classproperty
from django.utils.timezone import now
from .exceptions import MigrationSchemaMissing
class MigrationRecorder:
"""
Deal with storing migration records in the database.
Because this table is actually itself used for dealing with model
creation, it's the one thing we can't do normally via migrations.
We manually handle table creation/schema updating (using schema backend)
and then have a floating model to do queries with.
If a migration is unapplied its row is removed from the table. Having
a row in the table always means a migration is applied.
"""
_migration_class = None
@classproperty
def Migration(cls):
"""
Lazy load to avoid AppRegistryNotReady if installed apps import
MigrationRecorder.
"""
if cls._migration_class is None:
class Migration(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField(default=now)
class Meta:
apps = Apps()
app_label = 'migrations'
db_table = 'django_migrations'
def __str__(self):
return 'Migration %s for %s' % (self.name, self.app)
cls._migration_class = Migration
return cls._migration_class
def __init__(self, connection):
self.connection = connection
@property
def migration_qs(self):
return self.Migration.objects.using(self.connection.alias)
def has_table(self):
"""Return True if the django_migrations table exists."""
with self.connection.cursor() as cursor:
tables = self.connection.introspection.table_names(cursor)
return self.Migration._meta.db_table in tables
def ensure_schema(self):
"""Ensure the table exists and has the correct schema."""
# If the table's there, that's fine - we've never changed its schema
# in the codebase.
if self.has_table():
return
# Make the table
try:
with self.connection.schema_editor() as editor:
editor.create_model(self.Migration)
except DatabaseError as exc:
raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)
def applied_migrations(self):
"""
Return a dict mapping (app_name, migration_name) to Migration instances
for all applied migrations.
"""
if self.has_table():
return {(migration.app, migration.name): migration for migration in self.migration_qs}
else:
# If the django_migrations table doesn't exist, then no migrations
# are applied.
return {}
def record_applied(self, app, name):
"""Record that a migration was applied."""
self.ensure_schema()
self.migration_qs.create(app=app, name=name)
def record_unapplied(self, app, name):
"""Record that a migration was unapplied."""
self.ensure_schema()
self.migration_qs.filter(app=app, name=name).delete()
def flush(self):
"""Delete all migration records. Useful for testing migrations."""
self.migration_qs.all().delete()
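# Illustrative usage sketch (not part of Django itself); it assumes a
# configured Django project with a working database connection:
#
#   from django.db import connection
#   from django.db.migrations.recorder import MigrationRecorder
#
#   recorder = MigrationRecorder(connection)
#   recorder.ensure_schema()                   # creates django_migrations if missing
#   recorder.record_applied('myapp', '0001_initial')
#   ('myapp', '0001_initial') in recorder.applied_migrations()   # -> True
#   recorder.record_unapplied('myapp', '0001_initial')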
|
the-stack_0_3417 | from __future__ import absolute_import, division, print_function
from cfn_model.model.ModelElement import ModelElement
class EC2NetworkInterface(ModelElement):
"""
Ec2 network interface model lement
"""
def __init__(self, cfn_model):
"""
Initialize
:param cfn_model:
"""
ModelElement.__init__(self, cfn_model)
self.groupSet = []
self.ipv6Addresses = []
self.privateIpAddresses = []
self.tags = []
self.security_groups = []
self.resource_type = 'AWS::EC2::NetworkInterface'
|
the-stack_0_3420 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for the LKJ distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.distributions.internal import statistical_testing as st
tfd = tfp.distributions
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
def _det_ok_mask(x, det_bounds, input_output_cholesky=False):
if input_output_cholesky:
logdet = 2.0 * tf.reduce_sum(
input_tensor=tf.math.log(tf.linalg.diag_part(x)), axis=[-1])
else:
_, logdet = tf.linalg.slogdet(x)
return tf.cast(tf.exp(logdet) > det_bounds, dtype=x.dtype)
# Each leaf entry here is a confidence interval for the volume of some
# set of correlation matrices. To wit, k-by-k correlation matrices
# whose determinant is at least d appear as volume_bounds[k][d].
# These particular confidence intervals were estimated by the
# Clopper-Pearson method applied to 10^7 rejection samples, with an
# error probability of 5e-7. This computation may be performed by
# executing the correlation_matrix_volumes program with argument
# --num_samples 1e7. Doing so took about 45 minutes on a standard
# workstation.
volume_bounds = {
3: {0.01: (04.8334339757361420, 4.845866340472709),
0.25: (02.9993127232473036, 3.011629093880439),
0.30: (02.6791373340121916, 2.691146382760893),
0.35: (02.3763254004846030, 2.3879545568875358),
0.40: (02.0898224112869355, 2.1010041316917913),
0.45: (01.8202389505755674, 1.8309117190894892)},
4: {0.01: (10.983339932556953, 11.060156130783517),
0.25: (03.4305021152837020, 3.4764695469900464),
0.30: (02.6624323207206930, 2.703204389589173),
0.35: (02.0431263321809440, 2.0790437132708752),
0.40: (01.5447440594930320, 1.5761221057556805),
0.45: (01.1459065289947180, 1.1730410135527702)},
5: {0.01: (19.081135276668707, 19.523821224876603),
0.20: (02.8632254471072285, 3.0376848112309776),
0.25: (01.8225680180604158, 1.9623522646052605),
0.30: (01.1299612119639912, 1.2406126830051296),
0.35: (00.6871928383147943, 0.7740705901566753),
0.40: (00.4145900446719042, 0.482655106057178)}}
@test_util.run_all_in_graph_and_eager_modes
@parameterized.parameters(np.float32, np.float64)
class LKJTest(parameterized.TestCase, tf.test.TestCase):
def testNormConst2D(self, dtype):
expected = 2.
# 2x2 correlation matrices are determined by one number between -1
# and 1, so the volume of density 1 over all of them is 2.
answer = self.evaluate(tfd.LKJ(2, dtype([1.]))._log_normalization())
self.assertAllClose(answer, np.log([expected]))
def testNormConst3D(self, dtype):
expected = np.pi**2 / 2.
# 3x3 correlation matrices are determined by the three
# lower-triangular entries. In addition to being between -1 and
# 1, they must also obey the constraint that the determinant of
# the resulting symmetric matrix is non-negative. The post
# https://psychometroscar.com/the-volume-of-a-3-x-3-correlation-matrix/
# derives (with elementary calculus) that the volume of this set
# (with respect to Lebesgue^3 measure) is pi^2/2. The same result
# is also obtained by Rousseeuw, P. J., & Molenberghs,
# G. (1994). "The shape of correlation matrices." The American
# Statistician, 48(4), 276-279.
answer = self.evaluate(tfd.LKJ(3, dtype([1.]))._log_normalization())
self.assertAllClose(answer, np.log([expected]))
def _testSampleLogProbExact(self,
concentrations,
det_bounds,
dim,
means,
num_samples=int(1e5),
dtype=np.float32,
target_discrepancy=0.1,
input_output_cholesky=False,
seed=42):
# For test methodology see the comment in
# _testSampleConsistentLogProbInterval, except that this test
# checks those parameter settings where the true volume is known
# analytically.
concentration = np.array(concentrations, dtype=dtype)
det_bounds = np.array(det_bounds, dtype=dtype)
means = np.array(means, dtype=dtype)
# Add a tolerance to guard against some of the importance_weights exceeding
# the theoretical maximum (importance_maxima) due to numerical inaccuracies
# while lower bounding the determinant. See corresponding comment in
# _testSampleConsistentLogProbInterval.
high_tolerance = 1e-6
testee_lkj = tfd.LKJ(
dimension=dim,
concentration=concentration,
input_output_cholesky=input_output_cholesky,
validate_args=True)
x = testee_lkj.sample(num_samples, seed=seed)
importance_weights = (
tf.exp(-testee_lkj.log_prob(x)) * _det_ok_mask(x, det_bounds,
input_output_cholesky))
importance_maxima = (1. / det_bounds) ** (concentration - 1) * tf.exp(
testee_lkj._log_normalization())
chk1 = st.assert_true_mean_equal_by_dkwm(
importance_weights, low=0., high=importance_maxima + high_tolerance,
expected=means, false_fail_rate=1e-6)
chk2 = tf.compat.v1.assert_less(
st.min_discrepancy_of_true_means_detectable_by_dkwm(
num_samples,
low=0.,
high=importance_maxima + high_tolerance,
false_fail_rate=1e-6,
false_pass_rate=1e-6), dtype(target_discrepancy))
self.evaluate([chk1, chk2])
def testSampleConsistentLogProb2(self, dtype):
concentrations = np.array([
1.00, 1.30, 1.50, 1.70, 1.90, 2.00, 2.10, 2.50, 3.00])
det_bounds = np.array([
0.01, 0.25, 0.30, 0.40, 0.50, 0.50, 0.50, 0.70, 0.70])
exact_volumes = 2 * np.sqrt(1. - det_bounds)
for input_output_cholesky in [True, False]:
self._testSampleLogProbExact(
concentrations,
det_bounds,
2,
exact_volumes,
num_samples=int(1.1e5),
dtype=dtype,
input_output_cholesky=input_output_cholesky,
target_discrepancy=0.05,
seed=41)
def _testSampleConsistentLogProbInterval(self,
concentrations,
det_bounds,
dim,
num_samples=int(1e5),
dtype=np.float32,
input_output_cholesky=False,
false_fail_rate=1e-6,
target_discrepancy=0.1,
seed=42):
# Consider the set M of dim x dim correlation matrices whose
# determinant exceeds some bound (rationale for bound forthwith).
# - This is a (convex!) shape in dim * (dim - 1) / 2 dimensions
# (because a correlation matrix is determined by its lower
# triangle, and the main diagonal is all 1s).
# - Further, M is contained entirely in the [-1,1] cube,
# because no correlation can fall outside that interval.
#
# We have two different ways to estimate the volume of M:
# - Importance sampling from the LKJ distribution
# - Importance sampling from the uniform distribution on the cube
#
# This test checks that these two methods agree. However, because
# the uniform proposal leads to many rejections (thus slowness),
# those volumes are computed offline and the confidence intervals
# are presented to this test procedure in the "volume_bounds"
# table.
#
# Why place a lower bound on the determinant? Because for eta > 1,
# the density of LKJ approaches 0 as the determinant approaches 0.
# However, the test methodology requires an upper bound on the
# importance weights produced. Rejecting matrices with too-small
# determinant (from both methods) allows me to supply that bound.
#
# I considered several alternative regions whose volume I might
# know analytically (without having to do rejection).
# - Option a: Some hypersphere guaranteed to be contained inside M.
# - Con: I don't know a priori how to find a radius for it.
# - Con: I still need a lower bound on the determinants that appear
# in this sphere, and I don't know how to compute it.
# - Option b: Some trapezoid given as the convex hull of the
# nearly-extreme correlation matrices (i.e., those that partition
# the variables into two strongly anti-correlated groups).
# - Con: Would have to dig up n-d convex hull code to implement this.
# - Con: Need to compute the volume of that convex hull.
# - Con: Need a bound on the determinants of the matrices in that hull.
# - Option c: Same thing, but with the matrices that make a single pair
# of variables strongly correlated (or anti-correlated), and leaves
# the others uncorrelated.
# - Same cons, except that there is a determinant bound (which
# felt pretty loose).
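# Concretely, the Monte Carlo estimate computed below is
#   Vol(M) ~= (1/N) * sum_i 1{det(x_i) > det_bound} / p_LKJ(x_i), x_i ~ LKJ,
# where each summand is an entry of `importance_weights`. Since p_LKJ(x) is
# proportional to det(x)**(concentration - 1), each weight is bounded above
# by `importance_maxima`, which is the upper bound the DKWM checks require.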
lows = [dtype(volume_bounds[dim][db][0]) for db in det_bounds]
highs = [dtype(volume_bounds[dim][db][1]) for db in det_bounds]
concentration = np.array(concentrations, dtype=dtype)
det_bounds = np.array(det_bounds, dtype=dtype)
# Due to possible numerical inaccuracies while lower bounding the
# determinant, the maximum of the importance weights may exceed the
# theoretical maximum (importance_maxima). We add a tolerance to guard
# against this. An alternative would have been to add a threshold while
# filtering in _det_ok_mask, but that would affect the mean as well.
high_tolerance = 1e-6
testee_lkj = tfd.LKJ(
dimension=dim,
concentration=concentration,
input_output_cholesky=input_output_cholesky,
validate_args=True)
x = testee_lkj.sample(num_samples, seed=seed)
importance_weights = (
tf.exp(-testee_lkj.log_prob(x)) * _det_ok_mask(x, det_bounds,
input_output_cholesky))
importance_maxima = (1. / det_bounds) ** (concentration - 1) * tf.exp(
testee_lkj._log_normalization())
check1 = st.assert_true_mean_in_interval_by_dkwm(
samples=importance_weights,
low=0.,
high=importance_maxima + high_tolerance,
expected_low=lows,
expected_high=highs,
false_fail_rate=false_fail_rate)
check2 = tf.compat.v1.assert_less(
st.min_discrepancy_of_true_means_detectable_by_dkwm(
num_samples,
low=0.,
high=importance_maxima + high_tolerance,
false_fail_rate=false_fail_rate,
false_pass_rate=false_fail_rate), dtype(target_discrepancy))
self.evaluate([check1, check2])
def testSampleConsistentLogProbInterval3(self, dtype):
# The hardcoded volume boundaries are (5e-7)-confidence intervals
# of a rejection sampling run. Ergo, I only have 5e-7 probability
# mass left for the false fail rate of the test so the aggregate
# false fail probability is 1e-6.
concentrations = [
1.00, 1.30, 1.50, 1.70, 1.90, 2.00, 2.10, 2.50, 3.00]
det_bounds = [
0.01, 0.25, 0.25, 0.30, 0.35, 0.35, 0.35, 0.40, 0.45]
for input_output_cholesky in [True, False]:
self._testSampleConsistentLogProbInterval(
concentrations,
det_bounds,
3,
dtype=dtype,
input_output_cholesky=input_output_cholesky,
false_fail_rate=5e-7,
target_discrepancy=0.11,
seed=40)
def testSampleConsistentLogProbInterval4(self, dtype):
# The hardcoded volume boundaries are (5e-7)-confidence intervals
# of a rejection sampling run. Ergo, I only have 5e-7 probability
# mass left for the false fail rate of the test so the aggregate
# false fail probability is 1e-6.
concentrations = [
1.00, 1.30, 1.50, 1.70, 1.90, 2.00, 2.10, 2.50, 3.00]
det_bounds = [
0.01, 0.25, 0.25, 0.30, 0.35, 0.35, 0.35, 0.40, 0.45]
for input_output_cholesky in [True, False]:
self._testSampleConsistentLogProbInterval(
concentrations,
det_bounds,
4,
dtype=dtype,
input_output_cholesky=input_output_cholesky,
false_fail_rate=5e-7,
target_discrepancy=0.22,
seed=39)
def testSampleConsistentLogProbInterval5(self, dtype):
# The hardcoded volume boundaries are (5e-7)-confidence intervals
# of a rejection sampling run. Ergo, I only have 5e-7 probability
# mass left for the false fail rate of the test so the aggregate
# false fail probability is 1e-6.
concentrations = [
1.00, 1.30, 1.50, 1.70, 1.90, 2.00, 2.10, 2.50, 3.00]
det_bounds = [
0.01, 0.20, 0.20, 0.25, 0.30, 0.30, 0.30, 0.35, 0.40]
for input_output_cholesky in [True, False]:
self._testSampleConsistentLogProbInterval(
concentrations,
det_bounds,
5,
dtype=dtype,
input_output_cholesky=input_output_cholesky,
false_fail_rate=5e-7,
target_discrepancy=0.41,
seed=37)
def testDimensionGuard(self, dtype):
testee_lkj = tfd.LKJ(
dimension=3, concentration=dtype([1., 4.]), validate_args=True)
with self.assertRaisesRegexp(ValueError, 'dimension mismatch'):
testee_lkj.log_prob(tf.eye(4))
def testZeroDimension(self, dtype):
testee_lkj = tfd.LKJ(
dimension=0, concentration=dtype([1., 4.]), validate_args=True)
results = testee_lkj.sample(sample_shape=[4, 3])
self.assertEqual(results.shape, [4, 3, 2, 0, 0])
def testOneDimension(self, dtype):
testee_lkj = tfd.LKJ(
dimension=1, concentration=dtype([1., 4.]), validate_args=True)
results = testee_lkj.sample(sample_shape=[4, 3])
self.assertEqual(results.shape, [4, 3, 2, 1, 1])
def testMean(self, dtype):
testee_lkj = tfd.LKJ(dimension=3, concentration=dtype([1., 3., 5.]))
num_samples = 20000
results = testee_lkj.sample(sample_shape=[num_samples])
mean = testee_lkj.mean()
self.assertEqual(mean.shape, [3, 3, 3])
check1 = st.assert_true_mean_equal_by_dkwm(
samples=results, low=-1., high=1.,
expected=mean,
false_fail_rate=1e-6)
check2 = tf.compat.v1.assert_less(
st.min_discrepancy_of_true_means_detectable_by_dkwm(
num_samples,
low=-1.,
high=1.,
# Smaller false fail rate because of different batch sizes between
# these two checks.
false_fail_rate=1e-7,
false_pass_rate=1e-6),
# 4% relative error
0.08)
self.evaluate([check1, check2])
class LKJTestGraphOnly(tf.test.TestCase):
def testDimensionGuardDynamicShape(self):
if tf.executing_eagerly():
return
testee_lkj = tfd.LKJ(
dimension=3, concentration=[1., 4.], validate_args=True)
with self.assertRaisesOpError('dimension mismatch'):
self.evaluate(
testee_lkj.log_prob(
tf.compat.v1.placeholder_with_default(tf.eye(4), shape=None)))
if __name__ == '__main__':
tf.test.main()
|
the-stack_0_3422 | import cv2
import mediapipe as mp
import numpy as np
import pyautogui
from google.protobuf.json_format import MessageToDict
from datetime import datetime
import os
from os import path
import time
from tkinter import *
from tkinter import filedialog
from PIL import Image
from PIL import ImageTk
import imutils
import sys
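# Gesture-to-action summary (derived from the logic in hands_detection below):
# the pointer follows the wrist landmark (point 0) inside the capture
# rectangle, and pinching the thumb tip (point 4) against a fingertip
# triggers an action.
# Right hand: index = left click, middle = middle click, ring = right click,
# pinky = press-and-hold for dragging.
# Left hand: index = volume up, middle = screenshot saved to the images
# folder, ring = volume down, pinky = type a quick reply message.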
def hands_detection(frame):
global bclick
global xp, yp
global xclick_menique, yclick_menique
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_hands = mp.solutions.hands
color_pointer = (255,255,255)
ANCHO_P=1920
ALTO_P=1080
RATIO=ANCHO_P/ALTO_P
X=100
Y=200
xmano_ant=0
ymano_ant=0
b=3
pyautogui.FAILSAFE=False
with mp_hands.Hands(
static_image_mode=False,
max_num_hands=1,
min_detection_confidence=0.5) as hands:
height, width, _ = frame.shape
frame = cv2.flip(frame, 1)
area_width = width - X*2
area_height = int(area_width/RATIO)
aux = np.zeros(frame.shape, np.uint8)
aux = cv2.rectangle(aux,(X,Y),(X + area_width, Y + area_height), (255, 0, 0),-1)
output=cv2.addWeighted(frame,1,aux,0.7,0)
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
results = hands.process(frame_rgb)
if results.multi_hand_landmarks is not None:
for hand_landmarks in results.multi_hand_landmarks:
# Get hand data
handesness_dict = MessageToDict(results.multi_handedness[0])
# Type of hand (left or right)
type_hand = handesness_dict['classification'][0]['label']
# level of certainty
certainty_score = handesness_dict['classification'][0]['score']
# If the prediction is not confident enough, skip this hand
if(certainty_score<0.9):
continue
# Measure the hand landmark coordinates
xmano = int(hand_landmarks.landmark[0].x * width)
ymano = int(hand_landmarks.landmark[0].y * height)
xbase = int(hand_landmarks.landmark[4].x * width)
ybase = int(hand_landmarks.landmark[4].y * height)
xindice = int(hand_landmarks.landmark[8].x * width)
yindice = int(hand_landmarks.landmark[8].y * height)
xmedio = int(hand_landmarks.landmark[12].x * width)
ymedio = int(hand_landmarks.landmark[12].y * height)
xanular = int(hand_landmarks.landmark[16].x * width)
yanular = int(hand_landmarks.landmark[16].y * height)
xmenique = int(hand_landmarks.landmark[20].x * width)
ymenique = int(hand_landmarks.landmark[20].y * height)
# Measurements between the thumb (base) and each fingertip
xclick_indice = xbase-xindice
yclick_indice = ybase-yindice
xclick_medio = xbase - xmedio
yclick_medio = ybase - ymedio
xclick_menique = xbase - xmenique
yclick_menique = ybase - ymenique
xclick_anular = xbase - xanular
yclick_anular = ybase - yanular
distancia_indice = int((xclick_indice**2 + yclick_indice**2)**(1/2))
distancia_medio = int((xclick_medio ** 2 + yclick_medio ** 2) ** (1 / 2))
distancia_anular = int((xclick_anular ** 2 + yclick_anular ** 2) ** (1 / 2))
distancia_menique = int((xclick_menique ** 2 + yclick_menique ** 2)** (1 / 2))
# Move the mouse pointer with either hand (only when movement exceeds the threshold b)
if((xmano<= xmano_ant-b) | (xmano>=xmano_ant+b)):
xmano_ant = xmano
if ((ymano <= ymano_ant - b) | (ymano >= ymano_ant + b)):
ymano_ant = ymano
xp = np.interp(xmano_ant, (X,X+ area_width), (0,ANCHO_P))
yp = np.interp(ymano_ant, (Y, Y + area_height), (0, ALTO_P))
pyautogui.moveTo(int(xp),int(yp))
# The right hand will have the mouse options
if(type_hand == 'Right'):
# Left click
if(distancia_indice<=50):
if(bclick[0]==False):
print("Click")
pyautogui.leftClick()
bclick[0]=True
if(distancia_indice>=60):
if(bclick[0]==True):
bclick[0]=False
# Middle click
if (distancia_medio<=50):
if (bclick[1] == False):
print("Click")
pyautogui.middleClick()
bclick[1] = True
if (distancia_medio>=60):
if (bclick[1] == True):
bclick[1] = False
# Right click
if (distancia_anular<=50):
if (bclick[2] == False):
print("Click")
pyautogui.rightClick()
bclick[2] = True
if (distancia_anular>=60):
if (bclick[2] == True):
bclick[2] = False
# Drag
if (distancia_menique<=50):
if (bclick[3] == False):
print("Arrastrar")
pyautogui.mouseDown()
bclick[3] = True
else:
pyautogui.moveTo(xp, yp)
if (distancia_menique>=60):
if (bclick[3] == True):
pyautogui.mouseUp()
bclick[3] = False
# The left hand will be able to set audio, brightness, etc
else:
# Volume up
if(distancia_indice<=30):
if(bclick[0]==False):
print("Volume up")
pyautogui.press("volumeup")
bclick[0]=True
if(distancia_indice>=40):
if(bclick[0]==True):
bclick[0]=False
# Screenshot
# The screenshot will be saved in the images folder, named after
# the current date and time
if (distancia_medio<=50):
if (bclick[1] == False):
print("Screenshot")
now = datetime.now()
print(now.strftime("%d-%m-%Y_%H-%M-%S"))
image_name = folder+"/"+now.strftime("%d-%m-%Y_%H-%M-%S")+".png"
pyautogui.screenshot(image_name)
bclick[1] = True
if (distancia_medio>=60):
if (bclick[1] == True):
bclick[1] = False
# Volume down
if (distancia_anular<=30):
if (bclick[2] == False):
print("Volume down")
pyautogui.press("volumedown")
bclick[2] = True
if (distancia_anular>=40):
if (bclick[2] == True):
bclick[2] = False
# Quick reply text
if (distancia_menique<=50):
if (bclick[3] == False):
print("Texto")
pyautogui.typewrite("No puedo contestar por el momento, te marco cuanto me desocupe")
bclick[3] = True
if (distancia_menique>=60):
if (bclick[3] == True):
bclick[3] = False
def visualizar(lblVideo):
global cap
global xp, yp
if cap is not None:
ret, frame = cap.read()
if ret == True:
frame = imutils.resize(frame,width=640)
hands_detection(frame)
frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
im = Image.fromarray(frame)
img = ImageTk.PhotoImage(image=im)
lblVideo.configure(image=img)
lblVideo.image = img
lblVideo.after(1,lambda : visualizar(lblVideo))
else:
lblVideo.image = ""
cap.release()
def iniciar():
global cap
global counter
global bclick
global xp, yp
bclick = np.full((4,1), False)
xp = 0
yp = 0
if counter < 1:
counter+=1
cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)
# Video
video = Toplevel()
lblVideo = Label(video)
lblVideo.grid(column=0,row=0,columnspan=2)
visualizar(lblVideo)
def finalizar():
global cap
if cap is not None:
cap.release()
sys.exit(0)
def main():
global cap
cap = None
global counter
counter = 0
global folder
# Set folder name for screenshots
folder = "./images"
# Check if the folder containing the images exists. If not, create it
if(not path.isdir(folder)):
os.mkdir(folder)
# Start main frame
root = Tk()
root.title('Hands-Free Mouse')
root.iconphoto(False, PhotoImage(file='./icons/icon.png'))
root.geometry('400x300+700+200')
root.configure(bg='black')
# Image
m_im = Image.open("./icons/hand.jpg")
m_im = m_im.resize((300,250), Image.ANTIALIAS)
m_image = ImageTk.PhotoImage(m_im)
main_image = Label(root, image=m_image)
main_image.grid(column=0, row=0, columnspan=2)
main_image.image = m_image
# Create a button to start the application
btn = Button(root, text="Iniciar", width=25, command=iniciar, bg='white')
btn.grid(column=0,row=1,padx=5,pady=5)
# Create a button to finish the application
btnFinalizar = Button(root, text="Finalizar", width=25, command=finalizar, bg='white')
btnFinalizar.grid(column=1,row=1,padx=5,pady=5)
# Create an event loop
root.mainloop()
# Destroy all
cap.release()
cv2.destroyAllWindows()
if __name__=="__main__":
main()
|
the-stack_0_3424 | import numpy as np
from bpn import new, trf
from bpn.utils import get
from numpy.linalg.linalg import inv
def demo():
"""camera, object = demo()"""
d = Dancer()
d.translate((2, 1, 0))
d.turn(30)
d.turn_head(45)
return d
class Dancer:
def __init__(self):
# create dancer
body = new.cone(name='body',r1=0.2, r2=0.2, h=1.5)
body.translate(z=0.75)
body.scale((0.5, 1, 1))
body.apply_matrix()
head = new.cone(name='head', r1=0.2, r2=0, h=0.3)
head.rotate((0.,90.,0.))
head.translate(x=0.1)
head.apply_matrix()
head.translate(z=1.6)
self.gp = []
self.gp.append(body.show_frame())
self.gp.append(head.show_frame())
# create markers
m1 = new.sphere(name='m1', r=0.05)
self.m1_pos = trf.PointCloud((-0.1, 0, 1)) # both body frame and world frame
self.body = body
self.head = head
self.m1 = m1
self.screen = new.empty()
self.screen.frame = self.head.frame
self.screen.translate((1, 0, -0.3))
self.m1viz = new.sphere(name='m1viz', r=0.08)
self._update_m1()
def translate(self, delta=(0., 0., 0.)):
self.body.translate(delta)
self.head.translate(delta)
self.screen.translate(delta)
self._update_m1()
self.body.show_frame()
self.head.show_frame()
self.screen.show_frame()
def turn(self, angle_deg):
self.body.rotate((0., 0., angle_deg))
self.screen.frame = self.screen.frame.transform(trf.m4(trf.twisttf(angle_deg*np.pi/180)), tf_frame=self.head.frame)
self.head.rotate((0., 0., angle_deg))
self._update_m1()
self.body.show_frame()
self.head.show_frame()
self.screen.show_frame()
def turn_head(self, angle_deg):
self.head.rotate((0., 0., angle_deg))
self.screen.frame = self.screen.frame.transform(trf.m4(trf.twisttf(angle_deg*np.pi/180)), tf_frame=self.head.frame)
self.head.show_frame()
self.screen.show_frame()
self._update_m1()
def _update_m1(self):
self.m1.loc = self.m1_pos.transform(self.body.frame.m).co[0]
self.m1viz.loc = (self.screen.frame.m@inv(self.body.frame.m)@np.hstack((self.m1.loc, 1)))[:-1]
# self.m1viz.loc = trf.PointCloud(trf.PointCloud(self.m1.loc).in_frame(self.body.frame.m).co[0], self.screen.frame).in_world().co[0]
def __neg__(self):
-self.body
-self.head
-self.m1
for g in self.gp:
-g
|
the-stack_0_3425 | """
Bayesian Network class
"""
import pandas as pd
from .conditional_probability_table import ConditionalProbabilityTable as CPT
from .directed_acyclic_graph import DirectedAcyclicGraph
from .markov_network import MarkovNetwork
from .factor import Factor
from .null_graphviz_dag import NullGraphvizDag
class BayesianNetwork(DirectedAcyclicGraph):
"""
Bayesian Network that stores ConditionalProbabilityTables.
Parameters:
cpts: list[ConditionalProbabilityTable]. Optional.
Meant for specifying conditional probability tables of variables
that are endogenous.
priors: list[ConditionalProbabilityTable]. Optional.
Meant for probability tables of variables that are exogenous.
graphviz_dag: DiGraph
Could be used to display the graph.
"""
def __init__(self, cpts=None, priors=None, graphviz_dag=None):
super().__init__()
if graphviz_dag is None:
self.graphviz_dag = NullGraphvizDag()
else:
self.graphviz_dag = graphviz_dag
if cpts is None:
self.cpts = {}
else:
self.cpts = {}
for cpt in cpts:
self.add_edge(cpt)
if priors:
for prior_cpt in priors:
self.add_node(prior_cpt)
def __repr__(self):
return f"BayesianNetwork(\n\t{self.cpts})"
def add_prior(self, cpt):
"""
Add a conditional probability table. This adds a node.
Parameters
cpt: ConditionalProbabilityTable
"""
self.add_node(cpt)
def set_priors(self, dictionary, data_class, data_storage_folder=None):
"""
Parameters:
dictionary: dict
Ex: {
'prior_var_a': {
'value_it_can_take_1': 0.2,
'value_it_can_take_2': 0.3,
...
}
'prior_var_b': {
'value_it_can_take_1': 0.4,
'value_it_can_take_2': 0.2,
...
}
}
"""
for prior_var, mapping in dictionary.items():
collection = []
for value_prior_var_can_take, proba in mapping.items():
collection.append(
{
prior_var: value_prior_var_can_take,
'value': proba
}
)
df = pd.DataFrame(collection)
givens = list(set(df.columns) - {'value', prior_var})
cpt = CPT(
data_class(
df,
data_storage_folder
),
givens=givens,
outcomes=[prior_var]
)
self.add_prior(cpt)
def add_cpt(self, cpt):
"""
Add a conditional probability table. This in turn adds an edge.
Parameters
cpt: ConditionalProbabilityTable
"""
self.add_edge(cpt)
def add_node(self, cpt):
"""
Add a conditional probability table. This adds a node.
Parameters:
cpt: ConditionalProbabilityTable
"""
outcomes = cpt.get_outcomes()
if cpt.get_givens():
raise ValueError(
"There should not be any givens for the CPT when adding a"
+ " node."
)
if len(outcomes) != 1:
raise ValueError(
"There should only be one outcome for a CPT of a "
+ "Bayesian Network."
)
for outcome in outcomes:
self.cpts[outcome] = cpt
self.graphviz_dag.node(outcome)
super().add_node(outcome)
def add_edge(self, cpt):
"""
Add a conditional probability table. This in turn adds an edge.
Parameters:
cpt: ConditionalProbabilityTable
"""
outcomes = cpt.get_outcomes()
givens = cpt.get_givens()
if len(outcomes) != 1:
raise ValueError(
"There should only be one outcome for a CPT of a "
+ "Bayesian Network."
)
for outcome in outcomes:
self.cpts[outcome] = cpt
for given in givens:
self.graphviz_dag.edge(given, outcome)
super().add_edge(start=given, end=outcome)
def find_cpt_for_node(self, node):
"""
Find conditional probability table for node.
Parameters:
node: str
Returns: ConditionalProbabilityTable
"""
return self.cpts[node]
def to_markov_network(self):
"""
Returns: MarkovNetwork
"""
markov_network = MarkovNetwork()
for _, cpt in self.cpts.items():
factor = Factor(cpt=cpt)
markov_network.add_factor(factor)
return markov_network
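# Illustrative usage sketch (the data wrapper class and storage folder below
# are assumptions for the example, not part of this module):
#
#   bn = BayesianNetwork()
#   bn.set_priors({'rain': {'yes': 0.2, 'no': 0.8}},
#                 data_class=SomeDataWrapper,          # hypothetical wrapper
#                 data_storage_folder='/tmp/bn-data')  # hypothetical path
#   bn.add_cpt(sprinkler_given_rain_cpt)   # a ConditionalProbabilityTable
#   mn = bn.to_markov_network()            # convert the CPTs into factors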
|
the-stack_0_3428 | import operator
import uuid
from functools import reduce
import arrow
import django_filters
from arrow.parser import ParserError
from django.conf import settings
from guardian.core import ObjectPermissionChecker
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import (
PermissionDenied, ValidationError as DjangoValidationError
)
from django.db.models import Q
from django.utils import timezone
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets, serializers, filters, exceptions, permissions
from rest_framework.authentication import TokenAuthentication, SessionAuthentication
from rest_framework.fields import BooleanField, IntegerField
from rest_framework import renderers
from rest_framework.exceptions import NotAcceptable, ValidationError
from rest_framework.settings import api_settings as drf_settings
from munigeo import api as munigeo_api
from resources.models import (
Reservation, Resource, ReservationMetadataSet, ReservationCancelReasonCategory, ReservationCancelReason)
from resources.models.reservation import RESERVATION_EXTRA_FIELDS
from resources.pagination import ReservationPagination
from resources.models.utils import generate_reservation_xlsx, get_object_or_none
from ..auth import is_general_admin
from .base import (
NullableDateTimeField, TranslatedModelSerializer, register_view, DRFFilterBooleanWidget,
ExtraDataMixin
)
from respa.renderers import ResourcesBrowsableAPIRenderer
User = get_user_model()
# FIXME: Make this configurable?
USER_ID_ATTRIBUTE = 'id'
try:
User._meta.get_field('uuid')
USER_ID_ATTRIBUTE = 'uuid'
except Exception:
pass
class UserSerializer(TranslatedModelSerializer):
display_name = serializers.ReadOnlyField(source='get_display_name')
email = serializers.ReadOnlyField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if USER_ID_ATTRIBUTE == 'id':
# id field is read_only by default, that needs to be changed
# so that the field will be validated
self.fields['id'] = IntegerField(label='ID')
else:
# if the user id attribute isn't id, modify the id field to point to the right attribute.
# the field needs to be of the right type so that validation works correctly
model_field_type = type(get_user_model()._meta.get_field(USER_ID_ATTRIBUTE))
serializer_field = self.serializer_field_mapping[model_field_type]
self.fields['id'] = serializer_field(source=USER_ID_ATTRIBUTE, label='ID')
class Meta:
model = get_user_model()
fields = ('id', 'display_name', 'email')
class ReservationCancelReasonCategorySerializer(TranslatedModelSerializer):
class Meta:
model = ReservationCancelReasonCategory
fields = [
'id', 'reservation_type', 'name', 'description'
]
class ReservationCancelReasonSerializer(serializers.ModelSerializer):
category = ReservationCancelReasonCategorySerializer(read_only=True)
category_id = serializers.PrimaryKeyRelatedField(write_only=True,
source='category',
queryset=ReservationCancelReasonCategory.objects.all())
class Meta:
model = ReservationCancelReason
fields = [
'category', 'description', 'reservation', 'category_id'
]
class ReservationSerializer(ExtraDataMixin, TranslatedModelSerializer, munigeo_api.GeoModelSerializer):
begin = NullableDateTimeField()
end = NullableDateTimeField()
user = UserSerializer(required=False)
is_own = serializers.SerializerMethodField()
state = serializers.ChoiceField(choices=Reservation.STATE_CHOICES, required=False)
need_manual_confirmation = serializers.ReadOnlyField()
user_permissions = serializers.SerializerMethodField()
cancel_reason = ReservationCancelReasonSerializer(required=False)
patchable_fields = ['state', 'cancel_reason']
class Meta:
model = Reservation
fields = [
'url', 'id', 'resource', 'user', 'begin', 'end', 'comments', 'is_own', 'state', 'need_manual_confirmation',
'staff_event', 'access_code', 'user_permissions', 'type', 'cancel_reason'
] + list(RESERVATION_EXTRA_FIELDS)
read_only_fields = list(RESERVATION_EXTRA_FIELDS)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
data = self.get_initial()
resource = None
# try to find out the related resource using initial data if that is given
resource_id = data.get('resource') if data else None
if resource_id:
resource = get_object_or_none(Resource, id=resource_id)
# if that didn't work out use the reservation's old resource if such exists
if not resource:
if isinstance(self.instance, Reservation) and isinstance(self.instance.resource, Resource):
resource = self.instance.resource
# set supported and required extra fields
if resource:
cache = self.context.get('reservation_metadata_set_cache')
supported = resource.get_supported_reservation_extra_field_names(cache=cache)
required = resource.get_required_reservation_extra_field_names(cache=cache)
# staff events have less requirements
request_user = self.context['request'].user
is_staff_event = data.get('staff_event', False)
if is_staff_event and resource.can_create_staff_event(request_user):
required = {'reserver_name', 'event_description'}
# we don't need to remove a field here if it isn't supported, as it will be read-only and will be more
# easily removed in to_representation()
for field_name in supported:
self.fields[field_name].read_only = False
for field_name in required:
self.fields[field_name].required = True
self.context.update({'resource': resource})
def get_extra_fields(self, includes, context):
""" Define extra fields that can be included via query parameters. Method from ExtraDataMixin."""
from .resource import ResourceInlineSerializer
extra_fields = {}
if 'resource_detail' in includes:
extra_fields['resource'] = ResourceInlineSerializer(read_only=True, context=context)
return extra_fields
def validate_state(self, value):
instance = self.instance
request_user = self.context['request'].user
# new reservations will get their value regardless of this value
if not instance:
return value
# state not changed
if instance.state == value:
return value
if instance.resource.can_approve_reservations(request_user):
allowed_states = (Reservation.REQUESTED, Reservation.CONFIRMED, Reservation.DENIED)
if instance.state in allowed_states and value in allowed_states:
return value
if instance.can_modify(request_user) and value == Reservation.CANCELLED:
return value
raise ValidationError(_('Illegal state change'))
def validate(self, data):
reservation = self.instance
request_user = self.context['request'].user
# this check is probably only needed for PATCH
try:
resource = data['resource']
except KeyError:
resource = reservation.resource
if not resource.can_make_reservations(request_user):
raise PermissionDenied(_('You are not allowed to make reservations in this resource.'))
if 'end' in data and data['end'] < timezone.now():
raise ValidationError(_('You cannot make a reservation in the past'))
if not resource.can_ignore_opening_hours(request_user):
reservable_before = resource.get_reservable_before()
if reservable_before and data['begin'] >= reservable_before:
raise ValidationError(_('The resource is reservable only before %(datetime)s' %
{'datetime': reservable_before}))
reservable_after = resource.get_reservable_after()
if reservable_after and data['begin'] < reservable_after:
raise ValidationError(_('The resource is reservable only after %(datetime)s' %
{'datetime': reservable_after}))
# normal users cannot make reservations for other people
if not resource.can_create_reservations_for_other_users(request_user):
data.pop('user', None)
# Check user specific reservation restrictions relating to given period.
resource.validate_reservation_period(reservation, request_user, data=data)
if data.get('staff_event', False):
if not resource.can_create_staff_event(request_user):
raise ValidationError(dict(staff_event=_('Only allowed to be set by resource managers')))
if 'type' in data:
if (data['type'] != Reservation.TYPE_NORMAL and
not resource.can_create_special_type_reservation(request_user)):
raise ValidationError({'type': _('You are not allowed to make a reservation of this type')})
if 'comments' in data:
if not resource.can_comment_reservations(request_user):
raise ValidationError(dict(comments=_('Only allowed to be set by staff members')))
if 'access_code' in data:
if data['access_code'] is None:
data['access_code'] = ''
access_code_enabled = resource.is_access_code_enabled()
if not access_code_enabled and data['access_code']:
raise ValidationError(dict(access_code=_('This field cannot have a value with this resource')))
if access_code_enabled and reservation and data['access_code'] != reservation.access_code:
raise ValidationError(dict(access_code=_('This field cannot be changed')))
# Mark begin of a critical section. Subsequent calls with this same resource will block here until the first
# request is finished. This is needed so that the validations and possible reservation saving are
# executed in one block and concurrent requests cannot be validated incorrectly.
Resource.objects.select_for_update().get(pk=resource.pk)
# Check maximum number of active reservations per user per resource.
# Only new reservations are taken into account ie. a normal user can modify an existing reservation
# even if it exceeds the limit. (one that was created via admin ui for example).
if reservation is None:
resource.validate_max_reservations_per_user(request_user)
if self.context['request'] and self.context['request'].method == 'PATCH':
for key, val in data.items():
if key not in self.patchable_fields:
raise ValidationError(_('Patching of field %(field)s is not allowed' % {'field': key}))
else:
# Run model clean
instance = Reservation(**data)
try:
instance.clean(original_reservation=reservation, user=request_user)
except DjangoValidationError as exc:
# Convert Django ValidationError to DRF ValidationError so that in the response
# field specific error messages are added in the field instead of in non_field_messages.
if not hasattr(exc, 'error_dict'):
raise ValidationError(exc)
error_dict = {}
for key, value in exc.error_dict.items():
error_dict[key] = [error.message for error in value]
raise ValidationError(error_dict)
return data
def to_internal_value(self, data):
user_data = data.copy().pop('user', None) # handle user manually
deserialized_data = super().to_internal_value(data)
# validate user and convert it to User object
if user_data:
UserSerializer(data=user_data).is_valid(raise_exception=True)
try:
deserialized_data['user'] = User.objects.get(**{USER_ID_ATTRIBUTE: user_data['id']})
except User.DoesNotExist:
raise ValidationError({
'user': {
'id': [_('Invalid pk "{pk_value}" - object does not exist.').format(pk_value=user_data['id'])]
}
})
return deserialized_data
def to_representation(self, instance):
data = super(ReservationSerializer, self).to_representation(instance)
resource = instance.resource
prefetched_user = self.context.get('prefetched_user', None)
user = prefetched_user or self.context['request'].user
if self.context['request'].accepted_renderer.format == 'xlsx':
# Return somewhat different data in case we are dealing with xlsx.
# The excel renderer needs datetime objects, so begin and end are passed as objects
# to avoid needing to convert them back and forth.
data.update(**{
'unit': resource.unit.name, # additional
'resource': resource.name, # resource name instead of id
'begin': instance.begin, # datetime object
'end': instance.end, # datetime object
'user': instance.user.email if instance.user else '', # just email
'created_at': instance.created_at
})
if not resource.can_access_reservation_comments(user):
del data['comments']
if not resource.can_view_reservation_user(user):
del data['user']
if instance.are_extra_fields_visible(user):
cache = self.context.get('reservation_metadata_set_cache')
supported_fields = set(resource.get_supported_reservation_extra_field_names(cache=cache))
else:
del data['cancel_reason']
supported_fields = set()
for field_name in RESERVATION_EXTRA_FIELDS:
if field_name not in supported_fields:
data.pop(field_name, None)
if not (resource.is_access_code_enabled() and instance.can_view_access_code(user)):
data.pop('access_code')
if 'access_code' in data and data['access_code'] == '':
data['access_code'] = None
if instance.can_view_catering_orders(user):
data['has_catering_order'] = instance.catering_orders.exists()
return data
def update(self, instance, validated_data):
request = self.context['request']
cancel_reason = validated_data.pop('cancel_reason', None)
new_state = validated_data.pop('state', instance.state)
validated_data['modified_by'] = request.user
reservation = super().update(instance, validated_data)
if new_state in [Reservation.DENIED, Reservation.CANCELLED] and cancel_reason:
if hasattr(instance, 'cancel_reason'):
instance.cancel_reason.delete()
cancel_reason['reservation'] = reservation
reservation.cancel_reason = ReservationCancelReason(**cancel_reason)
reservation.cancel_reason.save()
reservation.set_state(new_state, request.user)
return reservation
def get_is_own(self, obj):
return obj.user == self.context['request'].user
def get_user_permissions(self, obj):
request = self.context.get('request')
prefetched_user = self.context.get('prefetched_user', None)
user = prefetched_user or request.user
can_modify_and_delete = obj.can_modify(user) if request else False
return {
'can_modify': can_modify_and_delete,
'can_delete': can_modify_and_delete,
}
class UserFilterBackend(filters.BaseFilterBackend):
"""
Filter by user uuid and by is_own.
"""
def filter_queryset(self, request, queryset, view):
user = request.query_params.get('user', None)
if user:
try:
user_uuid = uuid.UUID(user)
except ValueError:
raise exceptions.ParseError(_('Invalid value in filter %(filter)s') % {'filter': 'user'})
queryset = queryset.filter(user__uuid=user_uuid)
if not request.user.is_authenticated:
return queryset
is_own = request.query_params.get('is_own', None)
if is_own is not None:
is_own = is_own.lower()
if is_own in ('true', 't', 'yes', 'y', '1'):
queryset = queryset.filter(user=request.user)
elif is_own in ('false', 'f', 'no', 'n', '0'):
queryset = queryset.exclude(user=request.user)
else:
raise exceptions.ParseError(_('Invalid value in filter %(filter)s') % {'filter': 'is_own'})
return queryset
class ExcludePastFilterBackend(filters.BaseFilterBackend):
"""
Exclude reservations in the past.
"""
def filter_queryset(self, request, queryset, view):
past = request.query_params.get('all', 'false')
past = BooleanField().to_internal_value(past)
if not past:
now = timezone.now()
return queryset.filter(end__gte=now)
return queryset
class ReservationFilterBackend(filters.BaseFilterBackend):
"""
Filter reservations by time.
"""
def filter_queryset(self, request, queryset, view):
params = request.query_params
times = {}
past = False
for name in ('start', 'end'):
if name not in params:
continue
# whenever date filtering is in use, include past reservations
past = True
try:
times[name] = arrow.get(params[name]).to('utc').datetime
except ParserError:
raise exceptions.ParseError("'%s' must be a timestamp in ISO 8601 format" % name)
is_detail_request = 'pk' in request.parser_context['kwargs']
if not past and not is_detail_request:
past = params.get('all', 'false')
past = BooleanField().to_internal_value(past)
if not past:
now = timezone.now()
queryset = queryset.filter(end__gte=now)
if times.get('start', None):
queryset = queryset.filter(end__gte=times['start'])
if times.get('end', None):
queryset = queryset.filter(begin__lte=times['end'])
return queryset
class NeedManualConfirmationFilterBackend(filters.BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
filter_value = request.query_params.get('need_manual_confirmation', None)
if filter_value is not None:
need_manual_confirmation = BooleanField().to_internal_value(filter_value)
return queryset.filter(resource__need_manual_confirmation=need_manual_confirmation)
return queryset
class StateFilterBackend(filters.BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
state = request.query_params.get('state', None)
if state:
queryset = queryset.filter(state__in=state.replace(' ', '').split(','))
return queryset
class CanApproveFilterBackend(filters.BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
filter_value = request.query_params.get('can_approve', None)
if filter_value:
queryset = queryset.filter(resource__need_manual_confirmation=True)
allowed_resources = Resource.objects.with_perm('can_approve_reservation', request.user)
can_approve = BooleanField().to_internal_value(filter_value)
if can_approve:
queryset = queryset.filter(resource__in=allowed_resources)
else:
queryset = queryset.exclude(resource__in=allowed_resources)
return queryset
class ReservationFilterSet(django_filters.rest_framework.FilterSet):
class Meta:
model = Reservation
fields = ('event_subject', 'host_name', 'reserver_name', 'resource_name', 'is_favorite_resource', 'unit')
@property
def qs(self):
qs = super().qs
user = self.request.user
query_params = set(self.request.query_params)
# if any of the extra field related filters are used, restrict results to reservations
# the user has right to see
if bool(query_params & set(RESERVATION_EXTRA_FIELDS)):
qs = qs.extra_fields_visible(user)
if 'has_catering_order' in query_params:
qs = qs.catering_orders_visible(user)
return qs
event_subject = django_filters.CharFilter(lookup_expr='icontains')
host_name = django_filters.CharFilter(lookup_expr='icontains')
reserver_name = django_filters.CharFilter(lookup_expr='icontains')
resource_name = django_filters.CharFilter(field_name='resource', lookup_expr='name__icontains')
is_favorite_resource = django_filters.BooleanFilter(method='filter_is_favorite_resource',
widget=DRFFilterBooleanWidget)
resource_group = django_filters.Filter(field_name='resource__groups__identifier', lookup_expr='in',
widget=django_filters.widgets.CSVWidget, distinct=True)
unit = django_filters.CharFilter(field_name='resource__unit_id')
has_catering_order = django_filters.BooleanFilter(method='filter_has_catering_order', widget=DRFFilterBooleanWidget)
resource = django_filters.Filter(lookup_expr='in', widget=django_filters.widgets.CSVWidget)
reserver_info_search = django_filters.CharFilter(method="filter_reserver_info_search")
def filter_is_favorite_resource(self, queryset, name, value):
user = self.request.user
if not user.is_authenticated:
return queryset.none() if value else queryset
filtering = {'resource__favorited_by': user}
return queryset.filter(**filtering) if value else queryset.exclude(**filtering)
def filter_has_catering_order(self, queryset, name, value):
return queryset.exclude(catering_orders__isnull=value)
def filter_reserver_info_search(self, queryset, name, value):
"""
A partial copy of rest_framework.filters.SearchFilter.filter_queryset.
Needed due to custom filters applied to queryset within this ReservationFilterSet.
Does not support comma separation of values, i.e. '?reserver_info_search=foo,bar' will
be considered as one string - 'foo,bar'.
"""
if not value:
return queryset
fields = ('user__first_name', 'user__last_name', 'user__email')
conditions = []
for field in fields:
conditions.append(Q(**{field + '__icontains': value}))
# assume that first_name and last_name were provided if empty space was found
if ' ' in value and value.count(' ') == 1:
name1, name2 = value.split()
filters = Q(
user__first_name__icontains=name1,
user__last_name__icontains=name2,
) | Q(
user__first_name__icontains=name2,
user__last_name__icontains=name1,
)
conditions.append(filters)
return queryset.filter(reduce(operator.or_, conditions))
class ReservationPermission(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.can_modify(request.user)
class ReservationExcelRenderer(renderers.BaseRenderer):
media_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
format = 'xlsx'
charset = None
render_style = 'binary'
def render(self, data, media_type=None, renderer_context=None):
if not renderer_context or renderer_context['response'].status_code == 404:
return bytes()
if renderer_context['view'].action == 'retrieve':
return generate_reservation_xlsx([data])
elif renderer_context['view'].action == 'list':
return generate_reservation_xlsx(data['results'])
else:
            raise NotAcceptable()
class ReservationCacheMixin:
def _preload_permissions(self):
units = set()
resource_groups = set()
resources = set()
checker = ObjectPermissionChecker(self.request.user)
for rv in self._page:
resources.add(rv.resource)
rv.resource._permission_checker = checker
for res in resources:
units.add(res.unit)
for g in res.groups.all():
resource_groups.add(g)
if units:
checker.prefetch_perms(units)
if resource_groups:
checker.prefetch_perms(resource_groups)
def _get_cache_context(self):
context = {}
set_list = ReservationMetadataSet.objects.all().prefetch_related('supported_fields', 'required_fields')
context['reservation_metadata_set_cache'] = {x.id: x for x in set_list}
self._preload_permissions()
return context
class ReservationViewSet(munigeo_api.GeoModelAPIView, viewsets.ModelViewSet, ReservationCacheMixin):
queryset = Reservation.objects.select_related('user', 'resource', 'resource__unit')\
.prefetch_related('catering_orders').prefetch_related('resource__groups').order_by('begin', 'resource__unit__name', 'resource__name')
if settings.RESPA_PAYMENTS_ENABLED:
queryset = queryset.prefetch_related('order', 'order__order_lines', 'order__order_lines__product')
filter_backends = (DjangoFilterBackend, filters.OrderingFilter, UserFilterBackend, ReservationFilterBackend,
NeedManualConfirmationFilterBackend, StateFilterBackend, CanApproveFilterBackend)
filterset_class = ReservationFilterSet
permission_classes = (permissions.IsAuthenticatedOrReadOnly, ReservationPermission)
renderer_classes = (renderers.JSONRenderer, ResourcesBrowsableAPIRenderer, ReservationExcelRenderer)
pagination_class = ReservationPagination
authentication_classes = (
list(drf_settings.DEFAULT_AUTHENTICATION_CLASSES) +
[TokenAuthentication, SessionAuthentication])
ordering_fields = ('begin',)
def get_serializer_class(self):
if settings.RESPA_PAYMENTS_ENABLED:
from payments.api.reservation import PaymentsReservationSerializer # noqa
return PaymentsReservationSerializer
else:
return ReservationSerializer
def get_serializer(self, *args, **kwargs):
if 'data' not in kwargs and len(args) == 1:
# It's a read operation
instance_or_page = args[0]
if isinstance(instance_or_page, Reservation):
self._page = [instance_or_page]
else:
self._page = instance_or_page
return super().get_serializer(*args, **kwargs)
def get_serializer_context(self, *args, **kwargs):
context = super().get_serializer_context(*args, **kwargs)
if hasattr(self, '_page'):
context.update(self._get_cache_context())
request_user = self.request.user
if request_user.is_authenticated:
prefetched_user = get_user_model().objects.prefetch_related('unit_authorizations', 'unit_group_authorizations__subject__members').\
get(pk=request_user.pk)
context['prefetched_user'] = prefetched_user
return context
def get_queryset(self):
queryset = super().get_queryset()
user = self.request.user
# General Administrators can see all reservations
if is_general_admin(user):
return queryset
# normal users can see only their own reservations and reservations that are confirmed, requested or
# waiting for payment
filters = Q(state__in=(Reservation.CONFIRMED, Reservation.REQUESTED, Reservation.WAITING_FOR_PAYMENT))
if user.is_authenticated:
filters |= Q(user=user)
queryset = queryset.filter(filters)
queryset = queryset.filter(resource__in=Resource.objects.visible_for(user))
return queryset
def perform_create(self, serializer):
override_data = {'created_by': self.request.user, 'modified_by': self.request.user}
if 'user' not in serializer.validated_data:
override_data['user'] = self.request.user
override_data['state'] = Reservation.CREATED
instance = serializer.save(**override_data)
resource = serializer.validated_data['resource']
if resource.need_manual_confirmation and not resource.can_bypass_manual_confirmation(self.request.user):
new_state = Reservation.REQUESTED
else:
if instance.get_order():
new_state = Reservation.WAITING_FOR_PAYMENT
else:
new_state = Reservation.CONFIRMED
instance.set_state(new_state, self.request.user)
def perform_destroy(self, instance):
instance.set_state(Reservation.CANCELLED, self.request.user)
def list(self, request, *args, **kwargs):
response = super().list(request, *args, **kwargs)
if request.accepted_renderer.format == 'xlsx':
response['Content-Disposition'] = 'attachment; filename={}.xlsx'.format(_('reservations'))
return response
def retrieve(self, request, *args, **kwargs):
response = super().retrieve(request, *args, **kwargs)
if request.accepted_renderer.format == 'xlsx':
response['Content-Disposition'] = 'attachment; filename={}-{}.xlsx'.format(_('reservation'), kwargs['pk'])
return response
class ReservationCancelReasonCategoryViewSet(viewsets.ReadOnlyModelViewSet):
queryset = ReservationCancelReasonCategory.objects.all()
filter_backends = (DjangoFilterBackend,)
serializer_class = ReservationCancelReasonCategorySerializer
filterset_fields = ['reservation_type']
pagination_class = None
register_view(ReservationViewSet, 'reservation')
register_view(ReservationCancelReasonCategoryViewSet, 'cancel_reason_category')
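# Illustrative only: example query strings accepted by the filter backends and
# filterset defined above (the /v1/ prefix and the concrete parameter values
# are assumptions, not defined in this module):
#   GET /v1/reservation/?is_own=true
#   GET /v1/reservation/?start=2020-01-01T00:00:00Z&end=2020-01-02T00:00:00Z&all=true
#   GET /v1/reservation/?state=confirmed,requested
#   GET /v1/reservation/?need_manual_confirmation=true&can_approve=true
#   GET /v1/reservation/?reserver_info_search=John%20Doe&resource_name=sauna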
|
the-stack_0_3429 | #!/usr/bin/env python
from pyscf import gto, scf, dft
from pyscf.prop import hfc
mol = gto.M(atom='''
C 0 0 0
N 0 0 1.1747
''',
basis='ccpvdz', spin=1, charge=0, verbose=3)
mf = scf.UHF(mol).run()
gobj = hfc.uhf.HFC(mf).set(verbose=4)
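# Assumed meaning of the HFC options set below (hedged reading of the pyscf
# property module, not taken from its documentation):
#   sso - include the two-electron spin-same-orbit contribution
#   soo - include the two-electron spin-other-orbit contribution
#   so_eff_charge - if True, approximate spin-orbit integrals with an
#                   effective-nuclear-charge one-electron model instead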
gobj.sso = True
gobj.soo = True
gobj.so_eff_charge = False
gobj.kernel()
|
the-stack_0_3431 | from orbit_fits import *
def ref_frame():
"""Print properties of the reference frame"""
print(gc_frame)
def potentials():
"""Print properties of the gravitational potentials used"""
pot = [ham, ham_bovy, ham_heavy]
name = ['fiducial', 'bovy', 'heavy']
#pos = np.array([[0, 0, 25], [0,0,200]]).T * u.kpc
pos = np.array([[25, 0, 0], [200,0,0]]).T * u.kpc
mass = np.zeros((3,2)) * u.Msun
for e, p in enumerate(pot):
print(name[e])
# potential parameters
keys = p.potential.parameters.keys()
for k in keys:
print(k, p.potential.parameters[k])
# enclosed mass
mass[e] = p.potential.mass_enclosed(pos)
print(mass[e])
print(mass[0])
print(mass[1]/mass[0])
print(mass[2]/mass[0])
def plot_enclosed_mass():
"""Plot the ratio of enclosed mass for the adopted potentials"""
pot = [ham, ham_bovy, ham_heavy]
name = ['Fiducial', 'MWPotential2014', 'Price-Whelan & Bonaca (2018)']
colors = ['k', 'tab:blue', 'tab:red']
pos = np.zeros((3,100)) * u.kpc
pos[0] = np.logspace(np.log10(20./100.), np.log10(20*10.), pos.shape[1]) * u.kpc
mass = np.zeros((3,100))
for e, p in enumerate(pot):
mass[e] = p.potential.mass_enclosed(pos)
plt.close()
plt.figure(figsize=(8,6))
for i in range(3):
plt.plot(pos[0], mass[i]/mass[0], '-', color=colors[i], label=name[i])
plt.axvline(25, color='k', ls=':')
plt.legend(fontsize='small', loc=0)
plt.ylim(0.7, 1.3)
plt.xlim(0,200)
plt.xlabel('r [kpc]')
plt.ylabel('M(<r) / M$_{fid}$(<r)')
plt.tight_layout()
plt.savefig('../plots/response/enclosed_mass_potentials.png')
|
the-stack_0_3433 | # -*- coding: utf-8 -*-
# Copyright (c) St. Anne's University Hospital in Brno. International Clinical
# Research Center, Biomedical Engineering. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# Std imports
# Third pary imports
import numpy as np
# Local imports
from ..utils.method import Method
def compute_lincorr(sig, lag=0, lag_step=0):
"""
Linear correlation (Pearson's coefficient) between two time series
When lag and lag_step is not 0, shifts the sig[1] from negative
to positive lag and takes the max correlation (best fit)
Parameters
----------
sig: np.array
2D numpy array of shape (signals, samples), time series (int, float)
lag: int
negative and positive shift of time series in samples
lag_step: int
step of shift
Returns
-------
lincorr: list
maximum linear correlation in shift
tau: float
shift of maximum correlation in samples,
value in range <-lag,+lag> (float)
tau<0: sig[1] -> sig[0]
tau>0: sig[0] -> sig[1]
Example
-------
lincorr,tau = compute_lincorr(sig, 200, 20)
"""
    if not isinstance(sig, np.ndarray):
        raise TypeError("Signals have to be passed in a numpy array!")
if lag == 0:
lag_step = 1
nstep_lag = int(lag * 2 / lag_step)
sig1_w = sig[0]
sig2_w = sig[1]
sig1_wl = sig1_w[lag:len(sig1_w) - lag]
lincorr = []
for i in range(0, nstep_lag + 1):
ind1 = i * lag_step
ind2 = ind1 + len(sig1_wl)
sig2_wl = sig2_w[ind1:ind2]
corr_val = np.corrcoef(sig1_wl, sig2_wl)
lincorr.append(corr_val[0][1])
return np.max(lincorr), lincorr.index(max(lincorr))
class LinearCorrelation(Method):
algorithm = 'LINEAR_CORRELATION'
algorithm_type = 'bivariate'
version = '1.0.0'
dtype = [('max_corr', 'float32'),
('tau', 'float32')]
def __init__(self, **kwargs):
"""
Linear correlation (Pearson's coefficient) between two time series
When win and win_step is not 0, calculates evolution of correlation
When win>len(sig) or win<=0, calculates only one corr coef
When lag and lag_step is not 0, shifts the sig[1] from negative
to positive lag and takes the max correlation (best fit)
Parameters
----------
lag: int
negative and positive shift of time series in samples
lag_step: int
step of shift
"""
super().__init__(compute_lincorr, **kwargs)
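# Minimal usage sketch (not part of the library; synthetic signals for illustration):
if __name__ == '__main__':
    fs = 500
    t = np.arange(0, 2, 1 / fs)
    x = np.sin(2 * np.pi * 5 * t)
    y = np.roll(x, 10)  # delayed copy: y lags x by 10 samples
    max_corr, tau_idx = compute_lincorr(np.vstack([x, y]), lag=20, lag_step=1)
    print(max_corr, tau_idx)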
|
the-stack_0_3434 | #!/usr/bin/env python3
"""
Tests of ktrain text classification flows
"""
import testenv
import numpy as np
from unittest import TestCase, main, skip
import ktrain
from ktrain.imports import ACC_NAME, VAL_ACC_NAME
from ktrain import utils as U
Sequential = ktrain.imports.keras.models.Sequential
Dense = ktrain.imports.keras.layers.Dense
Embedding = ktrain.imports.keras.layers.Embedding
GlobalAveragePooling1D = ktrain.imports.keras.layers.GlobalAveragePooling1D
def synthetic_multilabel():
# data
X = [[1,0,0,0,0,0,0],
[1,2,0,0,0,0,0],
[3,0,0,0,0,0,0],
[3,4,0,0,0,0,0],
[2,0,0,0,0,0,0],
[3,0,0,0,0,0,0],
[4,0,0,0,0,0,0],
[2,3,0,0,0,0,0],
[1,2,3,0,0,0,0],
[1,2,3,4,0,0,0],
[0,0,0,0,0,0,0],
[1,1,2,3,0,0,0],
[2,3,3,4,0,0,0],
[4,4,1,1,2,0,0],
[1,2,3,3,3,3,3],
[2,4,2,4,2,0,0],
[1,3,3,3,0,0,0],
[4,4,0,0,0,0,0],
[3,3,0,0,0,0,0],
[1,1,4,0,0,0,0]]
Y = [[1,0,0,0],
[1,1,0,0],
[0,0,1,0],
[0,0,1,1],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1],
[0,1,1,0],
[1,1,1,0],
[1,1,1,1],
[0,0,0,0],
[1,1,1,0],
[0,1,1,1],
[1,1,0,1],
[1,1,1,0],
[0,1,0,0],
[1,0,1,0],
[0,0,0,1],
[0,0,1,0],
[1,0,0,1]]
X = np.array(X)
Y = np.array(Y)
return (X, Y)
class TestMultilabel(TestCase):
def test_multilabel(self):
X, Y = synthetic_multilabel()
self.assertTrue(U.is_multilabel( (X,Y)))
MAXLEN = 7
MAXFEATURES = 4
NUM_CLASSES = 4
model = Sequential()
model.add(Embedding(MAXFEATURES+1,
50,
input_length=MAXLEN))
model.add(GlobalAveragePooling1D())
model.add(Dense(NUM_CLASSES, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
learner = ktrain.get_learner(model,
train_data=(X, Y),
val_data=(X, Y),
batch_size=1)
learner.lr_find(max_epochs=5) # use max_epochs until TF 2.4
# use loss instead of accuracy due to: https://github.com/tensorflow/tensorflow/issues/41114
hist = learner.fit(0.001, 200)
learner.view_top_losses(n=5)
learner.validate()
#final_acc = hist.history[VAL_ACC_NAME][-1]
#print('final_accuracy:%s' % (final_acc))
#self.assertGreater(final_acc, 0.97)
final_loss = hist.history['val_loss'][-1]
print('final_loss:%s' % (final_loss))
self.assertLess(final_loss, 0.05)
if __name__ == "__main__":
main()
|
the-stack_0_3435 | #!/usr/bin/env python3
import pytest
import sys
import fileinput
from os.path import splitext, abspath
F_NAME = splitext(abspath(__file__))[0][:-1]
def answer(lines):
tot = 0
for line in map(str.strip, lines):
print(line)
l, w, h = map(int, line.split('x'))
sides = []
sides.append(l*w)
sides.append(w*h)
sides.append(h*l)
print(sides)
tot += 2*sum(sides) + min(sides) #add smallest side as slack
return tot
@pytest.mark.parametrize( "inp,outp", [
(['2x3x4'], 58),
(['1x1x10'], 43),
])
def test_answer(inp, outp):
assert answer(inp) == outp
if __name__ == '__main__':
print(answer(fileinput.input(F_NAME + '.input')))
|
the-stack_0_3437 | #!/usr/bin/env python
import rospy
import math
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Float32
from kobuki_msgs.msg import BumperEvent
from kobuki_msgs.msg import CliffEvent
import sys, select, termios, tty
range_center = Float32()
range_left = Float32()
range_right = Float32()
range_left_last = Float32()
range_right_last = Float32()
turnAngle = 17
backUpDistance = 6
speed = 0.5
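# Note: turnAngle and backUpDistance are thresholds in control-loop ticks
# (the loop in move() runs at 10 Hz), not degrees or meters: the robot backs
# up for roughly 0.6 s and turns for roughly 1.7 s at the configured speed.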
def callback(data):
#rospy.loginfo("Center: %f", data.ranges[359])
#rospy.loginfo("Left: %f", data.ranges[180])
#rospy.loginfo("Right: %f", data.ranges[540])
range_center.data = data.ranges[359]
range_left.data = data.ranges[180]
range_right.data = data.ranges[540]
def processBump(data):
print ("Bump!")
global bp
global which_bp
if (data.state == BumperEvent.PRESSED):
bp = True
else:
bp = False
rospy.loginfo("Bumper Event")
rospy.loginfo(data.bumper)
which_bp = data.bumper
def processCliff(data):
print ("Cliff!")
global cf
global which_cf
global dis
if (data.state == CliffEvent.CLIFF):
cf = True
else:
cf = False
rospy.loginfo("Cliff Event")
rospy.loginfo(data.sensor)
which_cf = data.sensor
dis = data.bottom
def set_normal_speed():
twist.linear.x = speed
twist.linear.y, twist.linear.z = 0, 0
twist.angular.x, twist.angular.y = 0, 0
twist.angular.z = 0
def turn_left():
twist.linear.x = 0
twist.linear.y, twist.linear.z = 0, 0
twist.angular.x, twist.angular.y = 0, 0
twist.angular.z = 1 * speed
def turn_right():
twist.linear.x = 0
twist.linear.y, twist.linear.z = 0, 0
twist.angular.x, twist.angular.y = 0, 0
twist.angular.z = -1 * speed
def set_backup_speed():
twist.linear.x = -1 * speed
twist.linear.y, twist.linear.z = 0, 0
twist.angular.x, twist.angular.y = 0, 0
twist.angular.z = 0
# mode = {'forward', 'backup', 'turnLeft', 'turnRight'}
def move():
pub = rospy.Publisher('mobile_base/commands/velocity', Twist, queue_size = 20)
sub1 = rospy.Subscriber("scan", LaserScan, callback)
sub2 = rospy.Subscriber('mobile_base/events/bumper', BumperEvent, processBump)
sub3 = rospy.Subscriber('mobile_base/events/cliff', CliffEvent, processCliff)
rospy.init_node('test', anonymous = True)
rate = rospy.Rate(10) # 10HZ
global twist, mode, cf, which_cf, which_bp, dis, bp
cf = False
which_cf = 0
which_bp = 0
twist = Twist()
left, right, bp = False, False, False
mode = 'Forward'
BackupCounter = 0
TurnRightCounter, TurnLeftCounter = 0, 0
ignoreCliff = False
while not rospy.is_shutdown():
if (mode == 'Forward'):
set_normal_speed()
elif (mode == 'Backup'):
ignoreCliff = True
set_backup_speed()
BackupCounter += 1
elif (mode == 'TurnLeft'):
ignoreCliff = False
left = False
turn_left()
TurnLeftCounter += 1
elif (mode == 'TurnRight'):
ignoreCliff = False
right = False
turn_right()
TurnRightCounter += 1
pub.publish(twist)
if (left and BackupCounter > backUpDistance):
BackupCounter = 0
mode = 'TurnLeft'
if (right and BackupCounter > backUpDistance):
BackupCounter = 0
mode = 'TurnRight'
if (TurnRightCounter > turnAngle):
TurnRightCounter = 0
mode = 'Forward'
if (TurnLeftCounter > turnAngle):
TurnLeftCounter = 0
mode = 'Forward'
# if (range_center.data > 1 and not mode == 'Backup' and not mode == 'TurnLeft' and not mode == 'TurnRight'):
# if (range_left.data < 0.2):
# mode = 'Backup'
# if (not right and not left):
# BackupCounter = 0
# right, left = True, False
# elif (range_right.data < 0.2):
# mode = 'Backup'
# if (not right and not left):
# BackupCounter = 0
# right, left = False, True
# elif (range_center.data < 1 and range_center.data > 0.001):
# mode = 'Backup'
# if (not right and not left):
# BackupCounter = 0
# right, left = False, True
if (not ignoreCliff and cf and which_cf == 0):
if (dis < 50000):
which_cf = 0
mode = 'Backup'
print("left cliff")
if (not right and not left):
BackupCounter = 0
right, left = True, False
elif (not ignoreCliff and cf and (which_cf == 2 or which_cf == 1)):
if (dis < 50000):
which_cf = 0
print("right cliff")
mode = 'Backup'
if (not right and not left):
BackupCounter = 0
right, left = False, True
if (bp and which_bp == 0):
which_bp = 0
mode = 'Backup'
print("left bump")
if (not right and not left):
BackupCounter = 0
right, left = True, False
elif (bp and (which_bp == 2 or which_bp == 1)):
which_bp = 0
print("right bump")
mode = 'Backup'
if (not right and not left):
BackupCounter = 0
right, left = False, True
print(mode)
rate.sleep()
if __name__ == '__main__':
try:
move()
except rospy.ROSInterruptException:
pass
|
the-stack_0_3438 | import numpy as np
import pandas as pd
import os
import sqlalchemy
from time import sleep
import pickle
from numbers import Number
def serialize(x):
if not isinstance(x, (str, Number)):
return pickle.dumps(x)
else:
return x
def unserialize(x):
if not isinstance(x, (str, Number)):
return pickle.loads(x)
else:
return x
class ScanStoreSQL(object):
"""Generic SQLite parameter scan store."""
def __init__(self, scanname, scanid=1, datadir=None):
self.scanname = scanname
        self.scanid = scanid
if datadir is None:
datadir = os.getcwd()
self.datadir = datadir
_filename = '{}_{}.sqlite'.format(scanname, scanid)
self.filename = os.path.join(self.datadir, _filename)
@property
def engine(self):
return sqlalchemy.create_engine('sqlite:///' + self.filename)
def store_df(self, key, df, append=True, **kwargs):
"""Store a Pandas DataFrame in a table."""
if_exists = 'append' if append else 'replace'
while True:
try:
df.applymap(serialize).to_sql(key, self.engine, if_exists=if_exists)
break
except sqlalchemy.exc.OperationalError:
sleep(0.001)
def store_row(self, key, array, index=0, columns=None, append=True, **kwargs):
"""Store a numpy array or a list in a table."""
df = pd.DataFrame([array], columns=columns, index=(index,))
self.store_df(key, df, append=append)
def store_dict(self, key, dict, index=0, append=True, **kwargs):
"""Store a dictionary in a table."""
self.store_row(key,
array=list(dict.values()),
index=index,
columns=list(dict.keys()),
**kwargs)
def store_array(self, key, array, index=None, columns=None, append=True, **kwargs):
"""Store a numpy array in a table."""
df = pd.DataFrame(array, columns=columns, index=index, dtype=complex)
self.store_df(key, df, append=append)
def get(self, key):
"""Return a DataFrame for a given key (table name)."""
while True:
try:
with self.engine.connect() as conn, conn.begin():
data = pd.read_sql_table(key, conn)
break
except sqlalchemy.exc.OperationalError:
sleep(0.001)
data = data.set_index('index')
return data.applymap(unserialize)
def drop_table(self, key):
"""Delete (drop) a table"""
pd.io.sql.execute('DROP TABLE {};'.format(key), self.engine)
def read_sql(self, sql):
"""Read SQL query into a data frame."""
return pd.io.sql.read_sql(sql, self.engine)
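# Minimal usage sketch (illustrative; the file and table names are arbitrary):
if __name__ == '__main__':
    store = ScanStoreSQL('demo_scan', scanid=1)
    store.store_dict('results', {'param': 0.5, 'energy': -1.23}, index=0)
    print(store.get('results'))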
|
the-stack_0_3440 | import socket
host = '127.0.0.1'
port = 80
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client.sendto(b'AAABBBCCC', (host, port))
data, addr = client.recvfrom(4096)
print(data)
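# For a quick local test, a matching UDP echo server could look like the
# sketch below (run in another process first; binding port 80 normally needs
# elevated privileges, so a high port may be more convenient):
#   import socket
#   server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   server.bind(('127.0.0.1', 80))
#   data, addr = server.recvfrom(4096)
#   server.sendto(data, addr)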
|
the-stack_0_3441 | #!/usr/bin/env python3
import operator
from collections import Counter
def read_pt_br_words(filename='/usr/share/dict/brazilian'):
with open(filename) as f:
lines = list(f)
words = [line[:-1] for line in lines]
print(f"Leu {len(words)} palavras em pt-BR")
return words
def make_sbk_table(words):
c = Counter()
for word in words:
c.update(word)
chars = sorted(c.keys())
print(f"Tabela SBK ({len(chars)} caracteres): {''.join(chars)}")
return {ch: i for i, ch in enumerate(chars)}
def _sbk(text, char_to_int=ord):
s = 0
for ch in text:
s += char_to_int(ch)
return {'text': text, 'sum': s, 'sbk': s % 1000}
def _show_sbk(text, char_to_int=ord):
info = _sbk(text, char_to_int)
    print(f'Text: {info["text"]!r}')
    print(f'Sum: {info["sum"]}')
print(f'SBK: {info["sbk"]:03d}')
print()
tweet = """Uma função hash bem simples para um trecho de texto é
"Atribua um valor pra cada caractere, some todos, e pegue os últimos 3 dígitos"
Vamos chamar essa função de SBK (Soma do Bruno Kim). Ela produz, pra qualquer texto, um número entre 000 e 999."""
def main():
words = read_pt_br_words()
table = make_sbk_table(words)
def char_to_int(ch):
if ch in table:
return table[ch] + 1
return ord(ch) + len(table) + 1
def sbk(text):
return _sbk(text, char_to_int)
def show_sbk(text):
return _show_sbk(text, char_to_int)
print()
show_sbk(tweet)
show_sbk("patos")
show_sbk("nadam")
show_sbk("debaixo")
show_sbk("d'água")
from collections import Counter
sbk_infos = [sbk(word) for word in words]
sums = Counter(info['sum'] for info in sbk_infos)
hashes = Counter(info['sbk'] for info in sbk_infos)
import csv
with open('sbk-sum-freq.csv', 'w') as f:
w = csv.writer(f)
w.writerow(['sum', 'freq'])
for i in range(max(sums)+1):
w.writerow([i, sums[i]])
with open('sbk-freq.csv', 'w') as f:
w = csv.writer(f)
w.writerow(['hash', 'freq'])
for i in range(1000):
w.writerow([i, hashes[i]])
cum = 0
by_freq = sorted(hashes.items(), reverse=True, key=lambda entry: entry[1])
for i, (h, freq) in enumerate(by_freq):
#print(f"{h:03d}: {freq} (cum={cum:06d})")
cum += freq
if cum > len(words)/2:
print(f"{i} hashes ocupam >50% de todas as palavras")
break
print()
print("SBK:")
print(f" patos: {sbk('patos')['sbk']:03d}")
print(f" patas: {sbk('patas')['sbk']:03d}")
print(f" pat: {sbk('pat')['sbk']:03d}")
print(f" patoso: {sbk('patoso')['sbk']:03d}")
words_201 = [word for word, info
in zip(words, sbk_infos)
if info['sbk'] == 201]
smallest_201 = sorted(words_201, key = lambda s: len(s))[:200]
print(smallest_201)
import hashlib
def sha256(text):
m = hashlib.sha256()
m.update(text.encode("utf-8"))
x = 0
for b in m.digest():
x = 256*x + b
return x
print()
print("SHA-256:")
print(f" patos: {sha256('patos'):076d}")
print(f" patas: {sha256('patas'):076d}")
print(f" pat: {sha256('pat'):076d}")
print(f" patoso: {sha256('patoso'):076d}")
if __name__ == '__main__':
main()
|
the-stack_0_3442 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup
name_ = 'lindh-jsonobject'
github_name = 'jsonobject'
version_ = '1.4.0'
packages_ = [
'lindh.jsonobject',
]
with open("README.rst", "r") as fh:
long_description = fh.read()
classifiers = [
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
]
setup(
name=name_,
version=version_,
author='Johan Egneblad',
author_email='[email protected]',
description='JSON serializable objects',
long_description=long_description,
long_description_content_type="text/x-rst",
license="MIT",
url='https://github.com/eblade/'+github_name,
download_url=('https://github.com/eblade/%s/archive/v%s.tar.gz'
% (github_name, version_)),
packages=packages_,
install_requires=[],
classifiers=classifiers,
)
|
the-stack_0_3443 | import pandas as pd
def get_density(s, T):
try:
s = s.replace('%20', '+')
    except AttributeError:
pass
density_url = 'http://ddbonline.ddbst.de/DIPPR105DensityCalculation/DIPPR105CalculationCGI.exe?component=' + s
    if s == 'Hexane':
        # The DDB page lookup is skipped for hexane; return a fixed liquid
        # density (about 655 kg/m^3) directly instead of a function object.
        return 655.0
    density = pd.read_html(density_url)[6]
    density = density.drop(density.index[0:3]).drop('No.', axis=1)
    A = float(density['A'])
    B = float(density['B'])
    C = float(density['C'])
    D = float(density['D'])
    Tmin, Tmax = float(density['Tmin']), float(density['Tmax'])  # in K
    def rho(T):
        # DIPPR equation 105 for saturated liquid density
        return A / B ** (1 + (1 - T / C) ** D)
    return rho(T)
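# Example usage (illustrative; needs network access to ddbst.de and pandas'
# optional HTML-parsing dependency, e.g. lxml):
#   rho_ethanol = get_density('Ethanol', 298.15)  # DIPPR 105 fit, ~kg/m^3
#   rho_hexane = get_density('Hexane', 298.15)    # fixed 655.0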
|
the-stack_0_3444 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from typing import Optional, Sequence
from hydra.core.utils import JobReturn
from hydra.plugins.launcher import Launcher
from hydra.types import HydraContext, TaskFunction
from omegaconf import DictConfig
from hydra_plugins.hydra_ray_launcher._config import ( # type: ignore
RayAWSConf,
RsyncConf,
)
log = logging.getLogger(__name__)
class RayAWSLauncher(Launcher):
def __init__(
self,
env_setup: DictConfig,
ray: RayAWSConf,
stop_cluster: bool,
sync_up: RsyncConf,
sync_down: RsyncConf,
) -> None:
self.ray_cfg = ray
self.stop_cluster = stop_cluster
self.sync_up = sync_up
self.sync_down = sync_down
self.config: Optional[DictConfig] = None
self.hydra_context: Optional[HydraContext] = None
self.task_function: Optional[TaskFunction] = None
self.ray_yaml_path: Optional[str] = None
self.env_setup = env_setup
def setup(
self,
*,
hydra_context: HydraContext,
task_function: TaskFunction,
config: DictConfig,
) -> None:
self.config = config
self.hydra_context = hydra_context
self.task_function = task_function
def launch(
self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int
) -> Sequence[JobReturn]:
from . import _core_aws
return _core_aws.launch(
launcher=self, job_overrides=job_overrides, initial_job_idx=initial_job_idx
)
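# Illustrative only: with this plugin installed, the launcher is typically
# selected from the command line of a Hydra application, e.g.
#   python my_app.py --multirun hydra/launcher=ray_aws task=1,2,3
# (the application name and override values here are assumptions).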
|
the-stack_0_3447 | # -*- coding: utf-8 -*-
import logging
import operator
from pymongo.errors import DuplicateKeyError, BulkWriteError
import pymongo
from anytree import RenderTree, Node, search, resolver
from anytree.exporter import DictExporter
from scout.exceptions import IntegrityError
LOG = logging.getLogger(__name__)
class HpoHandler(object):
def load_hpo_term(self, hpo_obj):
"""Add a hpo object
Arguments:
hpo_obj(dict)
"""
LOG.debug("Loading hpo term %s into database", hpo_obj["_id"])
try:
self.hpo_term_collection.insert_one(hpo_obj)
except DuplicateKeyError as err:
raise IntegrityError("Hpo term %s already exists in database".format(hpo_obj["_id"]))
LOG.debug("Hpo term saved")
def load_hpo_bulk(self, hpo_bulk):
"""Add a hpo object
Arguments:
hpo_bulk(list(scout.models.HpoTerm))
Returns:
result: pymongo bulkwrite result
"""
LOG.debug("Loading hpo bulk")
try:
result = self.hpo_term_collection.insert_many(hpo_bulk)
except (DuplicateKeyError, BulkWriteError) as err:
raise IntegrityError(err)
return result
def hpo_term(self, hpo_id):
"""Fetch a hpo term
Args:
hpo_id(str)
Returns:
hpo_obj(dict)
"""
LOG.debug("Fetching hpo term %s", hpo_id)
return self.hpo_term_collection.find_one({"_id": hpo_id})
def hpo_terms(self, query=None, hpo_term=None, text=None, limit=None):
"""Return all HPO terms
If a query is sent hpo_terms will try to match with regex on term or
description.
Args:
query(str): Part of a hpoterm or description
hpo_term(str): Search for a specific hpo term
limit(int): the number of desired results
Returns:
result(pymongo.Cursor): A cursor with hpo terms
"""
query_dict = {}
search_term = None
if query:
query_dict = {
"$or": [
{"hpo_id": {"$regex": query, "$options": "i"}},
{"description": {"$regex": query, "$options": "i"}},
]
}
search_term = query
elif text:
new_string = ""
for i, word in enumerate(text.split(" ")):
if i == 0:
new_string += word
else:
new_string += ' "{0}"'.format(word)
LOG.info("Search HPO terms with %s", new_string)
query_dict["$text"] = {"$search": new_string}
search_term = text
elif hpo_term:
query_dict["hpo_id"] = hpo_term
search_term = hpo_term
limit = limit or 0
res = (
self.hpo_term_collection.find(query_dict)
.limit(limit)
.sort("hpo_number", pymongo.ASCENDING)
)
return res
def generate_hpo_gene_list(self, *hpo_terms):
"""Generate a sorted list with namedtuples of hpogenes
Each namedtuple of the list looks like (hgnc_id, count)
Args:
hpo_terms(iterable(str))
Returns:
hpo_genes(list(HpoGene))
"""
genes = {}
for term in hpo_terms:
hpo_obj = self.hpo_term(term)
if hpo_obj:
for hgnc_id in hpo_obj["genes"]:
if hgnc_id in genes:
genes[hgnc_id] += 1
else:
genes[hgnc_id] = 1
else:
LOG.warning("Term %s could not be found", term)
sorted_genes = sorted(genes.items(), key=operator.itemgetter(1), reverse=True)
return sorted_genes
def organize_tree(self, all_terms, root):
"""Organizes a set of Tree node objects into a tree, according to their ancestors and children
Args:
all_terms(dict): a dictionary with "term_name" as keys and term_dict as values
root(anytree.Node)
Returns
root(anytree.Node): the updated root node of the tree
"""
# Move tree nodes in the right position according to the ontology
for key, term in all_terms.items():
ancestors = term["ancestors"]
if len(ancestors) == 0:
continue
for ancestor in ancestors:
ancestor_node = search.find_by_attr(root, ancestor)
if ancestor_node is None: # It's probably the term on the top
continue
node = search.find_by_attr(root, key)
node.parent = ancestor_node
return root
def build_phenotype_tree(self, hpo_id):
"""Creates an HPO Tree based on one or more given ancestors
Args:
hpo_id(str): an HPO term
Returns:
tree_dict(dict): a tree of all HPO children of the given term, as a dictionary
"""
root = Node(id="root", name="root", parent=None)
all_terms = {}
unique_terms = set()
def _hpo_terms_list(hpo_ids):
for term_id in hpo_ids:
term_obj = self.hpo_term(term_id)
if term_obj is None:
continue
# sort term children by ascending HPO number
children = sorted(
term_obj["children"], key=lambda x: int("".join([i for i in x if i.isdigit()]))
)
term_obj["children"] = children
all_terms[term_id] = term_obj
if term_id not in unique_terms:
node = Node(term_id, parent=root, description=term_obj["description"])
unique_terms.add(term_id)
# recursive loop to collect children, children of children and so on
_hpo_terms_list(term_obj["children"])
# compile a list of all HPO term objects to include in the submodel
_hpo_terms_list([hpo_id]) # trigger the recursive loop to collect nested HPO terms
# rearrange tree according to the HPO ontology
root = self.organize_tree(all_terms, root)
node_resolver = resolver.Resolver("name")
# Extract a tree structure having the chosen HPO term (hpo_id) as ancestor of all the children terms
term_node = node_resolver.get(root, hpo_id)
LOG.info(f"Built ontology for HPO term:{hpo_id}:\n{RenderTree(term_node)}")
exporter = DictExporter()
# Export this tree structure as dictionary, so that can be saved in database
tree_dict = exporter.export(term_node)
return tree_dict
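# Illustrative only (assumes `adapter` is a store built on this mixin with a
# populated hpo_term_collection; field values are made up for the sketch):
#   adapter.load_hpo_term({"_id": "HP:0000118", "description": "Phenotypic abnormality",
#                          "children": [], "ancestors": [], "genes": []})
#   matches = adapter.hpo_terms(query="abnormal", limit=10)
#   tree = adapter.build_phenotype_tree("HP:0000118")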
|
the-stack_0_3448 | from __future__ import unicode_literals, absolute_import
import io
import os
import re
import abc
import csv
import sys
import zipp
import operator
import functools
import itertools
import collections
from ._compat import (
install,
NullFinder,
ConfigParser,
suppress,
map,
FileNotFoundError,
IsADirectoryError,
NotADirectoryError,
PermissionError,
pathlib,
PYPY_OPEN_BUG,
ModuleNotFoundError,
MetaPathFinder,
email_message_from_string,
ensure_is_path,
)
from importlib import import_module
from itertools import starmap
__metaclass__ = type
__all__ = [
'Distribution',
'DistributionFinder',
'PackageNotFoundError',
'distribution',
'distributions',
'entry_points',
'files',
'metadata',
'requires',
'version',
]
class PackageNotFoundError(ModuleNotFoundError):
"""The package was not found."""
class EntryPoint(collections.namedtuple('EntryPointBase', 'name value group')):
"""An entry point as defined by Python packaging conventions.
See `the packaging docs on entry points
<https://packaging.python.org/specifications/entry-points/>`_
for more information.
"""
pattern = re.compile(
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
"""
A regular expression describing the syntax for an entry point,
which might look like:
- module
- package.module
- package.module:attribute
- package.module:object.attribute
- package.module:attr [extra1, extra2]
Other combinations are possible as well.
The expression is lenient about whitespace around the ':',
following the attr, and following any extras.
"""
def load(self):
"""Load the entry point from its definition. If only a module
is indicated by the value, return that module. Otherwise,
return the named object.
"""
match = self.pattern.match(self.value)
module = import_module(match.group('module'))
attrs = filter(None, (match.group('attr') or '').split('.'))
return functools.reduce(getattr, attrs, module)
@property
def extras(self):
match = self.pattern.match(self.value)
return list(re.finditer(r'\w+', match.group('extras') or ''))
@classmethod
def _from_config(cls, config):
return [
cls(name, value, group)
for group in config.sections()
for name, value in config.items(group)
]
@classmethod
def _from_text(cls, text):
config = ConfigParser(delimiters='=')
# case sensitive: https://stackoverflow.com/q/1611799/812183
config.optionxform = str
try:
config.read_string(text)
except AttributeError: # pragma: nocover
# Python 2 has no read_string
config.readfp(io.StringIO(text))
return EntryPoint._from_config(config)
def __iter__(self):
"""
Supply iter so one may construct dicts of EntryPoints easily.
"""
return iter((self.name, self))
class PackagePath(pathlib.PurePosixPath):
"""A reference to a path in a package"""
def read_text(self, encoding='utf-8'):
with self.locate().open(encoding=encoding) as stream:
return stream.read()
def read_binary(self):
with self.locate().open('rb') as stream:
return stream.read()
def locate(self):
"""Return a path-like object for this path"""
return self.dist.locate_file(self)
class FileHash:
def __init__(self, spec):
self.mode, _, self.value = spec.partition('=')
def __repr__(self):
return '<FileHash mode: {} value: {}>'.format(self.mode, self.value)
class Distribution:
"""A Python distribution package."""
@abc.abstractmethod
def read_text(self, filename):
"""Attempt to load metadata file given by the name.
:param filename: The name of the file in the distribution info.
:return: The text if found, otherwise None.
"""
@abc.abstractmethod
def locate_file(self, path):
"""
Given a path to a file in this distribution, return a path
to it.
"""
@classmethod
def from_name(cls, name):
"""Return the Distribution for the given package name.
:param name: The name of the distribution package to search for.
:return: The Distribution instance (or subclass thereof) for the named
package, if found.
:raises PackageNotFoundError: When the named package's distribution
metadata cannot be found.
"""
for resolver in cls._discover_resolvers():
dists = resolver(DistributionFinder.Context(name=name))
dist = next(dists, None)
if dist is not None:
return dist
else:
raise PackageNotFoundError(name)
@classmethod
def discover(cls, **kwargs):
"""Return an iterable of Distribution objects for all packages.
Pass a ``context`` or pass keyword arguments for constructing
a context.
:context: A ``DistributionFinder.Context`` object.
:return: Iterable of Distribution objects for all packages.
"""
context = kwargs.pop('context', None)
if context and kwargs:
raise ValueError("cannot accept context and kwargs")
context = context or DistributionFinder.Context(**kwargs)
return itertools.chain.from_iterable(
resolver(context)
for resolver in cls._discover_resolvers()
)
@staticmethod
def at(path):
"""Return a Distribution for the indicated metadata path
:param path: a string or path-like object
:return: a concrete Distribution instance for the path
"""
return PathDistribution(ensure_is_path(path))
@staticmethod
def _discover_resolvers():
"""Search the meta_path for resolvers."""
declared = (
getattr(finder, 'find_distributions', None)
for finder in sys.meta_path
)
return filter(None, declared)
@property
def metadata(self):
"""Return the parsed metadata for this Distribution.
The returned object will have keys that name the various bits of
metadata. See PEP 566 for details.
"""
text = (
self.read_text('METADATA')
or self.read_text('PKG-INFO')
# This last clause is here to support old egg-info files. Its
# effect is to just end up using the PathDistribution's self._path
# (which points to the egg-info file) attribute unchanged.
or self.read_text('')
)
return email_message_from_string(text)
@property
def version(self):
"""Return the 'Version' metadata for the distribution package."""
return self.metadata['Version']
@property
def entry_points(self):
return EntryPoint._from_text(self.read_text('entry_points.txt'))
@property
def files(self):
"""Files in this distribution.
:return: List of PackagePath for this distribution or None
Result is `None` if the metadata file that enumerates files
(i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
missing.
Result may be empty if the metadata exists but is empty.
"""
file_lines = self._read_files_distinfo() or self._read_files_egginfo()
def make_file(name, hash=None, size_str=None):
result = PackagePath(name)
result.hash = FileHash(hash) if hash else None
result.size = int(size_str) if size_str else None
result.dist = self
return result
return file_lines and list(starmap(make_file, csv.reader(file_lines)))
def _read_files_distinfo(self):
"""
Read the lines of RECORD
"""
text = self.read_text('RECORD')
return text and text.splitlines()
def _read_files_egginfo(self):
"""
SOURCES.txt might contain literal commas, so wrap each line
in quotes.
"""
text = self.read_text('SOURCES.txt')
return text and map('"{}"'.format, text.splitlines())
@property
def requires(self):
"""Generated requirements specified for this Distribution"""
reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
return reqs and list(reqs)
def _read_dist_info_reqs(self):
return self.metadata.get_all('Requires-Dist')
def _read_egg_info_reqs(self):
source = self.read_text('requires.txt')
return source and self._deps_from_requires_text(source)
@classmethod
def _deps_from_requires_text(cls, source):
section_pairs = cls._read_sections(source.splitlines())
sections = {
section: list(map(operator.itemgetter('line'), results))
for section, results in
itertools.groupby(section_pairs, operator.itemgetter('section'))
}
return cls._convert_egg_info_reqs_to_simple_reqs(sections)
@staticmethod
def _read_sections(lines):
section = None
for line in filter(None, lines):
section_match = re.match(r'\[(.*)\]$', line)
if section_match:
section = section_match.group(1)
continue
yield locals()
@staticmethod
def _convert_egg_info_reqs_to_simple_reqs(sections):
"""
Historically, setuptools would solicit and store 'extra'
requirements, including those with environment markers,
in separate sections. More modern tools expect each
dependency to be defined separately, with any relevant
extras and environment markers attached directly to that
requirement. This method converts the former to the
latter. See _test_deps_from_requires_text for an example.
"""
def make_condition(name):
return name and 'extra == "{name}"'.format(name=name)
def parse_condition(section):
section = section or ''
extra, sep, markers = section.partition(':')
if extra and markers:
markers = '({markers})'.format(markers=markers)
conditions = list(filter(None, [markers, make_condition(extra)]))
return '; ' + ' and '.join(conditions) if conditions else ''
for section, deps in sections.items():
for dep in deps:
yield dep + parse_condition(section)
class DistributionFinder(MetaPathFinder):
"""
A MetaPathFinder capable of discovering installed distributions.
"""
class Context:
name = None
"""
Specific name for which a distribution finder should match.
"""
def __init__(self, **kwargs):
vars(self).update(kwargs)
@property
def path(self):
"""
The path that a distribution finder should search.
"""
return vars(self).get('path', sys.path)
@property
def pattern(self):
return '.*' if self.name is None else re.escape(self.name)
@abc.abstractmethod
def find_distributions(self, context=Context()):
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching the ``context``,
a DistributionFinder.Context instance.
"""
@install
class MetadataPathFinder(NullFinder, DistributionFinder):
"""A degenerate finder for distribution packages on the file system.
This finder supplies only a find_distributions() method for versions
of Python that do not have a PathFinder find_distributions().
"""
def find_distributions(self, context=DistributionFinder.Context()):
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching ``context.name``
(or all names if ``None`` indicated) along the paths in the list
of directories ``context.path``.
"""
found = self._search_paths(context.pattern, context.path)
return map(PathDistribution, found)
@classmethod
def _search_paths(cls, pattern, paths):
"""Find metadata directories in paths heuristically."""
return itertools.chain.from_iterable(
cls._search_path(path, pattern)
for path in map(cls._switch_path, paths)
)
@staticmethod
def _switch_path(path):
if not PYPY_OPEN_BUG or os.path.isfile(path): # pragma: no branch
with suppress(Exception):
return zipp.Path(path)
return pathlib.Path(path)
@classmethod
def _matches_info(cls, normalized, item):
template = r'{pattern}(-.*)?\.(dist|egg)-info'
manifest = template.format(pattern=normalized)
return re.match(manifest, item.name, flags=re.IGNORECASE)
@classmethod
def _matches_legacy(cls, normalized, item):
template = r'{pattern}-.*\.egg[\\/]EGG-INFO'
manifest = template.format(pattern=normalized)
return re.search(manifest, str(item), flags=re.IGNORECASE)
@classmethod
def _search_path(cls, root, pattern):
if not root.is_dir():
return ()
normalized = pattern.replace('-', '_')
return (item for item in root.iterdir()
if cls._matches_info(normalized, item)
or cls._matches_legacy(normalized, item))
class PathDistribution(Distribution):
def __init__(self, path):
"""Construct a distribution from a path to the metadata directory.
:param path: A pathlib.Path or similar object supporting
.joinpath(), __div__, .parent, and .read_text().
"""
self._path = path
def read_text(self, filename):
with suppress(FileNotFoundError, IsADirectoryError, KeyError,
NotADirectoryError, PermissionError):
return self._path.joinpath(filename).read_text(encoding='utf-8')
read_text.__doc__ = Distribution.read_text.__doc__
def locate_file(self, path):
return self._path.parent / path
def distribution(distribution_name):
"""Get the ``Distribution`` instance for the named package.
:param distribution_name: The name of the distribution package as a string.
:return: A ``Distribution`` instance (or subclass thereof).
"""
return Distribution.from_name(distribution_name)
def distributions(**kwargs):
"""Get all ``Distribution`` instances in the current environment.
:return: An iterable of ``Distribution`` instances.
"""
return Distribution.discover(**kwargs)
def metadata(distribution_name):
"""Get the metadata for the named package.
:param distribution_name: The name of the distribution package to query.
:return: An email.Message containing the parsed metadata.
"""
return Distribution.from_name(distribution_name).metadata
def version(distribution_name):
"""Get the version string for the named package.
:param distribution_name: The name of the distribution package to query.
:return: The version string for the package as defined in the package's
"Version" metadata key.
"""
return distribution(distribution_name).version
def entry_points():
"""Return EntryPoint objects for all installed packages.
:return: EntryPoint objects for all installed packages.
"""
eps = itertools.chain.from_iterable(
dist.entry_points for dist in distributions())
by_group = operator.attrgetter('group')
ordered = sorted(eps, key=by_group)
grouped = itertools.groupby(ordered, by_group)
return {
group: tuple(eps)
for group, eps in grouped
}
def files(distribution_name):
"""Return a list of files for the named package.
:param distribution_name: The name of the distribution package to query.
:return: List of files composing the distribution.
"""
return distribution(distribution_name).files
def requires(distribution_name):
"""
Return a list of requirements for the named package.
:return: An iterator of requirements, suitable for
packaging.requirement.Requirement.
"""
return distribution(distribution_name).requires
try:
__version__ = version(__name__)
except Exception:
__version__ = '0.22'
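# Illustrative usage of the public helpers above (added commentary, not part of
# the original module; the package name 'pip' is only an example):
#
#     version('pip')                  # -> version string from the "Version" field
#     metadata('pip')['Summary']      # parsed email.Message of the METADATA file
#     entry_points().get('console_scripts', ())
#     requires('pip')                 # Requires-Dist entries, or None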
|
the-stack_0_3450 | import os
import torch
import torch.nn as nn
import torchvision.models
import collections
import math
def weights_init(modules, type='xavier'):
m = modules
if isinstance(m, nn.Conv2d):
if type == 'xavier':
torch.nn.init.xavier_normal_(m.weight)
elif type == 'kaiming': # msra
torch.nn.init.kaiming_normal_(m.weight)
else:
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.ConvTranspose2d):
if type == 'xavier':
torch.nn.init.xavier_normal_(m.weight)
elif type == 'kaiming': # msra
torch.nn.init.kaiming_normal_(m.weight)
else:
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
if type == 'xavier':
torch.nn.init.xavier_normal_(m.weight)
elif type == 'kaiming': # msra
torch.nn.init.kaiming_normal_(m.weight)
else:
m.weight.data.fill_(1.0)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Module):
for m in modules:
if isinstance(m, nn.Conv2d):
if type == 'xavier':
torch.nn.init.xavier_normal_(m.weight)
elif type == 'kaiming': # msra
torch.nn.init.kaiming_normal_(m.weight)
else:
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.ConvTranspose2d):
if type == 'xavier':
torch.nn.init.xavier_normal_(m.weight)
elif type == 'kaiming': # msra
torch.nn.init.kaiming_normal_(m.weight)
else:
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
if type == 'xavier':
torch.nn.init.xavier_normal_(m.weight)
elif type == 'kaiming': # msra
torch.nn.init.kaiming_normal_(m.weight)
else:
m.weight.data.fill_(1.0)
if m.bias is not None:
m.bias.data.zero_()
class FullImageEncoder(nn.Module):
def __init__(self, dataset='kitti'):
super(FullImageEncoder, self).__init__()
self.global_pooling = nn.AvgPool2d(8, stride=8, padding=(1, 0)) # KITTI 16 16
self.dropout = nn.Dropout2d(p=0.5)
self.global_fc = nn.Linear(2048 * 4 * 5, 512)
self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(512, 512, 1)  # 1x1 convolution
self.upsample = nn.UpsamplingBilinear2d(size=(30, 40)) # KITTI 49X65 NYU 33X45
self.dataset = dataset
weights_init(self.modules(), 'xavier')
def forward(self, x):
x1 = self.global_pooling(x)
# print('# x1 size:', x1.size())
x2 = self.dropout(x1)
x3 = x2.view(-1, 2048 * 4 * 5)
x4 = self.relu(self.global_fc(x3))
# print('# x4 size:', x4.size())
x4 = x4.view(-1, 512, 1, 1)
# print('# x4 size:', x4.size())
x5 = self.conv1(x4)
out = self.upsample(x5)
return out
class SceneUnderstandingModule(nn.Module):
def __init__(self, output_channel=136, dataset='kitti'):
super(SceneUnderstandingModule, self).__init__()
self.encoder = FullImageEncoder(dataset=dataset)
self.aspp1 = nn.Sequential(
nn.Conv2d(2048, 512, 1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 1),
nn.ReLU(inplace=True)
)
self.aspp2 = nn.Sequential(
nn.Conv2d(2048, 512, 3, padding=6, dilation=6),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 1),
nn.ReLU(inplace=True)
)
self.aspp3 = nn.Sequential(
nn.Conv2d(2048, 512, 3, padding=12, dilation=12),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 1),
nn.ReLU(inplace=True)
)
self.aspp4 = nn.Sequential(
nn.Conv2d(2048, 512, 3, padding=18, dilation=18),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 1),
nn.ReLU(inplace=True)
)
self.concat_process = nn.Sequential(
nn.Dropout2d(p=0.5),
nn.Conv2d(512 * 5, 2048, 1),
nn.ReLU(inplace=True),
nn.Dropout2d(p=0.5),
            nn.Conv2d(2048, output_channel, 1),  # KITTI 142, NYU 136; in the paper K = 80 is best, so using 160 (2K) channels works well
# nn.UpsamplingBilinear2d(scale_factor=8)
nn.UpsamplingBilinear2d(size=(240, 320))
)
weights_init(self.modules(), type='xavier')
def forward(self, x):
x1 = self.encoder(x)
x2 = self.aspp1(x)
x3 = self.aspp2(x)
x4 = self.aspp3(x)
x5 = self.aspp4(x)
x6 = torch.cat((x1, x2, x3, x4, x5), dim=1)
# print('cat x6 size:', x6.size())
out = self.concat_process(x6)
return out
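# Note (added commentary): SceneUnderstandingModule is the DORN-style ASPP block --
# a full-image encoder branch plus four parallel branches with dilations 1/6/12/18
# are concatenated (5 x 512 channels), projected to `output_channel` ordinal logits,
# and bilinearly upsampled to the 240x320 output resolution.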
class OrdinalRegressionLayer(nn.Module):
def __init__(self):
super(OrdinalRegressionLayer, self).__init__()
#self.logsoftmax = nn.Logsoftmax(dim=1)
def forward(self, x):
N, C, H, W = x.size()
ord_num = C // 2
A = x[:, ::2, :, :].clone()
B = x[:, 1::2, :, :].clone()
A = A.view(N, 1, ord_num * H * W)
B = B.view(N, 1, ord_num * H * W)
C = torch.cat((A, B), dim=1)
#C = torch.clamp(C, min=1e-7, max=1e7) # prevent nans
ord_c = nn.functional.softmax(C, dim=1)
ord_c1 = ord_c[:, 1, :].clone()
ord_c2 = nn.LogSoftmax(dim=1)(C)
ord_c1 = ord_c1.view(-1, ord_num, H, W)
ord_c2 = ord_c2.view(-1, ord_num * 2, H, W)
decode_c = torch.sum((ord_c1 >= 0.5), dim=1).view(-1, 1, H, W).float()
return decode_c, ord_c2
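# Note (added commentary): OrdinalRegressionLayer follows the DORN formulation --
# the C channels are split into C//2 (A, B) pairs, each pair gets a 2-way softmax,
# ord_c1 holds the per-threshold probability P(label > k), and the decoded label
# per pixel is the count of thresholds with probability >= 0.5; ord_c2 keeps the
# log-probabilities for the ordinal loss.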
class ResNet(nn.Module):
def __init__(self, in_channels=3, pretrained=True, freeze=True):
super(ResNet, self).__init__()
pretrained_model = torchvision.models.__dict__['resnet{}'.format(101)](pretrained=pretrained)
self.channel = in_channels
self.conv1 = nn.Sequential(collections.OrderedDict([
('conv1_1', nn.Conv2d(self.channel, 64, kernel_size=3, stride=2, padding=1, bias=False)),
('bn1_1', nn.BatchNorm2d(64)),
('relu1_1', nn.ReLU(inplace=True)),
('conv1_2', nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)),
('bn_2', nn.BatchNorm2d(64)),
('relu1_2', nn.ReLU(inplace=True)),
('conv1_3', nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False)),
('bn1_3', nn.BatchNorm2d(128)),
('relu1_3', nn.ReLU(inplace=True))
]))
self.bn1 = nn.BatchNorm2d(128)
# print(pretrained_model._modules['layer1'][0].conv1)
self.relu = pretrained_model._modules['relu']
self.maxpool = pretrained_model._modules['maxpool']
self.layer1 = pretrained_model._modules['layer1']
self.layer1[0].conv1 = nn.Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer1[0].downsample[0] = nn.Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer2 = pretrained_model._modules['layer2']
self.layer3 = pretrained_model._modules['layer3']
self.layer3[0].conv2 = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.layer3[0].downsample[0] = nn.Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer4 = pretrained_model._modules['layer4']
self.layer4[0].conv2 = nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.layer4[0].downsample[0] = nn.Conv2d(1024, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
# clear memory
del pretrained_model
if pretrained:
weights_init(self.conv1, type='kaiming')
weights_init(self.layer1[0].conv1, type='kaiming')
weights_init(self.layer1[0].downsample[0], type='kaiming')
weights_init(self.layer3[0].conv2, type='kaiming')
weights_init(self.layer3[0].downsample[0], type='kaiming')
weights_init(self.layer4[0].conv2, 'kaiming')
weights_init(self.layer4[0].downsample[0], 'kaiming')
else:
weights_init(self.modules(), type='kaiming')
if freeze:
self.freeze()
def forward(self, x):
# print(pretrained_model._modules)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
# print('conv1:', x.size())
x = self.maxpool(x)
# print('pool:', x.size())
x1 = self.layer1(x)
# print('layer1 size:', x1.size())
x2 = self.layer2(x1)
# print('layer2 size:', x2.size())
x3 = self.layer3(x2)
# print('layer3 size:', x3.size())
x4 = self.layer4(x3)
# print('layer4 size:', x4.size())
return x4
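    # Note (added commentary): with the stride-1 replacements in layer3/layer4 above,
    # only the conv1 stem, maxpool and layer2 downsample, so forward() returns a
    # 2048-channel feature map at 1/8 of the input resolution (e.g. 30x40 for a
    # 240x320 input), matching the 2048*4*5 fully-connected layer in FullImageEncoder.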
def freeze(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
class DORN(nn.Module):
def __init__(self, output_size=(240, 320), losstype=1, channel=3, pretrained=True, freeze=True, output_channel=3, dataset='kitti'):
super(DORN, self).__init__()
self.output_size = output_size
self.channel = channel
self.feature_extractor = ResNet(in_channels=channel, pretrained=pretrained, freeze=freeze)
self.aspp_module = SceneUnderstandingModule(output_channel=output_channel, dataset=dataset)
self.orl = OrdinalRegressionLayer()
self.losstype = losstype
    def forward(self, x):
        x1 = self.feature_extractor(x)
        x2 = self.aspp_module(x1)
        # pass the ASPP logits through the ordinal regression layer so the decoded
        # depth labels and log-probabilities are returned, as unpacked in __main__
        depth_labels, ord_labels = self.orl(x2)
        return depth_labels, ord_labels
def get_1x_lr_params(self):
b = [self.feature_extractor]
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
yield k
def get_10x_lr_params(self):
b = [self.aspp_module, self.orl]
for j in range(len(b)):
for k in b[j].parameters():
if k.requires_grad:
yield k
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # use GPU 0 by default
if __name__ == "__main__":
model = DORN()
model = model.cuda()
model.eval()
image = torch.randn(1, 3, 257, 353)
image = image.cuda()
with torch.no_grad():
out0, out1 = model(image)
print('out0 size:', out0.size())
print('out1 size:', out1.size())
print(out0)
|
the-stack_0_3451 | #!/usr/bin/env python
from os import listdir
from os.path import isfile, join
import pickle
import os
from loguru import logger
def diff(l1, l2):
return list(set(l2) - set(l1))
logger.info("Running")
WORK_DIR = os.environ['WORK_DIR']
cur_files = [f for f in listdir(WORK_DIR) if isfile(join(WORK_DIR, f))]
try:
last_files_pickle = open('/tmp/workdir_files.pickle','rb')
last_files = pickle.load(last_files_pickle)
last_files_pickle.close()
logger.info("Compare Work Dir")
if len(diff(cur_files, last_files)) > 0 or len(diff(last_files, cur_files)) > 0:
logger.warning("Changes found, restarting Frontail")
os.system("pkill -f frontail")
os.system("/root/run_trail.sh")
except Exception:
    # first run (or unreadable pickle): there is no previous snapshot to compare against
    pass
# Write status
logger.info("Writing current dir status")
cur_files_pickle = open('/tmp/workdir_files.pickle','wb')
pickle.dump(cur_files, cur_files_pickle)
cur_files_pickle.close()
|
the-stack_0_3452 | """
Copyright (c) 2013, SMART Technologies ULC
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Copyright holder (SMART Technologies ULC) nor
the names of its contributors (Joshua Henn) may be used to endorse or
promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER (SMART Technologies
ULC) "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from org.sikuli.script import Location, Pattern
import sys
import types
from sikuli import Env, Region
from org.sikuli.script import Region as JRegion
from org.sikuli.script import Env as JEnv
#from config import Config
#from logger import Logger
from sikuli.Sikuli import capture
# =============================================== #
# Overwritten sikuli methods #
# =============================================== #
# function for calling native sikuli methods
def sikuli_method(name, *args, **kwargs):
return sys.modules['sikuli.Sikuli'].__dict__[name](*args, **kwargs)
# overwritten Screen.exists method
def exists(target, timeout=None):
addFoundImage(getFilename(target))
return sikuli_method('exists', target, timeout)
# =============================================== #
# Overwritten sikuli classes #
# =============================================== #
@staticmethod
def EnvGetOSVersion(fullName=None):
if not fullName:
return Env.oldGetOSVersion();
elif Env.oldGetOSVersion() == '5.1':
return 'XP'
elif Env.oldGetOSVersion() == '6.0':
return 'Vista'
elif Env.oldGetOSVersion() == '6.1':
return 'Win7'
Env.oldGetOSVersion = Env.getOSVersion
Env.getOSVersion = EnvGetOSVersion
## Java Region patching
def add(self, operand):
# If we're trying to add None, just return the original region
if not operand:
return self
regions = [self, operand]
# more than one region, get min/max region
minX, minY = 9999, 9999
maxX, maxY = -9999, -9999
for region in regions:
if region.getX() < minX: minX = int(region.getX())
if region.getY() < minY: minY = int(region.getY())
# If this is a region type
if hasattr(region, "getW") and hasattr(region, "getH"):
if (region.getX() + region.getW()) > maxX: maxX = region.getX() + region.getW()
if (region.getY() + region.getH()) > maxY: maxY = region.getY() + region.getH()
else:
if region.getX() > maxX: maxX = int(region.getX())
if region.getY() > maxY: maxY = int(region.getY())
return Region(minX, minY, maxX-minX, maxY-minY)
JRegion.add = add
# Java Region patching
def limit(self, operand):
# If we're trying to limit None, return original
if not operand:
return self
x1 = self.getX() if self.getX() > operand.getX() else operand.getX()
y1 = self.getY() if self.getY() > operand.getY() else operand.getY()
x2 = (self.getX() + self.getW()) if (self.getX() + self.getW()) < (operand.getX() + operand.getW()) else (operand.getX() + operand.getW())
y2 = (self.getY() + self.getH()) if (self.getY() + self.getH()) < (operand.getY() + operand.getH()) else (operand.getY() + operand.getH())
# Check region is valid positive
if x2-x1 < 0 or y2-y1 < 0:
raise Exception("Region %s is outside the bounds of the ParentRegion %s" % (self, operand))
return Region(x1, y1, x2-x1, y2-y1)
JRegion.limit = limit
def offset(self, operand):
self.setX(self.getX() + operand.getX())
self.setY(self.getY() + operand.getY())
return self
##
def regionInit(self, operand, *args, **kargs):
# Handle a list of regions
if isinstance(operand, list):
region = None
for item in operand:
if region:
region = region.add(item)
else:
region = item
self.oldInit(region, *args, **kargs)
else:
self.oldInit(operand, *args, **kargs)
JRegion.oldInit = JRegion.__init__
JRegion.__init__ = regionInit
## Region patching
#JRegion.timeout = Config.regionTimeout
JRegion.clickOffset = Location(0,0)
# Define setClickOffset
def setClickOffset(self, offset):
assert isinstance(offset, Location)
self.clickOffset = offset
JRegion.setClickOffset = setClickOffset
# Define getClickLocation
def getClickLocation(self):
x = self.x + (self.w/2) + self.clickOffset.getX()
y = self.y + (self.h/2) + self.clickOffset.getY()
return Location(x, y)
JRegion.getClickLocation = getClickLocation
# Define getClickLocation
def getClickOffset(self):
return self.clickOffset
JRegion.getClickOffset = getClickOffset
|
the-stack_0_3454 | """
Robust Principal Component Analysis
"""
import numpy as np
from numpy.linalg import norm
from numpy.linalg import svd
def rpca_alm(M, mu=None, l=None, mu_tol=1E7, tol=1E-7, max_iter=1000):
"""Matrix recovery/decomposition using Robust Principal Component Analysis
    Decompose a rectangular matrix M into a low-rank component and a sparse
    component by solving a convex minimization problem via the Augmented
    Lagrange Multiplier (ALM) method.
minimize ||A||_* + λ ||E||_1
subject to A + E = M
where ||A||_* is the nuclear norm of A (sum of singular values)
- surrogate of matrix rank
||E||_1 is the l1 norm of E (absolute values of elements)
- surrogate of matrix sparseness
Relaxed to
L(A,E,Y,λ) .= ||A||_* + λ||E||_1 + <Y, M-A-E> + µ/2 ||M-A-E||_F^2
Parameters
----------
M : array-like, shape (n_samples, n_features)
Matrix to decompose, where n_samples in the number of samples and
n_features is the number of features.
l : float (default 1/sqrt(max(m,n)), for m x n of M)
Parameter λ (lambda) of the convex problem ||A||_* + λ ||E||_1. [2]_
mu : float (default 1.25 * ||M||_2)
Parameter µ (mu) of the Augmented Lagrange Multiplier form of Principal
Component Pursuit (PCP). [2]_
    mu_tol : float >= 0 (default 1E7)
        Upper bound for the penalty parameter µ (mu), which is increased by a
        factor rho at every iteration.
tol : float >= 0 (default 1E-7)
Tolerance for accuracy of matrix reconstruction of low rank and sparse
components.
max_iter : int >= 0 (default 1000)
Maximum number of iterations to perform.
Returns
-------
A : array, shape (n_samples, n_features)
Low-rank component of the matrix decomposition.
E : array, shape (n_samples, n_features)
Sparse component of the matrix decomposition.
err : float
Error of matrix reconstruction
||M-A-E||_F / ||M||_F
References
----------
.. [1] Z. Lin, M. Chen, Y. Ma. The Augmented Lagrange Multiplier Method for
Exact Recovery of Corrupted Low-Rank Matrices, arXiv:1009.5055
.. [2] E. J. Candés, X. Li, Y. Ma, J. Wright. Robust principal
component analysis? Journal of the ACM v.58 n.11 May 2011
"""
rho = 1.5
if not mu:
mu = 1.25 * norm(M, ord=2)
if not l:
l = np.max(M.shape)**-.5
M_sign = np.sign(M)
norm_spectral = norm(M_sign, ord=2)
norm_inf = norm(M_sign, ord=np.inf)
norm_dual = np.max([norm_spectral, norm_inf * l**-1])
Y = M_sign * norm_dual**-1
A = np.zeros(M.shape)
E = np.zeros(M.shape)
err = np.inf
i = 0
while err > tol and i < max_iter:
U, S, V = svd(M - E + Y * mu**-1, full_matrices=False)
A = np.dot(U, np.dot(np.diag(_shrink(S, mu**-1)), V))
E = _shrink(M - A + Y * mu**-1, l * mu**-1)
Y = Y + mu * (M - A - E)
err = _fro_error(M, A, E)
mu *= rho
mu = np.min([mu, mu_tol])
i += 1
return A, E, err
def _fro_error(M, A, E):
"""Error of matrix reconstruction"""
return norm(M - A - E, ord='fro') * norm(M, ord='fro')**-1
def _shrink(M, t):
"""Shrinkage operator"""
return np.sign(M) * np.maximum((np.abs(M) - t), np.zeros(M.shape))
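# Minimal usage sketch (added for illustration, not part of the original module):
# build a synthetic low-rank matrix, corrupt a few entries, and recover both parts.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    L_true = rng.randn(60, 5).dot(rng.randn(5, 60))           # rank-5 component
    S_true = np.zeros_like(L_true)
    mask = rng.rand(*L_true.shape) < 0.05                     # ~5% sparse corruption
    S_true[mask] = 10 * rng.randn(mask.sum())
    A, E, err = rpca_alm(L_true + S_true)
    print("reconstruction error:", err)
    print("relative low-rank error:", norm(A - L_true, 'fro') / norm(L_true, 'fro'))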
|
the-stack_0_3455 | #PART 1 - CORPUS
import random
import time
import csv
from collections import Counter
t1=time.time()
print("Warning: This program is long, and takes some time to execute, because of the big file sizes.")
print("It took around 30s on an i7 7700HQ laptop with 16 GB of RAM. Performance might vary.")
def combine_lists(l1, l2):
return list(map(lambda x, y:"{} {}".format(x,y), l1, l2))
def givetaggivenword(some_dict):
temp_dict={}
temp=list(some_dict.values())
for a_dict in temp:
for tag in a_dict:
if tag in temp_dict:
temp_dict[tag]=temp_dict[tag]+a_dict[tag]
else:
temp_dict[tag]=a_dict[tag]
best_tag=keywithmaxval(temp_dict)
return(best_tag)
#Function to return the key having maximum value in a dictionary
def keywithmaxval(dic):
val=list(dic.values())
key=list(dic.keys())
return key[val.index(max(val))]
def givesingletons(arr):
freq = Counter(arr)
return [elem for elem in arr if freq[elem] == 1]
#MODEL SCRAPPED AS NOT ENOUGH TIME
#Function to give a tag which is calculated randomly by using the test tag set frequency as weights
# def weighted_random_tag(tag_list):
# import numpy as np
# unique_elements, counts_elements = np.unique(tag_list, return_counts=True)
# counts_elements=counts_elements/np.sum(counts_elements)
# weighted_tag=np.random.choice(unique_elements,p=counts_elements)
# return(weighted_tag)
#Open File and read brown.txt
file=open("brown.txt","r")
all_text=file.read()
file.close()
clean_text=all_text.strip()
#Get all words along with their tags
trainfile=open("brown-train.txt","w")
testfile=open("brown-test.txt","w")
all_words=clean_text.split()
all_sent=[]
sent=""
#Join words to form sentences using the following loop
i=0 #Number of sentences
for word in all_words:
if word[-2:]=="/.":
sent=sent+word+" "
all_sent.append(sent.strip())
        if len(all_sent[-1])==3: # drop "sentences" that are just a lone terminator token ("./." has length 3)
# print("All sent of -2 is{}".format(all_sent[-2]))
# print("All sent of -1 is{}".format(all_sent[-1]))
# print("Current sent is{}".format(sent))
# print(all_sent[-1])
del all_sent[-1]
i=i-1
sent=""
i=i+1
continue
sent=sent+word+" "
#The first 2000 sentences of all sentences will form training set, while remaining will form test dataset
train_sent=all_sent[:2000]
test_sent=all_sent[2000:]
trainfile.write('\n'.join(train_sent))
testfile.write('\n'.join(test_sent))
#Write these training and test datasets to files
trainfile.close()
testfile.close()
print("brown-train.txt saved succesfully.")
print("brown-test.txt saved succesfully.")
#PART 2 - TAGGER IMPLEMENTATION
#Subpart 1 - Unigram
print("------------UNIGRAM------------")
#Create a nested dictionary of form {WORD1:{Tag1:Freqeuncy,Tag2:Freqeuncy,Tag3:Freqeuncy...},WORD2:{Tag1:Freqeuncy,Tag2:Freqeuncy,Tag3:Freqeuncy...},WORD3:{Tag1:Freqeuncy,Tag2:Freqeuncy,Tag3:Freqeuncy...}...}
unigram_tagger_dict={} #Nested Dictionary
unigram_list=[] #List of all unigrams
tag_list=[] #List of all tags
for sent in train_sent:
for word in sent.split():
unigram=word.rsplit("/",1)[0]
tag=word.rsplit("/",1)[1]
unigram_list.append(unigram)
tag_list.append(tag)
#A Tag Dictionary for the current word i.e for current word {Tag1,Tag2,Tag3...}
if unigram in unigram_tagger_dict:
tag_dict=unigram_tagger_dict[unigram]
else:
tag_dict={}
if tag not in tag_dict:
tag_dict[tag]=0
tag_dict[tag]=tag_dict[tag]+1
unigram_tagger_dict[unigram]=tag_dict
#Get the list of all unique unigrams and tags
unigram_set=list(set(unigram_list))
tag_set=list(set(tag_list))
max_tag_unigram_dict={}
unigramfile=open("unigram-tag.txt","w")
#Find out the most frequent tag for each word in training set and store as a dictionary
for unigram in unigram_set:
current_unigram_dict=unigram_tagger_dict[unigram]
unigram_values=list(current_unigram_dict.values())
unigram_keys=list(current_unigram_dict.keys())
max_tag=unigram_keys[unigram_values.index(max(unigram_values))]
max_tag_unigram_dict[unigram]=max_tag
#Write the dictionary to a file outside the loop to save time
unigramfile.write(str(max_tag_unigram_dict))
unigramfile.close()
print("unigram-tag.txt saved succesfully.")
#Assign the most frequent tag calculated above to all words in training set
unigramresultfile=open("unigram-results.txt","w")
unigramresult="" #String that holds all sentences after they've been tagged using unigram model
true_unigam_tag_counts=0 #To count how many assigned tags match the original correct tags
false_unigam_tag_counts=0 #To count how many assigned tags were assigned wrongly
unknown_correct=0
all_unknown={} #Dictionary of all unknown unigrams
unigram_confusion={} # { (tag1(true), tag2(model)) : freq }
hapax=givesingletons(unigram_list)
hapax_tags=[]
for elem in hapax:
hapax_tags.append(max_tag_unigram_dict[elem])
#We have multiple models to assign tags to unknown words
print("Enter model number you would like to use : 0,1 or 2 based on:")
print("Approach 0: Mark all unknowns as UNK tags")
print("Approach 1: For unknown unigrams, give them a random tag with equal prob (1/n)")
print("Approach 2: For unknown unigrams, give them a random tag where the random prob is based ONLY ON THE UNIGRAMS WHICH APPEARED ONCE in the training data set.")
inp=int(input("Enter your choice:\n"))
for sent in test_sent:
for word in sent.split():
#Extract unigram and true_tag from "unigram/true_tag"
unigram=word.rsplit("/",1)[0]
true_tag=word.rsplit("/",1)[1]
#Find out tag based on our model:
#If the current unigram is a known unigram, then assign it the tag calculated earlier
if unigram in max_tag_unigram_dict:
model_tag=max_tag_unigram_dict[unigram]
#If it's unknown, we have various strategies for that
else:
if inp==0:
model_tag="UNK"
if inp==1:
model_tag=random.choice(tag_set)
# if inp==2: #MODEL SCRAPPED AS NOT ENOUGH TIME
# model_tag=weighted_random_tag(tag_list)
if inp==2:
model_tag=random.choice(hapax_tags)
if model_tag==true_tag:
unknown_correct+=1
all_unknown.setdefault(unigram,0)
all_unknown[unigram]=all_unknown[unigram]+1
unigramresult=unigramresult+"{}/{} ".format(unigram,model_tag)
#Update true and false tag counters
if true_tag==model_tag:
true_unigam_tag_counts+=1
else:
false_unigam_tag_counts+=1
#CONFUSION
unigram_confusion.setdefault((true_tag,model_tag),0)
unigram_confusion[(true_tag,model_tag)]+=1
unigramresult=unigramresult+"\n"
unigramresultfile.write(unigramresult)
unigramresultfile.close()
print("unigram-results.txt saved succesfully.")
unigram_accuracy=100*true_unigam_tag_counts/(false_unigam_tag_counts+true_unigam_tag_counts)
unknown_accuracy=100*unknown_correct/max(len(all_unknown),1) # only tracked for approach 2; scaled to a percentage
print("Unigram Tagger Accuracy is {}%".format(unigram_accuracy))
print("Total unknowns is {}".format(len(all_unknown)))
print("Unknown Accuracy is {}%".format(unknown_accuracy))
#all_unknown_list=list(all_unknown.keys())
#Subpart 2 - Bigram
print("------------BIGRAM------------")
next_word_list=all_words[1:]
bigram_word_list=combine_lists(all_words,next_word_list)
bigram_tagger_dict={} # Word1:{Tag1:{Possible Next Tags: Count},Tag2:{Possible Next Tags: Count}},Word2:...
bigramfile=open("bigram-tag.txt","w")
bigramtagtext="The format is:\nCurrent Word:\n\tPrevious Tag:\n\t\tNext Tag :\tFrequency\n-------------------\n"
#Order is Count(previous,next)
for i in range(len(bigram_word_list)):
bigram_4_parts=bigram_word_list[i].replace(" ","/").rsplit("/")
prev_tag=bigram_4_parts[1]
next_tag=bigram_4_parts[3]
next_word=bigram_4_parts[2]
if next_word in bigram_tagger_dict:
next_word_dict=bigram_tagger_dict[next_word]
else:
next_word_dict={}
both_tags=bigram_4_parts[1]+bigram_4_parts[3]
if prev_tag in next_word_dict:
tag_dict=next_word_dict[prev_tag]
else:
tag_dict={}
if next_tag not in tag_dict:
tag_dict[next_tag]=0
tag_dict[next_tag]=tag_dict[next_tag]+1
next_word_dict[prev_tag]=tag_dict
bigram_tagger_dict[next_word]=next_word_dict
bigramfile.write(str(bigram_tagger_dict))
bigramfile.close()
print("bigram-tag.txt saved succesfully.")
#Calculate the most probable next tag given previous tag for current word:
bigramresultfile=open("bigram-results.txt","w")
bigramresult="" #String that holds all sentences after they've been tagged using unigram model
true_bigam_tag_counts=0 #To count how many assigned tags match the original correct tags
false_bigam_tag_counts=0 #To count how many assigned tags were assigned wrongly
unknown_correct_bigram=0
all_unknown_bigram={}
bigram_confusion={} # { (tag1(true), tag2(model)) : freq }
i=0
j=0
print("Enter model number you would like to use : 1 or 2 based on:")
print("Approach 1: For unknown words, give them a random tag with equal prob (1/n)")
print("Approach 2: For unknown words, give them a random tag where the random prob is based ONLY ON THE UNIGRAMS WHICH APPEARED ONCE in the training data set.")
inp2=int(input("Enter your choice:\n"))
starting_tag="." #Because this is a new sentence.
for sent in test_sent:
for word in sent.split():
if i==0 and j==0:
prev_tag=starting_tag
#Extract unigram and true_tag from "unigram/true_tag"
unigram=word.rsplit("/",1)[0]
true_tag=word.rsplit("/",1)[1]
if unigram in bigram_tagger_dict:
try:
bigram_model_tag=keywithmaxval(bigram_tagger_dict[unigram][prev_tag])
except Exception as e:
#WORD FOUND, BUT NO TAG FOR PREV_TAG FOR THIS WORD Unknown Model
if inp2==1:
bigram_model_tag=random.choice(tag_set)
if inp2==2:
bigram_model_tag=random.choice(hapax_tags)
#bigram_model_tag=givetaggivenword(bigram_tagger_dict[unigram])
else:
            #WORD NOT FOUND: Unknown Model
            if inp2==1:
                bigram_model_tag=random.choice(tag_set)
            if inp2==2:
                bigram_model_tag=random.choice(hapax_tags)
            if bigram_model_tag==true_tag:
                unknown_correct_bigram+=1
            all_unknown_bigram.setdefault(prev_tag,0)
            all_unknown_bigram[prev_tag]=all_unknown_bigram[prev_tag]+1
bigramresult=bigramresult+"{}/{} ".format(unigram,bigram_model_tag)
if true_tag==bigram_model_tag:
true_bigam_tag_counts+=1
else:
false_bigam_tag_counts+=1
#CONFUSION
        bigram_confusion.setdefault((true_tag,bigram_model_tag),0)
        bigram_confusion[(true_tag,bigram_model_tag)]+=1
prev_tag=bigram_model_tag
j+=1
bigramresult=bigramresult+"\n"
i+=1
bigramresultfile.write(bigramresult)
bigramresultfile.close()
print("bigram-results.txt saved succesfully.")
bigram_accuracy=100*true_bigam_tag_counts/(false_bigam_tag_counts+true_bigam_tag_counts)
unknown_accuracy_bigram=100*unknown_correct_bigram/max(len(all_unknown_bigram),1) # scaled to a percentage
print("Bigram Tagger Accuracy is {}%".format(bigram_accuracy))
print("Total unknowns is {}".format(len(all_unknown_bigram)))
print("Unknown Accuracy is {}%".format(unknown_accuracy_bigram))
print("------------CONFUSION MATRICES------------")
#A part of the below code has been re-used from my earlier assignment https://github.com/AKnightWing/bigram/blob/master/comp_ling.py
#Unigram Tagger Confusion Matrix
#Normalise both confusion dictionarues
for key in unigram_confusion:
unigram_confusion[key]=100*unigram_confusion[key]/false_unigam_tag_counts
for key in bigram_confusion:
bigram_confusion[key]=100*bigram_confusion[key]/false_bigam_tag_counts
firstrow=[' '] #The first row in the 2D list
for key in tag_set:
firstrow.append(key)
unigram_matrix=[] #A n*n 2D list which stores only the skeleton of the matrix
for i in range(len(tag_set)+1):
if i==0:
unigram_matrix.append(firstrow)
else:
row=[]
for j in range(len(tag_set)+1):
if j==0:
row.append(firstrow[i])
else:
try:
                    row.append(unigram_confusion[(tag_set[i-1],tag_set[j-1])]) # row/column 0 are labels, so offset the tag index by 1
except Exception as e:
row.append("0")
unigram_matrix.append(row)
bigram_matrix=[]
for i in range(len(tag_set)+1):
if i==0:
bigram_matrix.append(firstrow)
else:
row=[]
for j in range(len(tag_set)+1):
if j==0:
row.append(firstrow[i])
else:
try:
                    row.append(bigram_confusion[(tag_set[i-1],tag_set[j-1])]) # offset by 1 for the label row/column
except Exception as e:
row.append("0")
bigram_matrix.append(row)
with open('unigram_confusion.csv', 'w') as writeFile:
writer = csv.writer(writeFile)
writer.writerows(unigram_matrix)
writeFile.close()
print("unigram_confusion.csv saved succesfully.")
with open('bigram_confusion.csv', 'w') as writeFile2:
writer = csv.writer(writeFile2)
writer.writerows(bigram_matrix)
writeFile2.close()
print("bigram_confusion.csv saved succesfully.")
t2=time.time()
print("Total time taken by program = {} seconds".format(t2-t1)) |
the-stack_0_3457 | """
Agent namespaced tasks
"""
from __future__ import print_function
import glob
import os
import shutil
import sys
import platform
from distutils.dir_util import copy_tree
import invoke
from invoke import task
from invoke.exceptions import Exit
from .utils import bin_name, get_build_flags, get_version_numeric_only, load_release_versions
from .utils import REPO_PATH
from .build_tags import get_build_tags, get_default_build_tags, LINUX_ONLY_TAGS, DEBIAN_ONLY_TAGS
from .go import deps
# constants
BIN_PATH = os.path.join(".", "bin", "agent")
AGENT_TAG = "datadog/agent:master"
DEFAULT_BUILD_TAGS = [
"apm",
"consul",
"cpython",
"docker",
"ec2",
"etcd",
"gce",
"jmx",
"kubeapiserver",
"kubelet",
"log",
"systemd",
"process",
"snmp",
"zk",
"zlib",
]
@task
def build(ctx, rebuild=False, race=False, build_include=None, build_exclude=None,
puppy=False, use_embedded_libs=False, development=True, precompile_only=False,
skip_assets=False):
"""
Build the agent. If the bits to include in the build are not specified,
the values from `invoke.yaml` will be used.
    Example invocation:
inv agent.build --build-exclude=snmp,systemd
"""
build_include = DEFAULT_BUILD_TAGS if build_include is None else build_include.split(",")
build_exclude = [] if build_exclude is None else build_exclude.split(",")
ldflags, gcflags, env = get_build_flags(ctx, use_embedded_libs=use_embedded_libs)
if not sys.platform.startswith('linux'):
for ex in LINUX_ONLY_TAGS:
if ex not in build_exclude:
build_exclude.append(ex)
# remove all tags that are only available on debian distributions
distname = platform.linux_distribution()[0].lower()
if distname not in ['debian', 'ubuntu']:
for ex in DEBIAN_ONLY_TAGS:
if ex not in build_exclude:
build_exclude.append(ex)
if sys.platform == 'win32':
# This generates the manifest resource. The manifest resource is necessary for
# being able to load the ancient C-runtime that comes along with Python 2.7
# command = "rsrc -arch amd64 -manifest cmd/agent/agent.exe.manifest -o cmd/agent/rsrc.syso"
ver = get_version_numeric_only(ctx)
build_maj, build_min, build_patch = ver.split(".")
command = "windmc --target pe-x86-64 -r cmd/agent cmd/agent/agentmsg.mc "
ctx.run(command, env=env)
command = "windres --define MAJ_VER={build_maj} --define MIN_VER={build_min} --define PATCH_VER={build_patch} ".format(
build_maj=build_maj,
build_min=build_min,
build_patch=build_patch
)
command += "-i cmd/agent/agent.rc --target=pe-x86-64 -O coff -o cmd/agent/rsrc.syso"
ctx.run(command, env=env)
if puppy:
        # Puppy mode overrides whatever was passed through `--build-exclude` and `--build-include`
build_tags = get_default_build_tags(puppy=True)
else:
build_tags = get_build_tags(build_include, build_exclude)
cmd = "go build {race_opt} {build_type} -tags \"{go_build_tags}\" "
cmd += "-o {agent_bin} -gcflags=\"{gcflags}\" -ldflags=\"{ldflags}\" {REPO_PATH}/cmd/agent"
args = {
"race_opt": "-race" if race else "",
"build_type": "-a" if rebuild else ("-i" if precompile_only else ""),
"go_build_tags": " ".join(build_tags),
"agent_bin": os.path.join(BIN_PATH, bin_name("agent", android=False)),
"gcflags": gcflags,
"ldflags": ldflags,
"REPO_PATH": REPO_PATH,
}
ctx.run(cmd.format(**args), env=env)
# Render the configuration file template
#
# We need to remove cross compiling bits if any because go generate must
# build and execute in the native platform
env.update({
"GOOS": "",
"GOARCH": "",
})
cmd = "go generate {}/cmd/agent"
ctx.run(cmd.format(REPO_PATH), env=env)
if not skip_assets:
refresh_assets(ctx, development=development)
@task
def refresh_assets(ctx, development=True):
"""
Clean up and refresh Collector's assets and config files
"""
# ensure BIN_PATH exists
if not os.path.exists(BIN_PATH):
os.mkdir(BIN_PATH)
dist_folder = os.path.join(BIN_PATH, "dist")
if os.path.exists(dist_folder):
shutil.rmtree(dist_folder)
copy_tree("./cmd/agent/dist/", dist_folder)
copy_tree("./pkg/status/dist/", dist_folder)
copy_tree("./cmd/agent/gui/views", os.path.join(dist_folder, "views"))
if development:
copy_tree("./dev/dist/", dist_folder)
# copy the dd-agent placeholder to the bin folder
bin_ddagent = os.path.join(BIN_PATH, "dd-agent")
shutil.move(os.path.join(dist_folder, "dd-agent"), bin_ddagent)
@task
def run(ctx, rebuild=False, race=False, build_include=None, build_exclude=None,
puppy=False, skip_build=False):
"""
Execute the agent binary.
By default it builds the agent before executing it, unless --skip-build was
passed. It accepts the same set of options as agent.build.
"""
if not skip_build:
build(ctx, rebuild, race, build_include, build_exclude, puppy)
ctx.run(os.path.join(BIN_PATH, bin_name("agent")))
@task
def system_tests(ctx):
"""
Run the system testsuite.
"""
pass
@task
def image_build(ctx, base_dir="omnibus"):
"""
Build the docker image
"""
base_dir = base_dir or os.environ.get("OMNIBUS_BASE_DIR")
pkg_dir = os.path.join(base_dir, 'pkg')
list_of_files = glob.glob(os.path.join(pkg_dir, 'datadog-agent*_amd64.deb'))
# get the last debian package built
if not list_of_files:
print("No debian package build found in {}".format(pkg_dir))
print("See agent.omnibus-build")
raise Exit(code=1)
latest_file = max(list_of_files, key=os.path.getctime)
shutil.copy2(latest_file, "Dockerfiles/agent/")
ctx.run("docker build -t {} Dockerfiles/agent".format(AGENT_TAG))
ctx.run("rm Dockerfiles/agent/datadog-agent*_amd64.deb")
@task
def integration_tests(ctx, install_deps=False, race=False, remote_docker=False):
"""
Run integration tests for the Agent
"""
if install_deps:
deps(ctx)
test_args = {
"go_build_tags": " ".join(get_default_build_tags()),
"race_opt": "-race" if race else "",
"exec_opts": "",
}
if remote_docker:
test_args["exec_opts"] = "-exec \"inv docker.dockerize-test\""
go_cmd = 'go test {race_opt} -tags "{go_build_tags}" {exec_opts}'.format(**test_args)
prefixes = [
"./test/integration/config_providers/...",
"./test/integration/corechecks/...",
"./test/integration/listeners/...",
"./test/integration/util/kubelet/...",
]
for prefix in prefixes:
ctx.run("{} {}".format(go_cmd, prefix))
@task(help={'skip-sign': "On macOS, use this option to build an unsigned package if you don't have Datadog's developer keys."})
def omnibus_build(ctx, puppy=False, log_level="info", base_dir=None, gem_path=None,
skip_deps=False, skip_sign=False, release_version="nightly", omnibus_s3_cache=False):
"""
Build the Agent packages with Omnibus Installer.
"""
if not skip_deps:
deps(ctx, no_checks=True) # no_checks since the omnibus build installs checks with a dedicated software def
# omnibus config overrides
overrides = []
# base dir (can be overridden through env vars, command line takes precedence)
base_dir = base_dir or os.environ.get("OMNIBUS_BASE_DIR")
if base_dir:
overrides.append("base_dir:{}".format(base_dir))
overrides_cmd = ""
if overrides:
overrides_cmd = "--override=" + " ".join(overrides)
with ctx.cd("omnibus"):
env = load_release_versions(ctx, release_version)
cmd = "bundle install"
if gem_path:
cmd += " --path {}".format(gem_path)
ctx.run(cmd, env=env)
omnibus = "bundle exec omnibus.bat" if sys.platform == 'win32' else "bundle exec omnibus"
cmd = "{omnibus} build {project_name} --log-level={log_level} {populate_s3_cache} {overrides}"
args = {
"omnibus": omnibus,
"project_name": "puppy" if puppy else "agent",
"log_level": log_level,
"overrides": overrides_cmd,
"populate_s3_cache": ""
}
if omnibus_s3_cache:
args['populate_s3_cache'] = " --populate-s3-cache "
if skip_sign:
env['SKIP_SIGN_MAC'] = 'true'
ctx.run(cmd.format(**args), env=env)
@task
def clean(ctx):
"""
Remove temporary objects and binary artifacts
"""
# go clean
print("Executing go clean")
ctx.run("go clean")
# remove the bin/agent folder
print("Remove agent binary folder")
ctx.run("rm -rf ./bin/agent")
|
the-stack_0_3458 | import pymysql
from .function import create_insert_sql_values, create_update_sql, create_insert_sql_column
from . import SQLConfig
class MySqldb(object):
def __init__(self):
self.SQLConfig = SQLConfig
# self.db = pymysql.connect(SQLConfig.SQL_ADDRESS,SQLConfig.SQL_USERNAME,\
# SQLConfig.SQL_PASSWORD,SQLConfig.SQL_DATABASE)
def connect(self):
self.db = pymysql.connect(self.SQLConfig.SQL_ADDRESS,self.SQLConfig.SQL_USERNAME,\
self.SQLConfig.SQL_PASSWORD,self.SQLConfig.SQL_DATABASE)
    # There are only four kinds of methods: create, delete, update and read (CRUD).
    # Create, i.e. insert.
    # insert takes two arguments: the table to insert into, and the data.
    # The data must be a dict.
def insert(self, table, values):
if not isinstance(values,dict):
raise TypeError('values must be dict')
if not isinstance(table,str):
raise TypeError('table must be str')
cursor = self.db.cursor()
        # build the SQL statement
sql = "INSERT INTO %s%s VALUES %s"%(table,\
create_insert_sql_column(values),create_insert_sql_values(values))
try:
cursor.execute(sql)
self.db.commit()
return True
except:
print('insert fail')
return False
    # Delete takes only two arguments:
    # the table name and the condition.
def delete(self, table, condition):
if not isinstance(condition,dict):
raise TypeError('condition must be dict')
if not isinstance(table,str):
raise TypeError('table must be str')
cursor = self.db.cursor()
sql = "DELETE FROM %s WHERE %s = '%s'" % \
(table,list(condition.keys())[0],condition[list(condition.keys())[0]])
try:
cursor.execute(sql)
self.db.commit()
return True
except:
print('delete fail')
return False
    # Update.
    # Arguments, in order: table name, values to set, lookup condition.
def update(self, table, values, condition):
if not isinstance(condition,dict):
raise TypeError('condition must be dict')
if not isinstance(values,dict):
raise TypeError('values must be dict')
if not isinstance(table,str):
raise TypeError('table must be str')
cursor = self.db.cursor()
sql = "UPDATE %s SET %s WHERE %s = '%s'"%\
(table,create_update_sql(values),list(condition.keys())[0],condition[list(condition.keys())[0]])
try:
print(sql)
cursor.execute(sql)
self.db.commit()
return True
except:
print("update fail")
return False
    # Read all rows.
    # Argument: table name.
def list_all(self, table):
if not isinstance(table,str):
raise TypeError('table must be str')
cursor = self.db.cursor()
        # fetch the table's column names
sql = "select COLUMN_NAME from information_schema.COLUMNS where table_name = '%s'"%(table)
cursor.execute(sql)
table_name = cursor.fetchall()
table_column = []
for i in table_name:
table_column.append(i[0])
sql = "SELECT * FROM %s" % (table)
try:
cursor.execute(sql)
table_data = []
data = cursor.fetchall()
for i in data:
table_data.append(dict(zip(table_column,list(i))))
return table_data
except:
print('get fail')
return False
def list_one(self, table, condition):
if not isinstance(condition,dict):
raise TypeError('condition must be dict')
if not isinstance(table,str):
raise TypeError('table must be str')
cursor = self.db.cursor()
        # fetch the table's column names
sql = "select COLUMN_NAME from information_schema.COLUMNS where table_name = '%s'"%(table)
cursor.execute(sql)
table_name = cursor.fetchall()
table_column = []
for i in table_name:
table_column.append(i[0])
sql = "SELECT * FROM %s WHERE %s = '%s'" % (table,\
list(condition.keys())[0], condition[list(condition.keys())[0]])
try:
cursor.execute(sql)
table_data = []
data = cursor.fetchall()
for i in data:
table_data.append(dict(zip(table_column,list(i))))
return table_data
except:
print("list one fail")
return False
def list_column(self, table, columns):
if not isinstance(table,str):
raise TypeError('table must be str')
if not isinstance(columns,list):
raise TypeError('columns must be list')
cursor = self.db.cursor()
sql = "SELECT %s FROM %s" % (",".join(columns),table)
try:
cursor.execute(sql)
data = cursor.fetchall()
columnData = []
for i in data:
columnData.append(i[0])
return columnData
except:
print("list one fail")
return False
def close(self):
self.db.close() |
the-stack_0_3459 | from oracles.abstract_oracle import *
from json import loads
import requests
class WebStatusBinaryOracle(AbstractOracle):
name = 'web_status_boolean_oracle'
description = 'Creates a binary oracle based on HTTP status code'
arguments = [OracleArgumentDescription('url','Base URL', True),
OracleArgumentDescription('verb', 'HTTP Method for the request', False, defaultValue='GET'),
OracleArgumentDescription('cipherparam', 'Parameter that contains the ciphertext', True),
OracleArgumentDescription('params', 'Other parameters to add to the request', False, defaultValue='{}'),
OracleArgumentDescription('goodstatuses', 'Comma separated list of good status codes', True),
]
def makeoracle(self):
url = self.get_argument_value('url')
verb = self.get_argument_value('verb')
cipherparam = self.get_argument_value('cipherparam')
paramstring = self.get_argument_value('params')
params = loads(paramstring)
goodstatuses = set(map(int,self.get_argument_value('goodstatuses').split(',')))
def oracle(ctext):
params[cipherparam] = ctext
if verb == 'GET':
resp = requests.get(url,params=params)
else:
resp = requests.post(url,params=params)
return resp.status_code in goodstatuses
return oracle |
the-stack_0_3460 | # Copyright 2021 Karan Sharma - [email protected]
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import chess
import tensorflow as tf
from tensorflow import keras
import numpy as np
piece_cp_values = {
chess.PAWN: 100,
chess.KNIGHT: 300,
chess.BISHOP: 300,
chess.ROOK: 500,
chess.QUEEN: 900,
chess.KING: 0
}
def boardToOneHotNNInput(board: chess.Board):
array = np.zeros(320, dtype=int)
piecesDict = board.piece_map()
white_map = {
chess.PAWN: [100, 0, 0, 0, 0],
chess.KNIGHT: [0, 100, 0, 0, 0],
chess.BISHOP: [0, 0, 100, 0, 0],
chess.ROOK: [0, 0, 0, 100, 0],
chess.QUEEN: [0, 0, 133, 100, 0],
chess.KING: [0, 0, 0, 0, 100]
}
black_map = {
chess.PAWN: [-100, 0, 0, 0, 0],
chess.KNIGHT: [0, -100, 0, 0, 0],
chess.BISHOP: [0, 0, -100, 0, 0],
chess.ROOK: [0, 0, 0, -100, 0],
chess.QUEEN: [0, 0, -133, -100, 0],
chess.KING: [0, 0, 0, 0, -100]
}
data_map = {
chess.WHITE: white_map,
chess.BLACK: black_map
}
for square in piecesDict:
piece = piecesDict.get(square)
array[square*5:(square+1)*5] = data_map[piece.color][piece.piece_type]
return np.array([array])
class Evaluator:
    def __init__(self, model: keras.Model):
        self.model = model
    @classmethod
    def from_layer_sizes(cls, num_inputs: int, hidden_layer_sizes=[32, 32]):
        # Python cannot overload __init__, so a second constructor taking layer
        # sizes is expressed as a classmethod instead of a shadowed __init__.
        input = keras.Input(shape=(num_inputs,))
        x = input
        for i in range(len(hidden_layer_sizes)):
            x = keras.layers.Dense(hidden_layer_sizes[i], activation="relu")(x)
        output = keras.layers.Dense(1)(x)
        return cls(keras.Model(inputs=input, outputs=output))
def func(self, board: chess.Board) -> float:
return 0.0
@classmethod
def randomModelFromModel(cls, model: keras.Model, deviation=1):
new_model = keras.models.clone_model(model)
for layer in new_model.layers:
layer.set_weights(np.random.uniform(layer.get_weights() - deviation, layer.get_weights() + deviation))
return Evaluator(new_model)
class Evaluator_Type3(Evaluator):
def func(self, board: chess.Board) -> float:
return self.model.predict(boardToOneHotNNInput(board))
class ColorError(Exception):
"""Raised if the wrong chess color was detected"""
pass
class Engine:
def __init__(self, evaluator: Evaluator, color: chess.Color):
self.evaluator = evaluator
self.color = color
def best_move(self, board: chess.Board) -> chess.Move:
if board.turn != self.color:
raise ColorError
def is_better(x, y):
if self.color == chess.WHITE:
return x > y
else:
return y > x
high = None
best_move = None
for move in board.legal_moves:
board.push(move)
rating = self.evaluator.func(board)
print("Considering " + move.uci() + ": " + str(rating)) #DEBUG
if high is None or is_better(rating, high):
high = rating
best_move = move
board.pop()
return best_move
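# Illustrative usage (added commentary; everything below is an assumption, not part
# of the original file).  A 320-input model matches boardToOneHotNNInput above:
#
#     inp = keras.Input(shape=(320,))
#     out = keras.layers.Dense(1)(keras.layers.Dense(32, activation="relu")(inp))
#     engine = Engine(Evaluator_Type3(keras.Model(inp, out)), chess.WHITE)
#     print(engine.best_move(chess.Board()))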
# -------------------------------------------------- DEPRECATED CODE -------------------------------------------------
def boardToNNInput_deprecated(board: chess.Board):
array = np.zeros(64, dtype=int)
piecesDict = board.piece_map()
for square in piecesDict:
if piecesDict.get(square).color == chess.WHITE:
array[square] = piece_cp_values[piecesDict.get(square).piece_type]
else:
array[square] = -piece_cp_values[piecesDict.get(square).piece_type]
return np.array([array])
class Evaluator_Type1_deprecated(Evaluator):
def func(self, board: chess.Board) -> float:
input = boardToNNInput_deprecated(board)
print("SHAPE:" + str(input.shape))
return self.model.predict(input)
|
the-stack_0_3462 | """
Create the numpy.core.multiarray namespace for backward compatibility. In v1.16
the multiarray and umath c-extension modules were merged into a single
_multiarray_umath extension module. So we replicate the old namespace
by importing from the extension module.
"""
import functools
from . import overrides
from . import _multiarray_umath
from ._multiarray_umath import * # noqa: F403
# These imports are needed for backward compatibility,
# do not change them. issue gh-15518
# _get_ndarray_c_version is semi-public, on purpose not added to __all__
from ._multiarray_umath import (
_fastCopyAndTranspose, _flagdict, from_dlpack, _insert, _reconstruct,
_vec_string, _ARRAY_API, _monotonicity, _get_ndarray_c_version,
_set_madvise_hugepage,
)
__all__ = [
'_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',
'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',
'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',
'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose',
'_flagdict', 'from_dlpack', '_insert', '_reconstruct', '_vec_string',
'_monotonicity', 'add_docstring', 'arange', 'array', 'asarray',
'asanyarray', 'ascontiguousarray', 'asfortranarray', 'bincount',
'broadcast', 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',
'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',
'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
'frombuffer', 'fromfile', 'fromiter', 'fromstring',
'get_handler_name', 'get_handler_version', 'inner', 'interp',
'interp_complex', 'is_busday', 'lexsort', 'matmul', 'may_share_memory',
'min_scalar_type', 'ndarray', 'nditer', 'nested_iters',
'normalize_axis_index', 'packbits', 'promote_types', 'putmask',
'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function',
'set_legacy_print_mode', 'set_numeric_ops', 'set_string_function',
'set_typeDict', 'shares_memory', 'tracemalloc_domain', 'typeinfo',
'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros']
# For backward compatibility, make sure pickle imports these functions from here
_reconstruct.__module__ = 'numpy.core.multiarray'
scalar.__module__ = 'numpy.core.multiarray'
from_dlpack.__module__ = 'numpy'
arange.__module__ = 'numpy'
array.__module__ = 'numpy'
asarray.__module__ = 'numpy'
asanyarray.__module__ = 'numpy'
ascontiguousarray.__module__ = 'numpy'
asfortranarray.__module__ = 'numpy'
datetime_data.__module__ = 'numpy'
empty.__module__ = 'numpy'
frombuffer.__module__ = 'numpy'
fromfile.__module__ = 'numpy'
fromiter.__module__ = 'numpy'
frompyfunc.__module__ = 'numpy'
fromstring.__module__ = 'numpy'
geterrobj.__module__ = 'numpy'
may_share_memory.__module__ = 'numpy'
nested_iters.__module__ = 'numpy'
promote_types.__module__ = 'numpy'
set_numeric_ops.__module__ = 'numpy'
seterrobj.__module__ = 'numpy'
zeros.__module__ = 'numpy'
# We can't verify dispatcher signatures because NumPy's C functions don't
# support introspection.
array_function_from_c_func_and_dispatcher = functools.partial(
overrides.array_function_from_dispatcher,
module='numpy', docs_from_dispatcher=True, verify=False)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
def empty_like(prototype, dtype=None, order=None, subok=None, shape=None):
"""
empty_like(prototype, dtype=None, order='K', subok=True, shape=None)
Return a new array with the same shape and type as a given array.
Parameters
----------
prototype : array_like
The shape and data-type of `prototype` define these same attributes
of the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `prototype` is Fortran
contiguous, 'C' otherwise. 'K' means match the layout of `prototype`
as closely as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `prototype`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `prototype`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], # uninitialized
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
"""
return (prototype,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None):
"""
concatenate((a1, a2, ...), axis=0, out=None, dtype=None, casting="same_kind")
Join a sequence of arrays along an existing axis.
Parameters
----------
a1, a2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. If axis is None,
arrays are flattened before use. Default is 0.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if no
out argument were specified.
dtype : str or dtype
If provided, the destination array will have this dtype. Cannot be
provided together with `out`.
.. versionadded:: 1.20.0
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'same_kind'.
.. versionadded:: 1.20.0
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
ma.concatenate : Concatenate function that preserves input masks.
array_split : Split an array into multiple sub-arrays of equal or
near-equal size.
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise).
vsplit : Split array into multiple sub-arrays vertically (row wise).
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
stack : Stack a sequence of arrays along a new axis.
block : Assemble arrays from blocks.
hstack : Stack arrays in sequence horizontally (column wise).
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
column_stack : Stack 1-D arrays as columns into a 2-D array.
Notes
-----
When one or more of the arrays to be concatenated is a MaskedArray,
this function will return a MaskedArray object instead of an ndarray,
but the input masks are *not* preserved. In cases where a MaskedArray
is expected as input, use the ma.concatenate function from the masked
array module instead.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.concatenate((a, b.T), axis=1)
array([[1, 2, 5],
[3, 4, 6]])
>>> np.concatenate((a, b), axis=None)
array([1, 2, 3, 4, 5, 6])
This function will not preserve masking of MaskedArray inputs.
>>> a = np.ma.arange(3)
>>> a[1] = np.ma.masked
>>> b = np.arange(2, 5)
>>> a
masked_array(data=[0, --, 2],
mask=[False, True, False],
fill_value=999999)
>>> b
array([2, 3, 4])
>>> np.concatenate([a, b])
masked_array(data=[0, 1, 2, 2, 3, 4],
mask=False,
fill_value=999999)
>>> np.ma.concatenate([a, b])
masked_array(data=[0, --, 2, 2, 3, 4],
mask=[False, True, False, False, False, False],
fill_value=999999)
"""
if out is not None:
# optimize for the typical case where only arrays is provided
arrays = list(arrays)
arrays.append(out)
return arrays
@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
def inner(a, b):
"""
inner(a, b, /)
Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : array_like
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
``out.shape = (*a.shape[:-1], *b.shape[:-1])``
Raises
------
ValueError
If both `a` and `b` are nonscalar and their last dimensions have
different sizes.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-2,j0,...,js-2]
= sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
Some multidimensional examples:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> c = np.inner(a, b)
>>> c.shape
(2, 3)
>>> c
array([[ 14, 38, 62],
[ 86, 110, 134]])
>>> a = np.arange(2).reshape((1,1,2))
>>> b = np.arange(6).reshape((3,2))
>>> c = np.inner(a, b)
>>> c.shape
(1, 1, 3)
>>> c
array([[[1, 3, 5]]])
An example where `b` is a scalar:
>>> np.inner(np.eye(2), 7)
array([[7., 0.],
[0., 7.]])
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
def where(condition, x=None, y=None):
"""
where(condition, [x, y], /)
Return elements chosen from `x` or `y` depending on `condition`.
.. note::
When only `condition` is provided, this function is a shorthand for
``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
preferred, as it behaves correctly for subclasses. The rest of this
documentation covers only the case where all three arguments are
provided.
Parameters
----------
condition : array_like, bool
Where True, yield `x`, otherwise yield `y`.
x, y : array_like
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
Returns
-------
out : ndarray
An array with elements from `x` where `condition` is True, and elements
from `y` elsewhere.
See Also
--------
choose
nonzero : The function that is called when x and y are omitted
Notes
-----
If all the arrays are 1-D, `where` is equivalent to::
[xv if c else yv
for c, xv, yv in zip(condition, x, y)]
Examples
--------
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.where(a < 5, a, 10*a)
array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90])
This can be used on multidimensional arrays too:
>>> np.where([[True, False], [True, True]],
... [[1, 2], [3, 4]],
... [[9, 8], [7, 6]])
array([[1, 8],
[3, 4]])
The shapes of x, y, and the condition are broadcast together:
>>> x, y = np.ogrid[:3, :4]
>>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
array([[10, 0, 0, 0],
[10, 11, 1, 1],
[10, 11, 12, 2]])
>>> a = np.array([[0, 1, 2],
... [0, 2, 4],
... [0, 3, 6]])
>>> np.where(a < 4, a, -1) # -1 is broadcast
array([[ 0, 1, 2],
[ 0, 2, -1],
[ 0, 3, -1]])
"""
return (condition, x, y)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
def lexsort(keys, axis=None):
"""
lexsort(keys, axis=-1)
Perform an indirect stable sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
can be converted to arrays of the same shape. If a 2D array is provided
for the keys argument, its rows are interpreted as the sorting keys and
sorting is according to the last row, second last row etc.
Parameters
----------
keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> ind
array([2, 0, 4, 6, 5, 3, 1])
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1])
"""
if isinstance(keys, tuple):
return keys
else:
return (keys,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
def can_cast(from_, to, casting=None):
"""
can_cast(from_, to, casting='safe')
Returns True if cast between data types can occur according to the
casting rule. If from is a scalar or array scalar, also returns
True if the scalar value can be cast without overflow or truncation
to an integer.
Parameters
----------
from_ : dtype, dtype specifier, scalar, or array
Data type, scalar, or array to cast from.
to : dtype or dtype specifier
Data type to cast to.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
out : bool
True if cast can occur according to the casting rule.
Notes
-----
.. versionchanged:: 1.17.0
Casting between a simple data type and a structured one is possible only
for "unsafe" casting. Casting to multiple fields is allowed, but
casting from multiple fields is not.
.. versionchanged:: 1.9.0
Casting from numeric to string types in 'safe' casting mode requires
that the string dtype length is long enough to store the maximum
integer/float value converted.
See also
--------
dtype, result_type
Examples
--------
Basic examples
>>> np.can_cast(np.int32, np.int64)
True
>>> np.can_cast(np.float64, complex)
True
>>> np.can_cast(complex, float)
False
>>> np.can_cast('i8', 'f8')
True
>>> np.can_cast('i8', 'f4')
False
>>> np.can_cast('i4', 'S4')
False
Casting scalars
>>> np.can_cast(100, 'i1')
True
>>> np.can_cast(150, 'i1')
False
>>> np.can_cast(150, 'u1')
True
>>> np.can_cast(3.5e100, np.float32)
False
>>> np.can_cast(1000.0, np.float32)
True
Array scalar checks the value, array does not
>>> np.can_cast(np.array(1000.0), np.float32)
True
>>> np.can_cast(np.array([1000.0]), np.float32)
False
Using the casting rules
>>> np.can_cast('i8', 'i8', 'no')
True
>>> np.can_cast('<i8', '>i8', 'no')
False
>>> np.can_cast('<i8', '>i8', 'equiv')
True
>>> np.can_cast('<i4', '>i8', 'equiv')
False
>>> np.can_cast('<i4', '>i8', 'safe')
True
>>> np.can_cast('<i8', '>i4', 'safe')
False
>>> np.can_cast('<i8', '>i4', 'same_kind')
True
>>> np.can_cast('<i8', '>u4', 'same_kind')
False
>>> np.can_cast('<i8', '>u4', 'unsafe')
True
"""
return (from_,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
def min_scalar_type(a):
"""
min_scalar_type(a, /)
For scalar ``a``, returns the data type with the smallest size
and smallest scalar kind which can hold its value. For non-scalar
array ``a``, returns the vector's dtype unmodified.
Floating point values are not demoted to integers,
and complex values are not demoted to floats.
Parameters
----------
a : scalar or array_like
The value whose minimal data type is to be found.
Returns
-------
out : dtype
The minimal data type.
Notes
-----
.. versionadded:: 1.6.0
See Also
--------
result_type, promote_types, dtype, can_cast
Examples
--------
>>> np.min_scalar_type(10)
dtype('uint8')
>>> np.min_scalar_type(-260)
dtype('int16')
>>> np.min_scalar_type(3.1)
dtype('float16')
>>> np.min_scalar_type(1e50)
dtype('float64')
>>> np.min_scalar_type(np.arange(4,dtype='f8'))
dtype('float64')
"""
return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
def result_type(*arrays_and_dtypes):
"""
result_type(*arrays_and_dtypes)
Returns the type that results from applying the NumPy
type promotion rules to the arguments.
Type promotion in NumPy works similarly to the rules in languages
like C++, with some slight differences. When both scalars and
arrays are used, the array's type takes precedence and the actual value
of the scalar is taken into account.
For example, calculating 3*a, where a is an array of 32-bit floats,
intuitively should result in a 32-bit float output. If the 3 is a
32-bit integer, the NumPy rules indicate it can't convert losslessly
into a 32-bit float, so a 64-bit float should be the result type.
By examining the value of the constant, '3', we see that it fits in
an 8-bit integer, which can be cast losslessly into the 32-bit float.
Parameters
----------
arrays_and_dtypes : list of arrays and dtypes
The operands of some operation whose result type is needed.
Returns
-------
out : dtype
The result type.
See also
--------
dtype, promote_types, min_scalar_type, can_cast
Notes
-----
.. versionadded:: 1.6.0
The specific algorithm used is as follows.
Categories are determined by first checking which of boolean,
integer (int/uint), or floating point (float/complex) the maximum
kind of all the arrays and the scalars are.
If there are only scalars or the maximum category of the scalars
is higher than the maximum category of the arrays,
the data types are combined with :func:`promote_types`
to produce the return value.
Otherwise, `min_scalar_type` is called on each array, and
the resulting data types are all combined with :func:`promote_types`
to produce the return value.
The set of int values is not a subset of the uint values for types
with the same number of bits, something not reflected in
:func:`min_scalar_type`, but handled as a special case in `result_type`.
Examples
--------
>>> np.result_type(3, np.arange(7, dtype='i1'))
dtype('int8')
>>> np.result_type('i4', 'c8')
dtype('complex128')
>>> np.result_type(3.0, -2)
dtype('float64')
"""
return arrays_and_dtypes
@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
def dot(a, b, out=None):
"""
dot(a, b, out=None)
Dot product of two arrays. Specifically,
- If both `a` and `b` are 1-D arrays, it is inner product of vectors
(without complex conjugation).
- If both `a` and `b` are 2-D arrays, it is matrix multiplication,
but using :func:`matmul` or ``a @ b`` is preferred.
- If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred.
- If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
the last axis of `a` and `b`.
- If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
sum product over the last axis of `a` and the second-to-last axis of `b`::
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
matmul : '@' operator as method with out parameter.
linalg.multi_dot : Chained dot product.
Examples
--------
>>> np.dot(3, 4)
12
Neither argument is complex-conjugated:
>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
[2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128
"""
return (a, b, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
def vdot(a, b):
"""
vdot(a, b, /)
Return the dot product of two vectors.
The vdot(`a`, `b`) function handles complex numbers differently than
dot(`a`, `b`). If the first argument is complex the complex conjugate
of the first argument is used for the calculation of the dot product.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : array_like
If `a` is complex the complex conjugate is taken before calculation
of the dot product.
b : array_like
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`. Can be an int, float, or
complex depending on the types of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
>>> a = np.array([1+2j,3+4j])
>>> b = np.array([5+6j,7+8j])
>>> np.vdot(a, b)
(70-8j)
>>> np.vdot(b, a)
(70+8j)
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
30
>>> np.vdot(b, a)
30
>>> 1*4 + 4*1 + 5*2 + 6*2
30
"""
return (a, b)
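# Equivalence sketch: because ``vdot`` flattens its inputs and conjugates the
# first one, for any array-likes ``a`` and ``b`` with the same total size::
#
#     np.vdot(a, b) == np.dot(np.conj(np.ravel(a)), np.ravel(b))
#
# which reproduces the ``(70-8j)`` result in the docstring example above.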
@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
def bincount(x, weights=None, minlength=None):
"""
bincount(x, /, weights=None, minlength=0)
Count number of occurrences of each value in array of non-negative ints.
The number of bins (of size 1) is one larger than the largest value in
`x`. If `minlength` is specified, there will be at least this number
of bins in the output array (though it will be longer if necessary,
depending on the contents of `x`).
Each bin gives the number of occurrences of its index value in `x`.
If `weights` is specified the input array is weighted by it, i.e. if a
value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
of ``out[n] += 1``.
Parameters
----------
x : array_like, 1 dimension, nonnegative ints
Input array.
weights : array_like, optional
Weights, array of the same shape as `x`.
minlength : int, optional
A minimum number of bins for the output array.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray of ints
The result of binning the input array.
The length of `out` is equal to ``np.amax(x)+1``.
Raises
------
ValueError
If the input is not 1-dimensional, or contains elements with negative
values, or if `minlength` is negative.
TypeError
If the type of the input is float or complex.
See Also
--------
histogram, digitize, unique
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
The input array needs to be of integer dtype, otherwise a
TypeError is raised:
>>> np.bincount(np.arange(5, dtype=float))
Traceback (most recent call last):
...
TypeError: Cannot cast array data from dtype('float64') to dtype('int64')
according to the rule 'safe'
A possible use of ``bincount`` is to perform sums over
variable-size chunks of an array, using the ``weights`` keyword.
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
"""
return (x, weights)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
def ravel_multi_index(multi_index, dims, mode=None, order=None):
"""
ravel_multi_index(multi_index, dims, mode='raise', order='C')
Converts a tuple of index arrays into an array of flat
indices, applying boundary modes to the multi-index.
Parameters
----------
multi_index : tuple of array_like
A tuple of integer arrays, one array for each dimension.
dims : tuple of ints
The shape of array into which the indices from ``multi_index`` apply.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices are handled. Can specify
either one mode or a tuple of modes, one mode per index.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
In 'clip' mode, a negative index which would normally
wrap will clip to 0 instead.
order : {'C', 'F'}, optional
Determines whether the multi-index should be viewed as
indexing in row-major (C-style) or column-major
(Fortran-style) order.
Returns
-------
raveled_indices : ndarray
An array of indices into the flattened version of an array
of dimensions ``dims``.
See Also
--------
unravel_index
Notes
-----
.. versionadded:: 1.6.0
Examples
--------
>>> arr = np.array([[3,6,6],[4,5,1]])
>>> np.ravel_multi_index(arr, (7,6))
array([22, 41, 37])
>>> np.ravel_multi_index(arr, (7,6), order='F')
array([31, 41, 13])
>>> np.ravel_multi_index(arr, (4,6), mode='clip')
array([22, 23, 19])
>>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
array([12, 13, 13])
>>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
1621
"""
return multi_index
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
def unravel_index(indices, shape=None, order=None):
"""
unravel_index(indices, shape, order='C')
Converts a flat index or array of flat indices into a tuple
of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened
version of an array of dimensions ``shape``. Before version 1.6.0,
this function accepted just one index value.
shape : tuple of ints
The shape of the array to use for unraveling ``indices``.
.. versionchanged:: 1.16.0
Renamed from ``dims`` to ``shape``.
order : {'C', 'F'}, optional
Determines whether the indices should be viewed as indexing in
row-major (C-style) or column-major (Fortran-style) order.
.. versionadded:: 1.6.0
Returns
-------
unraveled_coords : tuple of ndarray
Each array in the tuple has the same shape as the ``indices``
array.
See Also
--------
ravel_multi_index
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index([31, 41, 13], (7,6), order='F')
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
"""
return (indices,)
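# Round-trip sketch (illustrative): ``ravel_multi_index`` and ``unravel_index``
# are inverses for in-bounds indices and a matching ``order``, e.g.::
#
#     shape = (6, 7, 8, 9)
#     np.ravel_multi_index(np.unravel_index(1621, shape), shape)  # -> 1621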
@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
def copyto(dst, src, casting=None, where=None):
"""
copyto(dst, src, casting='same_kind', where=True)
Copies values from one array to another, broadcasting as necessary.
Raises a TypeError if the `casting` rule is violated, and if
`where` is provided, it selects which elements to copy.
.. versionadded:: 1.7.0
Parameters
----------
dst : ndarray
The array into which values are copied.
src : array_like
The array from which values are copied.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when copying.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `dst`, and selects elements to copy from `src` to `dst`
wherever it contains the value True.
"""
return (dst, src, where)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
def putmask(a, mask, values):
"""
putmask(a, mask, values)
Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
Parameters
----------
a : ndarray
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
place, put, take, copyto
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
"""
return (a, mask, values)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
def packbits(a, axis=None, bitorder='big'):
"""
packbits(a, /, axis=None, bitorder='big')
Packs the elements of a binary-valued array into bits in a uint8 array.
The result is padded to full bytes by inserting zero bits at the end.
Parameters
----------
a : array_like
An array of integers or booleans whose elements should be packed to
bits.
axis : int, optional
The dimension over which bit-packing is done.
``None`` implies packing the flattened array.
bitorder : {'big', 'little'}, optional
The order of the input bits. 'big' will mimic bin(val),
``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will
reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``.
Defaults to 'big'.
.. versionadded:: 1.17.0
Returns
-------
packed : ndarray
Array of type uint8 whose elements represent bits corresponding to the
logical (0 or nonzero) value of the input elements. The shape of
`packed` has the same number of dimensions as the input (unless `axis`
is None, in which case the output is 1-D).
See Also
--------
unpackbits: Unpacks elements of a uint8 array into a binary-valued output
array.
Examples
--------
>>> a = np.array([[[1,0,1],
... [0,1,0]],
... [[1,1,0],
... [0,0,1]]])
>>> b = np.packbits(a, axis=-1)
>>> b
array([[[160],
[ 64]],
[[192],
[ 32]]], dtype=uint8)
Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
and 32 = 0010 0000.
"""
return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
def unpackbits(a, axis=None, count=None, bitorder='big'):
"""
unpackbits(a, /, axis=None, count=None, bitorder='big')
Unpacks elements of a uint8 array into a binary-valued output array.
Each element of `a` represents a bit-field that should be unpacked
into a binary-valued output array. The shape of the output array is
either 1-D (if `axis` is ``None``) or the same shape as the input
array with unpacking done along the axis specified.
Parameters
----------
a : ndarray, uint8 type
Input array.
axis : int, optional
The dimension over which bit-unpacking is done.
``None`` implies unpacking the flattened array.
count : int or None, optional
The number of elements to unpack along `axis`, provided as a way
of undoing the effect of packing a size that is not a multiple
of eight. A non-negative number means to only unpack `count`
bits. A negative number means to trim off that many bits from
the end. ``None`` means to unpack the entire array (the
default). Counts larger than the available number of bits will
add zero padding to the output. Negative counts must not
exceed the available number of bits.
.. versionadded:: 1.17.0
bitorder : {'big', 'little'}, optional
The order of the returned bits. 'big' will mimic bin(val),
``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse
the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``.
Defaults to 'big'.
.. versionadded:: 1.17.0
Returns
-------
unpacked : ndarray, uint8 type
The elements are binary-valued (0 or 1).
See Also
--------
packbits : Packs the elements of a binary-valued array into bits in
a uint8 array.
Examples
--------
>>> a = np.array([[2], [7], [23]], dtype=np.uint8)
>>> a
array([[ 2],
[ 7],
[23]], dtype=uint8)
>>> b = np.unpackbits(a, axis=1)
>>> b
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
>>> c = np.unpackbits(a, axis=1, count=-3)
>>> c
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0]], dtype=uint8)
>>> p = np.packbits(b, axis=0)
>>> np.unpackbits(p, axis=0)
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0]))
True
"""
return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
def shares_memory(a, b, max_work=None):
"""
shares_memory(a, b, /, max_work=None)
Determine if two arrays share memory.
.. warning::
This function can be exponentially slow for some inputs, unless
`max_work` is set to a finite number or ``MAY_SHARE_BOUNDS``.
If in doubt, use `numpy.may_share_memory` instead.
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem (maximum number
of candidate solutions to consider). The following special
values are recognized:
max_work=MAY_SHARE_EXACT (default)
The problem is solved exactly. In this case, the function returns
True only if there is an element shared between the arrays. Finding
the exact solution may take extremely long in some cases.
max_work=MAY_SHARE_BOUNDS
Only the memory bounds of a and b are checked.
Raises
------
numpy.TooHardError
Exceeded max_work.
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> np.shares_memory(x, np.array([5, 6, 7]))
False
>>> np.shares_memory(x[::2], x)
True
>>> np.shares_memory(x[::2], x[1::2])
False
Checking whether two arrays share memory is NP-complete, and
runtime may increase exponentially in the number of
dimensions. Hence, `max_work` should generally be set to a finite
number, as it is possible to construct examples that take
extremely long to run:
>>> from numpy.lib.stride_tricks import as_strided
>>> x = np.zeros([192163377], dtype=np.int8)
>>> x1 = as_strided(x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049))
>>> x2 = as_strided(x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1))
>>> np.shares_memory(x1, x2, max_work=1000)
Traceback (most recent call last):
...
numpy.TooHardError: Exceeded max_work
Running ``np.shares_memory(x1, x2)`` without `max_work` set takes
around 1 minute for this case. It is possible to find problems
that take still significantly longer.
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
def may_share_memory(a, b, max_work=None):
"""
may_share_memory(a, b, /, max_work=None)
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem. See
`shares_memory` for details. Default for ``may_share_memory``
is to do a bounds check.
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
"""
is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
Calculates which of the given dates are valid days, and which are not.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of bool, optional
If provided, this array is filled with the result.
Returns
-------
out : array of bool
An array with the same shape as ``dates``, containing True for
each valid day, and False for each invalid day.
See Also
--------
busdaycalendar : An object that specifies a custom set of valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # The weekdays are Friday, Saturday, and Monday
... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
array([False, False, True])
"""
return (dates, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
busdaycal=None, out=None):
"""
busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
First adjusts the date to fall on a valid day according to
the ``roll`` rule, then applies offsets to the given dates
counted in valid days.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
offsets : array_like of int
The array of offsets, which is broadcast with ``dates``.
roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
How to treat dates that do not fall on a valid day. The default
is 'raise'.
* 'raise' means to raise an exception for an invalid day.
* 'nat' means to return a NaT (not-a-time) for an invalid day.
* 'forward' and 'following' mean to take the first valid day
later in time.
* 'backward' and 'preceding' mean to take the first valid day
earlier in time.
* 'modifiedfollowing' means to take the first valid day
later in time unless it is across a Month boundary, in which
case to take the first valid day earlier in time.
* 'modifiedpreceding' means to take the first valid day
earlier in time unless it is across a Month boundary, in which
case to take the first valid day later in time.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of datetime64[D], optional
If provided, this array is filled with the result.
Returns
-------
out : array of datetime64[D]
An array with a shape from broadcasting ``dates`` and ``offsets``
together, containing the dates with offsets applied.
See Also
--------
busdaycalendar : An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # First business day in October 2011 (not accounting for holidays)
... np.busday_offset('2011-10', 0, roll='forward')
numpy.datetime64('2011-10-03')
>>> # Last business day in February 2012 (not accounting for holidays)
... np.busday_offset('2012-03', -1, roll='forward')
numpy.datetime64('2012-02-29')
>>> # Third Wednesday in January 2011
... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
numpy.datetime64('2011-01-19')
>>> # 2012 Mother's Day in Canada and the U.S.
... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
numpy.datetime64('2012-05-13')
>>> # First business day on or after a date
... np.busday_offset('2011-03-20', 0, roll='forward')
numpy.datetime64('2011-03-21')
>>> np.busday_offset('2011-03-22', 0, roll='forward')
numpy.datetime64('2011-03-22')
>>> # First business day after a date
... np.busday_offset('2011-03-20', 1, roll='backward')
numpy.datetime64('2011-03-21')
>>> np.busday_offset('2011-03-22', 1, roll='backward')
numpy.datetime64('2011-03-23')
"""
return (dates, offsets, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
def busday_count(begindates, enddates, weekmask=None, holidays=None,
busdaycal=None, out=None):
"""
busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
Counts the number of valid days between `begindates` and
`enddates`, not including the day of `enddates`.
If ``enddates`` specifies a date value that is earlier than the
corresponding ``begindates`` date value, the count will be negative.
.. versionadded:: 1.7.0
Parameters
----------
begindates : array_like of datetime64[D]
The array of the first dates for counting.
enddates : array_like of datetime64[D]
The array of the end dates for counting, which are excluded
from the count themselves.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of int, optional
If provided, this array is filled with the result.
Returns
-------
out : array of int
An array with a shape from broadcasting ``begindates`` and ``enddates``
together, containing the number of valid days between
the begin and end dates.
See Also
--------
busdaycalendar : An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
Examples
--------
>>> # Number of weekdays in January 2011
... np.busday_count('2011-01', '2011-02')
21
>>> # Number of weekdays in 2011
>>> np.busday_count('2011', '2012')
260
>>> # Number of Saturdays in 2011
... np.busday_count('2011', '2012', weekmask='Sat')
53
"""
return (begindates, enddates, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(
_multiarray_umath.datetime_as_string)
def datetime_as_string(arr, unit=None, timezone=None, casting=None):
"""
datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
Convert an array of datetimes into an array of strings.
Parameters
----------
arr : array_like of datetime64
The array of UTC timestamps to format.
unit : str
One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
timezone : {'naive', 'UTC', 'local'} or tzinfo
Timezone information to use when displaying the datetime. If 'UTC', end
with a Z to indicate UTC time. If 'local', convert to the local timezone
first, and suffix with a +-#### timezone offset. If a tzinfo object,
then do as with 'local', but use the specified timezone.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
Casting to allow when changing between datetime units.
Returns
-------
str_arr : ndarray
An array of strings the same shape as `arr`.
Examples
--------
>>> import pytz
>>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
>>> d
array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
'2002-10-27T07:30'], dtype='datetime64[m]')
Setting the timezone to UTC shows the same information, but with a Z suffix
>>> np.datetime_as_string(d, timezone='UTC')
array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
'2002-10-27T07:30Z'], dtype='<U35')
Note that we picked datetimes that cross a DST boundary. Passing in a
``pytz`` timezone object will print the appropriate offset
>>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
'2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
Passing in a unit will change the precision
>>> np.datetime_as_string(d, unit='h')
array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
dtype='<U32')
>>> np.datetime_as_string(d, unit='s')
array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
'2002-10-27T07:30:00'], dtype='<U38')
'casting' can be used to specify whether precision can be changed
>>> np.datetime_as_string(d, unit='h', casting='safe')
Traceback (most recent call last):
...
TypeError: Cannot create a datetime string as units 'h' from a NumPy
datetime with units 'm' according to the rule 'safe'
"""
return (arr,)
|
the-stack_0_3464 | # Third party
from github import UnknownObjectException
# Local
from utils import (
set_up_github_client,
get_cc_organization,
get_team_slug_name
)
PERMISSIONS = {
'Project Contributor': None,
'Project Collaborator': 'triage',
'Project Core Committer': 'push',
'Project Maintainer': 'maintain'
}
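# These values map community roles onto GitHub repository permission levels
# ("triage" < "push" < "maintain"); a value of None means the role carries no
# repository privileges, so no team is populated for it (see the skip logic in
# create_teams_for_data below).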
def create_teams_for_data(databag):
client = set_up_github_client()
organization = get_cc_organization(client)
print("Creating and populating teams...")
projects = databag["projects"]
for project in projects:
project_name = project["name"]
print(f" Creating and populating teams for project {project_name}...")
roles = project["roles"]
for role, members in roles.items():
if PERMISSIONS[role] is None:
print(f" Skipping {role} as it has no privileges.")
continue
print(f" Finding team for role {role}...")
team = map_role_to_team(organization, project_name, role)
print(" Done.")
print(f" Populating repos for team {team.name}...")
repos = project["repos"]
map_team_to_repos(organization, team, repos, True)
set_team_repo_permissions(team, PERMISSIONS[role])
print(" Done.")
print(f" Populating members for team {team.name}...")
members = [member["github"] for member in members]
map_team_to_members(client, team, members, True)
print(" Done.")
print(" Done.")
print("Done.")
def map_team_to_members(client, team, final_user_logins, non_destructive=False):
"""
Map the team to the given set of members. Any members that are not already
a part of the team will be added and any additional members that are a part
of the team will be removed, unless chosen not to.
@param client: the GitHub client
@param team: the Team object representing the team
@param final_user_logins: the list of users to associate with the team
    @param non_destructive: whether to trim extra members or preserve them
"""
initial_users = team.get_members()
initial_user_logins = [user.login for user in initial_users]
if not non_destructive:
users_to_drop = [
member
for member in initial_users
if member.login not in final_user_logins
]
for user in users_to_drop:
team.remove_membership(user)
users_to_add = [
client.get_user(login)
for login in final_user_logins
if login not in initial_user_logins
]
for user in users_to_add:
team.add_membership(user)
current_login = client.get_user().login
if current_login not in final_user_logins:
current_user = client.get_user(current_login)
team.remove_membership(current_user)
def map_team_to_repos(organization, team, final_repo_names, non_destructive=False):
"""
Map the team to the given set of repositories. Any repositories that are
not already a part of the team will be added and any additional repositories
that are a part of the team will be removed, unless chosen not to.
@param organization: the Organisation object of which the team is a part
@param team: the Team object representing the team
@param final_repo_names: the list of repo names to associate with the team
@param non_destructive: whether to trim extra repos or preserve them
"""
initial_repos = team.get_repos()
initial_repo_names = [repo.name for repo in initial_repos]
if not non_destructive:
repos_to_drop = [
repo
for repo in initial_repos
if repo.name not in final_repo_names
]
for repo in repos_to_drop:
team.remove_from_repos(repo)
repos_to_add = [
organization.get_repo(repo_name)
for repo_name in final_repo_names
if repo_name not in initial_repo_names
]
for repo in repos_to_add:
team.add_to_repos(repo)
def set_team_repo_permissions(team, permission):
"""
Set the given permission for each repository belonging to the team. The
permissions are determined by the role corresponding to team.
@param team: the team to update the permissions for
@param permission: the permission to set on each repo assigned to the team
"""
repos = team.get_repos()
for repo in repos:
print(f" Populating {permission} permission on {repo} repo...")
team.set_repo_permission(repo, permission)
print(" Done.")
def map_role_to_team(organization, project_name, role, create_if_absent=True):
"""
Map the given role in the given project to a team. Creates the team if one
such does not already exist.
@param organization: the Organisation object of which the team is a part
@param project_name: the name of the project to which the team belongs
@param role: the role held by folks in the team
@param create_if_absent: whether to create the team if it does not exist
@return: the team associated with the role
"""
team_slug, team_name = get_team_slug_name(project_name, role)
properties = {
'name': team_name,
'description': (f'Community Team for {project_name} '
f'containing folks with the role "{role}"'),
'privacy': 'closed'
}
try:
team = organization.get_team_by_slug(team_slug)
print(" Team exists, reconciling...")
if team.description == properties['description']:
del properties['description']
if team.privacy == properties['privacy']:
del properties['privacy']
if properties:
team.edit(**properties)
print(" Done.")
except UnknownObjectException:
if not create_if_absent:
print(" Did not exist, not creating.")
team = None
else:
print(" Did not exist, creating...")
team = organization.create_team(**properties)
print(" Done.")
return team
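# Hypothetical driver sketch -- the real entry point lives outside this module:
#
#     import json
#     with open("community_team_data.json") as databag_file:  # hypothetical path
#         create_teams_for_data(json.load(databag_file))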
|
the-stack_0_3465 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import itertools
from pathlib import Path
from typing import Iterator, List, Optional, Any
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import cm
from ..common import tools
from ..common.typetools import PathLike
# pylint: disable=too-many-locals
_DPI = 100
# %% Basic tools
def _make_style_generator() -> Iterator[str]:
lines = itertools.cycle(["-", "--", ":", "-."]) # 4
markers = itertools.cycle("ov^<>8sp*hHDd") # 13
colors = itertools.cycle("bgrcmyk") # 7
return (l + m + c for l, m, c in zip(lines, markers, colors))
class NameStyle(dict):
"""Provides a style for each name, and keeps to it
"""
def __init__(self) -> None:
super().__init__()
self._gen = _make_style_generator()
def __getitem__(self, name: str) -> Any:
if name not in self:
self[name] = next(self._gen)
return super().__getitem__(name)
def _make_winners_df(df: pd.DataFrame, all_optimizers: List[str]) -> tools.Selector:
"""Finds mean loss over all runs for each of the optimizers, and creates a matrix
winner_ij = 1 if opt_i is better (lower loss) then opt_j (and .5 for ties)
"""
if not isinstance(df, tools.Selector):
df = tools.Selector(df)
all_optim_set = set(all_optimizers)
assert all(x in all_optim_set for x in df.unique("optimizer_name"))
assert all(x in df.columns for x in ["optimizer_name", "loss"])
winners = tools.Selector(index=all_optimizers, columns=all_optimizers, data=0.)
grouped = df.loc[:, ["optimizer_name", "loss"]].groupby(["optimizer_name"]).mean()
df_optimizers = list(grouped.index)
values = np.array(grouped)
diffs = values - values.T
    # winner_ij = 1 means opt_i beats opt_j once (beating means getting a lower loss/regret)
winners.loc[df_optimizers, df_optimizers] = (diffs < 0) + .5 * (diffs == 0)
return winners
def _make_sorted_winrates_df(victories: pd.DataFrame) -> pd.DataFrame:
"""Converts a dataframe counting number of victories into a sorted
winrate dataframe. The algorithm which performs better than all other
algorithms comes first.
"""
assert all(x == y for x, y in zip(victories.index, victories.columns))
winrates = victories / (victories + victories.T)
mean_win = winrates.mean(axis=1).sort_values(ascending=False)
return winrates.loc[mean_win.index, mean_win.index]
# %% plotting functions
def remove_errors(df: pd.DataFrame) -> tools.Selector:
df = tools.Selector(df)
if "error" not in df.columns: # backward compatibility
return df # type: ignore
# errors with no recommendation
errordf = df.select(error=lambda x: isinstance(x, str) and x, loss=np.isnan)
for _, row in errordf.iterrows():
print(f'Removing "{row["optimizer_name"]}" with dimension {row["dimension"]}: got error "{row["error"]}".')
    # error with recorded recommendation
handlederrordf = df.select(error=lambda x: isinstance(x, str) and x, loss=lambda x: not np.isnan(x))
for _, row in handlederrordf.iterrows():
print(f'Keeping non-optimal recommendation of "{row["optimizer_name"]}" '
f'with dimension {row["dimension"]} which raised "{row["error"]}".')
err_inds = set(errordf.index)
output = df.loc[[i for i in df.index if i not in err_inds], [c for c in df.columns if c != "error"]]
assert not output.loc[:, "loss"].isnull().values.any(), "Some nan values remain while there should not be any!"
output = tools.Selector(output.reset_index(drop=True))
return output # type: ignore
def create_plots(df: pd.DataFrame, output_folder: PathLike, max_combsize: int = 1) -> None:
"""Saves all representing plots to the provided folder
Parameters
----------
df: pd.DataFrame
the experiment data
output_folder: PathLike
path of the folder where the plots should be saved
max_combsize: int
maximum number of parameters to fix (combinations) when creating experiment plots
"""
df = remove_errors(df)
df.loc[:, "loss"] = pd.to_numeric(df.loc[:, "loss"])
df = tools.Selector(df.fillna("N-A")) # remove NaN in non score values
assert not any("Unnamed: " in x for x in df.columns), f"Remove the unnamed index column: {df.columns}"
assert "error " not in df.columns, f"Remove error rows before plotting"
required = {"optimizer_name", "budget", "loss", "elapsed_time", "elapsed_budget"}
missing = required - set(df.columns)
assert not missing, f"Missing fields: {missing}"
output_folder = Path(output_folder)
os.makedirs(output_folder, exist_ok=True)
# check which descriptors do vary
descriptors = sorted(set(df.columns) - (required | {"seed"})) # all other columns are descriptors
to_drop = [x for x in descriptors if len(df.unique(x)) == 1]
df = tools.Selector(df.loc[:, [x for x in df.columns if x not in to_drop]])
descriptors = sorted(set(df.columns) - (required | {"seed"})) # now those should be actual interesting descriptors
print(f"Descriptors: {descriptors}")
#
# fight plot
# choice of the combination variables to fix
fight_descriptors = descriptors + ["budget"] # budget can be used as a descriptor for fight plots
combinable = [x for x in fight_descriptors if len(df.unique(x)) > 1] # should be all now
num_rows = 6
for fixed in list(itertools.chain.from_iterable(itertools.combinations(combinable, order) for order in range(max_combsize + 1))):
# choice of the cases with values for the fixed variables
for case in df.unique(fixed):
print("\n# new case #", fixed, case)
casedf = df.select(**dict(zip(fixed, case)))
name = "fight_" + ",".join("{}{}".format(x, y) for x, y in zip(fixed, case)) + ".png"
name = "fight_all.png" if name == "fight_.png" else name
make_fight_plot(casedf, fight_descriptors, num_rows, output_folder / name)
plt.close("all")
#
# xp plots
# plot mean loss / budget for each optimizer for 1 context
name_style = NameStyle() # keep the same style for each algorithm
for case in df.unique(descriptors):
subdf = df.select_and_drop(**dict(zip(descriptors, case)))
description = ",".join("{}:{}".format(x, y) for x, y in zip(descriptors, case))
out_filepath = output_folder / "xpresults{}{}.png".format("_" if description else "", description.replace(":", ""))
make_xpresults_plot(subdf, description, out_filepath, name_style)
plt.close("all")
def make_xpresults_plot(df: pd.DataFrame, title: str, output_filepath: Optional[PathLike] = None,
name_style: Optional[dict] = None) -> None:
"""Creates a xp result plot out of the given dataframe: regret with respect to budget for
each optimizer after averaging on all experiments (it is good practice to use a df
which is filtered out for one set of input parameters)
Parameters
----------
df: pd.DataFrame
run data
title: str
title of the plot
output_filepath: Path
If present, saves the plot to the given path
name_style: dict
a dict or dict-like object providing a line style for each optimizer name.
(can be helpful for consistency across plots)
"""
if name_style is None:
name_style = NameStyle()
df = tools.Selector(df.loc[:, ["optimizer_name", "budget", "loss"]])
groupeddf = df.groupby(["optimizer_name", "budget"]).mean()
groupeddf_std = df.groupby(["optimizer_name", "budget"]).std().loc[groupeddf.index, :] # std is currently unused
plt.clf()
plt.xlabel("Budget")
plt.ylabel("Loss")
plt.grid(True, which='both')
optim_vals = {}
# extract name and coordinates
for optim in df.unique("optimizer_name"):
xvals = np.array(groupeddf.loc[optim, :].index)
yvals = np.maximum(1e-30, np.array(groupeddf.loc[optim, :].loc[:, "loss"])) # avoid small vals for logplot
stds = groupeddf_std.loc[optim, :].loc[:, "loss"]
optim_name = optim.replace("Search", "").replace("oint", "t").replace("Optimizer", "")
optim_vals[optim_name] = {"x": xvals, "y": yvals, "std": stds}
# lower upper bound to twice stupid/idiot at most
upperbound = max(np.max(vals["y"]) for vals in optim_vals.values())
for optim, vals in optim_vals.items():
if optim.lower() in ["stupid", "idiot"] or optim in ["Zero", "StupidRandom"]:
upperbound = min(upperbound, 2 * np.max(vals["y"]))
# plot from best to worst
lowerbound = np.inf
handles = []
sorted_optimizers = sorted(optim_vals, key=lambda x: optim_vals[x]["y"][-1], reverse=True)
for k, optim_name in enumerate(sorted_optimizers):
vals = optim_vals[optim_name]
lowerbound = min(lowerbound, np.min(vals["y"]))
handles.append(plt.loglog(vals["x"], vals["y"], name_style[optim_name], label=optim_name))
texts = []
if vals["x"].size and vals["y"][-1] < upperbound:
angle = 30 - 60 * k / len(optim_vals)
texts.append(plt.text(vals["x"][-1], vals["y"][-1], "{} ({:.3g})".format(optim_name, vals["y"][-1]),
{'ha': 'left', 'va': 'top' if angle < 0 else 'bottom'}, rotation=angle))
if upperbound < np.inf:
plt.gca().set_ylim(lowerbound, upperbound)
# global info
legend = plt.legend(fontsize=7, ncol=2, handlelength=3,
loc='upper center', bbox_to_anchor=(0.5, -0.15))
plt.title(title)
# plt.tight_layout()
# plt.axis('tight')
# plt.tick_params(axis='both', which='both')
if output_filepath is not None:
plt.savefig(str(output_filepath), bbox_extra_artists=[legend] + texts, bbox_inches='tight', dpi=_DPI)
def make_fight_plot(df: tools.Selector, categories: List[str], num_rows: int, output_filepath: Optional[PathLike] = None) -> None:
"""Creates a fight plot out of the given dataframe, by iterating over all cases with fixed category variables.
Parameters
----------
df: pd.DataFrame
run data
categories: list
List of variables to fix for obtaining similar run conditions
num_rows: int
number of rows to plot (best algorithms)
output_filepath: Path
If present, saves the plot to the given path
"""
all_optimizers = list(df.unique("optimizer_name")) # optimizers for which no run exists are not shown
num_rows = min(num_rows, len(all_optimizers))
victories = pd.DataFrame(index=all_optimizers, columns=all_optimizers, data=0.)
# iterate on all sub cases
subcases = df.unique(categories)
for subcase in subcases: # TODO linearize this (precompute all subcases)? requires memory
subdf = df.select(**dict(zip(categories, subcase)))
victories += _make_winners_df(subdf, all_optimizers)
winrates = _make_sorted_winrates_df(victories)
mean_win = winrates.mean(axis=1)
    winrates = winrates.fillna(.5)  # unplayed (fillna is not in-place, so keep the result)
sorted_names = winrates.index
# number of subcases actually computed is twice self-victories
sorted_names = ["{} ({}/{})".format(n, int(2 * victories.loc[n, n]), len(subcases)) for n in sorted_names]
data = np.array(winrates.iloc[:num_rows, :])
# make plot
plt.close("all")
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.imshow(100 * data, cmap=cm.seismic, interpolation='none', vmin=0, vmax=100)
ax.set_xticks(list(range(len(sorted_names))))
ax.set_xticklabels([s.replace("Search", "") for s in sorted_names], rotation=90, fontsize=7)
ax.set_yticks(list(range(num_rows)))
# pylint: disable=anomalous-backslash-in-string
ax.set_yticklabels([(f"{name} ({100 * val:2.1f}\%)").replace("Search", "") for name, val in zip(mean_win.index[: num_rows], mean_win)], rotation=45, fontsize=7)
plt.tight_layout()
fig.colorbar(cax, orientation='vertical')
if output_filepath is not None:
plt.savefig(str(output_filepath), dpi=_DPI)
def main() -> None:
parser = argparse.ArgumentParser(description='Create plots from an experiment data file')
parser.add_argument('filepath', type=str, help='filepath containing the experiment data')
parser.add_argument('--output', type=str, default=None,
help="Output path for the CSV file (default: a folder <filename>_plots next to the data file.")
parser.add_argument('--max_combsize', type=int, default=3,
help="maximum number of parameters to fix (combinations) when creating experiment plots")
args = parser.parse_args()
exp_df = tools.Selector.read_csv(args.filepath)
output_dir = args.output
if output_dir is None:
output_dir = str(Path(args.filepath).with_suffix("")) + "_plots"
create_plots(exp_df, output_folder=output_dir, max_combsize=args.max_combsize)
if __name__ == '__main__':
main()
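# Usage sketch (file and folder names are illustrative):
#
#     python plotting.py benchmark_results.csv --output plots_folder --max_combsize 2
#
# reads the CSV produced by a benchmark run and writes the fight_*.png and
# xpresults*.png figures into ``plots_folder``.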
|
the-stack_0_3467 | #!/usr/bin/python
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# This script creates yaml files to build conda environments
# For generating a conda file for running only python code:
# $ python generate_conda_file.py
# For generating a conda file for running python gpu:
# $ python generate_conda_file.py --gpu
# For generating a conda file for running pyspark:
# $ python generate_conda_file.py --pyspark
# For generating a conda file for running python gpu and pyspark:
# $ python generate_conda_file.py --gpu --pyspark
# For generating a conda file for running python gpu and pyspark with a particular version:
# $ python generate_conda_file.py --gpu --pyspark-version 2.4.0
import argparse
import textwrap
from sys import platform
HELP_MSG = """
To create the conda environment:
$ conda env create -f {conda_env}.yaml
To update the conda environment:
$ conda env update -f {conda_env}.yaml
To register the conda environment in Jupyter:
$ conda activate {conda_env}
$ python -m ipykernel install --user --name {conda_env} --display-name "Python ({conda_env})"
"""
CHANNELS = ["defaults", "conda-forge", "pytorch", "fastai"]
CONDA_BASE = {
"bottleneck": "bottleneck==1.2.1",
"dask": "dask>=0.17.1",
"fastparquet": "fastparquet>=0.1.6",
"ipykernel": "ipykernel>=4.6.1",
"jupyter": "jupyter>=1.0.0",
"matplotlib": "matplotlib>=2.2.2",
"mock": "mock==2.0.0",
"numpy": "numpy>=1.13.3",
"pandas": "pandas>=0.23.4",
"pip": "pip>=19.0.3",
"python": "python==3.6.8",
"pytest": "pytest>=3.6.4",
"pytorch": "pytorch-cpu>=1.0.0",
"seaborn": "seaborn>=0.8.1",
"scikit-learn": "scikit-learn==0.19.1",
"scipy": "scipy>=1.0.0",
"scikit-surprise": "scikit-surprise>=1.0.6",
"swig": "swig==3.0.12",
"tensorflow": "tensorflow==1.12.0",
"lightgbm": "lightgbm==2.2.1",
"cornac": "cornac>=1.1.2",
"fastai": "fastai==1.0.46",
"papermill": "papermill==0.19.1",
}
CONDA_PYSPARK = {"pyarrow": "pyarrow>=0.8.0", "pyspark": "pyspark==2.3.1"}
CONDA_GPU = {
"numba": "numba>=0.38.1",
"pytorch": "pytorch>=1.0.0",
"tensorflow": "tensorflow-gpu==1.12.0",
}
PIP_BASE = {
"azureml-sdk[notebooks,tensorboard]": "azureml-sdk[notebooks,tensorboard]==1.0.18",
"azure-storage": "azure-storage>=0.36.0",
"black": "black>=18.6b4",
"category_encoders": "category_encoders>=1.3.0",
"dataclasses": "dataclasses>=0.6",
"hyperopt": "hyperopt==0.1.1",
"idna": "idna==2.7",
"locustio": "locustio==0.11.0",
"memory-profiler": "memory-profiler>=0.54.0",
"nbconvert": "nbconvert==5.5.0",
"pydocumentdb": "pydocumentdb>=2.3.3",
"pymanopt": "pymanopt==0.2.3",
"tqdm": "tqdm==4.31.1",
}
PIP_GPU = {"nvidia-ml-py3": "nvidia-ml-py3>=7.352.0"}
PIP_PYSPARK = {"databricks-cli": "databricks-cli==0.8.6"}
PIP_DARWIN = {
"nni": "nni==0.5.2.1.1",
}
PIP_LINUX = {
"nni": "nni==0.5.2.1.1",
}
PIP_WIN32 = {}
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=textwrap.dedent(
"""
This script generates a conda file for different environments.
Plain python is the default, but flags can be used to support PySpark and GPU functionality"""
),
epilog=HELP_MSG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--name", help="specify name of conda environment")
parser.add_argument(
"--gpu", action="store_true", help="include packages for GPU support"
)
parser.add_argument(
"--pyspark", action="store_true", help="include packages for PySpark support"
)
parser.add_argument(
"--pyspark-version", help="provide specific version of PySpark to use"
)
args = parser.parse_args()
# check pyspark version
if args.pyspark_version is not None:
args.pyspark = True
pyspark_version_info = args.pyspark_version.split(".")
if len(pyspark_version_info) != 3 or any(
[not x.isdigit() for x in pyspark_version_info]
):
raise TypeError(
"PySpark version input must be valid numeric format (e.g. --pyspark-version=2.3.1)"
)
else:
args.pyspark_version = "2.3.1"
# set name for environment and output yaml file
conda_env = "reco_base"
if args.gpu and args.pyspark:
conda_env = "reco_full"
elif args.gpu:
conda_env = "reco_gpu"
elif args.pyspark:
conda_env = "reco_pyspark"
# overwrite environment name with user input
if args.name is not None:
conda_env = args.name
# update conda and pip packages based on flags provided
conda_packages = CONDA_BASE
pip_packages = PIP_BASE
if args.pyspark:
conda_packages.update(CONDA_PYSPARK)
conda_packages["pyspark"] = "pyspark=={}".format(args.pyspark_version)
pip_packages.update(PIP_PYSPARK)
if args.gpu:
conda_packages.update(CONDA_GPU)
pip_packages.update(PIP_GPU)
# check for os platform support
if platform == 'darwin':
pip_packages.update(PIP_DARWIN)
elif platform.startswith('linux'):
pip_packages.update(PIP_LINUX)
elif platform == 'win32':
pip_packages.update(PIP_WIN32)
else:
raise Exception('Unsupported platform, must be Windows, Linux, or macOS')
# write out yaml file
conda_file = "{}.yaml".format(conda_env)
with open(conda_file, "w") as f:
for line in HELP_MSG.format(conda_env=conda_env).split("\n"):
f.write("# {}\n".format(line))
f.write("name: {}\n".format(conda_env))
f.write("channels:\n")
for channel in CHANNELS:
f.write("- {}\n".format(channel))
f.write("dependencies:\n")
for conda_package in conda_packages.values():
f.write("- {}\n".format(conda_package))
f.write("- pip:\n")
for pip_package in pip_packages.values():
f.write(" - {}\n".format(pip_package))
print("Generated conda file: {}".format(conda_file))
print(HELP_MSG.format(conda_env=conda_env))
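# Hedged sketch of the expected output file (illustrative; exact pins come from
# CONDA_BASE / PIP_BASE above). With default flags the script writes reco_base.yaml
# shaped roughly like:
#   # <HELP_MSG lines rendered as comments>
#   name: reco_base
#   channels:
#   - defaults
#   - conda-forge
#   - pytorch
#   - fastai
#   dependencies:
#   - python==3.6.8
#   - pandas>=0.23.4
#   - pip:
#     - tqdm==4.31.1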
|
the-stack_0_3468 | # from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
import os
import time
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import numpy as np
# import cv2
import tensorflow as tf
from tensorflow.data import Iterator
from Dataset import SegDataLoader, VocRgbDataLoader, VocDataLoader, LfwRgbDataLoader, ImageNetRgbDataLoader
from visulize import save_test_images
from utils import rgb2yuv_tf, yuv2rgb_tf
from model import Discriminator, encode_net, decode_net
from ResNet import resnet_nopooling
class Model():
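    """Image steganography model: a ResNet-based hiding network embeds a secret image
    into a cover image, and a ResNet-based reveal network recovers it. Inputs are
    converted to YUV and trained with L1 plus per-channel SSIM losses (see get_loss_op).
    """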
def __init__(self):
self.run_time = time.strftime("%m%d-%H%M")
# self.learning_rate = 0.0001
self.starter_learning_rate = 0.001
self.epoches = 70
self.log_path = 'logs/'+self.run_time + '/'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.InteractiveSession(config=config)
self.secret_tensor = tf.placeholder(shape=[None, 256, 256, 3], dtype=tf.float32, name="secret_tensor")
self.cover_tensor = tf.placeholder(shape=[None, 256, 256, 3], dtype=tf.float32, name="cover_tensor")
self.cover_yuv = rgb2yuv_tf(self.cover_tensor)
self.secret_yuv = rgb2yuv_tf(self.secret_tensor)
self.global_step_tensor = tf.Variable(0, trainable=False, name='global_step')
# self.test_op = self.prepare_test_graph(self.secret_tensor, self.cover_tensor)
def get_hiding_network_op(self, cover_tensor, secret_tensor, is_training):
concat_input = tf.concat([cover_tensor, secret_tensor], axis=-1, name='images_features_concat')
# output = resnet_nopooling(concat_input, name='encode', n_class=3, dilate=[2,4,8,16], is_training=is_training)
output = resnet_nopooling(concat_input, name='encode', n_class=3, is_training=is_training)
return output
def get_reveal_network_op(self, container_tensor, is_training):
output = resnet_nopooling(container_tensor, name='decode', n_class=3, is_training=is_training)
return output
def get_noise_layer_op(self,tensor,std=.1):
# with tf.variable_scope("noise_layer"):
# return tensor + tf.random_normal(shape=tf.shape(tensor), mean=0.0, stddev=std, dtype=tf.float32)
return tensor
def get_loss_op(self,secret_true,secret_pred,cover_true,cover_pred):
# D_real_secret = Discriminator(secret_true)
# D_fake_secret = Discriminator(secret_pred, reusing=True)
# D_real = Discriminator(cover_true, reusing=True)
# D_fake = Discriminator(cover_pred, reusing=True)
# D_real_secret = Discriminator(secret_true, name='secret', reusing=False)
# D_fake_secret = Discriminator(secret_pred, name='secret', reusing=True)
# D_real = Discriminator(cover_true, name='cover', reusing=False)
# D_fake = Discriminator(cover_pred, name='cover', reusing=True)
#
# D_real = tf.concat([D_real, D_real_secret], axis=0, name='gan_true_concat')
# D_fake = tf.concat([D_fake, D_fake_secret], axis=0, name='gan_pred_concat')
#
# D_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real, labels=tf.ones_like(D_real)) + tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake, labels=tf.zeros_like(D_fake)))
# G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake, labels=tf.ones_like(D_fake)))
with tf.variable_scope("huber_losses"):
# secret_mse = tf.losses.mean_squared_error(secret_true,secret_pred)
# cover_mse = tf.losses.mean_squared_error(cover_true,cover_pred)
# secret_mse = tf.reduce_mean(tf.losses.huber_loss(secret_true, secret_pred, delta=0.5))
# cover_mse = tf.reduce_mean(tf.losses.huber_loss(cover_true, cover_pred, delta=0.5))
secret_mse = tf.reduce_mean(tf.losses.absolute_difference(secret_true, secret_pred))
cover_mse = tf.reduce_mean(tf.losses.absolute_difference(cover_true, cover_pred))
with tf.variable_scope("ssim_losses"):
#secret_ssim = 1. - tf.reduce_mean(tf.image.ssim(secret_true, secret_pred, max_val=1.0))
#cover_ssim = 1. - tf.reduce_mean(tf.image.ssim(cover_true, cover_pred, max_val=1.0))
            secret_ssim = 1. - (tf.reduce_mean(tf.image.ssim(secret_true[:, :, :, :1], secret_pred[:, :, :, :1], max_val=1.0))
                                + tf.reduce_mean(tf.image.ssim(secret_true[:, :, :, 1:2], secret_pred[:, :, :, 1:2], max_val=1.0))
                                + tf.reduce_mean(tf.image.ssim(secret_true[:, :, :, 2:], secret_pred[:, :, :, 2:], max_val=1.0))) / 3.
            cover_ssim = 1. - (tf.reduce_mean(tf.image.ssim(cover_true[:, :, :, :1], cover_pred[:, :, :, :1], max_val=1.0))
                               + tf.reduce_mean(tf.image.ssim(cover_true[:, :, :, 1:2], cover_pred[:, :, :, 1:2], max_val=1.0))
                               + tf.reduce_mean(tf.image.ssim(cover_true[:, :, :, 2:], cover_pred[:, :, :, 2:], max_val=1.0))) / 3.
# D_final_loss = cover_mse + secret_mse + secret_ssim + cover_ssim + D_loss
# D_final_loss = D_loss
G_final_loss = 5*cover_mse + 5*secret_mse + secret_ssim + cover_ssim
# G_final_loss = cover_mse + secret_mse + secret_ssim + cover_ssim
# return D_final_loss, G_final_loss, D_loss, G_loss, secret_mse, cover_mse, secret_ssim, cover_ssim
return G_final_loss, secret_mse, cover_mse, secret_ssim, cover_ssim
def get_tensor_to_img_op(self,tensor):
with tf.variable_scope("",reuse=True):
# t = tensor*tf.convert_to_tensor([0.229, 0.224, 0.225]) + tf.convert_to_tensor([0.485, 0.456, 0.406])
tensor = yuv2rgb_tf(tensor)
return tf.clip_by_value(tensor,0,1)
# return tf.clip_by_value(tensor,0,255)
def prepare_training_graph(self,secret_tensor,cover_tensor,global_step_tensor):
hidden = self.get_hiding_network_op(cover_tensor=cover_tensor, secret_tensor=secret_tensor, is_training=True)
reveal_output_op = self.get_reveal_network_op(hidden, is_training=True)
G_final_loss, secret_mse, cover_mse, secret_ssim, cover_ssim = self.get_loss_op(secret_tensor,reveal_output_op,cover_tensor,hidden)
global_variables = tf.global_variables()
gan_varlist = [i for i in global_variables if i.name.startswith('Discriminator')]
en_de_code_varlist = [i for i in global_variables if i not in gan_varlist]
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# train_op = optimiser.minimize(loss, global_step=global_step)
# D_minimize_op = tf.train.AdamOptimizer(self.learning_rate).minimize(D_final_loss, var_list=gan_varlist, global_step=global_step_tensor)
G_minimize_op = tf.train.AdamOptimizer(self.learning_rate).minimize(G_final_loss, var_list=en_de_code_varlist, global_step=global_step_tensor)
# G_minimize_op = tf.train.AdamOptimizer(self.learning_rate).minimize(G_final_loss, global_step=global_step_tensor)
# tf.summary.scalar('D_loss', D_final_loss,family='train')
tf.summary.scalar('G_loss', G_final_loss,family='train')
tf.summary.scalar('secret_mse', secret_mse,family='train')
tf.summary.scalar('cover_mse', cover_mse,family='train')
tf.summary.scalar('learning_rate', self.learning_rate,family='train')
tf.summary.scalar('secret_ssim', secret_ssim)
tf.summary.scalar('cover_ssim', cover_ssim)
tf.summary.image('secret',self.get_tensor_to_img_op(secret_tensor),max_outputs=1,family='train')
tf.summary.image('cover',self.get_tensor_to_img_op(cover_tensor),max_outputs=1,family='train')
tf.summary.image('hidden',self.get_tensor_to_img_op(hidden),max_outputs=1,family='train')
# tf.summary.image('hidden_noisy',self.get_tensor_to_img_op(noise_add_op),max_outputs=1,family='train')
tf.summary.image('revealed',self.get_tensor_to_img_op(reveal_output_op),max_outputs=1,family='train')
merged_summary_op = tf.summary.merge_all()
return G_minimize_op, G_final_loss, merged_summary_op, secret_mse,cover_mse, secret_ssim, cover_ssim
def prepare_test_graph(self,secret_tensor,cover_tensor):
# y_output, hiding_output_op = self.get_hiding_network_op(cover_tensor=cover_tensor,secret_tensor=secret_tensor, is_training=True)
hidden = self.get_hiding_network_op(cover_tensor=cover_tensor,secret_tensor=secret_tensor, is_training=False)
# reveal_output_op = self.get_reveal_network_op(y_output, is_training=True)
reveal_output_op = self.get_reveal_network_op(hidden, is_training=False)
G_final_loss, secret_mse, cover_mse, secret_ssim, cover_ssim = self.get_loss_op(secret_tensor,reveal_output_op,cover_tensor,hidden)
# tf.summary.scalar('loss', loss_op,family='test')
# tf.summary.scalar('reveal_net_loss', secret_loss_op,family='test')
# tf.summary.scalar('cover_net_loss', cover_loss_op,family='test')
#
# tf.summary.image('secret',self.get_tensor_to_img_op(secret_tensor),max_outputs=1,family='test')
# tf.summary.image('cover',self.get_tensor_to_img_op(cover_tensor),max_outputs=1,family='test')
# tf.summary.image('hidden',self.get_tensor_to_img_op(hiding_output_op),max_outputs=1,family='test')
# tf.summary.image('revealed',self.get_tensor_to_img_op(reveal_output_op),max_outputs=1,family='test')
# merged_summary_op = tf.summary.merge_all()
return hidden, reveal_output_op, G_final_loss, secret_mse, cover_mse, secret_ssim, cover_ssim
def save_chkp(self,path):
global_step = self.sess.run(self.global_step_tensor)
self.saver.save(self.sess,path,global_step)
def load_chkp(self,path):
self.saver.restore(self.sess,path)
print("LOADED")
def train(self):
with tf.device('/cpu:0'):
# segdl = VocRgbDataLoader('/home/jion/moliq/Documents/VOC2012/JPEGImages/', 4, (256, 256), (256, 256), 'voc_train.txt', split='train')
# segdl_val = VocRgbDataLoader('/home/jion/moliq/Documents/VOC2012/JPEGImages/', 4, (256, 256), (256, 256), 'voc_valid.txt', split='val')
#segdl = LfwRgbDataLoader('/home/jion/moliq/Documents/lfw/', 2, (256, 256), (256, 256),
# 'dataset/lfw_train.txt', split='train')
#segdl_val = LfwRgbDataLoader('/home/jion/moliq/Documents/lfw/', 2, (256, 256), (256, 256),
# 'dataset/lfw_valid.txt', split='val')
segdl = ImageNetRgbDataLoader('/home/jion/moliq/Documents/imagenet/ILSVRC2012_img_val/', 4, (256, 256), (256, 256),
'dataset/imagenet_train.txt', split='train')
segdl_val = ImageNetRgbDataLoader('/home/jion/moliq/Documents/imagenet/ILSVRC2012_img_test/', 4, (256, 256), (256, 256),
'dataset/imagenet_valid.txt', split='val')
iterator = Iterator.from_structure(segdl.data_tr.output_types, segdl.data_tr.output_shapes)
iterator_val = Iterator.from_structure(segdl_val.data_tr.output_types, segdl_val.data_tr.output_shapes)
next_batch = iterator.get_next()
next_batch_val = iterator_val.get_next()
training_init_op = iterator.make_initializer(segdl.data_tr)
training_init_op_val = iterator_val.make_initializer(segdl_val.data_tr)
            steps_per_epoch = segdl.data_len // segdl.batch_size  # integer division so the count works with range()
            steps_per_epoch_val = segdl_val.data_len // segdl_val.batch_size
self.learning_rate = tf.train.exponential_decay(self.starter_learning_rate, self.global_step_tensor,
steps_per_epoch*15, 0.1, staircase=True)
self.train_op_G, G_final_loss, self.summary_op, self.secret_mse, self.cover_mse, self.secret_ssim, self.cover_ssim = \
self.prepare_training_graph(self.secret_yuv, self.cover_yuv, self.global_step_tensor)
if not os.path.exists(self.log_path):
os.makedirs(self.log_path)
self.writer = tf.summary.FileWriter(self.log_path, self.sess.graph)
self.sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver(max_to_keep=30)
# beta1_power = self.sess.graph.get_tensor_by_name('beta1_power:0')
# out = self.sess.run(beta1_power)
# print('beta1_power ', out)
# exclude_vars = ['beta1_power:0', 'beta2_power:0', 'global_step:0']
# exclude_vars = ['']
# restore_variables = [i for i in tf.global_variables() if not i.name in exclude_vars]
saver = tf.train.Saver()
loader = tf.train.latest_checkpoint('logs/0509-0030')
saver.restore(self.sess, loader)
print('loaded pretrained model')
#beta1_power = self.sess.graph.get_tensor_by_name('beta1_power:0')
#out = self.sess.run(beta1_power)
#print('beta1_power ', out)
for epoch in range(1, 1+self.epoches):
print('epoch %d'%epoch)
self.sess.run(training_init_op)
for i in range(steps_per_epoch):
cover_tensor, secret_tensor = self.sess.run(next_batch)
_, G_loss, secret_mse, cover_mse, secret_ssim, cover_ssim, summary, global_step = \
self.sess.run([self.train_op_G, G_final_loss, self.secret_mse, self.cover_mse, self.secret_ssim, self.cover_ssim, self.summary_op, self.global_step_tensor],
feed_dict={self.secret_tensor: secret_tensor, self.cover_tensor: cover_tensor})
self.writer.add_summary(summary, global_step)
# if i % 5 == 0:
# _, D_loss, summary = \
# self.sess.run([self.train_op_D, D_final_loss, self.summary_op],
# feed_dict={self.secret_tensor: secret_tensor,self.cover_tensor: cover_tensor})
# self.writer.add_summary(summary, global_step)
if i % 30 == 0:
print('Epoch [{}/{}] Step [{}/{}] G_Loss {:.4f} encoder_ssim {:.4f} encoder_mse {:.4f}'
' decoder_ssim {:.4f} decoder_mse {:.4f} '.format(
epoch, self.epoches, i, steps_per_epoch, G_loss,
cover_ssim, cover_mse, secret_ssim, secret_mse ))
# run validation
self.sess.run(training_init_op_val)
# D_loss_val_this_epoch = []
G_loss_val_this_epoch = []
secret_ssim_this_epoch = []
cover_ssim_this_epoch = []
for i in range(steps_per_epoch_val):
cover_tensor_val, secret_tensor_val = self.sess.run(next_batch_val)
G_loss, secret_mse, cover_mse, secret_ssim, cover_ssim = \
self.sess.run([G_final_loss, self.secret_mse,self.cover_mse, self.secret_ssim, self.cover_ssim],
feed_dict={self.secret_tensor: secret_tensor_val,
self.cover_tensor: cover_tensor_val})
# D_loss_val_this_epoch.append(D_loss)
G_loss_val_this_epoch.append(G_loss)
secret_ssim_this_epoch.append(secret_ssim)
cover_ssim_this_epoch.append(cover_ssim)
# mean_D_loss_val_this_epoch = sum(D_loss_val_this_epoch) / len(D_loss_val_this_epoch)
mean_G_loss_val_this_epoch = sum(G_loss_val_this_epoch) / len(G_loss_val_this_epoch)
mean_secret_ssim_this_epoch = sum(secret_ssim_this_epoch) / len(secret_ssim_this_epoch)
mean_cover_ssim_this_epoch = sum(cover_ssim_this_epoch) / len(cover_ssim_this_epoch)
# print('global step: %d, validation loss: %.4f'%(global_step, mean_loss_val_this_epoch))
print('VALIDATION Epoch {} global step {} G_Loss {:.4f} encoder_ssim {:.4f} decoder_ssim {:.4f}'.format(
epoch, global_step, mean_G_loss_val_this_epoch,
mean_cover_ssim_this_epoch, mean_secret_ssim_this_epoch))
# self.save_chkp(self.log_path+'%d_%.3f.ckpt'%(epoch, mean_loss_val_this_epoch))
self.save_chkp(self.log_path)
def test_performance(self, log_path):
hiding_output_op, reveal_output_op, G_final_loss, secret_mse, cover_mse, secret_ssim, cover_ssim = \
self.prepare_test_graph(self.secret_yuv, self.cover_yuv)
loader = tf.train.latest_checkpoint(log_path)
# from tensorflow.python.tools import inspect_checkpoint as chkp
# chkp.print_tensors_in_checkpoint_file(loader, tensor_name='', all_tensors=True)
# from inspect_checkpoint import print_tensors_in_checkpoint_file
# print_tensors_in_checkpoint_file(loader, tensor_name='', all_tensors=True)
# variables = [i for i in tf.global_variables() if i.name not in ['global_step:0']]
# saver_variables_dict = {value.name[:-2]:value for value in variables}
# custom_saver = tf.train.Saver(saver_variables_dict)
# custom_saver.restore(self.sess, loader)
# print('load model %s'%loader)
# self.saver = tf.train.Saver(var_list=tf.global_variables())
self.saver = tf.train.Saver()
self.saver.restore(self.sess, loader)
print('load model %s'%loader)
with tf.device('/cpu:0'):
# segdl_val = VocRgbDataLoader('/home/jion/moliq/Documents/VOC2012/JPEGImages/', 16, (256, 256), (256, 256), 'voc_valid.txt', split='val')
segdl_val = LfwRgbDataLoader('/home/jion/moliq/Documents/lfw/', 16, (256, 256), (256, 256),
'dataset/lfw_valid.txt', split='val')
iterator_val = Iterator.from_structure(segdl_val.data_tr.output_types, segdl_val.data_tr.output_shapes)
next_batch_val = iterator_val.get_next()
training_init_op_val = iterator_val.make_initializer(segdl_val.data_tr)
            steps_per_epoch_val = segdl_val.data_len // segdl_val.batch_size  # integer division for use with range()
loss_val_this_epoch = []
secret_mse_val_this_epoch = []
cover_mse_val_this_epoch = []
secret_ssim_this_epoch = []
cover_ssim_this_epoch = []
self.sess.run(training_init_op_val)
# self.saver.restore(self.sess, loader)
# print('load model %s'%loader)
for i in range(steps_per_epoch_val):
cover_tensor_val, secret_tensor_val = self.sess.run(next_batch_val)
stego, secret_reveal, loss_value, secret_mse_value, cover_mse_value, secret_ssim_value, cover_ssim_value = \
self.sess.run([hiding_output_op, reveal_output_op, G_final_loss, secret_mse, cover_mse, secret_ssim, cover_ssim],
feed_dict={self.secret_tensor: secret_tensor_val,
self.cover_tensor: cover_tensor_val})
cover_names = segdl_val.imgs_files[i*segdl_val.batch_size:(i+1)*segdl_val.batch_size]
secret_names = segdl_val.labels_files[i*segdl_val.batch_size:(i+1)*segdl_val.batch_size]
loss_val_this_epoch.append(loss_value)
secret_mse_val_this_epoch.append(secret_mse_value)
cover_mse_val_this_epoch.append(cover_mse_value)
secret_ssim_this_epoch.append(secret_ssim_value)
cover_ssim_this_epoch.append(cover_ssim_value)
if i%10 == 0:
print('%d %.3f %.3f %.3f %.3f %.3f'%(i, loss_value, secret_mse_value, cover_mse_value, secret_ssim_value, cover_ssim_value))
save_test_images(cover_names, secret_names, cover_tensor_val, secret_tensor_val, stego, secret_reveal, log_path)
# np.save('%d %.3f %.3f %.3f %.3f %.3f_cover.npy'%(i, loss_value, secret_mse_value, cover_mse_value, secret_ssim_value, cover_ssim_value), cover_tensor_val)
# np.save('%d %.3f %.3f %.3f %.3f %.3f_secret.npy'%(i, loss_value, secret_mse_value, cover_mse_value, secret_ssim_value, cover_ssim_value), secret_tensor_val)
# np.save('%d %.3f %.3f %.3f %.3f %.3f_stego.npy'%(i, loss_value, secret_mse_value, cover_mse_value, secret_ssim_value, cover_ssim_value), stego)
# np.save('%d %.3f %.3f %.3f %.3f %.3f_secret_reveal.npy'%(i, loss_value, secret_mse_value, cover_mse_value, secret_ssim_value, cover_ssim_value), secret_reveal)
# mean_loss_val_this_epoch = sum(loss_val_this_epoch) / len(loss_val_this_epoch)
# mean_secret_mse_val_this_epoch = sum(secret_mse_val_this_epoch) / len(secret_mse_val_this_epoch)
# mean_cover_mse_val_this_epoch = sum(cover_mse_val_this_epoch) / len(cover_mse_val_this_epoch)
# mean_secret_ssim_this_epoch = sum(secret_ssim_this_epoch) / len(secret_ssim_this_epoch)
# mean_cover_ssim_this_epoch = sum(cover_ssim_this_epoch) / len(cover_ssim_this_epoch)
mean_loss_val_this_epoch = np.mean(loss_val_this_epoch)
mean_secret_mse_val_this_epoch = np.mean(secret_mse_val_this_epoch)
mean_cover_mse_val_this_epoch = np.mean(cover_mse_val_this_epoch)
mean_secret_ssim_this_epoch = np.mean(secret_ssim_this_epoch)
mean_cover_ssim_this_epoch = np.mean(cover_ssim_this_epoch)
print('validation loss: %.4f' % mean_loss_val_this_epoch)
print('secret mse: %.4f' % mean_secret_mse_val_this_epoch)
print('cover mse : %.4f' % mean_cover_mse_val_this_epoch)
print('secret ssim: %.4f' % mean_secret_ssim_this_epoch)
print('cover ssim: %.4f' % mean_cover_ssim_this_epoch)
if __name__ == '__main__':
train_model = Model()
train_model.train()
# train_model.test_performance(train_model.log_path)
# train_model.test_performance('logs/0427-1506')
# train_model.test_performance('logs/0428-2048')
# train_model.test_performance('logs/0505-1617')
|
the-stack_0_3472 | import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='pico_sdk',
version='0.1.4',
author='Meaty Solutions',
author_email='[email protected]',
description='High performance, gap-free streaming from any Pico Technology oscilloscope',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/meatysolutions/pico-sdk-bindings',
package_data={'': ['artifacts/*', 'artifacts/*/*']},
packages=setuptools.find_packages(),
install_requires=['numpy'],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
python_requires='>=3.6',
)
|
the-stack_0_3473 | import torch
import torch.nn as nn
def standardize(x, bn_stats):
if bn_stats is None:
return x
bn_mean, bn_var = bn_stats
view = [1] * len(x.shape)
view[1] = -1
x = (x - bn_mean.view(view)) / torch.sqrt(bn_var.view(view) + 1e-5)
# if variance is too low, just ignore
x *= (bn_var.view(view) != 0).float()
return x
def clip_data(data, max_norm):
norms = torch.norm(data.reshape(data.shape[0], -1), dim=-1)
scale = (max_norm / norms).clamp(max=1.0)
data *= scale.reshape(-1, 1, 1, 1)
return data
def get_num_params(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
class StandardizeLayer(nn.Module):
def __init__(self, bn_stats):
super(StandardizeLayer, self).__init__()
self.bn_stats = bn_stats
def forward(self, x):
return standardize(x, self.bn_stats)
class ClipLayer(nn.Module):
def __init__(self, max_norm):
super(ClipLayer, self).__init__()
self.max_norm = max_norm
def forward(self, x):
return clip_data(x, self.max_norm)
class CIFAR10_CNN(nn.Module):
def __init__(self, in_channels=3, input_norm=None, **kwargs):
super(CIFAR10_CNN, self).__init__()
self.in_channels = in_channels
self.features = None
self.classifier = None
self.norm = None
self.build(input_norm, **kwargs)
def build(self, input_norm=None, num_groups=None,
bn_stats=None, size=None):
if self.in_channels == 3:
if size == "small":
cfg = [16, 16, 'M', 32, 32, 'M', 64, 'M']
else:
cfg = [32, 32, 'M', 64, 64, 'M', 128, 128, 'M']
self.norm = nn.Identity()
else:
if size == "small":
cfg = [16, 16, 'M', 32, 32]
else:
cfg = [64, 'M', 64]
if input_norm is None:
self.norm = nn.Identity()
elif input_norm == "GroupNorm":
self.norm = nn.GroupNorm(num_groups, self.in_channels, affine=False)
else:
self.norm = lambda x: standardize(x, bn_stats)
layers = []
act = nn.Tanh
c = self.in_channels
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(c, v, kernel_size=3, stride=1, padding=1)
layers += [conv2d, act()]
c = v
self.features = nn.Sequential(*layers)
if self.in_channels == 3:
hidden = 128
self.classifier = nn.Sequential(nn.Linear(c * 4 * 4, hidden), act(), nn.Linear(hidden, 10))
else:
self.classifier = nn.Linear(c * 4 * 4, 10)
def forward(self, x):
if self.in_channels != 3:
x = self.norm(x.view(-1, self.in_channels, 8, 8))
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class MNIST_CNN(nn.Module):
def __init__(self, in_channels=1, input_norm=None, **kwargs):
super(MNIST_CNN, self).__init__()
self.in_channels = in_channels
self.features = None
self.classifier = None
self.norm = None
self.build(input_norm, **kwargs)
def build(self, input_norm=None, num_groups=None,
bn_stats=None, size=None):
if self.in_channels == 1:
ch1, ch2 = (16, 32) if size is None else (32, 64)
cfg = [(ch1, 8, 2, 2), 'M', (ch2, 4, 2, 0), 'M']
self.norm = nn.Identity()
else:
ch1, ch2 = (16, 32) if size is None else (32, 64)
cfg = [(ch1, 3, 2, 1), (ch2, 3, 1, 1)]
if input_norm == "GroupNorm":
self.norm = nn.GroupNorm(num_groups, self.in_channels, affine=False)
elif input_norm == "BN":
self.norm = lambda x: standardize(x, bn_stats)
else:
self.norm = nn.Identity()
layers = []
c = self.in_channels
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=1)]
else:
filters, k_size, stride, pad = v
conv2d = nn.Conv2d(c, filters, kernel_size=k_size, stride=stride, padding=pad)
layers += [conv2d, nn.Tanh()]
c = filters
self.features = nn.Sequential(*layers)
hidden = 32
self.classifier = nn.Sequential(nn.Linear(c * 4 * 4, hidden),
nn.Tanh(),
nn.Linear(hidden, 10))
def forward(self, x):
if self.in_channels != 1:
x = self.norm(x.view(-1, self.in_channels, 7, 7))
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class ScatterLinear(nn.Module):
def __init__(self, in_channels, hw_dims, input_norm=None, classes=10, clip_norm=None, **kwargs):
super(ScatterLinear, self).__init__()
self.K = in_channels
self.h = hw_dims[0]
self.w = hw_dims[1]
self.fc = None
self.norm = None
self.clip = None
self.build(input_norm, classes=classes, clip_norm=clip_norm, **kwargs)
def build(self, input_norm=None, num_groups=None, bn_stats=None, clip_norm=None, classes=10):
self.fc = nn.Linear(self.K * self.h * self.w, classes)
if input_norm is None:
self.norm = nn.Identity()
elif input_norm == "GroupNorm":
self.norm = nn.GroupNorm(num_groups, self.K, affine=False)
else:
self.norm = lambda x: standardize(x, bn_stats)
if clip_norm is None:
self.clip = nn.Identity()
else:
self.clip = ClipLayer(clip_norm)
def forward(self, x):
x = self.norm(x.view(-1, self.K, self.h, self.w))
x = self.clip(x)
x = x.reshape(x.size(0), -1)
x = self.fc(x)
return x
CNNS = {
"cifar10": CIFAR10_CNN,
"fmnist": MNIST_CNN,
"mnist": MNIST_CNN,
}
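# Hedged usage sketch (not part of the original module): a quick shape check for the
# models above, assuming CIFAR-10 inputs of 3x32x32 and MNIST inputs of 1x28x28.
if __name__ == "__main__":
    x_cifar = torch.randn(4, 3, 32, 32)
    x_mnist = torch.randn(4, 1, 28, 28)
    for name, model, x in [("cifar10", CIFAR10_CNN(in_channels=3), x_cifar),
                           ("mnist", MNIST_CNN(in_channels=1), x_mnist)]:
        logits = model(x)
        print(name, tuple(logits.shape), get_num_params(model))  # expect (4, 10) logits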
|
the-stack_0_3477 | # python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ImageNet dataset with typical pre-processing."""
import enum
from typing import Generator, Mapping, Optional, Sequence, Text, Tuple
import jax
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
Batch = Mapping[Text, np.ndarray]
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
class Split(enum.Enum):
"""Imagenet dataset split."""
TRAIN = 1
TRAIN_AND_VALID = 2
VALID = 3
TEST = 4
@classmethod
def from_string(cls, name: Text) -> 'Split':
return {'TRAIN': Split.TRAIN, 'TRAIN_AND_VALID': Split.TRAIN_AND_VALID,
'VALID': Split.VALID, 'VALIDATION': Split.VALID,
'TEST': Split.TEST}[name.upper()]
@property
def num_examples(self):
return {Split.TRAIN_AND_VALID: 1281167, Split.TRAIN: 1271167,
Split.VALID: 10000, Split.TEST: 50000}[self]
def load(
split: Split,
*,
is_training: bool,
batch_dims: Sequence[int],
bfloat16: bool = False,
) -> Generator[Batch, None, None]:
"""Loads the given split of the dataset."""
if is_training:
start, end = _shard(split, jax.host_id(), jax.host_count())
else:
start, end = _shard(split, 0, 1)
tfds_split = tfds.core.ReadInstruction(_to_tfds_split(split),
from_=start, to=end, unit='abs')
ds = tfds.load('imagenet2012:5.*.*', split=tfds_split,
decoders={'image': tfds.decode.SkipDecoding()})
total_batch_size = np.prod(batch_dims)
options = ds.options()
options.experimental_threading.private_threadpool_size = 48
options.experimental_threading.max_intra_op_parallelism = 1
if is_training:
options.experimental_deterministic = False
if is_training:
if jax.host_count() > 1:
# Only cache if we are reading a subset of the dataset.
ds = ds.cache()
ds = ds.repeat()
ds = ds.shuffle(buffer_size=10 * total_batch_size, seed=0)
else:
if split.num_examples % total_batch_size != 0:
raise ValueError(f'Test/valid must be divisible by {total_batch_size}')
def preprocess(example):
image = _preprocess_image(example['image'], is_training)
if bfloat16:
image = tf.cast(image, tf.bfloat16)
label = tf.cast(example['label'], tf.int32)
return {'images': image, 'labels': label}
ds = ds.map(preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)
for batch_size in reversed(batch_dims):
ds = ds.batch(batch_size)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
yield from tfds.as_numpy(ds)
def _to_tfds_split(split: Split) -> tfds.Split:
"""Returns the TFDS split appropriately sharded."""
# NOTE: Imagenet did not release labels for the test split used in the
# competition, so it has been typical at DeepMind to consider the VALID
# split the TEST split and to reserve 10k images from TRAIN for VALID.
if split in (Split.TRAIN, Split.TRAIN_AND_VALID, Split.VALID):
return tfds.Split.TRAIN
else:
assert split == Split.TEST
return tfds.Split.VALIDATION
def _shard(split: Split, shard_index: int, num_shards: int) -> Tuple[int, int]:
"""Returns [start, end) for the given shard index."""
assert shard_index < num_shards
arange = np.arange(split.num_examples)
shard_range = np.array_split(arange, num_shards)[shard_index]
start, end = shard_range[0], (shard_range[-1] + 1)
if split == Split.TRAIN:
# Note that our TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000].
offset = Split.VALID.num_examples
start += offset
end += offset
return start, end
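# Worked example (illustrative): with two hosts, _shard(Split.TRAIN, 0, 2) returns
# roughly (10000, 645584) and _shard(Split.TRAIN, 1, 2) roughly (645584, 1281167):
# the first 10000 TFDS_TRAIN examples are reserved for VALID, and the remaining
# 1271167 TRAIN examples are split nearly evenly between the two hosts.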
def _preprocess_image(
image_bytes: tf.Tensor,
is_training: bool,
) -> tf.Tensor:
"""Returns processed and resized images."""
if is_training:
image = _decode_and_random_crop(image_bytes)
image = tf.image.random_flip_left_right(image)
else:
image = _decode_and_center_crop(image_bytes)
assert image.dtype == tf.uint8
# NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without
# clamping overshoots. This means values returned will be outside the range
# [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]).
image = tf.image.resize(image, [224, 224], tf.image.ResizeMethod.BICUBIC)
image = _normalize_image(image)
return image
def _normalize_image(image: tf.Tensor) -> tf.Tensor:
"""Normalize the image to zero mean and unit variance."""
image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
return image
def _distorted_bounding_box_crop(
image_bytes: tf.Tensor,
*,
jpeg_shape: tf.Tensor,
bbox: tf.Tensor,
min_object_covered: float,
aspect_ratio_range: Tuple[float, float],
area_range: Tuple[float, float],
max_attempts: int,
) -> tf.Tensor:
"""Generates cropped_image using one of the bboxes randomly distorted."""
bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
jpeg_shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
def _decode_and_random_crop(image_bytes: tf.Tensor) -> tf.Tensor:
"""Make a random crop of 224."""
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image = _distorted_bounding_box_crop(
image_bytes,
jpeg_shape=jpeg_shape,
bbox=bbox,
min_object_covered=0.1,
aspect_ratio_range=(3 / 4, 4 / 3),
area_range=(0.08, 1.0),
max_attempts=10)
if tf.reduce_all(tf.equal(jpeg_shape, tf.shape(image))):
# If the random crop failed fall back to center crop.
image = _decode_and_center_crop(image_bytes, jpeg_shape)
return image
def _decode_and_center_crop(
image_bytes: tf.Tensor,
jpeg_shape: Optional[tf.Tensor] = None,
) -> tf.Tensor:
"""Crops to center of image with padding then scales."""
if jpeg_shape is None:
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
image_height = jpeg_shape[0]
image_width = jpeg_shape[1]
padded_center_crop_size = tf.cast(
((224 / (224 + 32)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
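# Hedged usage sketch (not part of the original module): iterate one evaluation batch.
# Assumes the imagenet2012:5.*.* TFDS files are available locally.
if __name__ == '__main__':
    ds = load(Split.from_string('TEST'), is_training=False, batch_dims=[50])
    batch = next(ds)
    print(batch['images'].shape, batch['labels'].shape)  # expected: (50, 224, 224, 3) (50,)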
|
the-stack_0_3478 | """Perform inference on one or more datasets."""
import argparse
import cv2
import os
import pprint
import sys
import time
from six.moves import cPickle as pickle
import torch
import _init_paths # pylint: disable=unused-import
from core.config import cfg, merge_cfg_from_file, merge_cfg_from_list, assert_and_infer_cfg
#from core.test_engine_rel import run_inference, get_features_for_centroids
import utils.logging
from datasets import task_evaluation_rel as task_evaluation
from evaluation.generate_detections_csv import generate_csv_file_from_det_obj, generate_topk_csv_from_det_obj, generate_boxes_csv_from_det_obj
from evaluation.frequency_based_analysis_of_methods import get_metrics_from_csv, get_many_medium_few_scores, get_wordsim_metrics_from_csv
import numpy as np
import json
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
def parse_args():
"""Parse in command line arguments"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument(
'--dataset',
help='training dataset')
parser.add_argument(
'--cfg', dest='cfg_file', required=True,
help='optional config file')
parser.add_argument(
'--load_ckpt', help='path of checkpoint to load')
parser.add_argument(
'--load_detectron', help='path to the detectron weight pickle file')
parser.add_argument(
'--output_dir',
help='output directory to save the testing results. If not provided, '
'defaults to [args.load_ckpt|args.load_detectron]/../test.')
parser.add_argument(
'--set', dest='set_cfgs',
help='set config keys, will overwrite config in the cfg_file.'
' See lib/core/config.py for all options',
default=[], nargs='*')
parser.add_argument(
'--range',
help='start (inclusive) and end (exclusive) indices',
type=int, nargs=2)
parser.add_argument(
'--multi-gpu-testing', help='using multiple gpus for inference',
action='store_true')
parser.add_argument(
'--vis', dest='vis', help='visualize detections', action='store_true')
parser.add_argument(
'--do_val', dest='do_val', help='do evaluation', action='store_true')
parser.add_argument(
'--use_gt_boxes', dest='use_gt_boxes', help='use gt boxes for sgcls/prdcls', action='store_true')
parser.add_argument(
'--use_gt_labels', dest='use_gt_labels', help='use gt boxes for sgcls/prdcls', action='store_true')
parser.add_argument(
'--cutoff_medium', dest='cutoff_medium', help='ratio of medium classes', type=float, default=0.80)
parser.add_argument(
'--cutoff_many', dest='cutoff_many', help='ratio of many classes', type=float, default=0.95)
parser.add_argument(
'--seed', dest='seed',
help='Value of seed here will overwrite seed in cfg file',
type=int)
return parser.parse_args()
def get_obj_and_prd_categories():
from datasets.dataset_catalog_rel import ANN_FN3
from datasets.dataset_catalog_rel import DATASETS
predicates_path = DATASETS[cfg.TEST.DATASETS[0]][ANN_FN3]
objects_path = DATASETS[cfg.TEST.DATASETS[0]][ANN_FN3].replace('predicates', 'objects', 1)
logger.info('Loading predicates from: ' + predicates_path)
logger.info('Loading objects from: ' + objects_path)
with open(predicates_path) as f:
prd_categories = json.load(f)
with open(objects_path) as f:
obj_categories = json.load(f)
return obj_categories, prd_categories
def get_obj_and_prd_frequencies():
if cfg.DATASET == 'gvqa10k':
freq_prd_path = cfg.DATA_DIR + '/gvqa/reduced_data/10k/seed{}/predicates_freqs.json'.format(
cfg.RNG_SEED)
freq_obj_path = cfg.DATA_DIR + '/gvqa/reduced_data/10k/seed{}/objects_freqs.json'.format(
cfg.RNG_SEED)
elif cfg.DATASET == 'gvqa20k':
freq_prd_path = cfg.DATA_DIR + '/gvqa/reduced_data/20k/seed{}/predicates_freqs.json'.format(
cfg.RNG_SEED)
freq_obj_path = cfg.DATA_DIR + '/gvqa/reduced_data/20k/seed{}/objects_freqs.json'.format(
cfg.RNG_SEED)
elif cfg.DATASET == 'gvqa':
freq_prd_path = cfg.DATA_DIR + '/gvqa/seed{}/predicates_freqs.json'.format(
cfg.RNG_SEED)
freq_obj_path = cfg.DATA_DIR + '/gvqa/seed{}/objects_freqs.json'.format(
cfg.RNG_SEED)
elif cfg.DATASET == 'vg80k':
freq_prd_path = cfg.DATA_DIR + '/vg/predicates_freqs.json'
freq_obj_path = cfg.DATA_DIR + '/vg/objects_freqs.json'
elif cfg.DATASET == 'vg8k':
freq_prd_path = cfg.DATA_DIR + '/vg8k/seed{}/train_predicates_freqs.json'.format(
cfg.RNG_SEED)
freq_obj_path = cfg.DATA_DIR + '/vg8k/seed{}/train_objects_freqs.json'.format(
cfg.RNG_SEED)
else:
raise NotImplementedError
logger.info('Loading predicates frequencies from: ' + freq_prd_path)
logger.info('Loading objects frequencies from: ' + freq_obj_path)
prd_freq_dict = json.load(open(freq_prd_path))
obj_freq_dict = json.load(open(freq_obj_path))
return obj_freq_dict, prd_freq_dict
if __name__ == '__main__':
logger = utils.logging.setup_logging(__name__)
args = parse_args()
logger.info('Called with args:')
logger.info(args)
cfg.VIS = args.vis
if args.cfg_file is not None:
merge_cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
merge_cfg_from_list(args.set_cfgs)
cfg.DATASET = args.dataset
if args.dataset == "vrd":
cfg.TEST.DATASETS = ('vrd_val',)
cfg.MODEL.NUM_CLASSES = 101
cfg.MODEL.NUM_PRD_CLASSES = 70 # exclude background
elif args.dataset == "vg":
cfg.TEST.DATASETS = ('vg_val',)
cfg.MODEL.NUM_CLASSES = 151
cfg.MODEL.NUM_PRD_CLASSES = 50 # exclude background
elif args.dataset == "vg80k":
cfg.TEST.DATASETS = ('vg80k_test',)
cfg.MODEL.NUM_CLASSES = 53305 # includes background
cfg.MODEL.NUM_PRD_CLASSES = 29086 # excludes background
elif args.dataset == "vg8k":
cfg.TEST.DATASETS = ('vg8k_test',)
cfg.MODEL.NUM_CLASSES = 5331 # includes background
cfg.MODEL.NUM_PRD_CLASSES = 2000 # excludes background
elif args.dataset == "gvqa20k":
cfg.TEST.DATASETS = ('gvqa20k_test',)
cfg.MODEL.NUM_CLASSES = 1704 # includes background
cfg.MODEL.NUM_PRD_CLASSES = 310 # exclude background
elif args.dataset == "gvqa10k":
cfg.TEST.DATASETS = ('gvqa10k_test',)
cfg.MODEL.NUM_CLASSES = 1704 # includes background
cfg.MODEL.NUM_PRD_CLASSES = 310 # exclude background
elif args.dataset == "gvqa":
cfg.TEST.DATASETS = ('gvqa_test',)
cfg.MODEL.NUM_CLASSES = 1704 # includes background
cfg.MODEL.NUM_PRD_CLASSES = 310 # exclude background
else: # For subprocess call
assert cfg.TEST.DATASETS, 'cfg.TEST.DATASETS shouldn\'t be empty'
if args.seed:
cfg.RNG_SEED = args.seed
assert_and_infer_cfg()
data_dir = '{}/{}/'.format(cfg.DATA_DIR, cfg.DATASET)
ann_dir = '{}seed{}/'.format(data_dir, cfg.RNG_SEED)
# The import has to happen after setting up the config to avoid loading default cfg values
from core.test_engine_rel import run_inference
obj_categories, prd_categories = get_obj_and_prd_categories()
obj_freq_dict, prd_freq_dict = get_obj_and_prd_frequencies()
if not cfg.MODEL.RUN_BASELINE:
assert bool(args.load_ckpt) ^ bool(args.load_detectron), \
'Exactly one of --load_ckpt and --load_detectron should be specified.'
if args.output_dir is None:
ckpt_path = args.load_ckpt if args.load_ckpt else args.load_detectron
args.output_dir = os.path.join(
os.path.dirname(os.path.dirname(ckpt_path)), 'test')
logger.info('Automatically set output directory to %s', args.output_dir)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
logger.info('Testing with config:')
logger.info(pprint.pformat(cfg))
# For test_engine.multi_gpu_test_net_on_dataset
args.test_net_file, _ = os.path.splitext(__file__)
# manually set args.cuda
args.cuda = True
#print('Generating Centroids')
#all_results = get_features_for_centroids(args)
#print('Done!')
if args.use_gt_boxes:
if args.use_gt_labels:
det_file = os.path.join(args.output_dir, 'rel_detections_gt_boxes_prdcls.pkl')
csv_file = os.path.join(args.output_dir, 'rel_detections_gt_boxes_prdcls.csv')
else:
det_file = os.path.join(args.output_dir, 'rel_detections_gt_boxes_sgcls.pkl')
csv_file = os.path.join(args.output_dir, 'rel_detections_gt_boxes_sgcls.csv')
else:
det_file = os.path.join(args.output_dir, 'rel_detections.pkl')
csv_file = os.path.join(args.output_dir, 'rel_detections.csv')
if os.path.exists(det_file):
logger.info('Loading results from {}'.format(det_file))
with open(det_file, 'rb') as f:
all_results = pickle.load(f)
# logger.info('Starting evaluation now...')
# task_evaluation.eval_rel_results(all_results, args.output_dir, args.do_val)
else:
if not torch.cuda.is_available():
sys.exit("Need a CUDA device to run the code.")
assert (torch.cuda.device_count() == 1) ^ bool(args.multi_gpu_testing)
all_results = run_inference(
args,
ind_range=args.range,
multi_gpu_testing=args.multi_gpu_testing,
check_expected_results=True)
all_results = all_results[0]
print('all_results', len(all_results))
print('all_results', all_results[0].keys())
#all_results = all_results[0]
freq_prd = (np.zeros(cfg.MODEL.NUM_PRD_CLASSES))
freq_obj = (np.zeros(cfg.MODEL.NUM_CLASSES))
generate_csv_file_from_det_obj(all_results, csv_file, obj_categories, prd_categories, obj_freq_dict, prd_freq_dict)
logger.info('Saved CSV to: ' + csv_file)
get_metrics_from_csv(csv_file, get_mr=True)
cutoffs = [args.cutoff_medium, args.cutoff_many]
get_many_medium_few_scores(csv_file, cutoffs, cfg.DATASET, data_dir, ann_dir, syn=True)
csv_file_topk = os.path.join(os.path.dirname(csv_file), 'rel_detections_gt_boxes_prdcls_topk.csv')
generate_topk_csv_from_det_obj(all_results, csv_file_topk, obj_categories, prd_categories, 250)
logger.info('Saved topk CSV to: ' + csv_file_topk)
csv_file_boxes = os.path.join(os.path.dirname(csv_file), 'rel_detections_gt_boxes_prdcls_boxes.csv')
generate_boxes_csv_from_det_obj(all_results, csv_file_boxes, obj_categories, prd_categories, obj_freq_dict, prd_freq_dict)
logger.info('Saved boxes CSV to: ' + csv_file_boxes)
if cfg.DATASET.find('gvqa') >= 0:
from evaluation.add_word_similarity_to_csv import add_similarity_to_detections
logger.info('Adding word similarity to CSV')
add_similarity_to_detections(csv_file)
csv_file_w = os.path.join(os.path.dirname(csv_file), 'rel_detections_gt_boxes_prdcls_wrd_sim.csv')
get_wordsim_metrics_from_csv(csv_file_w)
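# Hedged CLI usage sketch (illustrative; the script name, config and checkpoint paths are assumptions):
#   python test_net_rel.py --dataset gvqa --cfg configs/gvqa/example.yaml \
#       --load_ckpt Outputs/example_run/ckpt/model_final.pth --do_val \
#       --use_gt_boxes --use_gt_labels --output_dir Outputs/example_run/test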
|
the-stack_0_3481 | import pandas as pd
import datetime
import copy
import requests
from data.dataloader.base import BaseLoader
class Covid19IndiaLoader(BaseLoader):
"""Dataloader that gets casecount data from 'https://api.covid19india.org'
We use the JSON api and not the CSV api
Different API are accessed and then converted into pd.DataFrames
Full list of dataframes are given in the docstrings of pull_dataframes
Args:
BaseLoader (abstract class): Abstract Data Loader Class
"""
def __init__(self):
super().__init__()
def _load_data_json(self):
"""Returns dataframes from data.json
df_tested : dataframe of testing data
df_statewise : dataframe of statewise data (today's snapshot)
df_india_time_series : dataframe of india cases (time series)
Returns:
[pd.DataFrame]: list of dataframes
"""
# Parse data.json file
data = requests.get('https://api.covid19india.org/data.json').json()
# Create dataframe for testing data
df_tested = pd.DataFrame.from_dict(data['tested'])
# Create dataframe for statewise data
df_statewise = pd.DataFrame.from_dict(data['statewise'])
# Create dataframe for time series data
df_india_time_series = pd.DataFrame.from_dict(data['cases_time_series'])
df_india_time_series['date'] = pd.to_datetime([datetime.datetime.strptime(
x.split(' ')[0] + ' ' + x.split(' ')[1][:3] + ' 2020', '%d %b %Y') for x in
df_india_time_series['date']])
return df_tested, df_statewise, df_india_time_series
def _load_state_district_wise_json(self):
"""Loads dataframes from the state_district_wise.json file
df_districtwise : Today's snapshot of district-wise cases
statecode_to_state_dict : Mapping statecode to state name
Returns:
pd.DataFrame, dict: df_districtwise, statecode_to_state_dict
"""
# Load state_district_wise.json file
data = requests.get('https://api.covid19india.org/state_district_wise.json').json()
# Create statecode_to_state_dict
df_statecode = pd.DataFrame.from_dict(data)
df_statecode = df_statecode.drop(['districtData']).T
statecode_to_state_dict = dict(
zip(df_statecode['statecode'], df_statecode.index))
# Create df_districtwise
states = data.keys()
for state in states:
for district, district_dict in data[state]['districtData'].items():
delta_dict = dict([('delta_'+k, v)
for k, v in district_dict['delta'].items()])
data[state]['districtData'][district].update(delta_dict)
del data[state]['districtData'][district]['delta']
columns = ['state', 'district', 'active', 'confirmed', 'deceased',
'recovered', 'migratedother', 'delta_confirmed', 'delta_deceased',
'delta_recovered']
df_districtwise = pd.DataFrame(columns=columns)
for state in states:
df = pd.DataFrame.from_dict(
data[state]['districtData']).T.reset_index()
del df['notes']
df.rename({'index': 'district'}, axis=1, inplace=True)
df['state'] = state
df = df[columns]
df_districtwise = pd.concat([df_districtwise, df], ignore_index=True)
return df_districtwise, statecode_to_state_dict
def _load_raw_data_json(self, NUM_RAW_DFS=30):
"""Loads raw_data from raw_data{i}.json
df_raw : patient level information
Args:
NUM_RAW_DFS (int, optional): Number of raw data json files to consider. Defaults to 30.
"""
# Parse raw_data.json file
raw_data_dataframes = []
for i in range(1, NUM_RAW_DFS+1):
try:
data = requests.get(f'https://api.covid19india.org/raw_data{i}.json').json()
raw_data_dataframes.append(pd.DataFrame.from_dict(data['raw_data']))
except Exception:
break
df_raw = pd.concat(raw_data_dataframes, ignore_index=True)
return df_raw
def _load_districts_daily_json(self):
"""Loads history of cases district wise from districts_daily.json
Returns:
pd.DataFrame: df_districts
"""
data = requests.get('https://api.covid19india.org/districts_daily.json').json()
df_districts = pd.DataFrame(columns=['notes', 'active', 'confirmed', 'deceased',
'recovered', 'date', 'state', 'district'])
for state in data['districtsDaily'].keys():
for dist in data['districtsDaily'][state].keys():
df = pd.DataFrame.from_dict(data['districtsDaily'][state][dist])
df['state'] = state
df['district'] = dist
df_districts = pd.concat([df_districts, df], ignore_index=True)
df_districts = df_districts[['state', 'district', 'date',
'active', 'confirmed', 'deceased', 'recovered', 'notes']]
numeric_cols = ['active', 'confirmed', 'deceased', 'recovered']
df_districts[numeric_cols] = df_districts[numeric_cols].apply(
pd.to_numeric)
return df_districts
def _load_data_all_json_district(self, statecode_to_state_dict):
"""Loads history of cases district wise from data-all.json
Args:
statecode_to_state_dict (dict): dict mapping state code to state name
Returns:
pd.DataFrame: df_districts_all
"""
data = requests.get('https://api.covid19india.org/v4/data-all.json').json()
for date in data.keys():
date_dict = data[date]
# Remove all the states which don't have district data in them
date_dict = {state : state_dict for state, state_dict in date_dict.items() \
if 'districts' in state_dict.keys()}
data[date] = date_dict
# Remove all the dates which have 0 states with district data after pruning
data = {date : date_dict for date, date_dict in data.items() if len(date_dict) > 0}
# Make the districts key data the only data available for the state key
for date in data.keys():
for state in data[date].keys():
# Make the districts key dict the main dict itself for a particular date, state
data[date][state] = data[date][state]['districts']
state_dict = data[date][state]
# Keep only those district dicts for which cumulative data (total key) is available
state_dict = {dist : dist_dict for dist, dist_dict in state_dict.items() \
if 'total' in dist_dict.keys()}
data[date][state] = state_dict
# Make the total key dict the main dict itself for a particular date, state, dist
for district in data[date][state].keys():
data[date][state][district] = data[date][state][district]['total']
                # For a particular date, state and dist, keep only those districts for which confirmed, recovered and deceased are all available
state_dict = {dist: dist_dict for dist, dist_dict in state_dict.items() \
if {'confirmed', 'recovered', 'deceased'} <= dist_dict.keys()}
data[date][state] = state_dict
# Remove all the states for a particular date which don't have district that satisfied above criteria
date_dict = data[date]
date_dict = {state : state_dict for state, state_dict in date_dict.items() if len(state_dict) > 0}
data[date] = date_dict
# Remove all the dates which have 0 states with district data after pruning
data = {date : date_dict for date, date_dict in data.items() if len(date_dict) > 0}
df_districts_all = pd.DataFrame(columns=['date', 'state', 'district', 'confirmed', 'active',
'recovered', 'deceased', 'tested', 'migrated'])
for date in data.keys():
for state in data[date].keys():
df_date_state = pd.DataFrame.from_dict(data[date][state]).T.reset_index()
df_date_state = df_date_state.rename({'index' : 'district'}, axis='columns')
df_date_state['active'] = df_date_state['confirmed'] - \
(df_date_state['recovered'] + df_date_state['deceased'])
df_date_state['state'] = statecode_to_state_dict[state]
df_date_state['date'] = date
df_districts_all = pd.concat([df_districts_all, df_date_state], ignore_index=True, sort=False)
numeric_cols = ['confirmed', 'active', 'recovered', 'deceased', 'tested', 'migrated']
df_districts_all.loc[:, numeric_cols] = df_districts_all.loc[:, numeric_cols].apply(pd.to_numeric)
return df_districts_all
def _load_data_all_json_state(self, statecode_to_state_dict):
"""Loads history of cases state wise from data-all.json
Args:
statecode_to_state_dict (dict): dict mapping state code to state name
Returns:
pd.DataFrame: df_state_all
"""
data = requests.get('https://api.covid19india.org/v4/data-all.json').json()
for date in data.keys():
date_dict = data[date]
# Remove all the states which don't have district data in them
date_dict = {state : state_dict for state, state_dict in date_dict.items() if 'districts' in state_dict.keys()}
data[date] = date_dict
# Remove all the dates which have 0 states with district data after pruning
data = {date : date_dict for date, date_dict in data.items() if len(date_dict) > 0}
# Make the districts key data the only data available for the state key
for date in data.keys():
for state in data[date].keys():
# Make the districts key dict the main dict itself for a particular date, state
data[date][state] = data[date][state]['total']
date_dict = {state: state_dict for state, state_dict in data[date].items() \
if {'confirmed', 'recovered', 'deceased'} <= state_dict.keys()}
data[date] = date_dict
# Remove all the dates which have 0 states with district data after pruning
data = {date: date_dict for date, date_dict in data.items() if len(date_dict) > 0}
df_states_all = pd.DataFrame(columns=['date', 'state', 'confirmed', 'active', 'recovered', 'deceased', 'tested', 'migrated'])
for date in data.keys():
df_date = pd.DataFrame.from_dict(data[date]).T.reset_index()
df_date = df_date.rename({'index': 'state'}, axis='columns')
df_date['active'] = df_date['confirmed'] - (df_date['recovered'] + df_date['deceased'])
df_date['state'] = pd.Series([statecode_to_state_dict[state_code] for state_code in df_date['state']])
df_date['date'] = date
df_states_all = pd.concat([df_states_all, df_date], ignore_index=True)
numeric_cols = ['confirmed', 'active', 'recovered', 'deceased', 'tested', 'migrated']
df_states_all.loc[:, numeric_cols] = df_states_all.loc[:, numeric_cols].apply(pd.to_numeric)
return df_states_all
def _load_districts_csv(self):
df = pd.read_csv('https://api.covid19india.org/csv/latest/districts.csv')
df.columns = [x.lower() for x in df.columns]
df['active'] = df['confirmed'] - (df['recovered'] + df['deceased'])
numeric_cols = ['confirmed', 'active',
'recovered', 'deceased', 'tested', 'other']
df.loc[:, numeric_cols] = df.loc[:, numeric_cols].apply(pd.to_numeric)
df = df.fillna(0)
df['date'] = pd.to_datetime(df['date'], format="%Y-%m-%d")
return df
def pull_dataframes(self, load_raw_data=False, load_districts_daily=False, **kwargs):
"""
This function parses multiple JSONs from covid19india.org
It then converts the data into pandas dataframes
It returns the following dataframes as a dict :
- df_tested : Time series of people tested in India
- df_statewise : Today's snapshot of cases in India, statewise
- df_india_time_series : Time series of cases in India (nationwide)
- df_districtwise : Today's snapshot of cases in India, districtwise
- df_raw_data : Patient level information of cases
- df_districts_daily : History of cases district wise obtained from districts_daily.json
- df_districts_all : History of cases district wise obtained from data_all.json
- df_states_all : History of cases state wise obtained from data_all.json
"""
# List of dataframes to return
dataframes = {}
df_tested, df_statewise, df_india_time_series = self._load_data_json()
dataframes['df_tested'] = df_tested
dataframes['df_statewise'] = df_statewise
dataframes['df_india_time_series'] = df_india_time_series
df_districtwise, statecode_to_state_dict = self._load_state_district_wise_json()
dataframes['df_districtwise'] = df_districtwise
if load_raw_data:
df_raw = self._load_raw_data_json()
dataframes['df_raw_data'] = df_raw
if load_districts_daily:
df_districts = self._load_districts_daily_json()
dataframes['df_districts_daily'] = df_districts
df_districts_all = self._load_data_all_json_district(statecode_to_state_dict)
dataframes['df_districts_all'] = df_districts_all
df_states_all = self._load_data_all_json_state(statecode_to_state_dict)
dataframes['df_states_all'] = df_states_all
df_districts = self._load_districts_csv()
dataframes['df_districts'] = df_districts
return dataframes
def pull_dataframes_cached(self, reload_data=False, label=None, **kwargs):
return super().pull_dataframes_cached(reload_data=reload_data, label=label, **kwargs)
def get_data(self, state='Maharashtra', district='Mumbai', use_dataframe='data_all',
reload_data=False, **kwargs):
"""Main function serving as handshake between data and fitting modules
Args:
state (str, optional): State to fit on. Defaults to 'Maharashtra'.
district (str, optional): District to fit on. If given, get_data_district is called.
Else, get_data_state is called. Defaults to 'Mumbai'.
use_dataframe (str, optional): Which dataframe to use for districts.
Can be data_all/districts_daily. Defaults to 'data_all'.
reload_data (bool, optional): arg for pull_dataframes_cached. If true, data is
pulled afresh, rather than using the cache. Defaults to False.
Returns:
dict { str : pd.DataFrame } : Processed dataframe
"""
        if district is not None:
return {"data_frame": self.get_data_district(state, district, use_dataframe,
reload_data, **kwargs)}
else:
return {"data_frame": self.get_data_state(state, reload_data, **kwargs)}
def get_data_state(self, state='Delhi', reload_data=False, **kwargs):
"""Helper function for get_data. Returns state data
Args:
state (str, optional): State to fit on. Defaults to 'Delhi'.
reload_data (bool, optional): arg for pull_dataframes_cached. If true, data is
pulled afresh, rather than using the cache. Defaults to False.
Returns:
dict { str : pd.DataFrame } : Processed dataframe
"""
dataframes = self.pull_dataframes_cached(reload_data=reload_data, **kwargs)
df_states = copy.copy(dataframes['df_states_all'])
df_state = df_states[df_states['state'] == state]
df_state['date'] = pd.to_datetime(df_state['date'])
df_state = df_state.rename({'confirmed': 'total'}, axis='columns')
df_state.reset_index(inplace=True, drop=True)
return df_state
def get_data_district(self, state='Karnataka', district='Bengaluru',
use_dataframe='data_all', reload_data=False, **kwargs):
"""Helper function for get_data. Returns district data
Args:
state (str, optional): State to fit on. Defaults to 'Karnataka'.
district (str, optional): District to fit on. Defaults to 'Bengaluru'.
use_dataframe (str, optional) : Which dataframe to use. Can be `data_all`/`districts_daily`.
reload_data (bool, optional): arg for pull_dataframes_cached. If true, data is
pulled afresh, rather than using the cache. Defaults to False.
Returns:
dict { str : pd.DataFrame } : Processed dataframe
"""
dataframes = self.pull_dataframes_cached(reload_data=reload_data, **kwargs)
if use_dataframe == 'data_all':
df_districts = copy.copy(dataframes['df_districts_all'])
df_district = df_districts.loc[(df_districts['state'] == state) & (
df_districts['district'] == district)]
df_district.loc[:, 'date'] = pd.to_datetime(df_district.loc[:, 'date'])
df_district = df_district.rename({'confirmed': 'total'}, axis='columns')
del df_district['migrated']
df_district.reset_index(inplace=True, drop=True)
return df_district
if use_dataframe == 'districts_daily':
df_districts = copy.copy(dataframes['df_districts_daily'])
df_district = df_districts.loc[(df_districts['state'] == state) & (
df_districts['district'] == district)]
del df_district['notes']
df_district.loc[:, 'date'] = pd.to_datetime(df_district.loc[:, 'date'])
df_district = df_district.loc[df_district['date'] >= '2020-04-24', :]
df_district = df_district.rename({'confirmed': 'total'}, axis='columns')
df_district.reset_index(inplace=True, drop=True)
return df_district
|
the-stack_0_3482 | import sys
import logging
from collections import namedtuple
logging.basicConfig(stream=sys.stderr, level=logging.WARNING)
class Parser(object):
"""Defines the common interface for parser objects.
    Parsers transform natural language text into graphbrain hyperedges.
"""
def __init__(self, lemmas=False):
self.lemmas = lemmas
self.atom2token = {}
self.cur_text = None
# to be created by derived classes
self.lang = None
self.nlp = None
# named tuple used to pass parser state internally
self._ParseState = namedtuple('_ParseState',
['extra_edges', 'tokens', 'child_tokens',
'positions', 'children', 'entities'])
    def _post_process(self, edge):
        raise NotImplementedError()
    def _parse_token(self, token):
        raise NotImplementedError()
def _parse_sentence(self, sent):
self.atom2token = {}
main_edge, extra_edges = self._parse_token(sent.root)
main_edge, _ = self._post_process(main_edge)
return {'main_edge': main_edge,
'extra_edges': extra_edges,
'text': str(sent).strip(),
'spacy_sentence': sent}
def parse(self, text):
"""Transforms the given text into hyperedges + aditional information.
Returns a sequence of dictionaries, with one dictionary for each
sentence found in the text.
Each dictionary contains the following fields:
-> main_edge: the hyperedge corresponding to the sentence.
        -> extra_edges: additional edges, e.g. connecting atoms that appear
in the main_edge to their lemmas.
-> text: the string of natural language text corresponding to the
main_edge, i.e.: the sentence itself.
-> spacy_sentence: the spaCy structure representing the sentence
enriched with NLP annotations.
"""
self.cur_text = text
doc = self.nlp(text.strip())
return tuple(self._parse_sentence(sent) for sent in doc.sents)
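# Minimal usage sketch (illustrative only; assumes a concrete subclass -- the class name
# below is hypothetical -- that sets self.nlp and implements _parse_token/_post_process):
#     parser = EnglishParser(lemmas=True)
#     for parse in parser.parse('Berlin is the capital of Germany.'):
#         print(parse['main_edge'])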
|
the-stack_0_3483 | # -*- coding: utf-8 -*-
from hypothesis import assume, given
import hypothesis.strategies as st
import pytest
from matchpy.expressions.expressions import Arity, Operation, Symbol, Wildcard, Pattern
from matchpy.functions import ReplacementRule, replace, replace_all, substitute, replace_many, is_match
from matchpy.matching.one_to_one import match_anywhere
from matchpy.matching.one_to_one import match as match_one_to_one
from matchpy.matching.many_to_one import ManyToOneReplacer
from .common import *
@pytest.mark.parametrize(
' expr, pattern, do_match',
[
(a, a, True),
(a, b, False),
(f(a), f(x_), True),
]
) # yapf: disable
def test_is_match(expr, pattern, do_match):
assert is_match(expr, Pattern(pattern)) == do_match
class TestSubstitute:
@pytest.mark.parametrize(
' expression, substitution, expected_result, replaced',
[
(a, {}, a, False),
(a, {'x': b}, a, False),
(x_, {'x': b}, b, True),
(x_, {'x': [a, b]}, [a, b], True),
(y_, {'x': b}, y_, False),
(f(x_), {'x': b}, f(b), True),
(f(x_), {'y': b}, f(x_), False),
(f(x_), {}, f(x_), False),
(f(a, x_), {'x': b}, f(a, b), True),
(f(x_), {'x': [a, b]}, f(a, b), True),
(f(x_), {'x': []}, f(), True),
(f(x_, c), {'x': [a, b]}, f(a, b, c), True),
(f(x_, y_), {'x': a, 'y': b}, f(a, b), True),
(f(x_, y_), {'x': [a, c], 'y': b}, f(a, c, b), True),
(f(x_, y_), {'x': a, 'y': [b, c]}, f(a, b, c), True),
(Pattern(f(x_)), {'x': a}, f(a), True)
]
) # yapf: disable
def test_substitute(self, expression, substitution, expected_result, replaced):
result = substitute(expression, substitution)
assert result == expected_result, "Substitution did not yield expected result"
if replaced:
assert result is not expression, "When substituting, the original expression may not be modified"
else:
assert result is expression, "When nothing is substituted, the original expression has to be returned"
def many_replace_wrapper(expression, position, replacement):
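    """Adapter so the single-replacement test cases can also exercise replace_many."""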
return replace_many(expression, [(position, replacement)])
class TestReplaceTest:
@pytest.mark.parametrize('replace', [replace, many_replace_wrapper])
@pytest.mark.parametrize(
' expression, position, replacement, expected_result',
[
(a, (), b, b),
(f(a), (), b, b),
(a, (), f(b), f(b)),
(f(a), (), f(b), f(b)),
(f(a), (0, ), b, f(b)),
(f(a, b), (0, ), c, f(c, b)),
(f(a, b), (1, ), c, f(a, c)),
(f(a), (0, ), [b, c], f(b, c)),
(f(a, b), (0, ), [b, c], f(b, c, b)),
(f(a, b), (1, ), [b, c], f(a, b, c)),
(f(f(a)), (0, ), b, f(b)),
(f(f(a)), (0, 0), b, f(f(b))),
(f(f(a, b)), (0, 0), c, f(f(c, b))),
(f(f(a, b)), (0, 1), c, f(f(a, c))),
(f(f(a, b), f(a, b)), (0, 0), c, f(f(c, b), f(a, b))),
(f(f(a, b), f(a, b)), (0, 1), c, f(f(a, c), f(a, b))),
(f(f(a, b), f(a, b)), (1, 0), c, f(f(a, b), f(c, b))),
(f(f(a, b), f(a, b)), (1, 1), c, f(f(a, b), f(a, c))),
(f(f(a, b), f(a, b)), (0, ), c, f(c, f(a, b))),
(f(f(a, b), f(a, b)), (1, ), c, f(f(a, b), c)),
]
) # yapf: disable
def test_substitution_match(self, replace, expression, position, replacement, expected_result):
result = replace(expression, position, replacement)
assert result == expected_result, "Replacement did not yield expected result ({!r} {!r} -> {!r})".format(
expression, position, replacement
)
assert result is not expression, "Replacement modified the original expression"
@pytest.mark.parametrize('replace', [replace, many_replace_wrapper])
def test_too_big_position_error(self, replace):
with pytest.raises(IndexError):
replace(a, (0, ), b)
with pytest.raises(IndexError):
replace(f(a), (0, 0), b)
with pytest.raises(IndexError):
replace(f(a), (1, ), b)
with pytest.raises(IndexError):
replace(f(a, b), (2, ), b)
class TestReplaceManyTest:
@pytest.mark.parametrize(
' expression, replacements, expected_result',
[
(f(a, b), [((0, ), b), ((1, ), a)], f(b, a)),
(f(a, b), [((0, ), [c, c]), ((1, ), a)], f(c, c, a)),
(f(a, b), [((0, ), b), ((1, ), [c, c])], f(b, c, c)),
(f(f2(a, b), c), [((0, 0), b), ((0, 1), a)], f(f2(b, a), c)),
(f_c(c, f2(a, b)), [((1, 0), b), ((1, 1), a)], f_c(c, f2(b, a))),
(f(f2(a, b), f2(c)), [((1, 0), b), ((0, 1), a)], f(f2(a, a), f2(b))),
(f(f2(a, b), f2(c)), [((0, 1), a), ((1, 0), b)], f(f2(a, a), f2(b))),
(f_c(f2(c), f2(a, b)), [((0, 0), b), ((1, 1), a)], f_c(f2(b), f2(a, a))),
(f_c(f2(c), f2(a, b)), [((1, 1), a), ((0, 0), b)], f_c(f2(b), f2(a, a))),
]
) # yapf: disable
def test_substitution_match(self, expression, replacements, expected_result):
result = replace_many(expression, replacements)
assert result == expected_result, "Replacement did not yield expected result ({!r} -> {!r})".format(
expression, replacements
)
assert result is not expression, "Replacement modified the original expression"
def test_inconsistent_position_error(self):
with pytest.raises(IndexError):
replace_many(f(a), [((), b), ((0, ), b)])
with pytest.raises(IndexError):
replace_many(a, [((), b), ((0, ), b)])
with pytest.raises(IndexError):
replace_many(a, [((0, ), b), ((1, ), b)])
def test_empty_replace(self):
expression = f(a, b)
result = replace_many(expression, [])
assert expression is result, "Empty replacements should not change the expression."
@pytest.mark.parametrize(
' expression, pattern, expected_results',
[ # Substitution Position
(f(a), f(x_), [({'x': a}, ())]),
(f(a), x_, [({'x': f(a)}, ()),
({'x': a}, (0, ))]),
(f(a, f2(b), f2(f2(c), f2(a), f2(f2(b))), f2(c), c), f2(x_), [({'x': b}, (1, )),
({'x': c}, (2, 0)),
({'x': a}, (2, 1)),
({'x': f2(b)}, (2, 2)),
({'x': b}, (2, 2, 0)),
({'x': c}, (3, ))])
]
) # yapf: disable
def test_match_anywhere(expression, pattern, expected_results):
expression = expression
pattern = Pattern(pattern)
results = list(match_anywhere(expression, pattern))
assert len(results) == len(expected_results), "Invalid number of results"
for result in expected_results:
assert result in results, "Results differ from expected"
def test_match_anywhere_error():
with pytest.raises(ValueError):
next(match_anywhere(f(x_), f(x_)))
def test_match_error():
with pytest.raises(ValueError):
next(match_one_to_one(f(x_), f(x_)))
def _many_to_one_replace(expression, rules):
return ManyToOneReplacer(*rules).replace(expression)
@pytest.mark.parametrize(
'replacer', [replace_all, _many_to_one_replace]
)
def test_logic_simplify(replacer):
LAnd = Operation.new('and', Arity.variadic, 'LAnd', associative=True, one_identity=True, commutative=True)
LOr = Operation.new('or', Arity.variadic, 'LOr', associative=True, one_identity=True, commutative=True)
LXor = Operation.new('xor', Arity.variadic, 'LXor', associative=True, one_identity=True, commutative=True)
LNot = Operation.new('not', Arity.unary, 'LNot')
LImplies = Operation.new('implies', Arity.binary, 'LImplies')
Iff = Operation.new('iff', Arity.binary, 'Iff')
___ = Wildcard.star()
a1 = Symbol('a1')
a2 = Symbol('a2')
a3 = Symbol('a3')
a4 = Symbol('a4')
a5 = Symbol('a5')
a6 = Symbol('a6')
a7 = Symbol('a7')
a8 = Symbol('a8')
a9 = Symbol('a9')
a10 = Symbol('a10')
a11 = Symbol('a11')
LBot = Symbol(u'⊥')
LTop = Symbol(u'⊤')
expression = LImplies(
LAnd(
Iff(
Iff(LOr(a1, a2), LOr(LNot(a3), Iff(LXor(a4, a5), LNot(LNot(LNot(a6)))))),
LNot(
LAnd(
LAnd(a7, a8),
LNot(
LXor(
LXor(LOr(a9, LAnd(a10, a11)), a2),
LAnd(LAnd(a11, LXor(a2, Iff(a5, a5))), LXor(LXor(a7, a7), Iff(a9, a4)))
)
)
)
)
),
LImplies(
Iff(
Iff(LOr(a1, a2), LOr(LNot(a3), Iff(LXor(a4, a5), LNot(LNot(LNot(a6)))))),
LNot(
LAnd(
LAnd(a7, a8),
LNot(
LXor(
LXor(LOr(a9, LAnd(a10, a11)), a2),
LAnd(LAnd(a11, LXor(a2, Iff(a5, a5))), LXor(LXor(a7, a7), Iff(a9, a4)))
)
)
)
)
),
LNot(
LAnd(
LImplies(
LAnd(a1, a2),
LNot(
LXor(
LOr(
LOr(
LXor(LImplies(LAnd(a3, a4), LImplies(a5, a6)), LOr(a7, a8)),
LXor(Iff(a9, a10), a11)
), LXor(LXor(a2, a2), a7)
), Iff(LOr(a4, a9), LXor(LNot(a6), a6))
)
)
), LNot(Iff(LNot(a11), LNot(a9)))
)
)
)
),
LNot(
LAnd(
LImplies(
LAnd(a1, a2),
LNot(
LXor(
LOr(
LOr(
LXor(LImplies(LAnd(a3, a4), LImplies(a5, a6)), LOr(a7, a8)),
LXor(Iff(a9, a10), a11)
), LXor(LXor(a2, a2), a7)
), Iff(LOr(a4, a9), LXor(LNot(a6), a6))
)
)
), LNot(Iff(LNot(a11), LNot(a9)))
)
)
)
rules = [
# xor(x,⊥) → x
ReplacementRule(
Pattern(LXor(x__, LBot)),
lambda x: LXor(*x)
),
# xor(x, x) → ⊥
ReplacementRule(
Pattern(LXor(x_, x_, ___)),
lambda x: LBot
),
# and(x,⊤) → x
ReplacementRule(
Pattern(LAnd(x__, LTop)),
lambda x: LAnd(*x)
),
# and(x,⊥) → ⊥
ReplacementRule(
Pattern(LAnd(__, LBot)),
lambda: LBot
),
# and(x, x) → x
ReplacementRule(
Pattern(LAnd(x_, x_, y___)),
lambda x, y: LAnd(x, *y)
),
# and(x, xor(y, z)) → xor(and(x, y), and(x, z))
ReplacementRule(
Pattern(LAnd(x_, LXor(y_, z_))),
lambda x, y, z: LXor(LAnd(x, y), LAnd(x, z))
),
# implies(x, y) → not(xor(x, and(x, y)))
ReplacementRule(
Pattern(LImplies(x_, y_)),
lambda x, y: LNot(LXor(x, LAnd(x, y)))
),
# not(x) → xor(x,⊤)
ReplacementRule(
Pattern(LNot(x_)),
lambda x: LXor(x, LTop)
),
# or(x, y) → xor(and(x, y), xor(x, y))
ReplacementRule(
Pattern(LOr(x_, y_)),
lambda x, y: LXor(LAnd(x, y), LXor(x, y))
),
# iff(x, y) → not(xor(x, y))
ReplacementRule(
Pattern(Iff(x_, y_)),
lambda x, y: LNot(LXor(x, y))
),
] # yapf: disable
result = replacer(expression, rules)
assert result == LBot
|
the-stack_0_3484 | import cv2
import numpy as np
import os
def edit():
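    """Apply a collection of filters to Media/sample.jpg, save each result into the
    Edited/ folder and finally open that folder."""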
#Read the image
image = cv2.imread('Media/sample.jpg')
#greyscale filter
def greyscale(img):
greyscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return greyscale
# brightness adjustment
def bright(img, beta_value ):
img_bright = cv2.convertScaleAbs(img, beta=beta_value)
return img_bright
#sharp effect
def sharpen(img):
kernel = np.array([[-1, -1, -1], [-1, 9.5, -1], [-1, -1, -1]])
img_sharpen = cv2.filter2D(img, -1, kernel)
return img_sharpen
#sepia effect
def sepia(img):
img_sepia = np.array(img, dtype=np.float64) # converting to float to prevent loss
img_sepia = cv2.transform(img_sepia, np.matrix([[0.272, 0.534, 0.131],
[0.349, 0.686, 0.168],
                                        [0.393, 0.769, 0.189]])) # multiplying image with special sepia matrix
img_sepia[np.where(img_sepia > 255)] = 255 # normalizing values greater than 255 to 255
img_sepia = np.array(img_sepia, dtype=np.uint8)
return img_sepia
#grey pencil sketch effect
def pencil_sketch_grey(img):
#inbuilt function to create sketch effect in colour and greyscale
sk_gray, sk_color = cv2.pencilSketch(img, sigma_s=60, sigma_r=0.07, shade_factor=0.1)
return sk_gray
#colour pencil sketch effect
def pencil_sketch_col(img):
#inbuilt function to create sketch effect in colour and greyscale
sk_gray, sk_color = cv2.pencilSketch(img, sigma_s=60, sigma_r=0.07, shade_factor=0.1)
return sk_color
#HDR effect
def HDR(img):
hdr = cv2.detailEnhance(img, sigma_s=12, sigma_r=0.15)
return hdr
# invert filter
def invert(img):
inv = cv2.bitwise_not(img)
return inv
    # helper: build a 256-entry lookup table by spline-interpolating the given control points
from scipy.interpolate import UnivariateSpline
def LookupTable(x, y):
spline = UnivariateSpline(x, y)
return spline(range(256))
#summer effect
def Summer(img):
increaseLookupTable = LookupTable([0, 64, 128, 256], [0, 80, 160, 256])
decreaseLookupTable = LookupTable([0, 64, 128, 256], [0, 50, 100, 256])
blue_channel, green_channel, red_channel = cv2.split(img)
red_channel = cv2.LUT(red_channel, increaseLookupTable).astype(np.uint8)
blue_channel = cv2.LUT(blue_channel, decreaseLookupTable).astype(np.uint8)
sum= cv2.merge((blue_channel, green_channel, red_channel ))
return sum
#winter effect
def Winter(img):
increaseLookupTable = LookupTable([0, 64, 128, 256], [0, 80, 160, 256])
decreaseLookupTable = LookupTable([0, 64, 128, 256], [0, 50, 100, 256])
blue_channel, green_channel, red_channel = cv2.split(img)
red_channel = cv2.LUT(red_channel, decreaseLookupTable).astype(np.uint8)
blue_channel = cv2.LUT(blue_channel, increaseLookupTable).astype(np.uint8)
win= cv2.merge((blue_channel, green_channel, red_channel))
return win
#making the greyscale image
a1 = greyscale(image)
filename = 'greyscale.jpg'
# Using cv2.imwrite() method
# Saving the image
cv2.imwrite(f'Edited/{filename}', a1)
#making the more bright image
#positive beta value
a2 = bright(image, 60)
filename = 'more_bright.jpg'
# Using cv2.imwrite() method
# Saving the image
cv2.imwrite(f'Edited/{filename}', a2)
#making the less bright image
#negative beta value
a3 = bright(image, -60)
filename = 'less_bright.jpg'
# Using cv2.imwrite() method
# Saving the image
cv2.imwrite(f'Edited/{filename}', a3)
#making the sharp image
a4 = sharpen(image)
filename = 'sharpen.jpg'
# Using cv2.imwrite() method
# Saving the image
cv2.imwrite(f'Edited/{filename}', a4)
#making the sepia image
a5 = sepia(image)
filename = 'sepia.jpg'
# Using cv2.imwrite() method
# Saving the image
cv2.imwrite(f'Edited/{filename}', a5)
#making the grey pencil sketch
a6 = pencil_sketch_grey(image)
filename = 'pencil_grey.jpg'
# Using cv2.imwrite() method
# Saving the image
cv2.imwrite(f'Edited/{filename}', a6)
#making the colour pencil sketch
a7 = pencil_sketch_col(image)
filename = 'pencil_col.jpg'
# Using cv2.imwrite() method
# Saving the image
cv2.imwrite(f'Edited/{filename}', a7)
#making the hdr img
a8 = HDR(image)
filename = 'HDR.jpg'
# Using cv2.imwrite() method
# Saving the image
cv2.imwrite(f'Edited/{filename}', a8)
#making the invert img
a9 = invert(image)
filename = 'invert.jpg'
# Using cv2.imwrite() method
# Saving the image
cv2.imwrite(f'Edited/{filename}', a9)
#making the summer img
a11 = Summer(image)
filename = 'Summer.jpg'
# Using cv2.imwrite() method
# Saving the image
cv2.imwrite(f'Edited/{filename}', a11)
#making the winter img
a10 = Winter(image)
filename = 'Winter.jpg'
# Using cv2.imwrite() method
# Saving the image
cv2.imwrite(f'Edited/{filename}', a10)
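    # open the output folder in the file explorer (os.startfile is Windows-only)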
os.startfile('Edited') |
the-stack_0_3485 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiMarketingCampaignMemberRelationUnbindModel(object):
def __init__(self):
self._member_template_id = None
self._out_member_no = None
self._request_id = None
self._user_id = None
@property
def member_template_id(self):
return self._member_template_id
@member_template_id.setter
def member_template_id(self, value):
self._member_template_id = value
@property
def out_member_no(self):
return self._out_member_no
@out_member_no.setter
def out_member_no(self, value):
self._out_member_no = value
@property
def request_id(self):
return self._request_id
@request_id.setter
def request_id(self, value):
self._request_id = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.member_template_id:
if hasattr(self.member_template_id, 'to_alipay_dict'):
params['member_template_id'] = self.member_template_id.to_alipay_dict()
else:
params['member_template_id'] = self.member_template_id
if self.out_member_no:
if hasattr(self.out_member_no, 'to_alipay_dict'):
params['out_member_no'] = self.out_member_no.to_alipay_dict()
else:
params['out_member_no'] = self.out_member_no
if self.request_id:
if hasattr(self.request_id, 'to_alipay_dict'):
params['request_id'] = self.request_id.to_alipay_dict()
else:
params['request_id'] = self.request_id
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiMarketingCampaignMemberRelationUnbindModel()
if 'member_template_id' in d:
o.member_template_id = d['member_template_id']
if 'out_member_no' in d:
o.out_member_no = d['out_member_no']
if 'request_id' in d:
o.request_id = d['request_id']
if 'user_id' in d:
o.user_id = d['user_id']
return o
|
the-stack_0_3487 | from __future__ import print_function
import random
import string
import subprocess
import time
from configparser import ConfigParser
import MySQLdb
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.mail import EmailMessage
from django.http import JsonResponse
from django.views.generic import View
from billing.models import Transaction, ItemCount, RestrictedRequest
from digitalmarket.mixins import AjaxRequiredMixin
from notifications.models import NotificationItem
from flows.models import Flow
#NP_PATH = '/home/zxc/Desktop/frontend/src/np'
#CHECKOUT_LOG_PATH = '/home/zxc/Desktop/checkout_log'
class CheckoutAjaxView(LoginRequiredMixin, AjaxRequiredMixin, View):
"""
    View for the checkout function.
    Creates broker credentials, sends emails and notifications, creates flows,
    and handles restricted-permission purchase requests.
"""
# Added: sending message to the broker
def _sendAdminEmail(self, user, seller, topic, prod_num, message, request_file, restricted_active, request,
product_obj):
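        """Create broker credentials for the buyer if they do not exist yet, then either
        grant topic access immediately (unrestricted products) or file a RestrictedRequest
        for the seller to approve, notifying both parties by email and in-app notification."""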
        config = ConfigParser()
config.read('/code/config.ini')
db = MySQLdb.connect(host=config.get('main', 'mysql_host'), # your host, usually localhost
user=config.get('main', 'mysql_name'), # your username
passwd=config.get('main', 'mysql_pw'), # your password
db=config.get('main', 'mysql_db')) # your database
cur = db.cursor()
log = config.get('main', 'checkout_log_path')
NP_PATH = config.get('main', 'np_path')
username = user.username
user_email = user.email
topic = topic
        # Random password with 12 characters (lower-case letters + digits)
original_password = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(12))
if not cur.execute("select (1) from users where username = %s limit 1", (username,)):
command = NP_PATH + ' ' + '-p' + ' ' + original_password
command_bytes = command.encode('utf-8')
pw_bytes = subprocess.Popen(command_bytes, stdout=subprocess.PIPE, shell=True).communicate()[0]
password = pw_bytes.decode().rstrip('\n')
cur.execute("insert into users (username,pw,user_id) values (%s,%s,%s)",
(username, password, user.id)) # stdout: ignore '\n'
# Send password to email
subject = 'Your new password'
msg = "Your password to I3 is: " + original_password
email = EmailMessage(subject, msg, to=[user_email])
email.send()
# Record email as notification
notification_box = request.user.get_notification_box()
notification_item = NotificationItem(
notification_box=notification_box,
subject=subject,
body=msg)
notification_item.save()
# To do: make topic as a product obj that can be linked to
flow_obj = Flow.objects.create(
user=request.user,
topic=topic,
direction='in',
state='inactive')
flow_obj.save()
# send to the user which topic is able to pub/sub
# when the topic is unrestricted: insert to acls and send confirmation back to buyer
if not restricted_active:
subject = 'New product purchased'
msg = 'Now you can subscribe to topic: ' + topic + '.'
email = EmailMessage(subject, msg, to=[user_email])
email.send()
# Record email as notification
notification_box = request.user.get_notification_box()
notification_item = NotificationItem(
notification_box=notification_box,
subject=subject,
body=msg)
notification_item.save()
subject = 'New buyer of an unrestricted topic'
msg = 'Buyer ' + username + ' just bought product ' + topic + '.'
email = EmailMessage(subject, msg, to=[seller.email])
email.send()
# Record email as notification
notification_box = seller.get_notification_box()
notification_item = NotificationItem(
notification_box=notification_box,
subject=subject,
body=msg)
notification_item.save()
# insert into acls table
rw = 1 # seller: can read and write
if product_obj.sensor_type >= 2:
rw = 2
cur.execute("insert into acls (username,topic,rw,user_id, topic_id) values (%s,%s,%s,%s,%s)",
(username, topic, str(rw), user.id, product_obj.id))
# write new sub info to log
with open(log, 'a') as f:
f.write(str(time.time()) + ': New Sub ' + username + ' ' + topic + ' ' + str(prod_num) + '\n')
else:
restricted_request_obj = RestrictedRequest(
seller=product_obj.seller,
requester=request.user,
product=product_obj,
price=product_obj.price,
quantity=prod_num,
intention=message,
attachment=request_file
)
restricted_request_obj.save()
subject = 'New product purchased (to be confirmed)'
msg = 'Waiting seller to confirm purchase of ' + topic + '.'
email = EmailMessage(subject, msg, to=[user_email])
email.send()
# Record email as notification
notification_box = request.user.get_notification_box()
notification_item = NotificationItem(
notification_box=notification_box,
subject=subject,
body=msg)
notification_item.save()
subject = 'New buyer of a restricted topic'
msg = 'Buyer ' + username + ' just bought product ' + topic + '. You need to approve the purchase.'
email = EmailMessage(subject, msg, to=[seller.email])
email.send()
# Record email as notification
notification_box = seller.get_notification_box()
notification_item = NotificationItem(
notification_box=notification_box,
subject=subject,
body=msg)
notification_item.save()
db.commit()
def post(self, request, *args, **kwargs):
# TODO: add credit card processing
user = request.user
cart = user.get_cart()
if cart.num_items() == 0:
data = {
'success': False,
'errMsg': 'Your cart is empty'
}
return JsonResponse(data=data)
processed_items = []
for item in cart.all_items():
# TODO: how to handle restricted?
transaction = Transaction(
buyer=request.user,
seller=item.product.seller,
product=item.product,
price=item.product.get_price * item.quantity,
quantity=item.quantity,
)
transaction.save()
item_count = ItemCount(
buyer=request.user,
product=item.product,
order=item.quantity,
quantity=item.quantity,
)
item_count.save()
try:
self._sendAdminEmail(user, item.product.seller, item.product.title, item.quantity,
item.intention, item.attachment, item.product.restricted_active, request,
item.product)
processed_items.append(item)
            except Exception:
# TODO: log error, recover, try again?
pass
links = []
for item in processed_items:
download_link = item.product.get_download()
preview_link = download_link + "?preview=True"
link = {
"download": download_link,
"preview": preview_link,
}
links.append(link)
item.delete()
data = {
'success': True,
'links': links
}
return JsonResponse(data=data)
class RequestsAjaxView(LoginRequiredMixin, AjaxRequiredMixin, View):
"""
    Seller decides whether to approve or decline a buyer's purchase request.
    Returns a JSON response for the frontend Ajax call.
"""
def post(self, request, *args, **kwargs):
request_id = kwargs['pk']
restricted_request = RestrictedRequest.objects.get(pk=request_id)
if restricted_request.seller != request.user:
data = {
'success': False,
'errMsg': 'Request not found',
'errCode': '404'
}
else:
task = kwargs['task']
if task == 'approve':
restricted_request.success = 1
else:
restricted_request.success = 0
restricted_request.replied = True
restricted_request.save()
data = {
'success': True
}
return JsonResponse(data=data)
|
the-stack_0_3488 | import gym
from gym.spaces import Box, Discrete, Tuple
import logging
import random
import numpy as np
logger = logging.getLogger(__name__)
# Agent has to traverse the maze from the starting position S -> F
# Observation space: [x_pos, y_pos]
# Action space: Discrete(4) -- move one cell up, right, down or left
# (the earlier wind-direction variant survives only in the commented-out code below)
MAP_DATA = """
###########################
# S #
# ### #
# ### #
# ### #
# F #
###########################"""
class MazeEnv(gym.Env):
def __init__(self, env_config={}):
self.map = [m for m in MAP_DATA.split("\n") if m]
self.x_dim = len(self.map)
self.y_dim = len(self.map[0])
logger.info("Loaded map {} {}".format(self.x_dim, self.y_dim))
for x in range(self.x_dim):
for y in range(self.y_dim):
if self.map[x][y] == "S":
self.start_pos = (x, y)
elif self.map[x][y] == "F":
self.end_pos = (x, y)
logger.info("Start pos {} end pos {}".format(self.start_pos,
self.end_pos))
# self.observation_space = Tuple([
# Box(0, 100, shape=(2, )), # (x, y)
# Discrete(4), # wind direction (N, E, S, W)
# ])
self.observation_space = Box(0, 100, shape=(2,))
self.action_space = Discrete(4) # whether to move or not
self.viewer = None
self.h = len(self.map)
self.w = len(self.map[0])
self.frame = 255 * np.ones((self.h, self.w, 3), dtype=np.uint8)
self.bg = 255 * np.ones((self.h, self.w, 3), dtype=np.uint8)
for ridx in range(self.h):
for cidx in range(self.w):
if self.map[ridx][cidx] == "#":
self.bg[ridx, cidx, :] = [255, 0, 0]
self.frame = self.bg.copy()
self.member = None
def reset(self):
# self.wind_direction = random.choice([0, 1, 2, 3])
self.pos = self.start_pos
self.num_steps = 0
return np.array(self.pos)
def step(self, action, verbose=False):
# if action == 1:
# self.pos = self._get_new_pos(self.pos, self.wind_direction)
# self.wind_direction = random.choice([0, 1, 2, 3])
self.pos = self._get_new_pos(self.pos, action)
self.num_steps += 1
at_goal = self.pos == self.end_pos
done = at_goal or self.num_steps >= 200
if verbose:
print(f"step: {self.num_steps}, pos: {self.pos}")
return (np.array(self.pos),
1 * int(at_goal), done, {})
def _get_new_pos(self, pos, direction):
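        # directions in array coordinates: 0 = up, 1 = right, 2 = down, 3 = left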
if direction == 0:
new_pos = (pos[0] - 1, pos[1])
elif direction == 1:
new_pos = (pos[0], pos[1] + 1)
elif direction == 2:
new_pos = (pos[0] + 1, pos[1])
elif direction == 3:
new_pos = (pos[0], pos[1] - 1)
if (new_pos[0] >= 0 and new_pos[0] < self.x_dim and new_pos[1] >= 0 and new_pos[1] < self.y_dim
and self.map[new_pos[0]][new_pos[1]] != "#"):
return new_pos
else:
return pos # did not move
def set_member(self, member):
self.member = member
def member_color(self):
if self.member == 0:
return [51, 255, 69]
elif self.member == 1:
return [255, 190, 51]
else:
raise ValueError
def _get_image(self, alpha=0.995):
frame_t = self.bg.copy()
frame_t[self.pos] = self.member_color()
# frame[self.end_pos] = [0, 0, 255]
# self.frame = (alpha * self.frame + (1 - alpha) * frame_t).astype(np.uint8)
self.frame[self.pos] = self.member_color()
return np.concatenate([frame_t, self.frame], axis=1)
def render(self, mode='human'):
img = self._get_image()
if mode == 'rgb_array':
return img
elif mode == 'human':
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(img)
return self.viewer.isopen
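# Minimal random-rollout sketch (illustration only, not part of the original file):
#     env = MazeEnv()
#     obs = env.reset()
#     done = False
#     while not done:
#         obs, reward, done, info = env.step(env.action_space.sample())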
|
the-stack_0_3490 | import logging
import os
import sys
import time
import click
from .investing import Investing
from .sendtext import SendText
__version__ = "0.0.4"
def setup_logging():
"""Create a basic console based logger object.
Args:
None
Returns:
logger (logging.logger): Logger object.
"""
log_handler = logging.StreamHandler()
log_handler.setFormatter(
logging.Formatter(
"%(asctime)s [%(levelname)5s] %(funcName)4s() - %(message)s",
"%Y-%m-%d %H:%M:%S",
)
)
logger = logging.getLogger(__name__)
logger.addHandler(log_handler)
logger.setLevel(logging.INFO)
return logger
def run(
instrument: Investing,
send_message: SendText,
lower: float,
upper: float,
threshold: float,
) -> tuple:
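    """Check the price once; if it breaches [lower, upper], send a text and re-centre
    the band around the current price using `threshold` (in basis points).
    Returns the (possibly updated) (lower, upper) tuple."""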
instrument.fetch()
logger.debug("Fetched page successfully.")
price = instrument.price()
logger.debug(f"Price of {instrument.name} is ${price}.")
if price >= upper or price <= lower:
logger.info(f"Price {price} breached price band [{lower}, {upper}].")
logger.debug(f"Resetting price band with threshold value {threshold}.")
upper = price * (1 + threshold / 10000)
lower = price * (1 - threshold / 10000)
logger.info(f"Resetting price band to [{lower}, {upper}].")
logger.debug("Sending text.")
send_message.send(f"{instrument.name} price is {price}.")
return (lower, upper)
@click.command(
context_settings=dict(help_option_names=["-h", "--help"]),
options_metavar="[options...]",
)
@click.argument("to_num", metavar="[to number]")
@click.argument("from_num", metavar="[from number]")
@click.argument("market", metavar="[market]")
@click.argument("contract", metavar="[contract]")
@click.argument("priceband", metavar="[priceband]")
@click.option("--symbol", "-s", help="Contract symbol. [default: contract]")
@click.option(
"--threshold", "-t", help="Threshold in bps.", default=100.0, show_default=True
)
@click.option(
"--interval",
"-i",
help="Interval to perform check (mins).",
default=1.0,
show_default=True,
)
@click.option(
"--sub-market", "-m", help="E.g. crypto is market and bitcoin is sub market."
)
@click.option("--debug", "-d", is_flag=True, help="Print debug messages.")
def main(
to_num,
from_num,
interval,
threshold,
debug=None,
symbol=None,
market=None,
contract=None,
priceband=None,
sub_market=None,
):
"""Utiltiy script to notify if instrument price fluctuates out of price band.
"""
global logger
logger = setup_logging()
if debug:
logger.setLevel(logging.DEBUG)
logger.debug("Logging set to debug.")
if ("TWILIO_AUTH_TOKEN" in os.environ) and ("TWILIO_ACCOUNT_SID" in os.environ):
pass
else:
logger.error("TWILIO_AUTH_TOKEN and/or TWILIO_ACCOUNT_SID not defined.")
sys.exit(1)
lower, upper = list(map(float, priceband.split("-")))
if sub_market:
end_point = market + "/" + sub_market + "/" + contract
else:
end_point = market + "/" + contract
logger.debug(f"{end_point} end point will be queried.")
instrument = Investing(end_point, symbol)
text_client = SendText(from_num, to_num)
while True:
try:
lower, upper = run(instrument, text_client, lower, upper, threshold)
time.sleep(60 * interval)
except KeyboardInterrupt:
logger.info("Caught interrupt, exiting...")
sys.exit()
if __name__ == "__main__":
main()
|
the-stack_0_3491 | #Given a list of numbers and a number k, return whether any two numbers from the list add up to k.
#For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.
#Bonus: Can you do this in one pass?
if __name__ == "__main__":
    l = [int(i) for i in input().split()]
k = int(input())
found = False
    for el in range(len(l)):
        check = k - l[el]
        # only look at later elements so a number is never paired with itself
        if check in l[el + 1:]:
            found = True
            break
print(found)
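# Bonus (one-pass) sketch, not part of the original solution: remember numbers already
# seen in a set and check each complement as we go.
def has_pair_sum(nums, target):
    seen = set()
    for n in nums:
        if target - n in seen:
            return True
        seen.add(n)
    return False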
|