ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3 to 1.04M chars)
---|---|---
py | b4116ba99cf602c6c87d441c8e6c5b3438b5d8e7 | import os
from .log_handlers import apply_log_handler, get_formatter
from .runner import Runner
__all__ = ("Runner",)
if int(os.environ.get("USE_OCTUE_LOG_HANDLER", "0")) == 1:
# Use the default log handler from this package if `USE_OCTUE_LOG_HANDLER` is 1. The default value for this is 0
# because `octue` is a package that is primarily imported - the importer may not want to use this log handler if
# they have their own.
apply_log_handler(logger_name=None, formatter=get_formatter())
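# Illustrative opt-in (not part of this module): an importer that *does* want octue's default
# handler can set the environment variable before importing the package, e.g.
#
#     import os
#     os.environ["USE_OCTUE_LOG_HANDLER"] = "1"
#     import octue  # the handler above is applied on import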
|
py | b4116bbf016330c14d8e154c800f7b0d2c7e28f2 | import taichi as ti
from tests import test_utils
@test_utils.test(require=ti.extension.bls)
def test_simple_1d():
x, y = ti.field(ti.f32), ti.field(ti.f32)
N = 64
bs = 16
ti.root.pointer(ti.i, N // bs).dense(ti.i, bs).place(x, y)
@ti.kernel
def populate():
for i in range(N):
x[i] = i
@ti.kernel
def copy():
ti.block_local(x)
for i in x:
y[i] = x[i]
populate()
copy()
for i in range(N):
assert y[i] == i
@test_utils.test(require=ti.extension.bls)
def test_simple_2d():
x, y = ti.field(ti.f32), ti.field(ti.f32)
N = 16
bs = 16
ti.root.pointer(ti.ij, N // bs).dense(ti.ij, bs).place(x, y)
@ti.kernel
def populate():
for i, j in ti.ndrange(N, N):
x[i, j] = i - j
@ti.kernel
def copy():
ti.block_local(x)
for i, j in x:
y[i, j] = x[i, j]
populate()
copy()
for i in range(N):
for j in range(N):
assert y[i, j] == i - j
def _test_bls_stencil(*args, **kwargs):
from .bls_test_template import bls_test_template
bls_test_template(*args, **kwargs)
@test_utils.test(require=ti.extension.bls)
def test_gather_1d_trivial():
# y[i] = x[i]
_test_bls_stencil(1, 128, bs=32, stencil=((0, ), ))
@test_utils.test(require=ti.extension.bls)
def test_gather_1d():
# y[i] = x[i - 1] + x[i]
_test_bls_stencil(1, 128, bs=32, stencil=((-1, ), (0, )))
@test_utils.test(require=ti.extension.bls)
def test_gather_2d():
stencil = [(0, 0), (0, -1), (0, 1), (1, 0)]
_test_bls_stencil(2, 128, bs=16, stencil=stencil)
@test_utils.test(require=ti.extension.bls)
def test_gather_2d_nonsquare():
stencil = [(0, 0), (0, -1), (0, 1), (1, 0)]
_test_bls_stencil(2, 128, bs=(4, 16), stencil=stencil)
@test_utils.test(require=ti.extension.bls)
def test_gather_3d():
stencil = [(-1, -1, -1), (2, 0, 1)]
_test_bls_stencil(3, 64, bs=(4, 8, 16), stencil=stencil)
@test_utils.test(require=ti.extension.bls)
def test_scatter_1d_trivial():
# y[i] = x[i]
_test_bls_stencil(1, 128, bs=32, stencil=((0, ), ), scatter=True)
@test_utils.test(require=ti.extension.bls)
def test_scatter_1d():
_test_bls_stencil(1, 128, bs=32, stencil=(
(1, ),
(0, ),
), scatter=True)
@test_utils.test(require=ti.extension.bls)
def test_scatter_2d():
stencil = [(0, 0), (0, -1), (0, 1), (1, 0)]
_test_bls_stencil(2, 128, bs=16, stencil=stencil, scatter=True)
@test_utils.test(require=ti.extension.bls)
def test_multiple_inputs():
x, y, z, w, w2 = ti.field(ti.i32), ti.field(ti.i32), ti.field(ti.i32), ti.field(ti.i32), ti.field(ti.i32)
N = 128
bs = 8
ti.root.pointer(ti.ij, N // bs).dense(ti.ij, bs).place(x, y, z, w, w2)
@ti.kernel
def populate():
for i, j in ti.ndrange((bs, N - bs), (bs, N - bs)):
x[i, j] = i - j
y[i, j] = i + j * j
z[i, j] = i * i - j
@ti.kernel
def copy(bls: ti.template(), w: ti.template()):
if ti.static(bls):
ti.block_local(x, y, z)
for i, j in x:
w[i, j] = x[i, j - 2] + y[i + 2, j - 1] + y[i - 1, j] + z[i - 1, j] + z[i + 1, j]
populate()
copy(False, w2)
copy(True, w)
for i in range(N):
for j in range(N):
assert w[i, j] == w2[i, j]
@test_utils.test(require=ti.extension.bls)
def test_bls_large_block():
n = 2**10
block_size = 32
stencil_length = 28 # uses 60 * 60 * 4B = 14.0625KiB shared memory
a = ti.field(dtype=ti.f32)
b = ti.field(dtype=ti.f32)
block = ti.root.pointer(ti.ij, n // block_size)
block.dense(ti.ij, block_size).place(a)
block.dense(ti.ij, block_size).place(b)
@ti.kernel
def foo():
ti.block_dim(512)
ti.block_local(a)
for i, j in a:
for k in range(stencil_length):
b[i, j] += a[i + k, j]
b[i, j] += a[i, j + k]
foo()
# TODO: BLS on CPU
# TODO: BLS boundary out of bound
# TODO: BLS with TLS
|
py | b4116c7011a94814895ddc4d907f2b29cb45ab6a | import csv
def create_events_table(df, cluster_c, data_c, non_p, out_dir):
measures = df[[cluster_c] + data_c].groupby(cluster_c).agg(['mean', 'std'])
measures = measures.join(df.groupby(cluster_c)[cluster_c].count(), on=cluster_c)
with open(out_dir + '/events_characteristics.csv', 'w', newline='') as f:
writer = csv.writer(f)
row = ['Cluster', 'Number of events'] + data_c + ['Name']
writer.writerow(row)
i = 1
for idx, val in measures.iterrows():
row = [i, int(val[cluster_c])]
for d in data_c:
if d not in non_p:
row.append('{mean}%\n({std}%)'.format(mean=(val[(d, 'mean')]*100).round(2), std=(val[(d, 'std')]*100).round(2)))
else:
row.append('{mean}\n({std})'.format(mean=(val[(d, 'mean')]).round(2), std=(val[(d, 'std')]).round(2)))
row += [idx]
writer.writerow(row)
i += 1 |
py | b4116d1884b652f99e3ea8e098ac69ee4edb6752 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=0'], ['-usehd=0'], ['-usehd=0'], ['-usehd=0']])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print "Mining blocks..."
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# If the fee's positive delta is higher than this value, the tests will fail;
# a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
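# For example (illustrative numbers only): with a relay fee of 0.00001000 per kB,
# feeTolerance = 2 * 0.00001000 / 1000 = 0.00000002, i.e. the cost of two extra signature bytes.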
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(2000)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 22 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction which will not get a change output      #
#####################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
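# Illustrative breakdown of the splice above: 4 + 1 + 36 bytes = 41 bytes = 82 hex characters
# up to the scriptSig length, so "0100" sets the 1-byte length to 0x01 and inserts a single
# 0x00 script byte, which the scriptSig assertions below expect as "00".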
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 10}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
raise AssertionError("Spent more than available")
except JSONRPCException as e:
assert("Insufficient" in e.error['message'])
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():11,self.nodes[1].getnewaddress():12,self.nodes[1].getnewaddress():1,self.nodes[1].getnewaddress():13,self.nodes[1].getnewaddress():2,self.nodes[1].getnewaddress():3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 4of5 multisig p2sh transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 12 LED to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 12)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=0'], ['-usehd=0'], ['-usehd=0'], ['-usehd=0']])
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
try:
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('Keypool ran out' in e.error['message'])
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].walletlock()
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 12)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('walletpassphrase' in e.error['message'])
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node0
assert_equal(oldBalance+Decimal('511.0000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
if __name__ == '__main__':
RawTransactionsTest().main()
|
py | b4116e36b31399312259d6adcafc7175b4ff9c7c | def myFunc(e):
return e["year"]
cars = [
{"car": "Ford", "year": 2005},
{"car": "Mitsubishi", "year": 2000},
{"car": "BMW", "year": 2019},
{"car": "VW", "year": 2011},
]
cars.sort(key=myFunc)
print(cars)
|
py | b4116ea14d4499e9e25286e88cec6899449f4299 | SAMPLE_PROFIT_CENTERS = [
{
'ci_id': 123,
'ci_uid': 'pc-1',
'name': 'Profit Center #1',
'description': 'Profit Center #1 description',
'business_line': 333,
},
{
'ci_id': 322,
'ci_uid': 'pc-2',
'name': 'Profit Center #2',
'description': 'Profit Center #2 description',
'business_line': 444,
},
]
|
py | b4116ff7fbbb34c4603ddf824a770798b256ce3e | #
# Example file for parsing and processing HTML
# (For Python 3.x, be sure to use the ExampleSnippets3.txt file)
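# A minimal parser subclass is sketched below so this snippet runs; the original file's parser
# body was not included here, so the import and handler are illustrative stand-ins.
from html.parser import HTMLParser  # on Python 2.x this was: from HTMLParser import HTMLParser

class MyHTMLParser(HTMLParser):
    def handle_starttag(self, tag, attrs):
        print("Encountered a start tag:", tag)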
def main():
    # instantiate the parser and feed it some HTML (the sample markup is illustrative)
    parser = MyHTMLParser()
    parser.feed("<html><body><h1>Parsing example</h1></body></html>")

if __name__ == "__main__":
    main()
|
py | b411708d325c317df0d4343741d0279cb0ed797e | # -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class ResizeCbsStorageRequest(Request):
def __init__(self):
super(ResizeCbsStorageRequest, self).__init__(
'cbs', 'qcloudcliV1', 'ResizeCbsStorage', 'cbs.api.qcloud.com')
def get_storageId(self):
return self.get_params().get('storageId')
def set_storageId(self, storageId):
self.add_param('storageId', storageId)
def get_storageSize(self):
return self.get_params().get('storageSize')
def set_storageSize(self, storageSize):
self.add_param('storageSize', storageSize)
|
py | b41171430339714854c401d0fc525181fbfe3f19 | import os
import sqlite3
import textwrap
from scripts.artifact_report import ArtifactHtmlReport
from scripts.cleapfuncs import logfunc, tsv, timeline, is_platform_windows, get_next_unused_name, \
open_sqlite_db_readonly, get_browser_name, get_ldb_records, read_varint
def parse_ls_ldb_record(record):
""" This code was taken from the file chrome.py from Ryan Benson's Hindsight project
From https://cs.chromium.org/chromium/src/components/services/storage/dom_storage/local_storage_impl.cc:
// LevelDB database schema
// =======================
//
// Version 1 (in sorted order):
// key: "VERSION"
// value: "1"
//
// key: "META:" + <url::Origin 'origin'>
// value: <LocalStorageOriginMetaData serialized as a string>
//
// key: "_" + <url::Origin> 'origin'> + '\x00' + <script controlled key>
// value: <script controlled value>
"""
parsed = {
'seq': record['seq'],
'state': record['state'],
'origin_file': record['origin_file']
}
if record['key'].startswith('META:'.encode('utf-8')):
parsed['record_type'] = 'META'
parsed['origin'] = record['key'][5:].decode()
parsed['key'] = record['key'][5:].decode()
# From https://cs.chromium.org/chromium/src/components/services/storage/dom_storage/
# local_storage_database.proto:
# message LocalStorageOriginMetaData
# required int64 last_modified = 1;
# required uint64 size_bytes = 2;
# TODO: consider redoing this using protobufs
if record['value'].startswith(b'\x08'):
ptr = 1
last_modified, bytes_read = read_varint(record['value'][ptr:])
size_bytes, _ = read_varint(record['value'][ptr + bytes_read:])
parsed['value'] = f'Last modified: {last_modified}; size: {size_bytes}'
return parsed
elif record['key'] == b'VERSION':
return
elif record['key'].startswith(b'_'):
parsed['record_type'] = 'entry'
try:
parsed['origin'], parsed['key'] = record['key'][1:].split(b'\x00', 1)
parsed['origin'] = parsed['origin'].decode()
if parsed['key'].startswith(b'\x01'):
parsed['key'] = parsed['key'].lstrip(b'\x01').decode()
elif parsed['key'].startswith(b'\x00'):
parsed['key'] = parsed['key'].lstrip(b'\x00').decode('utf-16')
except Exception as e:
logfunc(str("Origin/key parsing error: {}".format(e)))
return
try:
if record['value'].startswith(b'\x01'):
parsed['value'] = record['value'].lstrip(b'\x01').decode('utf-8', errors='replace')
elif record['value'].startswith(b'\x00'):
parsed['value'] = record['value'].lstrip(b'\x00').decode('utf-16', errors='replace')
elif record['value'].startswith(b'\x08'):
parsed['value'] = record['value'].lstrip(b'\x08').decode()
elif record['value'] == b'':
parsed['value'] = ''
except Exception as e:
logfunc(str(f'Value parsing error: {e}'))
return
for item in parsed.values():
assert not isinstance(item, bytes)
return parsed
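# Illustrative example (not from the original module): a raw LevelDB record such as
#     {'seq': 1, 'state': 'live', 'origin_file': 'CURRENT',
#      'key': b'_https://example.org\x00\x01theme', 'value': b'\x01dark'}
# would be parsed above into an 'entry' dict with origin='https://example.org', key='theme'
# and value='dark'.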
def get_local_storage(ls_path, wrap_text):
''' This code was taken from the file utils.py from Ryan Benson's Hindsight project '''
results = []
# logfunc ('Local Storage:')
# logfunc (f' - Reading from {ls_path}')
local_storage_listing = os.listdir(ls_path)
# logfunc (f' - {len(local_storage_listing)} files in Local Storage directory')
filtered_listing = []
# Chrome v61+ used leveldb for LocalStorage, but kept old SQLite .localstorage files if upgraded.
ls_ldb_path = ls_path
ls_ldb_records = get_ldb_records(ls_ldb_path)
# logfunc (f' - Reading {len(ls_ldb_records)} Local Storage raw LevelDB records; beginning parsing')
for record in ls_ldb_records:
ls_item = parse_ls_ldb_record(record)
if ls_item and ls_item.get('record_type') == 'entry':
# results.append(Chrome.LocalStorageItem(
if wrap_text:
results.append((ls_item['origin'], ls_item['key'], textwrap.fill(ls_item['value'], width=50),
ls_item['seq'], ls_item['state'], str(ls_item['origin_file'])))
else:
results.append((ls_item['origin'], ls_item['key'], ls_item['value'],
ls_item['seq'], ls_item['state'], str(ls_item['origin_file'])))
# self.artifacts_counts['Local Storage'] = len(results)
# logfunc (f' - Parsed {len(results)} items from {len(filtered_listing)} files')
# self.parsed_storage.extend(results)
return results
def get_LocalStorage(files_found, report_folder, seeker, wrap_text):
for file_found in files_found:
file_found = str(file_found)
if not os.path.basename(file_found) == 'CURRENT': # skip -journal and other files
continue
path_name = os.path.dirname(file_found)
browser_name = get_browser_name(file_found)
data_list = get_local_storage(path_name, wrap_text)
usageentries = len(data_list)
if usageentries > 0:
description = 'Local Storage key:value pairs report. See path for data provenance.'
report = ArtifactHtmlReport(f'{browser_name} Local Storage')
#check for existing and get next name for report file, so report from another file does not get overwritten
report_path = os.path.join(report_folder, f'{browser_name} Local Storage.temphtml')
report_path = get_next_unused_name(report_path)[:-9] # remove .temphtml
report.start_artifact_report(report_folder, os.path.basename(report_path), description)
report.add_script()
data_headers = ('Origin','Key','Value','seq','state', 'origin_file')
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = f'{browser_name} Local Storage'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = f'{browser_name} Local Storage'
timeline(report_folder, tlactivity, data_list, data_headers)
logfunc(f'{browser_name} Local Storage parsed')
else:
pass
|
py | b411729d4bb36eb117a6726bbc4d299accbfea37 | from aiohttp import web
from aiohttp_admin2 import setup_admin
from aiohttp_admin2.views import Admin
from .utils import generate_new_admin_class
async def index(request):
return web.Response(text="Index")
async def test_that_middleware_work_only_for_admin_pages(aiohttp_client):
"""
In this test we check that the middleware is successfully applied to the admin interface.
1. Access to a non-admin page is allowed
2. Access to an admin page is denied
"""
@web.middleware
async def access(request, handler):
raise web.HTTPForbidden()
app = web.Application()
app.add_routes([web.get('/', index)])
admin = generate_new_admin_class()
setup_admin(app, middleware_list=[access, ], admin_class=admin)
cli = await aiohttp_client(app)
# 1. Access to a non-admin page is allowed
res = await cli.get('/')
assert res.status == 200
# 2. Access to an admin page is denied
res = await cli.get(Admin.admin_url)
assert res.status == 403
async def test_that_admin_pages_are_available_if_pass_middleware(aiohttp_client):
"""
In this test we check that admin pages remain available when the middleware lets the request through.
1. Access to an admin page is allowed
"""
@web.middleware
async def access(request, handler):
# do nothing; let the request through
return await handler(request)
app = web.Application()
app.add_routes([web.get('/', index)])
admin = generate_new_admin_class()
setup_admin(app, middleware_list=[access, ], admin_class=admin)
cli = await aiohttp_client(app)
res = await cli.get(Admin.admin_url)
assert res.status == 200
|
py | b41172c5853f5b2068c72be1b2c234777fd34d4f | import json, subprocess
from ... pyaz_utils import get_cli_name, get_params
def show(db_name, resource_group_name=None, name=None, key=None, url_connection=None):
params = get_params(locals())
command = "az cosmosdb database show " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def list(resource_group_name=None, name=None, key=None, url_connection=None):
params = get_params(locals())
command = "az cosmosdb database list " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def exists(db_name, resource_group_name=None, name=None, key=None, url_connection=None):
params = get_params(locals())
command = "az cosmosdb database exists " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def create(db_name, resource_group_name=None, name=None, key=None, url_connection=None, throughput=None):
params = get_params(locals())
command = "az cosmosdb database create " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def delete(db_name, resource_group_name=None, name=None, key=None, url_connection=None, yes=None):
params = get_params(locals())
command = "az cosmosdb database delete " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
|
py | b411731727613637e2531eb8ea3020179bb09ac5 | from flask_restful import Resource
from ...utils.utils import get_res_data
class UserCodesResource(Resource):
def get(self, user_id):
return get_res_data(), 200
def patch(self, user_id):
return get_res_data(), 200
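# Illustrative wiring only (the app set-up and route are assumptions, not part of this module):
#
#     from flask import Flask
#     from flask_restful import Api
#
#     app = Flask(__name__)
#     api = Api(app)
#     api.add_resource(UserCodesResource, "/users/<int:user_id>/codes")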
|
py | b411738154b77abc48eab8ea2d2acee1cc745d75 | # -*- coding: utf-8 -*-
"""
PHP Tests
~~~~~~~~~
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import unittest
from pygments.lexers import PhpLexer
from pygments.token import Token
class PhpTest(unittest.TestCase):
def setUp(self):
self.lexer = PhpLexer()
def testStringEscapingRun(self):
fragment = '<?php $x="{\\""; ?>\n'
tokens = [
(Token.Comment.Preproc, '<?php'),
(Token.Text, ' '),
(Token.Name.Variable, '$x'),
(Token.Operator, '='),
(Token.Literal.String.Double, '"'),
(Token.Literal.String.Double, '{'),
(Token.Literal.String.Escape, '\\"'),
(Token.Literal.String.Double, '"'),
(Token.Punctuation, ';'),
(Token.Text, ' '),
(Token.Comment.Preproc, '?>'),
(Token.Other, '\n'),
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
|
py | b4117403f842a7aeaaf16de0a2e3d85f72e3940d | # coding: utf8
from __future__ import unicode_literals
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...attrs import LANG
from ...language import Language
from ...util import update_exc
class TatarDefaults(Language.Defaults):
lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
lex_attr_getters[LANG] = lambda text: 'tt'
lex_attr_getters.update(LEX_ATTRS)
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
infixes = tuple(TOKENIZER_INFIXES)
stop_words = STOP_WORDS
class Tatar(Language):
lang = 'tt'
Defaults = TatarDefaults
__all__ = ['Tatar']
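# Minimal usage sketch (assumes the spaCy v2-style API this module is written against):
#
#     nlp = Tatar()
#     doc = nlp("...")  # tokenised using the exceptions, infixes and stop words defined above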
|
py | b411744f7bac2469d88587b0c810d0b4f8473d83 | import pytest
import pandas as pd
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.util.testing import assert_frame_equal, assert_raises_regex
def test_compression_roundtrip(compression):
df = pd.DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with tm.ensure_clean() as path:
df.to_json(path, compression=compression)
assert_frame_equal(df, pd.read_json(path,
compression=compression))
# explicitly ensure file was compressed.
with tm.decompress_file(path, compression) as fh:
result = fh.read().decode('utf8')
assert_frame_equal(df, pd.read_json(result))
def test_read_zipped_json(datapath):
uncompressed_path = datapath("io", "json", "data", "tsframe_v012.json")
uncompressed_df = pd.read_json(uncompressed_path)
compressed_path = datapath("io", "json", "data", "tsframe_v012.json.zip")
compressed_df = pd.read_json(compressed_path, compression='zip')
assert_frame_equal(uncompressed_df, compressed_df)
@td.skip_if_not_us_locale
def test_with_s3_url(compression):
boto3 = pytest.importorskip('boto3')
pytest.importorskip('s3fs')
moto = pytest.importorskip('moto')
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
with moto.mock_s3():
conn = boto3.resource("s3", region_name="us-east-1")
bucket = conn.create_bucket(Bucket="pandas-test")
with tm.ensure_clean() as path:
df.to_json(path, compression=compression)
with open(path, 'rb') as f:
bucket.put_object(Key='test-1', Body=f)
roundtripped_df = pd.read_json('s3://pandas-test/test-1',
compression=compression)
assert_frame_equal(df, roundtripped_df)
def test_lines_with_compression(compression):
with tm.ensure_clean() as path:
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
df.to_json(path, orient='records', lines=True,
compression=compression)
roundtripped_df = pd.read_json(path, lines=True,
compression=compression)
assert_frame_equal(df, roundtripped_df)
def test_chunksize_with_compression(compression):
with tm.ensure_clean() as path:
df = pd.read_json('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}')
df.to_json(path, orient='records', lines=True,
compression=compression)
res = pd.read_json(path, lines=True, chunksize=1,
compression=compression)
roundtripped_df = pd.concat(res)
assert_frame_equal(df, roundtripped_df)
def test_write_unsupported_compression_type():
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
with tm.ensure_clean() as path:
msg = "Unrecognized compression type: unsupported"
assert_raises_regex(ValueError, msg, df.to_json,
path, compression="unsupported")
def test_read_unsupported_compression_type():
with tm.ensure_clean() as path:
msg = "Unrecognized compression type: unsupported"
assert_raises_regex(ValueError, msg, pd.read_json,
path, compression="unsupported")
@pytest.mark.parametrize("to_infer", [True, False])
@pytest.mark.parametrize("read_infer", [True, False])
def test_to_json_compression(compression_only,
read_infer, to_infer):
# see gh-15008
compression = compression_only
if compression == "zip":
pytest.skip("{compression} is not supported "
"for to_csv".format(compression=compression))
# We'll complete file extension subsequently.
filename = "test."
if compression == "gzip":
filename += "gz"
else:
# xz --> .xz
# bz2 --> .bz2
filename += compression
df = pd.DataFrame({"A": [1]})
to_compression = "infer" if to_infer else compression
read_compression = "infer" if read_infer else compression
with tm.ensure_clean(filename) as path:
df.to_json(path, compression=to_compression)
result = pd.read_json(path, compression=read_compression)
tm.assert_frame_equal(result, df)
|
py | b41174822812656ee21edc6bd92ce3093f779b7c | # -- coding: utf-8 --
# List comprehension
print([i**2 for i in [1,2,3]])
"""
[Output]
[1, 4, 9]
"""
# List comprehension (with a condition: only i == 2)
print([i**2 for i in [1,2,3] if i==2])
"""
[Output]
[4]
"""
# Set comprehension
print({i**2 for i in [1,2,3]})
"""
[Output]
set([1, 4, 9])
"""
# Dictionary comprehension
print({i:i**2 for i in [1,2,3]})
"""
[Output]
{1: 1, 2: 4, 3: 9}
"""
|
py | b411756bbfa08eef72f2d2d2aca015bf5ce6a860 | #
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2020, Ilya Etingof <[email protected]>
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc2437
class RSAPrivateKeyTestCase(unittest.TestCase):
pem_text = """\
MIIBPAIBAAJBAMfAjvBNDDYBCl1w3yNcagZkPhqd0q5KqeOTgKSLuJWfe5+VSeR5
Y1PcF3DyH8dvS3t8PIQjxJLoKS7HVRlsfhECAwEAAQJBAIr93/gxhIenXbD7MykF
yvi7k8MtgkWoymICZwcX+c6RudFyuPPfQJ/sf6RmFZlRA9X9CQm5NwVG7+x1Yi6t
KoECIQDmJUCWkPCiQYow6YxetpXFa0K6hTzOPmax7MNHVWNgmQIhAN4xOZ4JFT34
xVhK+8EudBCYRomJUHmOJfoQAxiIXVw5AiEAyB7ecc5on/5zhqKef4Eu7LKfHIdc
304diFuDVpTmTAkCIC2ZmKOQZaWkSowGR4isCfHl7oQHhFaOD8k0RA5i3hYxAiEA
n8lDw3JT6NjvMnD6aM8KBsLyhazWSVVkaUSqmJzgCF0=
"""
def setUp(self):
self.asn1Spec = rfc2437.RSAPrivateKey()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
|
py | b411763716d0bcb3ac9f4a88b57c8e43261c6bb3 | import pandas as pd
import penguins.ParamAnaylsis as pa
import numpy as np
import copy
def test_len():
examplePackage = pa.createPackage('Results\\fitLog\\ComplicatedFit.txt', 'ComplicatedFit')
assert len(examplePackage) == len(examplePackage.data)
def test_filter_topPer():
examplePackage = pa.createPackage('Results\\fitLog\\ComplicatedFit.txt', 'ComplicatedFit')
minError = examplePackage.minError
maxError = np.log(np.exp(minError) + np.abs(np.exp(minError)*.1))
filteredExamplePackage = examplePackage.filter(topPer = .1)
test = filteredExamplePackage.data[filteredExamplePackage.data['error']>maxError]
assert len(test) == 0
#TODO: Need To Fix this
def test_returnStats():
examplePackage = pa.createPackage('Results\\fitLog\\ComplicatedFit.txt', 'ComplicatedFit')
stats = examplePackage.returnStatistics()
assert stats.loc['minimum']
assert stats.loc['1st quantile']
assert stats.loc['median']
assert stats.loc['3rd quantile']
assert stats.loc['maximum']
assert stats.loc['IQR']
assert stats.loc['STD']
def test_returnBnds():
examplePackage = pa.createPackage('Results\\fitLog\\ComplicatedFit.txt', 'ComplicatedFit')
examplePackage = examplePackage.filter(topPer = .1)
bounds = examplePackage.paramBounds()
for key in bounds.keys():
assert bounds[key][0] < examplePackage.bestParams[key]
assert bounds[key][1] > examplePackage.bestParams[key]
def test_filter_bnds():
bnds = {
'A' : [1.25,1.4],
'B' : [1.75,1.85],
'C' : [0.25, 0.35]
}
examplePackage = pa.createPackage('Results\\fitLog\\ComplicatedFit.txt', 'ComplicatedFit')
filteredExamplePackage = examplePackage.filter(bnds = bnds)
for key in bnds:
minPar = min(filteredExamplePackage.data[key])
assert minPar > bnds[key][0]
maxPar = max(filteredExamplePackage.data[key])
assert maxPar < bnds[key][1]
def test_filter_bnds_ZeroLen():
bnds = {
'A' : [-100,-50],
'B' : [1.75,1.85],
'C' : [0.25, 0.35]
}
examplePackage = pa.createPackage('Results\\fitLog\\ComplicatedFit.txt', 'ComplicatedFit')
filteredExamplePackage = examplePackage.filter(bnds = bnds)
assert len(filteredExamplePackage) == 0
def test_changeLog():
print('\n')
examplePackage = pa.createPackage('Results\\fitLog\\ComplicatedFit.txt', 'ComplicatedFit')
assert len(examplePackage.changeLog) == 0
examplePackage = examplePackage.filter(topPer = .1)
assert len(examplePackage.changeLog) == 1
examplePackage = examplePackage.filter(topPer = .05)
assert len(examplePackage.changeLog) == 2
def test_print():
print('\n')
examplePackage = pa.createPackage('Results\\fitLog\\ComplicatedFit.txt', 'ComplicatedFit')
examplePackage = examplePackage.filter(topPer = .1)
print(examplePackage)
def test_filter_Fixed():
print('\n')
fixed = {
'A' : 1.37
}
examplePackage = pa.createPackage('Results\\fitLog\\ComplicatedFit.txt', 'ComplicatedFit')
filteredExamplePackage = examplePackage.filter(fixed=fixed)
minPar = min(filteredExamplePackage.data['A'])
assert minPar > fixed['A']-np.abs(fixed['A']*.05)
maxPar = max(filteredExamplePackage.data['A'])
assert maxPar < fixed['A'] + np.abs(fixed['A'] * .05)
def test_filter_Fixed_Threshold():
print('\n')
fixed = {
'A' : 1.37
}
examplePackage = pa.createPackage('Results\\fitLog\\ComplicatedFit.txt', 'ComplicatedFit')
filteredExamplePackage = examplePackage.filter(fixed=fixed, threshold = .01)
minPar = min(filteredExamplePackage.data['A'])
assert minPar > fixed['A']-np.abs(fixed['A']*.01)
maxPar = max(filteredExamplePackage.data['A'])
assert maxPar < fixed['A'] + np.abs(fixed['A'] * .01)
#TODO: Need to create a grouping option
def test_group_parRange():
assert False
def test_metaData():
print('\n')
examplePackage = pa.createPackage('Results\\fitLog\\ComplicatedFit.txt', 'ComplicatedFit')
# All I want to do here is make sure these are accessible.
assert isinstance(examplePackage.file, str)
assert isinstance(examplePackage.name, str)
assert isinstance(examplePackage.minError, float)
assert isinstance(examplePackage.bestParams, dict)
|
py | b411764be2bab978d426b4456e66f24be0135e32 | """
CSS syntax names.
Copyright (c)
See LICENSE for details.
<[email protected]>
"""
css2 = [
'azimuth',
'background-attachment',
'background-color',
'background-image',
'background-position',
'background-repeat',
'background',
'border-collapse',
'border-color',
'border-spacing',
'border-style',
'border-top',
'border-right',
'border-bottom',
'border-left',
'border-top-color',
'border-right-color',
'border-bottom-color',
'border-left-color',
'border-top-style',
'border-right-style',
'border-bottom-style',
'border-left-style',
'border-top-width',
'border-right-width',
'border-bottom-width',
'border-left-width',
'border-width',
'border',
'bottom',
'caption-side',
'clear',
'clip',
'color',
'content',
'counter-increment',
'counter-reset',
'cue-after',
'cue-before',
'cue',
'cursor',
'direction',
'display',
'elevation',
'empty-cells',
'float',
'font-family',
'font-size',
'font-style',
'font-variant',
'font-weight',
'font',
'height',
'left',
'letter-spacing',
'line-height',
'list-style-image',
'list-style-position',
'list-style-type',
'list-style',
'margin-right',
'margin-left',
'margin-top',
'margin-bottom',
'margin',
'max-height',
'max-width',
'min-height',
'min-width',
'orphans',
'outline-color',
'outline-style',
'outline-width',
'outline',
'overflow',
'padding-top',
'padding-right',
'padding-bottom',
'padding-left',
'padding',
'page-break-after',
'page-break-before',
'page-break-inside',
'pause-after',
'pause-before',
'pause',
'pitch-range',
'pitch',
'play-during',
'position',
'quotes',
'richness',
'right',
'speak-header',
'speak-numeral',
'speak-punctuation',
'speak',
'speech-rate',
'stress',
'table-layout',
'text-align',
'text-decoration',
'text-indent',
'text-transform',
'top',
'unicode-bidi',
'vertical-align',
'visibility',
'voice-family',
'volume',
'white-space',
'widows',
'width',
'word-spacing',
'z-index',
]
css3 = [
'alignment-adjust',
'alignment-baseline',
'animation',
'animation-delay',
'animation-direction',
'animation-duration',
'animation-iteration-count',
'animation-name',
'animation-play-state',
'animation-timing-function',
'appearance',
'backface-visibility',
'background-clip',
'background-origin',
'background-size',
'baseline-shift',
'bookmark-label',
'bookmark-level',
'bookmark-target',
'border-bottom-left-radius',
'border-bottom-right-radius',
'border-image',
'border-image-outset',
'border-image-repeat',
'border-image-slice',
'border-image-source',
'border-image-width',
'border-radius',
'border-top-left-radius',
'border-top-right-radius',
'box-align',
'box-decoration-break',
'box-direction',
'box-flex',
'box-flex-group',
'box-lines',
'box-ordinal-group',
'box-orient',
'box-pack',
'box-shadow',
'box-sizing',
'color-profile',
'column-count',
'column-fill',
'column-gap',
'column-rule',
'column-rule-color',
'column-rule-style',
'column-rule-width',
'column-span',
'column-width',
'columns',
'crop',
'dominant-baseline',
'drop-initial-after-adjust',
'drop-initial-after-align',
'drop-initial-before-adjust',
'drop-initial-before-align',
'drop-initial-size',
'drop-initial-value',
'fit',
'fit-position',
'float-offset',
'font-size-adjust',
'font-stretch',
'grid-columns',
'grid-rows',
'hanging-punctuation',
'hyphenate-after',
'hyphenate-before',
'hyphenate-character',
'hyphenate-lines',
'hyphenate-resource',
'hyphens',
'icon',
'image-orientation',
'image-resolution',
'inline-box-align',
'line-stacking',
'line-stacking-ruby',
'line-stacking-shift',
'line-stacking-strategy',
# 'mark',
'mark-after',
'mark-before',
'marks',
'marquee-direction',
'marquee-play-count',
'marquee-speed',
'marquee-style',
'move-to',
'nav-down',
'nav-index',
'nav-left',
'nav-right',
'nav-up',
'opacity',
'outline-offset',
'overflow-style',
'overflow-x',
'overflow-y',
'page',
'page-policy',
'perspective',
'perspective-origin',
'phonemes',
'punctuation-trim',
'rendering-intent',
'resize',
'rest',
'rest-after',
'rest-before',
'rotation',
'rotation-point',
'ruby-align',
'ruby-overhang',
'ruby-position',
'ruby-span',
'size',
'string-set',
'target',
'target-name',
'target-new',
'target-position',
'text-align-last',
'text-height',
'text-justify',
'text-outline',
'text-overflow',
'text-shadow',
'text-wrap',
'transform',
'transform-origin',
'transform-style',
'transition',
'transition-delay',
'transition-duration',
'transition-property',
'transition-timing-function',
'voice-balance',
'voice-duration',
'voice-pitch',
'voice-pitch-range',
'voice-rate',
'voice-stress',
'voice-volume',
'word-break',
'word-wrap'
]
# The svg list only includes properties not present in either css2 or css3:
svg = [
# clipping / masking / compositing:
'clip-path',
'clip-rule',
'mask',
# filter effects:
'enable-background',
'filter',
'flood-color',
'flood-opacity',
'lightning-color',
# gradient:
'stop-color',
'stop-opacity',
# interactivity:
'pointer-events',
# color / painting:
'color-interpolation',
'color-interpolation-filters',
'color-rendering',
'fill',
'fill-opacity',
'fill-rule',
'image-rendering',
'marker',
'marker-end',
'marker-mid',
'marker-start',
'shape-rendering',
'stroke',
'stroke-dasharray',
'stroke-dashoffset',
'stroke-linecap',
'stroke-linejoin',
'stroke-miterlimit',
'stroke-opacity',
'stroke-width',
'text-rendering',
# text:
'glyph-orientation-horizontal',
'glyph-orientation-vertical',
'kerning',
'text-anchor',
'writing-mode',
]
vendor_prefix = [
'-ms-',
'-moz-',
'-o-',
'-atsc-',
'-wap-',
'-webkit-',
'-khtml-',
'-xv-',
'mso-',
]
vendor_ugly = [
'accelerator',
'behavior',
'zoom',
]
properties = css2 + css3 + svg + vendor_ugly
# CSS-2(.1) media types: http://www.w3.org/TR/CSS2/media.html#media-types
# Include media types as defined in HTML4: http://www.w3.org/TR/1999/REC-html401-19991224/types.html#h-6.13
# Also explained in http://www.w3.org/TR/css3-mediaqueries/#background
html4_media_types = [
'all',
'aural', # deprecated by CSS 2.1, which reserves "speech"
'braille',
'handheld',
'print',
'projection',
'screen',
'tty',
'tv',
]
css2_media_types = [
'embossed', # CSS2, not HTML4
'speech', # CSS2. not HTML4
]
media_types = html4_media_types + css2_media_types
css3_media_features = [
'width',
'min-width',
'max-width',
'height',
'min-height',
'max-height',
'device-width',
'min-device-width',
'max-device-width',
'device-height',
'min-device-height',
'max-device-height',
'orientation',
'aspect-ratio',
'min-aspect-ratio',
'max-aspect-ratio',
'device-aspect-ratio',
'min-device-aspect-ratio',
'max-device-aspect-ratio',
'color',
'min-color',
'max-color',
'color-index',
'min-color-index',
'max-color-index',
'monochrome',
'min-monochrome',
'max-monochrome',
'resolution',
'min-resolution',
'max-resolution',
'scan',
'grid',
]
vendor_media_features = [
'-webkit-min-device-pixel-ratio',
'min--moz-device-pixel-ratio',
'-o-min-device-pixel-ratio',
'min-device-pixel-ratio',
]
media_features = css3_media_features + vendor_media_features
|
py | b4117693d31a14f18be3196ca486620a4ebeb18c | print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
print "--------------"
print poem
print "--------------"
five = 10 - 2 + 3 - 6
print "This should be five: {:s}".format(str(five))
def secret_formula(started):
jelly_beans = started * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
start_point = 10000
beans, jars, crates = secret_formula(start_point)
print "With a starting point of : {:d}".format(start_point)
print "We'd have {:d} beans, {:d} jars, and {:d} crates.".format(beans, jars, crates)
start_point = start_point / 10
print "We can also do that this way:"
print "We'd have {:d} beans, {:d} jars, and {:d} crates.".format(*secret_formula(start_point))
|
py | b41176a7459fd8b3c640c88fd274f8821856403b | from .base import *
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vyo+k2s_!gm0f=$2w_w8=qlzfz8o+#5&lymr+j8(i!mxazudvc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'autocomplete_light',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'rest_framework',
'webpage',
'places',
'labels',
'crew',
'bomber',
'django_extensions',
'django_tables2'
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAuthenticatedOrReadOnly',),
'PAGE_SIZE': 10
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'flj',
'USER': 'root',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '3306',
}
}
|
py | b41176d1f19caccc31265c3d48d191cdc0cc6903 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 22 09:19:02 2020
"""
from collections import defaultdict
def main():
lines = [line.strip() for line in get_file_contents()]
(foods, ingredient_count, allergen_to_food, ingredient_to_food,
ingredient_to_possible_allergens) = parse_food(lines)
ingredients_without_allergens, possible = find_possible_allergens(
allergen_to_food, ingredient_to_food, ingredient_to_possible_allergens
)
occurences = count_occurences(ingredients_without_allergens,
ingredient_count)
print('Without allergens: ', ingredients_without_allergens)
    print(f'Occurrences of ingredients without allergens: {occurences}')
ingredient_allergen = find_allergens(possible)
print('Ingredient allergens: ', ingredient_allergen)
print('Sorted by allergen: ', ','.join(sorted(
ingredient_allergen, key=lambda ing: ingredient_allergen[ing]
)))
def find_allergens(ingredients):
"""Return ingredients with cooresponding allergen."""
by_allergens_count = sorted(ingredients, key=lambda i: len(ingredients[i]))
for ingredient in by_allergens_count:
if len(ingredients[ingredient]) == 1:
for other_ingredient, allergens in ingredients.items():
if ingredient == other_ingredient:
continue
ingredients[other_ingredient] = (allergens
- ingredients[ingredient])
return {
ingredient: allergen.pop()
for ingredient, allergen in ingredients.items()
}
def find_possible_allergens(
allergen_to_food, ingredient_to_food, ingredient_to_allergens):
"""Return ingredients without allergens and with possible allergens."""
ingredients_without_allergens = set()
ingredient_to_possible_allergens = {}
for ingredient, allergens in ingredient_to_allergens.items():
possible_allergens = set()
for allergen in allergens:
if not allergen_to_food[allergen] - ingredient_to_food[ingredient]:
possible_allergens.add(allergen)
if possible_allergens:
ingredient_to_possible_allergens[ingredient] = possible_allergens
else:
ingredients_without_allergens.add(ingredient)
return ingredients_without_allergens, ingredient_to_possible_allergens
def get_ingredients_with_allergens(
ingredient_to_food, ingredients_without_allergens):
"""Return ingredients not present in ingredients_without_allergens."""
return {
ingredient: food
for ingredient, food in ingredient_to_food.items()
if ingredient not in ingredients_without_allergens
}
def count_occurences(ingredients, ingredient_count):
"""Count number of occurences of ingredients based on ingredient_count."""
return sum(ingredient_count[ingredient] for ingredient in ingredients)
def parse_food(lines):
"""Parse food from input lines.
Returns
    foods: list of dicts, each with 'ingredients' and 'allergens' lists
    ingredient_count: dict ingredient -> count, how many times the
        ingredient occurs across all foods
    allergen_to_food: dict allergen -> set, allergen to set of foods in
        which the allergen occurs
    ingredient_to_food: dict ingredient -> set, ingredient to set of foods
        in which the ingredient occurs
    ingredient_to_possible_allergens: dict ingredient -> set, ingredient
        to set of possible allergens of the ingredient
"""
ingredient_count = defaultdict(int)
allergen_to_food = defaultdict(set)
ingredient_to_food = defaultdict(set)
ingredient_to_possible_allergens = defaultdict(set)
foods = []
for index, line in enumerate(lines):
ingredients, allergens = line.split(' (contains')
ingredients = ingredients.split()
allergens = [allergen.strip()
for allergen in allergens[:-1].split(', ')]
foods.append({'ingredients': ingredients,
'allergens': allergens})
for ingredient in ingredients:
ingredient_count[ingredient] += 1
ingredient_to_food[ingredient].add(index)
ingredient_to_possible_allergens[ingredient].update(allergens)
for allergen in allergens:
allergen_to_food[allergen].add(index)
return (foods, ingredient_count, allergen_to_food, ingredient_to_food,
ingredient_to_possible_allergens)
def get_file_contents(file='input.txt'):
"""Read all lines from file."""
with open(file) as f:
return f.readlines()
if __name__ == '__main__':
main()
|
py | b4117827027e438a00214bda1e7ff3e9ee23c0fa | # import the necessary packages
import numpy as np
import argparse
import cv2
from HandClasses import *
import pygame as game
from App import *
from VerletPhysics import *
from Fits import *
from Camera import *
# define the list of boundaries
todaylowRight = np.loadtxt('lowRight.txt', dtype=int)
todayhighRight = np.loadtxt('highRight.txt', dtype=int)
todaylowLeft = np.loadtxt('lowLeft.txt', dtype=int)
todayhighLeft = np.loadtxt('highLeft.txt', dtype=int)
class DemoRope(App):
#
world = World(Vector(640.0, 480.0), Vector(0, 0), 4)
#
grabbed = None
radius = 15
strength = 0.05
segments = 300
#camera selection
cameraString = 'Intel'
#
def Initialize(self):
#
rope = self.world.AddComposite()
#self.cap = cv2.VideoCapture(0)
self.hand1 = HandClassOneColor([[]])
mat = Material(1.0,1.0,1.0)
for i in range(self.segments+1):
rope.AddParticles(
self.world.AddParticle(20+i*2, self.world.hsize.y,Material(0.4,0.4,1.0)),
)
# y=210.0
#self.world.AddParticle(5.0 + i * 10.0, 200.0)) # y=270.0
for i in range(0, self.segments):
rope.AddConstraints(self.world.AddConstraint(rope.particles[i],rope.particles[i+1],1.0))
rope.particles[int(len(rope.particles)/2)].material.mass = 0.0
#rope.particles[-1].material.mass = 0.0
self.camera = Camera(self.cameraString)
#rope.particles[9].ApplyForce(Vector(400.0, -900.0))
#
def Update(self):
#
if self.hand1.numberofFingers == 3 and self.hand1.state=='Closed':
if self.grabbed == None:
closest = self.ClosestPoint()
if closest[1] < self.radius:
self.grabbed = closest[0]
#print('here')
if self.grabbed != None:
mouse = Vector(self.hand1.centerTriangle[0],self.hand1.centerTriangle[1])
force = (mouse - self.grabbed.position) * self.strength
self.grabbed.ApplyImpulse(force)
#print('here2')
self.world.Simulate()
elif game.mouse.get_pressed()[0]:
if self.grabbed == None:
closest = self.ClosestPoint()
if closest[1] < self.radius:
self.grabbed = closest[0]
if self.grabbed != None:
mouse = Vector(game.mouse.get_pos()[0], game.mouse.get_pos()[1])
force = (mouse - self.grabbed.position) * self.strength
self.grabbed.ApplyImpulse(force)
self.world.Simulate()
else:
if self.grabbed != None:
self.world.SimulateWorldStop()
self.grabbed = None
#print('here3')
success, image = self.camera.getFrame()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
else:
inputimage = cv2.flip(image.copy(),1)
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
image_rgb = image.copy()
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
#results = hands.process(image)
# Draw the hand annotations on the image.
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
frame_HSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
image_height, image_width, _ = image.shape
maskRight = MaskClass(frame_HSV.copy(),todaylowRight,todayhighRight)
maskRight.process()
#cv2.imshow('green',maskgreen.tagged)
maskLeft = MaskClass(frame_HSV.copy(),todaylowLeft,todayhighLeft)
maskLeft.process()
handRightcenters = []
handRightcenters.append(maskRight.centers)
self.handRight = HandClassOneColor(handRightcenters)
handLeftcenters = []
handLeftcenters.append(maskLeft.centers)
self.handLeft = HandClassOneColor(handLeftcenters)
font = cv2.FONT_HERSHEY_PLAIN
if self.handRight.numberofFingers == 3:
#print(str(self.hand1.area) + ' ' +str(self.hand1.state))
cv2.putText(inputimage, 'Hand'+ str(self.handRight.state), (image_width-200,25), font, 2, (120,120,0), 3)
#print(self.hand1.centerTriangle)
else:
cv2.putText(inputimage, 'Not Right Hand', (image_width-300,25), font, 2, (120,120,0), 3)
if self.handLeft.numberofFingers == 3:
#print(str(self.hand1.area) + ' ' +str(self.hand1.state))
cv2.putText(inputimage, 'Hand'+ str(self.handLeft.state), (image_width-200,25), font, 2, (120,120,0), 3)
else:
cv2.putText(inputimage, 'Not Left Hand', (image_width-300,25), font, 2, (120,120,0), 3)
masksum = maskRight.tagged
#
self.cv_inputimage = inputimage
self.cv_masksumimage = masksum
self.cv_Rightfiltered = maskRight.tagged
cv2.imshow('input-image',self.cv_inputimage)
bin = cv2.waitKey(5)
            cv2.imshow('right-filter',self.cv_Rightfiltered)
if game.key.get_pressed()[game.K_ESCAPE]:
self.Exit()
#
def Render(self):
#
self.screen.fill((24, 24, 24))
for c in self.world.constraints:
pos1 = (int(c.node1.position.x), int(c.node1.position.y))
pos2 = (int(c.node2.position.x), int(c.node2.position.y))
game.draw.line(self.screen, (255, 255, 0), pos1, pos2, 4)
game.draw.line(self.screen, (130, 130, 130), (self.world.hsize.x,20), (self.world.hsize.x,460), 3)
game.draw.line(self.screen, (130, 130, 130), (20,self.world.hsize.y), (620,self.world.hsize.y), 3)
y = []
x = []
for p in self.world.particles:
pos = (int(p.position.x), int(p.position.y))
x.append(p.position.x-self.world.hsize.x)
y.append(p.position.y*-1+self.world.hsize.y)
game.draw.circle(self.screen, (255, 255, 255), pos, 8, 0)
#print("x" + str(x))
#print("y"+ str(y))
tempfit = CurveFit(x,y,1)
# define the RGB value for white,
# green, blue colour .
white = (255, 255, 255)
green = (0, 255, 0)
blue = (0, 0, 128)
# create a font object.
# 1st parameter is the font file
# which is present in pygame.
# 2nd parameter is size of the font
font = game.font.Font('freesansbold.ttf', 15)
# create a text surface object,
# on which text is drawn on it.
text = font.render("Yours : " +str(tempfit.function), True, green, blue)
game.draw.line(self.screen, (255, 0, 0), tempfit.startpoint, tempfit.endpoint, 3)
# create a rectangular object for the
# text surface object
textRect = text.get_rect()
# set the center of the rectangular object.
textRect.center = (470, 30)
# copying the text surface object
# to the display surface object
# at the center coordinate.
self.screen.blit(text, textRect)
text2 = font.render("Target: y = 0.80 x + -15.0", True, green, blue)
textRect2 = text2.get_rect()
textRect2.center = (100, 30)
self.screen.blit(text2, textRect2)
text3 = font.render("Match the equations", True, green, blue)
textRect3 = text3.get_rect()
textRect3.center = (300, 10)
self.screen.blit(text3, textRect3)
if self.handRight.numberofFingers == 3 and self.handRight.state=='Closed':
game.draw.circle(self.screen, (0, 255, 0), self.handRight.centerTriangle, 8, 0)
elif self.handRight.numberofFingers == 3 and self.handRight.state=='Open':
game.draw.circle(self.screen, (255, 0, 0), self.handRight.centerTriangle, 8, 0)
game.display.update()
if self.handRight.centerTriangle[0] < self.handLeft.centerTriangle[0]:
self.gestureReset = True
else:
self.gestureReset = False
#
def ClosestPoint(self):
if game.mouse.get_pressed()[0]:
mouse = Vector(game.mouse.get_pos()[0], game.mouse.get_pos()[1])
else:
mouse = Vector(self.hand1.centerTriangle[0],self.hand1.centerTriangle[1])
closest = None
distance = float('inf')
for particle in self.world.particles:
d = mouse.distance(particle.position)
if d < distance:
closest = particle
distance = d
return (closest, distance)
if __name__ == "__main__":
print ("Starting...")
app = DemoRope("Swinging Rope", 640, 480, 30)
app.Run()
#if bin & 0xFF == ord('q'):
# print('exit')
#elif bin & 0xFF ==ord('s'):
# print('save')
#self.cap.release()
# loop over the boundaries
print ("Ending...") |
py | b411787405e506631b8e5ae98eaef4c05398e60d | import pytest
from nbformat.v4 import new_notebook, new_output
from ...preprocessors import SaveCells, SaveAutoGrades, GetGrades
from ...api import Gradebook
from ...utils import compute_checksum
from .base import BaseTestPreprocessor
from .. import (
create_grade_cell, create_solution_cell, create_grade_and_solution_cell)
@pytest.fixture
def preprocessors():
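    # Order matters: the tests below run these in sequence (save cells, autograde, then fetch grades).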
return (SaveCells(), SaveAutoGrades(), GetGrades())
@pytest.fixture
def gradebook(request, db):
gb = Gradebook(db)
gb.add_assignment("ps0")
gb.add_student("bar")
def fin():
gb.close()
request.addfinalizer(fin)
return gb
@pytest.fixture
def resources(db):
return {
"nbgrader": {
"db_url": db,
"assignment": "ps0",
"notebook": "test",
"student": "bar"
}
}
class TestGetGrades(BaseTestPreprocessor):
def test_save_correct_code(self, preprocessors, gradebook, resources):
"""Is a passing code cell correctly graded?"""
cell = create_grade_cell("hello", "code", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
preprocessors[1].preprocess(nb, resources)
preprocessors[2].preprocess(nb, resources)
assert cell.metadata.nbgrader['score'] == 1
assert cell.metadata.nbgrader['points'] == 1
assert 'comment' not in cell.metadata.nbgrader
def test_save_incorrect_code(self, preprocessors, gradebook, resources):
"""Is a failing code cell correctly graded?"""
cell = create_grade_cell("hello", "code", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
cell.outputs = [new_output('error', ename="NotImplementedError", evalue="", traceback=["error"])]
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
preprocessors[1].preprocess(nb, resources)
preprocessors[2].preprocess(nb, resources)
assert cell.metadata.nbgrader['score'] == 0
assert cell.metadata.nbgrader['points'] == 1
assert 'comment' not in cell.metadata.nbgrader
def test_save_unchanged_code(self, preprocessors, gradebook, resources):
"""Is an unchanged code cell given the correct comment?"""
cell = create_solution_cell("hello", "code", "foo")
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
preprocessors[1].preprocess(nb, resources)
preprocessors[2].preprocess(nb, resources)
assert cell.metadata.nbgrader['comment'] == "No response."
def test_save_changed_code(self, preprocessors, gradebook, resources):
"""Is an unchanged code cell given the correct comment?"""
cell = create_solution_cell("hello", "code", "foo")
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
cell.source = "hello!"
preprocessors[1].preprocess(nb, resources)
preprocessors[2].preprocess(nb, resources)
assert cell.metadata.nbgrader['comment'] is None
def test_save_unchanged_markdown(self, preprocessors, gradebook, resources):
"""Is an unchanged markdown cell correctly graded?"""
cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
preprocessors[1].preprocess(nb, resources)
preprocessors[2].preprocess(nb, resources)
assert cell.metadata.nbgrader['score'] == 0
assert cell.metadata.nbgrader['points'] == 1
assert cell.metadata.nbgrader['comment'] == "No response."
def test_save_changed_markdown(self, preprocessors, gradebook, resources):
"""Is a changed markdown cell correctly graded?"""
cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
cell.source = "hello!"
preprocessors[1].preprocess(nb, resources)
preprocessors[2].preprocess(nb, resources)
assert cell.metadata.nbgrader['score'] == 0
assert cell.metadata.nbgrader['points'] == 1
assert cell.metadata.nbgrader['comment'] is None
|
py | b4117997389920091c2ec4dfb65bd82c8748ab64 | #!/usr/bin/env python
import unittest
from dremel.reader import scan
from dremel.field_graph import FieldGraphError
from .utils import create_test_storage
class ScanTest(unittest.TestCase):
def setUp(self):
self.storage = create_test_storage()
def test_create(self):
for values, fetch_level in scan(self.storage, ['doc_id', 'links.backward']):
print(values, fetch_level)
for values, fetch_level in scan(self.storage, ['doc_id', 'name.url', 'name.language.code']):
print(values, fetch_level)
def test_invalid_repeated_fields(self):
with self.assertRaisesRegex(FieldGraphError, 'independently-repeated fields'):
for values, fetch_level in scan(self.storage, ['name.url', 'links.backward']):
print(values, fetch_level)
|
py | b41179f110266adbce7ba7f811e1487f3b3ca2ca | import io
import logging
from datetime import datetime, timedelta
import matplotlib.dates as mdates
import pandas as pd
from matplotlib import pyplot as plt
from bots import imps
from openbb_terminal.decorators import log_start_end
from openbb_terminal.stocks.government import quiverquant_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def gtrades_command(
ticker: str = "",
gov_type="",
past_transactions_months: int = 10,
raw: bool = False,
):
"""Displays government trades [quiverquant.com]"""
# Debug user input
if imps.DEBUG:
logger.debug(
"gov gtrades %s %s %s %s",
ticker,
gov_type,
past_transactions_months,
raw,
)
if ticker == "":
raise Exception("A ticker is required")
possible_args = ["congress", "senate", "house"]
if gov_type == "":
gov_type = "congress"
elif gov_type not in possible_args:
raise Exception(
"Enter a valid government argument, options are: congress, senate and house"
)
# Retrieve Data
df_gov = quiverquant_model.get_government_trading(gov_type, ticker)
if df_gov.empty:
raise Exception(f"No {gov_type} trading data found")
# Output Data
df_gov = df_gov.sort_values("TransactionDate", ascending=False)
start_date = datetime.now() - timedelta(days=past_transactions_months * 30)
df_gov["TransactionDate"] = pd.to_datetime(df_gov["TransactionDate"])
df_gov = df_gov[df_gov["TransactionDate"] > start_date]
if df_gov.empty:
logger.debug("No recent %s trading data found", gov_type)
raise Exception(f"No recent {gov_type} trading data found")
df_gov["min"] = df_gov["Range"].apply(
lambda x: x.split("-")[0].strip("$").replace(",", "").strip()
)
df_gov["max"] = df_gov["Range"].apply(
lambda x: x.split("-")[1].replace(",", "").strip().strip("$")
if "-" in x
else x.strip("$").replace(",", "").split("\n")[0]
)
df_gov["lower"] = df_gov[["min", "max", "Transaction"]].apply(
lambda x: int(float(x["min"]))
if x["Transaction"] == "Purchase"
else -int(float(x["max"])),
axis=1,
)
df_gov["upper"] = df_gov[["min", "max", "Transaction"]].apply(
lambda x: int(float(x["max"]))
if x["Transaction"] == "Purchase"
else -1 * int(float(x["min"])),
axis=1,
)
df_gov = df_gov.sort_values("TransactionDate", ascending=True)
fig, ax = plt.subplots(figsize=imps.bot_plot_scale(), dpi=imps.BOT_PLOT_DPI)
ax.fill_between(
df_gov["TransactionDate"].unique(),
df_gov.groupby("TransactionDate")["lower"].sum().values / 1000,
df_gov.groupby("TransactionDate")["upper"].sum().values / 1000,
)
ax.set_xlim(
[
df_gov["TransactionDate"].values[0],
df_gov["TransactionDate"].values[-1],
]
)
ax.grid()
ax.set_title(f"{gov_type.capitalize()} trading on {ticker}")
ax.set_xlabel("Date")
ax.set_ylabel("Amount ($1k)")
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y/%m/%d"))
plt.gcf().autofmt_xdate()
fig.tight_layout()
imagefile = "gov_gtrades.png"
dataBytesIO = io.BytesIO()
plt.savefig(dataBytesIO)
dataBytesIO.seek(0)
plt.close("all")
imagefile = imps.image_border(imagefile, base64=dataBytesIO)
return {
"title": "Stocks: [quiverquant.com] Government Trades",
"imagefile": imagefile,
}
|
py | b4117c12277c253ecb5a64d33f0ff446404bf257 | import datetime
import os
import random
import string
from django.db import models
def upload_to(instance, filename):
"""
    Build a unique upload path for the file (date prefix plus a random string) so upload names never collide.
https://docs.djangoproject.com/en/1.11/ref/models/fields/#django.db.models.FileField.upload_to
"""
datetime_string = datetime.datetime.now().strftime("%d%m%Y")
hash = create_secure_string()
filename_base, filename_ext = os.path.splitext(filename)
while os.path.isdir(datetime_string + hash + "/"):
hash = create_secure_string()
filepath_out = datetime_string + hash + "/" + datetime_string + hash + filename_ext.lower()
return filepath_out
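# e.g. upload_to(instance, "Photo.JPG") -> "24052021a1b2c3d4e5/24052021a1b2c3d4e5.jpg"
# (illustrative: date prefix plus a random 10-character hash, extension lower-cased)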
def create_secure_string():
return ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(10))
class Resources(models.Model):
file = models.FileField(upload_to=upload_to, blank=False, max_length=500)
class Meta:
verbose_name_plural = "Resources"
|
py | b4117cbc030276055515b09a821179f1e4bd21ae |
from pymjengine.engine.game_manager import GameManager
from pymjengine.baseMJPlayer import BaseMJPlayer
def setup_config(max_round):
return Config(max_round)
#debug_info_level
def start_mahjong(config, debug_info_level=0):
config.validation()
gm = GameManager()
gm.set_debug_info_level(debug_info_level)
for info in config.players_info:
gm.register_player(info["name"], info["algorithm"])
result_message = gm.start_game(config.max_round)
return _format_result(result_message)
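# Illustrative usage (MyPlayer stands in for any BaseMJPlayer subclass):
#   config = setup_config(max_round=8)
#   config.register_player("p1", MyPlayer())
#   config.register_player("p2", MyPlayer())
#   result = start_mahjong(config, debug_info_level=1)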
def _format_result(result_message):
return {
"rule": result_message["message"]["game_information"]["rule"],
"game_round": result_message["message"]["game_round"],
"winner": result_message["message"]["winner"]
# "players": result_message["message"]["game_information"]["seats"]
}
class Config(object):
def __init__(self, max_round):
self.players_info = []
self.max_round = max_round
def register_player(self, name, algorithm):
if not isinstance(algorithm, BaseMJPlayer):
            base_msg = 'Mahjong player must be a child class of "BaseMJPlayer", but its parent was "%s"'
raise TypeError(base_msg % algorithm.__class__.__bases__)
info = { "name" : name, "algorithm" : algorithm }
self.players_info.append(info)
def validation(self):
player_num = len(self.players_info)
if player_num < 2:
detail_msg = "no player is registered yet" if player_num==0 else "you registered only 1 player"
base_msg = "At least 2 players are needed to start the game"
raise Exception("%s (but %s.)" % (base_msg, detail_msg)) |
py | b4117cdbffc4f89e00dcbd61c57ee598f047472a | from typing import Counter
from Translate import Translate
from Errors import Error, IllegalCharacterError, ExpectedCharError
from Lexer.position import Position
from Lexer.token import Token
from Constants import *
# -----------LEXER---------------
class Lexer:
def __init__(self, fn, text):
self.fn = fn
self.text = text
self.pos = Position(-1, 0, -1, fn, text)
self.current_char = None
self.advance()
self.translate = Translate()
def advance(self):
self.pos.advance(self.current_char)
self.current_char = (
self.text[self.pos.idx] if self.pos.idx < len(self.text) else None
)
def peak(self, idx=1):
if self.pos.idx + idx < len(self.text):
return self.text[self.pos.idx + idx]
return None
def primitive_token(self):
if self.current_char == "+":
return TT_PLUS, None
if self.current_char == "-":
return self.make_minus_or_arrow()
if self.current_char == "*":
return self.make_mul_or_power()
if self.current_char == "/":
return TT_DIV, None
if self.current_char == "%":
return TT_MOD, None
if self.current_char == ",":
return TT_COMMA, None
if self.current_char == "(":
return TT_LPAREN, None
if self.current_char == ")":
return TT_RPAREN, None
if self.current_char == "[":
return TT_LSQUARE, None
if self.current_char == "]":
            return TT_RSQUARE, None
if self.current_char == "=":
return self.make_equals()
if self.current_char == "<":
return self.make_less_than()
if self.current_char == ">":
return self.make_greater_than()
if self.current_char == "!":
token, error = self.make_not_equals()
if error:
return None, error
return token, None
if self.current_char in ";\n":
return TT_NEWLINE,None
return None, None
def get_token(self):
token, error = self.primitive_token()
if error:
return error
if token:
self.advance()
return Token(token, pos_start=self.pos)
if self.current_char == '"':
return self.make_string()
if self.current_char in DIGITS:
return self.make_number()
if self.current_char in LETTERS:
return self.make_identifier()
position_start = self.pos.copy()
return IllegalCharacterError(
position_start, self.pos, "'" + self.current_char + "'"
)
def make_tokens(self):
tokens = []
while self.current_char != None:
if self.current_char in " \t":
self.advance()
continue
if self.current_char == "#":
self.skip_comment()
current_token = self.get_token()
if isinstance(current_token, Error):
return [], current_token
tokens.append(current_token)
tokens.append(Token(TT_EOF, pos_start=self.pos))
return tokens, None
def make_number(self):
num_str = ""
dot = False
pos_start = self.pos
while self.current_char != None and self.current_char in DIGITS + ".":
if self.current_char == ".":
if dot == True:
break
dot = True
num_str += "."
else:
num_str += self.translate.digit_to_eng(self.current_char)
self.advance()
if dot:
return Token(
TT_FLOAT, float(num_str), pos_start=pos_start, pos_end=self.pos
)
else:
return Token(TT_INT, int(num_str), pos_start=pos_start, pos_end=self.pos)
def make_string(self):
string = ''
pos_start = self.pos.copy()
escape_character = False
self.advance()
escape_characters = {
'n': '\n',
't': '\t'
}
while self.current_char != None and (self.current_char != '"' or escape_character):
if escape_character:
string += escape_characters.get(self.current_char, self.current_char)
else:
if self.current_char == '\\':
escape_character = True
if self.peak()=="n":
self.advance()
string+="\n"
else:
string += self.current_char
self.advance()
escape_character = False
self.advance()
return Token(TT_STRING, string, pos_start, self.pos)
def make_identifier(self):
id_str = ""
pos_start = self.pos
while self.current_char != None and self.current_char in LETTERS_DIGITS + "_":
id_str += self.translate.digit_to_eng(self.current_char)
self.advance()
token_type = TT_KEYWORD if id_str in KEYWORDS else TT_IDENTIFIER
return Token(token_type, id_str, pos_start, self.pos)
def make_not_equals(self):
pos_start = self.pos.copy()
self.advance()
if self.current_char == "=":
return TT_NE, None
return None, ExpectedCharError(pos_start, self.pos, "'=' (after '!')")
def make_equals(self):
tok_type = TT_EQ
nxt = self.peak()
if nxt == "=":
self.advance()
tok_type = TT_EE
return tok_type, None
def make_less_than(self):
tok_type = TT_LT
nxt = self.peak()
if nxt == "=":
self.advance()
tok_type = TT_LTE
return tok_type, None
def make_greater_than(self):
tok_type = TT_GT
nxt = self.peak()
if nxt == "=":
self.advance()
tok_type = TT_GTE
return tok_type, None
def make_mul_or_power(self):
tok_type = TT_MUL
nxt = self.peak()
if nxt == "*":
self.advance()
tok_type = TT_POWER
return tok_type, None
def make_minus_or_arrow(self):
tok_type = TT_MINUS
nxt = self.peak()
if nxt == ">":
self.advance()
tok_type = TT_ARROW
return tok_type, None
def skip_comment(self):
        while self.current_char is not None and self.current_char != '\n':
self.advance()
self.advance()
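# Illustrative usage (TT_* constants and Translate come from the imports above):
#   tokens, error = Lexer("<stdin>", "1 + 2 * 3").make_tokens()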
|
py | b4117d2a47a2357b5c3634b5ecc29d98e08670ef | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, Email, EqualTo
class RegistrationForm(FlaskForm):
username = StringField(
'Username', validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField(
'Confirm Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Sign Up')
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
remember = BooleanField('Remember Me')
    submit = SubmitField('Login')
|
py | b4117de8fa136b3c39cb3904f1c1d9e58f07a50c | import pandas as pd
def generarSecuenciaCSV(nombreArchivoSource, nombreArchivoDesnation, delete = False):
df = pd.read_csv(nombreArchivoSource, header=None)
lista = []
lista.append(0)
temporal = 1
flag = False
for i in range(9999):
lista.append(temporal)
if(flag):
flag = False
temporal += 1
else:
flag = True
if(delete):
lista.reverse()
df['n'] = lista
df.to_csv(nombreArchivoDesnation, header=None, index=None)
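# The appended column is the sequence 0, 1, 1, 2, 2, 3, 3, ... (10,000 values), reversed when delete=True.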
source = ["memory_insert_hash.csv",
"time_insert_hash.csv",
"memory_search_hash.csv",
"time_search_hash.csv"]
target = ["memory_insert_hash_n.csv",
"time_insert_hash_n.csv",
"memory_search_hash_n.csv",
"time_search_hash_n.csv"]
for s, d in zip(source, target):
generarSecuenciaCSV(s,d)
source = ["memory_remove_hash.csv",
"time_remove_hash.csv"]
target = ["memory_remove_hash_n.csv",
"time_remove_hash_n.csv"]
for s, d in zip(source, target):
generarSecuenciaCSV(s,d,True) |
py | b4117e9d58dc4a5c963f8ae355640773b4662b9d | # Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from cloudferry import data_storage
from cloudferry.lib.base.action import action
from cloudferry.lib.utils import log
from cloudferry.condensation import action as c_action
from cloudferry.condensation import process
LOG = log.getLogger(__name__)
class Evacuate(action.Action):
def __init__(self, iteration, **kwargs):
self.iteration = iteration
super(Evacuate, self).__init__(**kwargs)
def run(self, **kwargs):
compute_resource = self.cloud.resources['compute']
cloud = process.SOURCE
LOG.debug("getting info on cloud %s iteration %s from db", cloud,
self.iteration)
info = data_storage.get(
c_action.get_key(self.iteration, cloud))
if not info:
LOG.info("cannot find info in db on %s-%s", cloud, self.iteration)
return {}
actions = json.loads(info).get(c_action.CONDENSE)
LOG.debug("live-migrating vm one by one")
for vm_id, dest_host in actions:
compute_resource.live_migrate_vm(vm_id, dest_host)
return {}
|
py | b4117ebb9299178c61a444e78260358822421bf1 | import curses
class TUI:
def __init__(self):
self.stdscr = None
self.lines = 1
def __getmax(self):
y, x = self.stdscr.getmaxyx()
return x, y
def __printat(self, x, y, line):
self.stdscr.addstr(y, x, line)
def start(self):
self.stdscr = curses.initscr()
curses.start_color()
self.stdscr.nodelay(True)
self.stdscr.keypad(True)
def stop(self):
curses.endwin()
def resetlines(self):
self.lines = 1
self.stdscr.border()
def clear(self):
self.stdscr.clear()
def getch(self):
keycode = self.stdscr.getch()
if keycode == -1:
pass
else:
c = chr(keycode)
self.print("{} {} {}".format(c, keycode, type(keycode)))
if c in ("Q", "q"):
raise KeyboardInterrupt
def print(self, line=""):
self.__printat(1, self.lines, line)
self.lines += 1
def refresh(self):
self.stdscr.refresh()
def print_header(self):
self.__printat(1, self.lines,
"hostname RX TX total RX total TX")
self.lines += 1
def print_bandwidth(self, name, diff_rx, diff_tx, total_rx, total_tx):
self.__printat(1, self.lines, "{} {:10.2f} Mbit/s {:10.2f} Mbit/s {:10.2f} GB {:10.2f} GB".format(name, diff_rx,
diff_tx,
total_rx,
total_tx))
self.lines += 1
def print_footer(self, now, delta, sleeptime, all_diff_rx, all_diff_tx, all_rx, all_tx):
self.__printat(1, self.lines, "TOTAL: {:10.2f} Mbit/s {:10.2f} Mbit/s {:10.2f} GB {:10.2f} GB".format(all_diff_rx, all_diff_tx, all_rx, all_tx))
self.lines += 1
x, y = self.__getmax()
self.__printat(1, y-2, "{} delta: {:.2f}, sleeptime: {:.2f}".format(
now, delta, sleeptime
))
|
py | b4117f1d1d0a33e327acd6796dfcc5a14be068cd | import math
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from utils import *
def batch_norm(inputs, name, train=True, reuse=False):
return tf.contrib.layers.batch_norm(inputs=inputs,is_training=train,
reuse=reuse,scope=name,scale=True)
def conv2d(input_, output_dim,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="conv2d", reuse=False, padding='SAME'):
with tf.variable_scope(name, reuse=reuse):
w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
initializer=tf.contrib.layers.xavier_initializer())
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding=padding)
biases = tf.get_variable('biases', [output_dim],
initializer=tf.constant_initializer(0.0))
conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
return conv
def deconv2d(input_, output_shape,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="deconv2d", reuse=False, with_w=False, padding='SAME'):
with tf.variable_scope(name, reuse=reuse):
# filter : [height, width, output_channels, in_channels]
        w = tf.get_variable('w', [k_h, k_w, output_shape[-1],
input_.get_shape()[-1]],
initializer=tf.contrib.layers.xavier_initializer())
try:
deconv = tf.nn.conv2d_transpose(input_, w,
output_shape=output_shape,
strides=[1, d_h, d_w, 1],
padding=padding)
        # Support for versions of TensorFlow before 0.7.0
except AttributeError:
deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
strides=[1, d_h, d_w, 1])
biases = tf.get_variable('biases', [output_shape[-1]],
initializer=tf.constant_initializer(0.0))
deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
if with_w:
return deconv, w, biases
else:
return deconv
def lrelu(x, leak=0.2, name="lrelu"):
with tf.variable_scope(name):
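        # 0.5*(1+leak)*x + 0.5*(1-leak)*|x| equals x for x >= 0 and leak*x for x < 0, i.e. the leaky ReLU.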
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
def relu(x):
return tf.nn.relu(x)
def tanh(x):
return tf.nn.tanh(x)
def shape2d(a):
"""
a: a int or tuple/list of length 2
"""
if type(a) == int:
return [a, a]
if isinstance(a, (list, tuple)):
assert len(a) == 2
return list(a)
raise RuntimeError("Illegal shape: {}".format(a))
def shape4d(a):
# for use with tensorflow
return [1] + shape2d(a) + [1]
def UnPooling2x2ZeroFilled(x):
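    # Each input pixel becomes a 2x2 block with the value in the top-left and zeros elsewhere
    # (a Kronecker product with [[1, 0], [0, 0]]), doubling the spatial dimensions.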
out = tf.concat(axis=3, values=[x, tf.zeros_like(x)])
out = tf.concat(axis=2, values=[out, tf.zeros_like(out)])
sh = x.get_shape().as_list()
if None not in sh[1:]:
out_size = [-1, sh[1] * 2, sh[2] * 2, sh[3]]
return tf.reshape(out, out_size)
else:
sh = tf.shape(x)
return tf.reshape(out, [-1, sh[1] * 2, sh[2] * 2, sh[3]])
def MaxPooling(x, shape, stride=None, padding='VALID'):
"""
MaxPooling on images.
:param input: NHWC tensor.
:param shape: int or [h, w]
:param stride: int or [h, w]. default to be shape.
:param padding: 'valid' or 'same'. default to 'valid'
:returns: NHWC tensor.
"""
padding = padding.upper()
shape = shape4d(shape)
if stride is None:
stride = shape
else:
stride = shape4d(stride)
return tf.nn.max_pool(x, ksize=shape, strides=stride, padding=padding)
#@layer_register()
def FixedUnPooling(x, shape):
"""
Unpool the input with a fixed mat to perform kronecker product with.
:param input: NHWC tensor
:param shape: int or [h, w]
:returns: NHWC tensor
"""
shape = shape2d(shape)
# a faster implementation for this special case
return UnPooling2x2ZeroFilled(x)
def gdl(gen_frames, gt_frames, alpha):
"""
Calculates the sum of GDL losses between the predicted and gt frames.
@param gen_frames: The predicted frames at each scale.
@param gt_frames: The ground truth frames at each scale
@param alpha: The power to which each gradient term is raised.
@return: The GDL loss.
"""
# create filters [-1, 1] and [[1],[-1]]
# for diffing to the left and down respectively.
pos = tf.constant(np.identity(3), dtype=tf.float32)
neg = -1 * pos
# [-1, 1]
filter_x = tf.expand_dims(tf.stack([neg, pos]), 0)
# [[1],[-1]]
filter_y = tf.stack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)])
strides = [1, 1, 1, 1] # stride of (1, 1)
padding = 'SAME'
gen_dx = tf.abs(tf.nn.conv2d(gen_frames, filter_x, strides, padding=padding))
gen_dy = tf.abs(tf.nn.conv2d(gen_frames, filter_y, strides, padding=padding))
gt_dx = tf.abs(tf.nn.conv2d(gt_frames, filter_x, strides, padding=padding))
gt_dy = tf.abs(tf.nn.conv2d(gt_frames, filter_y, strides, padding=padding))
grad_diff_x = tf.abs(gt_dx - gen_dx)
grad_diff_y = tf.abs(gt_dy - gen_dy)
gdl_loss = tf.reduce_mean((grad_diff_x ** alpha + grad_diff_y ** alpha))
# condense into one tensor and avg
return gdl_loss
def linear(input_, output_size, name, stddev=0.02, bias_start=0.0,
reuse=False, with_w=False):
shape = input_.get_shape().as_list()
with tf.variable_scope(name, reuse=reuse):
matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
bias = tf.get_variable("bias", [output_size],
initializer=tf.constant_initializer(bias_start))
if with_w:
return tf.matmul(input_, matrix) + bias, matrix, bias
else:
return tf.matmul(input_, matrix) + bias
|
py | b4117f3682f798fbf14a54b114b3b2df95cd99ae | from datetime import datetime
from hypothesis import given
import numpy as np
import pytest
from pandas.compat import np_version_under1p19
from pandas.core.dtypes.common import is_scalar
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Series,
StringDtype,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
from pandas._testing._hypothesis import OPTIONAL_ONE_OF_ALL
@pytest.fixture(params=["default", "float_string", "mixed_float", "mixed_int"])
def where_frame(request, float_string_frame, mixed_float_frame, mixed_int_frame):
if request.param == "default":
return DataFrame(np.random.randn(5, 3), columns=["A", "B", "C"])
if request.param == "float_string":
return float_string_frame
if request.param == "mixed_float":
return mixed_float_frame
if request.param == "mixed_int":
return mixed_int_frame
def _safe_add(df):
# only add to the numeric items
def is_ok(s):
return (
issubclass(s.dtype.type, (np.integer, np.floating)) and s.dtype != "uint8"
)
return DataFrame(dict((c, s + 1) if is_ok(s) else (c, s) for c, s in df.items()))
class TestDataFrameIndexingWhere:
def test_where_get(self, where_frame, float_string_frame):
def _check_get(df, cond, check_dtypes=True):
other1 = _safe_add(df)
rs = df.where(cond, other1)
rs2 = df.where(cond.values, other1)
for k, v in rs.items():
exp = Series(np.where(cond[k], df[k], other1[k]), index=v.index)
tm.assert_series_equal(v, exp, check_names=False)
tm.assert_frame_equal(rs, rs2)
# dtypes
if check_dtypes:
assert (rs.dtypes == df.dtypes).all()
# check getting
df = where_frame
if df is float_string_frame:
msg = "'>' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
df > 0
return
cond = df > 0
_check_get(df, cond)
def test_where_upcasting(self):
# upcasting case (GH # 2794)
df = DataFrame(
{
c: Series([1] * 3, dtype=c)
for c in ["float32", "float64", "int32", "int64"]
}
)
df.iloc[1, :] = 0
result = df.dtypes
expected = Series(
[
np.dtype("float32"),
np.dtype("float64"),
np.dtype("int32"),
np.dtype("int64"),
],
index=["float32", "float64", "int32", "int64"],
)
# when we don't preserve boolean casts
#
# expected = Series({ 'float32' : 1, 'float64' : 3 })
tm.assert_series_equal(result, expected)
def test_where_alignment(self, where_frame, float_string_frame):
# aligning
def _check_align(df, cond, other, check_dtypes=True):
rs = df.where(cond, other)
for i, k in enumerate(rs.columns):
result = rs[k]
d = df[k].values
c = cond[k].reindex(df[k].index).fillna(False).values
if is_scalar(other):
o = other
else:
if isinstance(other, np.ndarray):
o = Series(other[:, i], index=result.index).values
else:
o = other[k].values
new_values = d if c.all() else np.where(c, d, o)
expected = Series(new_values, index=result.index, name=k)
# since we can't always have the correct numpy dtype
# as numpy doesn't know how to downcast, don't check
tm.assert_series_equal(result, expected, check_dtype=False)
# dtypes
# can't check dtype when other is an ndarray
if check_dtypes and not isinstance(other, np.ndarray):
assert (rs.dtypes == df.dtypes).all()
df = where_frame
if df is float_string_frame:
msg = "'>' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
df > 0
return
# other is a frame
cond = (df > 0)[1:]
_check_align(df, cond, _safe_add(df))
# check other is ndarray
cond = df > 0
_check_align(df, cond, (_safe_add(df).values))
# integers are upcast, so don't check the dtypes
cond = df > 0
check_dtypes = all(not issubclass(s.type, np.integer) for s in df.dtypes)
_check_align(df, cond, np.nan, check_dtypes=check_dtypes)
def test_where_invalid(self):
# invalid conditions
df = DataFrame(np.random.randn(5, 3), columns=["A", "B", "C"])
cond = df > 0
err1 = (df + 1).values[0:2, :]
msg = "other must be the same shape as self when an ndarray"
with pytest.raises(ValueError, match=msg):
df.where(cond, err1)
err2 = cond.iloc[:2, :].values
other1 = _safe_add(df)
msg = "Array conditional must be same shape as self"
with pytest.raises(ValueError, match=msg):
df.where(err2, other1)
with pytest.raises(ValueError, match=msg):
df.mask(True)
with pytest.raises(ValueError, match=msg):
df.mask(0)
def test_where_set(self, where_frame, float_string_frame):
# where inplace
def _check_set(df, cond, check_dtypes=True):
dfi = df.copy()
econd = cond.reindex_like(df).fillna(True)
expected = dfi.mask(~econd)
return_value = dfi.where(cond, np.nan, inplace=True)
assert return_value is None
tm.assert_frame_equal(dfi, expected)
            # dtypes (and confirm upcasts)
if check_dtypes:
for k, v in df.dtypes.items():
if issubclass(v.type, np.integer) and not cond[k].all():
v = np.dtype("float64")
assert dfi[k].dtype == v
df = where_frame
if df is float_string_frame:
msg = "'>' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
df > 0
return
cond = df > 0
_check_set(df, cond)
cond = df >= 0
_check_set(df, cond)
# aligning
cond = (df >= 0)[1:]
_check_set(df, cond)
def test_where_series_slicing(self):
# GH 10218
# test DataFrame.where with Series slicing
df = DataFrame({"a": range(3), "b": range(4, 7)})
result = df.where(df["a"] == 1)
expected = df[df["a"] == 1].reindex(df.index)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("klass", [list, tuple, np.array])
def test_where_array_like(self, klass):
# see gh-15414
df = DataFrame({"a": [1, 2, 3]})
cond = [[False], [True], [True]]
expected = DataFrame({"a": [np.nan, 2, 3]})
result = df.where(klass(cond))
tm.assert_frame_equal(result, expected)
df["b"] = 2
expected["b"] = [2, np.nan, 2]
cond = [[False, True], [True, False], [True, True]]
result = df.where(klass(cond))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"cond",
[
[[1], [0], [1]],
Series([[2], [5], [7]]),
DataFrame({"a": [2, 5, 7]}),
[["True"], ["False"], ["True"]],
[[Timestamp("2017-01-01")], [pd.NaT], [Timestamp("2017-01-02")]],
],
)
def test_where_invalid_input_single(self, cond):
# see gh-15414: only boolean arrays accepted
df = DataFrame({"a": [1, 2, 3]})
msg = "Boolean array expected for the condition"
with pytest.raises(ValueError, match=msg):
df.where(cond)
@pytest.mark.parametrize(
"cond",
[
[[0, 1], [1, 0], [1, 1]],
Series([[0, 2], [5, 0], [4, 7]]),
[["False", "True"], ["True", "False"], ["True", "True"]],
DataFrame({"a": [2, 5, 7], "b": [4, 8, 9]}),
[
[pd.NaT, Timestamp("2017-01-01")],
[Timestamp("2017-01-02"), pd.NaT],
[Timestamp("2017-01-03"), Timestamp("2017-01-03")],
],
],
)
def test_where_invalid_input_multiple(self, cond):
# see gh-15414: only boolean arrays accepted
df = DataFrame({"a": [1, 2, 3], "b": [2, 2, 2]})
msg = "Boolean array expected for the condition"
with pytest.raises(ValueError, match=msg):
df.where(cond)
def test_where_dataframe_col_match(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
cond = DataFrame([[True, False, True], [False, False, True]])
result = df.where(cond)
expected = DataFrame([[1.0, np.nan, 3], [np.nan, np.nan, 6]])
tm.assert_frame_equal(result, expected)
# this *does* align, though has no matching columns
cond.columns = ["a", "b", "c"]
result = df.where(cond)
expected = DataFrame(np.nan, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_where_ndframe_align(self):
msg = "Array conditional must be same shape as self"
df = DataFrame([[1, 2, 3], [4, 5, 6]])
cond = [True]
with pytest.raises(ValueError, match=msg):
df.where(cond)
expected = DataFrame([[1, 2, 3], [np.nan, np.nan, np.nan]])
out = df.where(Series(cond))
tm.assert_frame_equal(out, expected)
cond = np.array([False, True, False, True])
with pytest.raises(ValueError, match=msg):
df.where(cond)
expected = DataFrame([[np.nan, np.nan, np.nan], [4, 5, 6]])
out = df.where(Series(cond))
tm.assert_frame_equal(out, expected)
def test_where_bug(self):
# see gh-2793
df = DataFrame(
{"a": [1.0, 2.0, 3.0, 4.0], "b": [4.0, 3.0, 2.0, 1.0]}, dtype="float64"
)
expected = DataFrame(
{"a": [np.nan, np.nan, 3.0, 4.0], "b": [4.0, 3.0, np.nan, np.nan]},
dtype="float64",
)
result = df.where(df > 2, np.nan)
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(result > 2, np.nan, inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
def test_where_bug_mixed(self, any_signed_int_numpy_dtype):
# see gh-2793
df = DataFrame(
{
"a": np.array([1, 2, 3, 4], dtype=any_signed_int_numpy_dtype),
"b": np.array([4.0, 3.0, 2.0, 1.0], dtype="float64"),
}
)
expected = DataFrame(
{"a": [np.nan, np.nan, 3.0, 4.0], "b": [4.0, 3.0, np.nan, np.nan]},
dtype="float64",
)
result = df.where(df > 2, np.nan)
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(result > 2, np.nan, inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
def test_where_bug_transposition(self):
# see gh-7506
a = DataFrame({0: [1, 2], 1: [3, 4], 2: [5, 6]})
b = DataFrame({0: [np.nan, 8], 1: [9, np.nan], 2: [np.nan, np.nan]})
do_not_replace = b.isna() | (a > b)
expected = a.copy()
expected[~do_not_replace] = b
result = a.where(do_not_replace, b)
tm.assert_frame_equal(result, expected)
a = DataFrame({0: [4, 6], 1: [1, 0]})
b = DataFrame({0: [np.nan, 3], 1: [3, np.nan]})
do_not_replace = b.isna() | (a > b)
expected = a.copy()
expected[~do_not_replace] = b
result = a.where(do_not_replace, b)
tm.assert_frame_equal(result, expected)
def test_where_datetime(self):
# GH 3311
df = DataFrame(
{
"A": date_range("20130102", periods=5),
"B": date_range("20130104", periods=5),
"C": np.random.randn(5),
}
)
stamp = datetime(2013, 1, 3)
msg = "'>' not supported between instances of 'float' and 'datetime.datetime'"
with pytest.raises(TypeError, match=msg):
df > stamp
result = df[df.iloc[:, :-1] > stamp]
expected = df.copy()
expected.loc[[0, 1], "A"] = np.nan
expected.loc[:, "C"] = np.nan
tm.assert_frame_equal(result, expected)
def test_where_none(self):
# GH 4667
# setting with None changes dtype
df = DataFrame({"series": Series(range(10))}).astype(float)
df[df > 7] = None
expected = DataFrame(
{"series": Series([0, 1, 2, 3, 4, 5, 6, 7, np.nan, np.nan])}
)
tm.assert_frame_equal(df, expected)
# GH 7656
df = DataFrame(
[
{"A": 1, "B": np.nan, "C": "Test"},
{"A": np.nan, "B": "Test", "C": np.nan},
]
)
msg = "boolean setting on mixed-type"
with pytest.raises(TypeError, match=msg):
df.where(~isna(df), None, inplace=True)
def test_where_empty_df_and_empty_cond_having_non_bool_dtypes(self):
# see gh-21947
df = DataFrame(columns=["a"])
cond = df
assert (cond.dtypes == object).all()
result = df.where(cond)
tm.assert_frame_equal(result, df)
def test_where_align(self):
def create():
df = DataFrame(np.random.randn(10, 3))
df.iloc[3:5, 0] = np.nan
df.iloc[4:6, 1] = np.nan
df.iloc[5:8, 2] = np.nan
return df
# series
df = create()
expected = df.fillna(df.mean())
result = df.where(pd.notna(df), df.mean(), axis="columns")
tm.assert_frame_equal(result, expected)
return_value = df.where(pd.notna(df), df.mean(), inplace=True, axis="columns")
assert return_value is None
tm.assert_frame_equal(df, expected)
df = create().fillna(0)
expected = df.apply(lambda x, y: x.where(x > 0, y), y=df[0])
result = df.where(df > 0, df[0], axis="index")
tm.assert_frame_equal(result, expected)
result = df.where(df > 0, df[0], axis="rows")
tm.assert_frame_equal(result, expected)
# frame
df = create()
expected = df.fillna(1)
result = df.where(
pd.notna(df), DataFrame(1, index=df.index, columns=df.columns)
)
tm.assert_frame_equal(result, expected)
def test_where_complex(self):
# GH 6345
expected = DataFrame([[1 + 1j, 2], [np.nan, 4 + 1j]], columns=["a", "b"])
df = DataFrame([[1 + 1j, 2], [5 + 1j, 4 + 1j]], columns=["a", "b"])
df[df.abs() >= 5] = np.nan
tm.assert_frame_equal(df, expected)
def test_where_axis(self, using_array_manager):
# GH 9736
df = DataFrame(np.random.randn(2, 2))
mask = DataFrame([[False, False], [False, False]])
ser = Series([0, 1])
expected = DataFrame([[0, 0], [1, 1]], dtype="float64")
result = df.where(mask, ser, axis="index")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, ser, axis="index", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
expected = DataFrame([[0, 1], [0, 1]], dtype="float64")
result = df.where(mask, ser, axis="columns")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, ser, axis="columns", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
def test_where_axis_with_upcast(self):
# Upcast needed
df = DataFrame([[1, 2], [3, 4]], dtype="int64")
mask = DataFrame([[False, False], [False, False]])
ser = Series([0, np.nan])
expected = DataFrame([[0, 0], [np.nan, np.nan]], dtype="float64")
result = df.where(mask, ser, axis="index")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, ser, axis="index", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
expected = DataFrame([[0, np.nan], [0, np.nan]])
result = df.where(mask, ser, axis="columns")
tm.assert_frame_equal(result, expected)
expected = DataFrame(
{
0: np.array([0, 0], dtype="int64"),
1: np.array([np.nan, np.nan], dtype="float64"),
}
)
result = df.copy()
return_value = result.where(mask, ser, axis="columns", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
def test_where_axis_multiple_dtypes(self):
# Multiple dtypes (=> multiple Blocks)
df = pd.concat(
[
DataFrame(np.random.randn(10, 2)),
DataFrame(np.random.randint(0, 10, size=(10, 2)), dtype="int64"),
],
ignore_index=True,
axis=1,
)
mask = DataFrame(False, columns=df.columns, index=df.index)
s1 = Series(1, index=df.columns)
s2 = Series(2, index=df.index)
result = df.where(mask, s1, axis="columns")
expected = DataFrame(1.0, columns=df.columns, index=df.index)
expected[2] = expected[2].astype("int64")
expected[3] = expected[3].astype("int64")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, s1, axis="columns", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
result = df.where(mask, s2, axis="index")
expected = DataFrame(2.0, columns=df.columns, index=df.index)
expected[2] = expected[2].astype("int64")
expected[3] = expected[3].astype("int64")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, s2, axis="index", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
# DataFrame vs DataFrame
d1 = df.copy().drop(1, axis=0)
expected = df.copy()
expected.loc[1, :] = np.nan
result = df.where(mask, d1)
tm.assert_frame_equal(result, expected)
result = df.where(mask, d1, axis="index")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, d1, inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, d1, inplace=True, axis="index")
assert return_value is None
tm.assert_frame_equal(result, expected)
d2 = df.copy().drop(1, axis=1)
expected = df.copy()
expected.loc[:, 1] = np.nan
result = df.where(mask, d2)
tm.assert_frame_equal(result, expected)
result = df.where(mask, d2, axis="columns")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, d2, inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, d2, inplace=True, axis="columns")
assert return_value is None
tm.assert_frame_equal(result, expected)
def test_where_callable(self):
# GH 12533
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.where(lambda x: x > 4, lambda x: x + 1)
exp = DataFrame([[2, 3, 4], [5, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.where(df > 4, df + 1))
# return ndarray and scalar
result = df.where(lambda x: (x % 2 == 0).values, lambda x: 99)
exp = DataFrame([[99, 2, 99], [4, 99, 6], [99, 8, 99]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.where(df % 2 == 0, 99))
# chain
result = (df + 2).where(lambda x: x > 8, lambda x: x + 10)
exp = DataFrame([[13, 14, 15], [16, 17, 18], [9, 10, 11]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, (df + 2).where((df + 2) > 8, (df + 2) + 10))
def test_where_tz_values(self, tz_naive_fixture, frame_or_series):
obj1 = DataFrame(
DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture),
columns=["date"],
)
obj2 = DataFrame(
DatetimeIndex(["20150103", "20150104", "20150105"], tz=tz_naive_fixture),
columns=["date"],
)
mask = DataFrame([True, True, False], columns=["date"])
exp = DataFrame(
DatetimeIndex(["20150101", "20150102", "20150105"], tz=tz_naive_fixture),
columns=["date"],
)
if frame_or_series is Series:
obj1 = obj1["date"]
obj2 = obj2["date"]
mask = mask["date"]
exp = exp["date"]
result = obj1.where(mask, obj2)
tm.assert_equal(exp, result)
def test_df_where_change_dtype(self):
# GH#16979
df = DataFrame(np.arange(2 * 3).reshape(2, 3), columns=list("ABC"))
mask = np.array([[True, False, False], [False, False, True]])
result = df.where(mask)
expected = DataFrame(
[[0, np.nan, np.nan], [np.nan, np.nan, 5]], columns=list("ABC")
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [{}, {"other": None}])
def test_df_where_with_category(self, kwargs):
# GH#16979
df = DataFrame(np.arange(2 * 3).reshape(2, 3), columns=list("ABC"))
mask = np.array([[True, False, False], [False, False, True]])
# change type to category
df.A = df.A.astype("category")
df.B = df.B.astype("category")
df.C = df.C.astype("category")
result = df.where(mask, **kwargs)
A = pd.Categorical([0, np.nan], categories=[0, 3])
B = pd.Categorical([np.nan, np.nan], categories=[1, 4])
C = pd.Categorical([np.nan, 5], categories=[2, 5])
expected = DataFrame({"A": A, "B": B, "C": C})
tm.assert_frame_equal(result, expected)
# Check Series.where while we're here
result = df.A.where(mask[:, 0], **kwargs)
expected = Series(A, name="A")
tm.assert_series_equal(result, expected)
def test_where_categorical_filtering(self):
# GH#22609 Verify filtering operations on DataFrames with categorical Series
df = DataFrame(data=[[0, 0], [1, 1]], columns=["a", "b"])
df["b"] = df["b"].astype("category")
result = df.where(df["a"] > 0)
expected = df.copy()
expected.loc[0, :] = np.nan
tm.assert_equal(result, expected)
def test_where_ea_other(self):
# GH#38729/GH#38742
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
arr = pd.array([7, pd.NA, 9])
ser = Series(arr)
mask = np.ones(df.shape, dtype=bool)
mask[1, :] = False
# TODO: ideally we would get Int64 instead of object
result = df.where(mask, ser, axis=0)
expected = DataFrame({"A": [1, pd.NA, 3], "B": [4, pd.NA, 6]}).astype(object)
tm.assert_frame_equal(result, expected)
ser2 = Series(arr[:2], index=["A", "B"])
expected = DataFrame({"A": [1, 7, 3], "B": [4, pd.NA, 6]})
expected["B"] = expected["B"].astype(object)
result = df.where(mask, ser2, axis=1)
tm.assert_frame_equal(result, expected)
def test_where_interval_noop(self):
# GH#44181
df = DataFrame([pd.Interval(0, 0)])
res = df.where(df.notna())
tm.assert_frame_equal(res, df)
ser = df[0]
res = ser.where(ser.notna())
tm.assert_series_equal(res, ser)
def test_where_interval_fullop_downcast(self, frame_or_series):
# GH#45768
obj = frame_or_series([pd.Interval(0, 0)] * 2)
other = frame_or_series([1.0, 2.0])
res = obj.where(~obj.notna(), other)
# since all entries are being changed, we will downcast result
# from object to ints (not floats)
tm.assert_equal(res, other.astype(np.int64))
# unlike where, Block.putmask does not downcast
obj.mask(obj.notna(), other, inplace=True)
tm.assert_equal(obj, other.astype(object))
@pytest.mark.parametrize(
"dtype",
[
"timedelta64[ns]",
"datetime64[ns]",
"datetime64[ns, Asia/Tokyo]",
"Period[D]",
],
)
def test_where_datetimelike_noop(self, dtype):
# GH#45135, analogue to GH#44181 for Period don't raise on no-op
# For td64/dt64/dt64tz we already don't raise, but also are
# checking that we don't unnecessarily upcast to object.
ser = Series(np.arange(3) * 10**9, dtype=np.int64).view(dtype)
df = ser.to_frame()
mask = np.array([False, False, False])
res = ser.where(~mask, "foo")
tm.assert_series_equal(res, ser)
mask2 = mask.reshape(-1, 1)
res2 = df.where(~mask2, "foo")
tm.assert_frame_equal(res2, df)
res3 = ser.mask(mask, "foo")
tm.assert_series_equal(res3, ser)
res4 = df.mask(mask2, "foo")
tm.assert_frame_equal(res4, df)
# opposite case where we are replacing *all* values -> we downcast
# from object dtype # GH#45768
res5 = df.where(mask2, 4)
expected = DataFrame(4, index=df.index, columns=df.columns)
tm.assert_frame_equal(res5, expected)
# unlike where, Block.putmask does not downcast
df.mask(~mask2, 4, inplace=True)
tm.assert_frame_equal(df, expected.astype(object))
def test_where_try_cast_deprecated(frame_or_series):
obj = DataFrame(np.random.randn(4, 3))
obj = tm.get_obj(obj, frame_or_series)
mask = obj > 0
with tm.assert_produces_warning(FutureWarning):
# try_cast keyword deprecated
obj.where(mask, -1, try_cast=False)
def test_where_int_downcasting_deprecated(using_array_manager, request):
# GH#44597
if not using_array_manager:
mark = pytest.mark.xfail(
reason="After fixing a bug in can_hold_element, we don't go through "
"the deprecated path, and also up-cast both columns to int32 "
"instead of just 1."
)
request.node.add_marker(mark)
arr = np.arange(6).astype(np.int16).reshape(3, 2)
df = DataFrame(arr)
mask = np.zeros(arr.shape, dtype=bool)
mask[:, 0] = True
msg = "Downcasting integer-dtype"
warn = FutureWarning if not using_array_manager else None
with tm.assert_produces_warning(warn, match=msg):
res = df.where(mask, 2**17)
expected = DataFrame({0: arr[:, 0], 1: np.array([2**17] * 3, dtype=np.int32)})
tm.assert_frame_equal(res, expected)
def test_where_copies_with_noop(frame_or_series):
# GH-39595
result = frame_or_series([1, 2, 3, 4])
expected = result.copy()
col = result[0] if frame_or_series is DataFrame else result
where_res = result.where(col < 5)
where_res *= 2
tm.assert_equal(result, expected)
where_res = result.where(col > 5, [1, 2, 3, 4])
where_res *= 2
tm.assert_equal(result, expected)
def test_where_string_dtype(frame_or_series):
# GH40824
obj = frame_or_series(
["a", "b", "c", "d"], index=["id1", "id2", "id3", "id4"], dtype=StringDtype()
)
filtered_obj = frame_or_series(
["b", "c"], index=["id2", "id3"], dtype=StringDtype()
)
filter_ser = Series([False, True, True, False])
result = obj.where(filter_ser, filtered_obj)
expected = frame_or_series(
[pd.NA, "b", "c", pd.NA],
index=["id1", "id2", "id3", "id4"],
dtype=StringDtype(),
)
tm.assert_equal(result, expected)
result = obj.mask(~filter_ser, filtered_obj)
tm.assert_equal(result, expected)
obj.mask(~filter_ser, filtered_obj, inplace=True)
tm.assert_equal(result, expected)
def test_where_bool_comparison():
# GH 10336
df_mask = DataFrame(
{"AAA": [True] * 4, "BBB": [False] * 4, "CCC": [True, False, True, False]}
)
result = df_mask.where(df_mask == False) # noqa:E712
expected = DataFrame(
{
"AAA": np.array([np.nan] * 4, dtype=object),
"BBB": [False] * 4,
"CCC": [np.nan, False, np.nan, False],
}
)
tm.assert_frame_equal(result, expected)
def test_where_none_nan_coerce():
# GH 15613
expected = DataFrame(
{
"A": [Timestamp("20130101"), pd.NaT, Timestamp("20130103")],
"B": [1, 2, np.nan],
}
)
result = expected.where(expected.notnull(), None)
tm.assert_frame_equal(result, expected)
def test_where_non_keyword_deprecation(frame_or_series):
# GH 41485
obj = frame_or_series(range(5))
msg = (
"In a future version of pandas all arguments of "
f"{frame_or_series.__name__}.where except for the arguments 'cond' "
"and 'other' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = obj.where(obj > 1, 10, False)
expected = frame_or_series([10, 10, 2, 3, 4])
tm.assert_equal(expected, result)
def test_where_columns_casting():
# GH 42295
df = DataFrame({"a": [1.0, 2.0], "b": [3, np.nan]})
expected = df.copy()
result = df.where(pd.notnull(df), None)
# make sure dtypes don't change
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize("as_cat", [True, False])
def test_where_period_invalid_na(frame_or_series, as_cat, request):
# GH#44697
idx = pd.period_range("2016-01-01", periods=3, freq="D")
if as_cat:
idx = idx.astype("category")
obj = frame_or_series(idx)
# NA value that we should *not* cast to Period dtype
tdnat = pd.NaT.to_numpy("m8[ns]")
mask = np.array([True, True, False], ndmin=obj.ndim).T
if as_cat:
msg = (
r"Cannot setitem on a Categorical with a new category \(NaT\), "
"set the categories first"
)
if np_version_under1p19:
mark = pytest.mark.xfail(
reason="When evaluating the f-string to generate the exception "
"message, numpy somehow ends up trying to cast None to int, so "
"ends up raising TypeError but with an unrelated message."
)
request.node.add_marker(mark)
else:
msg = "value should be a 'Period'"
if as_cat:
with pytest.raises(TypeError, match=msg):
obj.where(mask, tdnat)
with pytest.raises(TypeError, match=msg):
obj.mask(mask, tdnat)
with pytest.raises(TypeError, match=msg):
obj.mask(mask, tdnat, inplace=True)
else:
# With PeriodDtype, ser[i] = tdnat coerces instead of raising,
# so for consistency, ser[mask] = tdnat must as well
expected = obj.astype(object).where(mask, tdnat)
result = obj.where(mask, tdnat)
tm.assert_equal(result, expected)
expected = obj.astype(object).mask(mask, tdnat)
result = obj.mask(mask, tdnat)
tm.assert_equal(result, expected)
obj.mask(mask, tdnat, inplace=True)
tm.assert_equal(obj, expected)
def test_where_nullable_invalid_na(frame_or_series, any_numeric_ea_dtype):
# GH#44697
arr = pd.array([1, 2, 3], dtype=any_numeric_ea_dtype)
obj = frame_or_series(arr)
mask = np.array([True, True, False], ndmin=obj.ndim).T
msg = r"Invalid value '.*' for dtype (U?Int|Float)\d{1,2}"
for null in tm.NP_NAT_OBJECTS + [pd.NaT]:
# NaT is an NA value that we should *not* cast to pd.NA dtype
with pytest.raises(TypeError, match=msg):
obj.where(mask, null)
with pytest.raises(TypeError, match=msg):
obj.mask(mask, null)
@given(data=OPTIONAL_ONE_OF_ALL)
def test_where_inplace_casting(data):
# GH 22051
df = DataFrame({"a": data})
df_copy = df.where(pd.notnull(df), None).copy()
df.where(pd.notnull(df), None, inplace=True)
tm.assert_equal(df, df_copy)
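# --- Editorial usage sketch (not part of the original test module) ---
# A minimal, hedged reminder of the `where`/`mask` semantics exercised by the
# tests above: entries where the condition is False are replaced by `other`
# (NaN by default). Assumes DataFrame is imported at the top of this module.
if __name__ == "__main__":
    demo_df = DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
    print(demo_df.where(demo_df > 1))           # values <= 1 become NaN
    print(demo_df.where(demo_df > 1, other=0))  # values <= 1 become 0
    print(demo_df.mask(demo_df > 1, other=0))   # mask replaces where cond is True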
|
py | b4117f9fce1264d8fd5b460f4f88c027de9a276b | """Celer algorithm to solve the Lasso"""
from .homotopy import celer_path
from .wrapper import celer
from .dropin_sklearn import Lasso, LassoCV
__version__ = '0.3dev'
|
py | b4117fc9f52aa36ca48bf72decce2d43fd834701 | """
A demo of representing data with Python's built-in dataclasses
"""
import json
from dataclasses import (
asdict, dataclass, field, fields, is_dataclass
)
# For nested dataclasses, a deep traversal is needed when encoding to JSON
class EnhancedJSONEncoder(json.JSONEncoder):
def default(self, o):
if is_dataclass(o):
return asdict(o)
return super().default(o)
def dicts_to_dataclasses(instance):
"""将所有的数据类属性都转化到数据类中"""
cls = type(instance)
for f in fields(cls):
if not is_dataclass(f.type):
continue
value = getattr(instance, f.name)
if not isinstance(value, dict):
continue
new_value = f.type(**value)
setattr(instance, f.name, new_value)
@dataclass
class Base:
def __post_init__(self):
dicts_to_dataclasses(self)
def as_dict(self):
return asdict(self)
def as_json(self):
return json.dumps(self, cls=EnhancedJSONEncoder)
@dataclass
class Cover(Base):
id: str = None
cover_id: str = None
offset_x: str = field(default=None, repr=False)
offset_y: str = field(default=None, repr=False)
source: str = field(default=None, repr=False)
@dataclass
class Page(Base):
id: str = None
about: str = field(default='', repr=False)
birthday: str = field(default=None, repr=False)
name: str = None
username: str = None
fan_count: int = field(default=None, repr=False)
cover: Cover = field(default=None, repr=False)
if __name__ == '__main__':
data = {
"id": "20531316728",
"about": "The Facebook Page celebrates how our friends inspire us, support us, and help us discover the world when we connect.",
"birthday": "02/04/2004",
"name": "Facebook",
"username": "facebookapp",
"fan_count": 214643503,
"cover": {
"cover_id": "10158913960541729",
"offset_x": 50,
"offset_y": 50,
"source": "https://scontent.xx.fbcdn.net/v/t1.0-9/s720x720/73087560_10158913960546729_8876113648821469184_o.jpg?_nc_cat=1&_nc_ohc=bAJ1yh0abN4AQkSOGhMpytya2quC_uS0j0BF-XEVlRlgwTfzkL_F0fojQ&_nc_ht=scontent.xx&oh=2964a1a64b6b474e64b06bdb568684da&oe=5E454425",
"id": "10158913960541729"
}
}
    # Load the raw dict into the dataclasses
p = Page(**data)
print(p.name)
print(p)
print(p.cover)
print(p.as_dict())
print(p.as_json())
|
py | b4118014c799dc45bf645cfa79f70293f7e49c11 | from django.urls import path
from rest_framework.routers import DefaultRouter
from . import views
router = DefaultRouter()
router.register('user', views.UserViewSet, base_name='user')
router.register('posting', views.PostingViewSet, base_name='posting')
router.register('reposting', views.RepostingViewSet, base_name='reposting')
router.register('category', views.CategoryViewSet, base_name='category')
urlpatterns = router.urls
urlpatterns += [
path('login',views.login),
path('register',views.register),
path('agreement',views.agreement),
path('password',views.password),
path('posting/thumb',views.thumbposting),
path('reposting/thumb',views.thumbreposting),
path('head',views.head),
path('resource',views.resource)
] |
py | b41180ab702ee62ced7dabbebaf071bd6c65251a | from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from PIL import Image
from ..builder import PIPELINES
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
"""
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError(
f'Type {type(data)} cannot be converted to tensor.'
'Supported types are: `numpy.ndarray`, `torch.Tensor`, '
'`Sequence`, `int` and `float`')
@PIPELINES.register_module()
class ToTensor(object):
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class ImageToTensor(object):
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
for key in self.keys:
img = results[key]
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
results[key] = to_tensor(img.transpose(2, 0, 1))
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class Transpose(object):
def __init__(self, keys, order):
self.keys = keys
self.order = order
def __call__(self, results):
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
return self.__class__.__name__ + \
f'(keys={self.keys}, order={self.order})'
@PIPELINES.register_module()
class ToPIL(object):
def __init__(self):
pass
def __call__(self, results):
results['img'] = Image.fromarray(results['img'])
return results
@PIPELINES.register_module()
class ToNumpy(object):
def __init__(self):
pass
def __call__(self, results):
results['img'] = np.array(results['img'], dtype=np.float32)
return results
@PIPELINES.register_module()
class Collect(object):
"""Collect data from the loader relevant to the specific task.
This is usually the last stage of the data loader pipeline. Typically keys
is set to some subset of "img" and "gt_label".
Args:
keys (Sequence[str]): Keys of results to be collected in ``data``.
meta_keys (Sequence[str], optional): Meta keys to be converted to
``mmcv.DataContainer`` and collected in ``data[img_metas]``.
Default: ``('filename', 'ori_shape', 'img_shape', 'flip',
'flip_direction', 'img_norm_cfg')``
Returns:
dict: The result dict contains the following keys
- keys in``self.keys``
- ``img_metas`` if available
"""
def __init__(self,
keys,
meta_keys=('filename', 'ori_filename', 'ori_shape',
'img_shape', 'flip', 'flip_direction',
'img_norm_cfg')):
self.keys = keys
self.meta_keys = meta_keys
def __call__(self, results):
data = {}
img_meta = {}
for key in self.meta_keys:
if key in results:
img_meta[key] = results[key]
data['img_metas'] = DC(img_meta, cpu_only=True)
for key in self.keys:
data[key] = results[key]
return data
def __repr__(self):
return self.__class__.__name__ + \
f'(keys={self.keys}, meta_keys={self.meta_keys})'
@PIPELINES.register_module()
class WrapFieldsToLists(object):
"""Wrap fields of the data dictionary into lists for evaluation.
This class can be used as a last step of a test or validation
pipeline for single image evaluation or inference.
Example:
>>> test_pipeline = [
>>> dict(type='LoadImageFromFile'),
>>> dict(type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
>>> dict(type='ImageToTensor', keys=['img']),
>>> dict(type='Collect', keys=['img']),
        >>>    dict(type='WrapFieldsToLists')
>>> ]
"""
def __call__(self, results):
# Wrap dict fields into lists
for key, val in results.items():
results[key] = [val]
return results
def __repr__(self):
return f'{self.__class__.__name__}()'
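# --- Editorial usage sketch (not part of the original module) ---
# Illustrates chaining the formatting transforms above on a synthetic
# ``results`` dict; shapes, keys and values here are assumptions, and the
# snippet only runs where mmcv/torch are installed and the module is imported
# as part of its package (it relies on the relative import above).
if __name__ == '__main__':
    demo = {'img': np.random.randint(0, 255, (8, 8, 3)).astype(np.float32),
            'gt_label': 1}
    for t in (ImageToTensor(keys=['img']), ToTensor(keys=['gt_label']),
              Collect(keys=['img', 'gt_label'])):
        demo = t(demo)
    print(demo['img'].shape, demo['gt_label'], demo['img_metas'])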
|
py | b41180e19369a791ff2725df073f13242b6d0136 | import os
import pdb
rootdir = '/net/acadia9a/data/jchoi/data/kinetics/RGB-feature3_i3d'
# rootdir = '/net/acadia9a/data/jchoi/data/kinetics/test_rgb'
listsub_dirs = ['train', 'val']
# train or val
for subdir in listsub_dirs:
list_cls_dirs = os.listdir(os.path.join(rootdir,subdir))
for cur_cls_dir in list_cls_dirs:
cur_path = os.path.join(rootdir,subdir,cur_cls_dir)
print(cur_path)
cmd = 'mv "{}"/* "{}"/../../'.format(cur_path,cur_path)
print(cmd)
os.system(cmd) |
py | b4118243f472e2fd11b0fb902ec12196b8f7f48f | import os
class LEDError(IOError):
"""Base class for LED errors."""
pass
class LED(object):
def __init__(self, name, brightness=None):
"""Instantiate an LED object and open the sysfs LED corresponding to
the specified name.
`brightness` can be a boolean for on/off, integer value for a specific
brightness, or None to preserve existing brightness. Default is
preserve existing brightness.
Args:
name (str): Linux led name.
brightness (bool, int, None): Initial brightness.
Returns:
LED: LED object.
Raises:
LEDError: if an I/O or OS error occurs.
TypeError: if `name` or `brightness` types are invalid.
ValueError: if `brightness` value is invalid.
"""
self._fd = None
self._name = None
self._max_brightness = None
self._open(name, brightness)
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, t, value, traceback):
self.close()
def _open(self, name, brightness):
if not isinstance(name, str):
raise TypeError("Invalid name type, should be string.")
if not isinstance(brightness, (bool, int, type(None))):
raise TypeError("Invalid brightness type, should be bool, int, or None.")
led_path = "/sys/class/leds/%s" % name
if not os.path.isdir(led_path):
raise ValueError("LED %s not found!" % name)
# Read max brightness
try:
with open("/sys/class/leds/%s/max_brightness" % name, "r") as f_max_brightness:
max_brightness = int(f_max_brightness.read())
except IOError as e:
raise LEDError(e.errno, "Reading LED max brightness: " + e.strerror)
# Open brightness
try:
self._fd = os.open("/sys/class/leds/%s/brightness" % name, os.O_RDWR)
except OSError as e:
raise LEDError(e.errno, "Opening LED brightness: " + e.strerror)
self._max_brightness = max_brightness
self._name = name
# Set initial brightness
if brightness:
self.write(brightness)
# Methods
def read(self):
"""Read the brightness of the LED.
Returns:
int: Current brightness.
Raises:
LEDError: if an I/O or OS error occurs.
"""
# Read value
try:
buf = os.read(self._fd, 8)
except OSError as e:
raise LEDError(e.errno, "Reading LED brightness: " + e.strerror)
# Rewind
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise LEDError(e.errno, "Rewinding LED brightness: " + e.strerror)
return int(buf)
def write(self, brightness):
"""Set the brightness of the LED to `brightness`.
`brightness` can be a boolean for on/off, or integer value for a
specific brightness.
Args:
brightness (bool, int): Brightness value to set.
Raises:
LEDError: if an I/O or OS error occurs.
TypeError: if `brightness` type is not bool or int.
"""
if not isinstance(brightness, (bool, int)):
raise TypeError("Invalid brightness type, should be bool or int.")
if isinstance(brightness, bool):
brightness = self._max_brightness if brightness else 0
else:
if not 0 <= brightness <= self._max_brightness:
raise ValueError("Invalid brightness value, should be between 0 and %d." % self._max_brightness)
# Write value
try:
os.write(self._fd, b"%d\n" % brightness)
except OSError as e:
raise LEDError(e.errno, "Writing LED brightness: " + e.strerror)
# Rewind
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise LEDError(e.errno, "Rewinding LED brightness: " + e.strerror)
def close(self):
"""Close the sysfs LED.
Raises:
LEDError: if an I/O or OS error occurs.
"""
if self._fd is None:
return
try:
os.close(self._fd)
except OSError as e:
raise LEDError(e.errno, "Closing LED: " + e.strerror)
self._fd = None
# Immutable properties
@property
def fd(self):
"""Get the file descriptor for the underlying sysfs LED "brightness"
file of the LED object.
:type: int
"""
return self._fd
@property
def name(self):
"""Get the sysfs LED name.
:type: str
"""
return self._name
@property
def max_brightness(self):
"""Get the LED's max brightness.
:type: int
"""
return self._max_brightness
# Mutable properties
def _get_brightness(self):
# Read brightness
return self.read()
def _set_brightness(self, brightness):
return self.write(brightness)
brightness = property(_get_brightness, _set_brightness)
"""Get or set the LED's brightness.
    Value can be a boolean for on/off, or an integer value for a specific
brightness.
Raises:
LEDError: if an I/O or OS error occurs.
TypeError: if `brightness` type is not bool or int.
ValueError: if `brightness` value is invalid.
:type: int
"""
# String representation
def __str__(self):
return "LED %s (fd=%d, max_brightness=%d)" % (self._name, self._fd, self._max_brightness)
|
py | b41185320650b91632ca7f3391888c1b3ba55049 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""
"""
from collections import namedtuple
import json
import threading
from fasteners import InterProcessLock
from functools import lru_cache
import datalad
from datalad.consts import (
DATASET_CONFIG_FILE,
)
from datalad.cmd import (
GitWitlessRunner,
StdOutErrCapture,
)
import re
import os
from pathlib import Path
import logging
lgr = logging.getLogger('datalad.config')
# git-config key syntax with a section and a subsection
# see git-config(1) for syntax details
cfg_k_regex = re.compile(r'([a-zA-Z0-9-.]+\.[^\0\n]+)$', flags=re.MULTILINE)
# identical to the key regex, but with an additional group for a
# value in a null-delimited git-config dump
cfg_kv_regex = re.compile(
r'([a-zA-Z0-9-.]+\.[^\0\n]+)\n(.*)$',
flags=re.MULTILINE | re.DOTALL
)
cfg_section_regex = re.compile(r'(.*)\.[^.]+')
cfg_sectionoption_regex = re.compile(r'(.*)\.([^.]+)')
_where_reload_doc = """
where : {'dataset', 'local', 'global', 'override'}, optional
Indicator which configuration file to modify. 'dataset' indicates the
persistent configuration in .datalad/config of a dataset; 'local'
the configuration of a dataset's Git repository in .git/config;
'global' refers to the general configuration that is not specific to
          a single repository (usually in $HOME/.gitconfig); 'override'
limits the modification to the ConfigManager instance, and the
assigned value overrides any setting from any other source.
reload : bool
Flag whether to reload the configuration from file(s) after
          modification. This can be disabled to make multiple sequential
modifications slightly more efficient.""".lstrip()
# Selection of os.stat_result fields we care to collect/compare to judge
# on either file has changed to warrant reload of configuration.
_stat_result = namedtuple('_stat_result', 'st_ino st_size st_ctime st_mtime')
# we cannot import external_versions here, as the cfg comes before anything
# and we would have circular imports
@lru_cache()
def get_git_version(runner=None):
"""Return version of available git"""
runner = runner or GitWitlessRunner()
return runner.run('git version'.split(),
protocol=StdOutErrCapture)['stdout'].split()[2]
def _where_reload(obj):
"""Helper decorator to simplify providing repetitive docstring"""
obj.__doc__ = obj.__doc__ % _where_reload_doc
return obj
def parse_gitconfig_dump(dump, cwd=None, multi_value=True):
"""Parse a dump-string from `git config -z --list`
This parser has limited support for discarding unrelated output
that may contaminate the given dump. It does so performing a
relatively strict matching of configuration key syntax, and discarding
lines in the output that are not valid git-config keys.
There is also built-in support for parsing outputs generated
with --show-origin (see return value).
Parameters
----------
dump : str
Null-byte separated output
cwd : path-like, optional
Use this absolute path to convert relative paths for origin reports
into absolute paths. By default, the process working directory
PWD is used.
multi_value : bool, optional
If True, report values from multiple specifications of the
same key as a tuple of values assigned to this key. Otherwise,
the last configuration is reported.
Returns:
--------
dict, set
Configuration items are returned as key/value pairs in a dictionary.
The second tuple-item will be a set of path objects comprising all
source files, if origin information was included in the dump
(--show-origin). An empty set is returned otherwise.
"""
dct = {}
fileset = set()
for line in dump.split('\0'):
# line is a null-delimited chunk
k = None
# in anticipation of output contamination, process within a loop
# where we can reject non syntax compliant pieces
while line:
if line.startswith('file:'):
# origin line
fname = Path(line[5:])
if not fname.is_absolute():
fname = Path(cwd) / fname if cwd else Path.cwd() / fname
fileset.add(fname)
break
if line.startswith('command line:'):
                # no origin that we could represent as a pathobj
break
# try getting key/value pair from the present chunk
k, v = _gitcfg_rec_to_keyvalue(line)
if k is not None:
# we are done with this chunk when there is a good key
break
# discard the first line and start over
ignore, line = line.split('\n', maxsplit=1)
lgr.debug('Non-standard git-config output, ignoring: %s', ignore)
if not k:
# nothing else to log, all ignored dump was reported before
continue
# multi-value reporting
present_v = dct.get(k, None)
if present_v is None or not multi_value:
dct[k] = v
else:
if isinstance(present_v, tuple):
dct[k] = present_v + (v,)
else:
dct[k] = (present_v, v)
return dct, fileset
# keep alias with previous name for now
_parse_gitconfig_dump = parse_gitconfig_dump
def _gitcfg_rec_to_keyvalue(rec):
"""Helper for parse_gitconfig_dump()
Parameters
----------
rec: str
Key/value specification string
Returns
-------
str, str
Parsed key and value. Key and/or value could be None
      if not syntax-compliant (former) or absent (latter).
"""
kv_match = cfg_kv_regex.match(rec)
if kv_match:
k, v = kv_match.groups()
elif cfg_k_regex.match(rec):
# could be just a key without = value, which git treats as True
# if asked for a bool
k, v = rec, None
else:
# no value, no good key
k = v = None
return k, v
def _update_from_env(store):
overrides = {}
dct = {}
for k in os.environ:
if k == "DATALAD_CONFIG_OVERRIDES_JSON":
try:
overrides = json.loads(os.environ[k])
except json.decoder.JSONDecodeError as exc:
lgr.warning("Failed to load DATALAD_CONFIG_OVERRIDES_JSON: %s",
exc)
elif k.startswith('DATALAD_'):
dct[k.replace('__', '-').replace('_', '.').lower()] = os.environ[k]
if overrides:
store.update(overrides)
store.update(dct)
def anything2bool(val):
if hasattr(val, 'lower'):
val = val.lower()
if val in {"off", "no", "false", "0"} or not bool(val):
return False
elif val in {"on", "yes", "true", True} \
or (hasattr(val, 'isdigit') and val.isdigit() and int(val)) \
or isinstance(val, int) and val:
return True
else:
raise TypeError(
"Got value %s which could not be interpreted as a boolean"
% repr(val))
class ConfigManager(object):
"""Thin wrapper around `git-config` with support for a dataset configuration.
The general idea is to have an object that is primarily used to read/query
configuration option. Upon creation, current configuration is read via one
(or max two, in the case of the presence of dataset-specific configuration)
calls to `git config`. If this class is initialized with a Dataset
instance, it supports reading and writing configuration from
``.datalad/config`` inside a dataset too. This file is committed to Git and
hence useful to ship certain configuration items with a dataset.
The API aims to provide the most significant read-access API of a
dictionary, the Python ConfigParser, and GitPython's config parser
implementations.
This class is presently not capable of efficiently writing multiple
configurations items at once. Instead, each modification results in a
dedicated call to `git config`. This author thinks this is OK, as he
cannot think of a situation where a large number of items need to be
written during normal operation.
Each instance carries a public `overrides` attribute. This dictionary
contains variables that override any setting read from a file. The overrides
are persistent across reloads.
Any DATALAD_* environment variable is also presented as a configuration
item. Settings read from environment variables are not stored in any of the
configuration files, but are read dynamically from the environment at each
`reload()` call. Their values take precedence over any specification in
configuration files, and even overrides.
Parameters
----------
dataset : Dataset, optional
If provided, all `git config` calls are executed in this dataset's
directory. Moreover, any modifications are, by default, directed to
this dataset's configuration file (which will be created on demand)
overrides : dict, optional
Variable overrides, see general class documentation for details.
source : {'any', 'local', 'dataset', 'dataset-local'}, optional
Which sources of configuration setting to consider. If 'dataset',
configuration items are only read from a dataset's persistent
configuration file, if any is present (the one in ``.datalad/config``, not
``.git/config``); if 'local', any non-committed source is considered
(local and global configuration in Git config's terminology);
if 'dataset-local', persistent dataset configuration and local, but
not global or system configuration are considered; if 'any'
all possible sources of configuration are considered.
"""
_checked_git_identity = False
    # Lock for running modifying operations across multiple threads.
    # Since a config file at the same path could potentially be created
    # independently by multiple threads, and we might be modifying the global
    # config as well, making the lock static ensures that no more than one
    # thread writes at a time, even if to different repositories.
_run_lock = threading.Lock()
def __init__(self, dataset=None, overrides=None, source='any'):
if source not in ('any', 'local', 'dataset', 'dataset-local'):
raise ValueError(
'Unknown ConfigManager(source=) setting: {}'.format(source))
store = dict(
# store in a simple dict
# no subclassing, because we want to be largely read-only, and implement
# config writing separately
cfg={},
# track the files that jointly make up the config in this store
files=set(),
# and their modification times to be able to avoid needless unforced reloads
stats=None,
)
self._stores = dict(
# populated with info from git
git=store,
# only populated with info from committed dataset config
dataset=store.copy(),
)
# merged representation (the only one that existed pre datalad 0.14)
# will be built on initial reload
self._merged_store = {}
self._repo_dot_git = None
self._repo_pathobj = None
if dataset:
if hasattr(dataset, 'dot_git'):
self._repo_dot_git = dataset.dot_git
self._repo_pathobj = dataset.pathobj
elif dataset.repo:
self._repo_dot_git = dataset.repo.dot_git
self._repo_pathobj = dataset.repo.pathobj
self._config_cmd = ['git', 'config']
# public dict to store variables that always override any setting
# read from a file
# `hasattr()` is needed because `datalad.cfg` is generated upon first module
# import, hence when this code runs first, there cannot be any config manager
# to inherit from
self.overrides = datalad.cfg.overrides.copy() if hasattr(datalad, 'cfg') else {}
if overrides is not None:
self.overrides.update(overrides)
if dataset is None:
if source in ('dataset', 'dataset-local'):
raise ValueError(
'ConfigManager configured to read dataset only, '
'but no dataset given')
# The caller didn't specify a repository. Unset the git directory
# when calling 'git config' to prevent a repository in the current
# working directory from leaking configuration into the output.
self._config_cmd = ['git', '--git-dir=', 'config']
self._src_mode = source
run_kwargs = dict()
self._runner = None
if dataset is not None:
if hasattr(dataset, '_git_runner'):
self._runner = dataset._git_runner
elif dataset.repo:
self._runner = dataset.repo._git_runner
else:
# make sure we run the git config calls in the dataset
# to pick up the right config files
run_kwargs['cwd'] = dataset.path
if self._runner is None:
self._runner = GitWitlessRunner(**run_kwargs)
self.reload(force=True)
if not ConfigManager._checked_git_identity:
for cfg, envs in (
('user.name', ('GIT_AUTHOR_NAME', 'GIT_COMMITTER_NAME')),
('user.email', ('GIT_AUTHOR_EMAIL', 'GIT_COMMITTER_EMAIL'))):
if cfg not in self \
and not any(e in os.environ for e in envs):
lgr.warning(
"It is highly recommended to configure Git before using "
"DataLad. Set both 'user.name' and 'user.email' "
"configuration variables."
)
ConfigManager._checked_git_identity = True
def reload(self, force=False):
"""Reload all configuration items from the configured sources
If `force` is False, all files configuration was previously read from
are checked for differences in the modification times. If no difference
is found for any file no reload is performed. This mechanism will not
detect newly created global configuration files, use `force` in this case.
"""
run_args = ['-z', '-l', '--show-origin']
# update from desired config sources only
# 2-step strategy:
# - load datalad dataset config from dataset
# - load git config from all supported by git sources
# in doing so we always stay compatible with where Git gets its
# config from, but also allow to override persistent information
# from dataset locally or globally
# figure out what needs to be reloaded at all
to_run = {}
# committed dataset config
dataset_cfgfile = self._repo_pathobj / DATASET_CONFIG_FILE \
if self._repo_pathobj else None
if (self._src_mode != 'local' and
dataset_cfgfile and
dataset_cfgfile.exists()) and (
force or self._need_reload(self._stores['dataset'])):
to_run['dataset'] = run_args + ['--file', str(dataset_cfgfile)]
if self._src_mode != 'dataset' and (
force or self._need_reload(self._stores['git'])):
to_run['git'] = run_args + ['--local'] \
if self._src_mode == 'dataset-local' \
else run_args
        # reload everything that was found to do
while to_run:
store_id, runargs = to_run.popitem()
self._stores[store_id] = self._reload(runargs)
# always update the merged representation, even if we did not reload
# anything from a file. ENV or overrides could change independently
        # start with the committed dataset config
merged = self._stores['dataset']['cfg'].copy()
# local config always takes precedence
merged.update(self._stores['git']['cfg'])
# superimpose overrides
merged.update(self.overrides)
# override with environment variables, unless we only want to read the
        # dataset's committed config
if self._src_mode != 'dataset':
_update_from_env(merged)
self._merged_store = merged
def _need_reload(self, store):
storestats = store['stats']
if not storestats:
return True
# we have read files before
# check if any file we read from has changed
curstats = self._get_stats(store)
return any(curstats[f] != storestats[f] for f in store['files'])
def _reload(self, run_args):
# query git-config
stdout, stderr = self._run(
run_args,
protocol=StdOutErrCapture,
# always expect git-config to output utf-8
encoding='utf-8',
)
store = {}
store['cfg'], store['files'] = parse_gitconfig_dump(
stdout, cwd=self._runner.cwd)
# update stats of config files, they have just been discovered
# and should still exist
store['stats'] = self._get_stats(store)
return store
@staticmethod
def _get_stats(store):
stats = {}
for f in store['files']:
            if f.exists():
stat = f.stat()
stats[f] = _stat_result(stat.st_ino, stat.st_size, stat.st_ctime, stat.st_mtime)
else:
stats[f] = None
return stats
@_where_reload
def obtain(self, var, default=None, dialog_type=None, valtype=None,
store=False, where=None, reload=True, **kwargs):
"""
Convenience method to obtain settings interactively, if needed
A UI will be used to ask for user input in interactive sessions.
Questions to ask, and additional explanations can be passed directly
as arguments, or retrieved from a list of pre-configured items.
Additionally, this method allows for type conversion and storage
of obtained settings. Both aspects can also be pre-configured.
Parameters
----------
var : str
Variable name including any section like `git config` expects them,
e.g. 'core.editor'
default : any type
In interactive sessions and if `store` is True, this default value
will be presented to the user for confirmation (or modification).
In all other cases, this value will be silently assigned unless
there is an existing configuration setting.
dialog_type : {'question', 'yesno', None}
Which dialog type to use in interactive sessions. If `None`,
pre-configured UI options are used.
store : bool
Whether to store the obtained value (or default)
%s
`**kwargs`
Additional arguments for the UI function call, such as a question
`text`.
"""
        # do a local import, as this module is imported prominently and the
        # config definitions could theoretically import all kinds of weird
        # things for type conversion
from datalad.interface.common_cfg import definitions as cfg_defs
# fetch what we know about this variable
cdef = cfg_defs.get(var, {})
# type conversion setup
if valtype is None and 'type' in cdef:
valtype = cdef['type']
if valtype is None:
valtype = lambda x: x
# any default?
if default is None and 'default' in cdef:
default = cdef['default']
_value = None
if var in self:
# nothing needs to be obtained, it is all here already
_value = self[var]
elif store is False and default is not None:
# nothing will be stored, and we have a default -> no user confirmation
            # we cannot use logging, because we want to use the config to configure
# the logging
#lgr.debug('using default {} for config setting {}'.format(default, var))
_value = default
if _value is not None:
# we got everything we need and can exit early
try:
return valtype(_value)
except Exception as e:
raise ValueError(
"value '{}' of existing configuration for '{}' cannot be "
"converted to the desired type '{}' ({})".format(
_value, var, valtype, e)) from e
# now we need to try to obtain something from the user
from datalad.ui import ui
# configure UI
dialog_opts = kwargs
if dialog_type is None: # no override
# check for common knowledge on how to obtain a value
if 'ui' in cdef:
dialog_type = cdef['ui'][0]
# pull standard dialog settings
dialog_opts = cdef['ui'][1]
# update with input
dialog_opts.update(kwargs)
if (not ui.is_interactive or dialog_type is None) and default is None:
raise RuntimeError(
"cannot obtain value for configuration item '{}', "
"not preconfigured, no default, no UI available".format(var))
if not hasattr(ui, dialog_type):
raise ValueError("UI '{}' does not support dialog type '{}'".format(
ui, dialog_type))
# configure storage destination, if needed
if store:
if where is None and 'destination' in cdef:
where = cdef['destination']
if where is None:
raise ValueError(
"request to store configuration item '{}', but no "
"storage destination specified".format(var))
# obtain via UI
dialog = getattr(ui, dialog_type)
_value = dialog(default=default, **dialog_opts)
if _value is None:
# we got nothing
if default is None:
raise RuntimeError(
"could not obtain value for configuration item '{}', "
"not preconfigured, no default".format(var))
            # XXX maybe we should return default here, even if it was returned
# from the UI -- if that is even possible
# execute type conversion before storing to check that we got
# something that looks like what we want
try:
value = valtype(_value)
except Exception as e:
raise ValueError(
"cannot convert user input `{}` to desired type ({})".format(
_value, e)) from e
# XXX we could consider "looping" until we have a value of proper
# type in case of a user typo...
if store:
# store value as it was before any conversion, needs to be str
# anyway
# needs string conversion nevertheless, because default could come
# in as something else
self.add(var, '{}'.format(_value), where=where, reload=reload)
return value
def __repr__(self):
# give full list of all tracked config files, plus overrides
return "ConfigManager({}{})".format(
[str(p) for p in self._stores['dataset']['files'].union(
self._stores['git']['files'])],
', overrides={!r}'.format(self.overrides) if self.overrides else '',
)
def __str__(self):
# give path of dataset, if there is any, plus overrides
return "ConfigManager({}{})".format(
self._repo_pathobj if self._repo_pathobj else '',
'with overrides' if self.overrides else '',
)
#
# Compatibility with dict API
#
def __len__(self):
return len(self._merged_store)
def __getitem__(self, key):
return self._merged_store.__getitem__(key)
def __contains__(self, key):
return self._merged_store.__contains__(key)
def keys(self):
"""Returns list of configuration item names"""
return self._merged_store.keys()
# XXX should this be *args?
def get(self, key, default=None, get_all=False):
"""D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.
Parameters
----------
default : optional
Value to return when key is not present. `None` by default.
get_all : bool, optional
If True, return all values of multiple identical configuration keys.
By default only the last specified value is returned.
"""
try:
val = self[key]
if get_all or not isinstance(val, tuple):
return val
else:
return val[-1]
except KeyError:
# return as-is, default could be a tuple, hence do not subject to
# get_all processing
return default
def get_from_source(self, source, key, default=None):
"""Like get(), but a source can be specific.
If `source` is 'dataset', only the committed configuration is queried,
overrides are applied. In the case of 'local', the committed
configuration is ignored, but overrides and configuration from
environment variables are applied as usual.
"""
if source not in ('dataset', 'local'):
raise ValueError("source must be 'dataset' or 'local'")
if source == 'dataset':
return self.overrides.get(
key,
self._stores['dataset']['cfg'].get(
key,
default))
else:
if key not in self._stores['dataset']['cfg']:
# the key is not in the committed config, hence we can
# just report based on the merged representation
return self.get(key, default)
else:
# expensive case, rebuild a config without the committed
# dataset config contributing
env = {}
_update_from_env(env)
return env.get(
key,
self.overrides.get(
key,
                        self._stores['git']['cfg'].get(
key,
default)))
#
# Compatibility with ConfigParser API
#
def sections(self):
"""Returns a list of the sections available"""
return list(set([cfg_section_regex.match(k).group(1) for k in self._merged_store]))
def options(self, section):
"""Returns a list of options available in the specified section."""
opts = []
for k in self._merged_store:
sec, opt = cfg_sectionoption_regex.match(k).groups()
if sec == section:
opts.append(opt)
return opts
def has_section(self, section):
"""Indicates whether a section is present in the configuration"""
for k in self._merged_store:
if k.startswith(section):
return True
return False
def has_option(self, section, option):
"""If the given section exists, and contains the given option"""
for k in self._merged_store:
sec, opt = cfg_sectionoption_regex.match(k).groups()
if sec == section and opt == option:
return True
return False
def _get_type(self, typefn, section, option):
key = '.'.join([section, option])
# Mimic the handling of get_value(..., default=None), while still going
# through get() in order to get its default tuple handling.
if key not in self:
raise KeyError(key)
return typefn(self.get(key))
def getint(self, section, option):
"""A convenience method which coerces the option value to an integer"""
return self._get_type(int, section, option)
def getfloat(self, section, option):
"""A convenience method which coerces the option value to a float"""
return self._get_type(float, section, option)
def getbool(self, section, option, default=None):
"""A convenience method which coerces the option value to a bool
Values "on", "yes", "true" and any int!=0 are considered True
Values which evaluate to bool False, "off", "no", "false" are considered
False
TypeError is raised for other values.
"""
key = '.'.join([section, option])
# Mimic the handling of get_value(..., default=None), while still going
# through get() in order to get its default tuple handling.
if default is None and key not in self:
raise KeyError(key)
val = self.get(key, default=default)
if val is None: # no value at all, git treats it as True
return True
return anything2bool(val)
# this is a hybrid of ConfigParser and dict API
def items(self, section=None):
"""Return a list of (name, value) pairs for each option
Optionally limited to a given section.
"""
if section is None:
return self._merged_store.items()
return [(k, v) for k, v in self._merged_store.items()
if cfg_section_regex.match(k).group(1) == section]
#
# Compatibility with GitPython's ConfigParser
#
def get_value(self, section, option, default=None):
"""Like `get()`, but with an optional default value
If the default is not None, the given default value will be returned in
case the option did not exist. This behavior imitates GitPython's
config parser.
"""
try:
return self['.'.join((section, option))]
except KeyError as e:
# this strange dance is needed because gitpython does it this way
if default is not None:
return default
else:
raise e
#
# Modify configuration (proxy respective git-config call)
#
@_where_reload
def _run(self, args, where=None, reload=False, **kwargs):
"""Centralized helper to run "git config" calls
Parameters
----------
args : list
Arguments to pass for git config
%s
**kwargs
Keywords arguments for Runner's call
"""
if where:
args = self._get_location_args(where) + args
if '-l' in args:
# we are just reading, no need to reload, no need to lock
out = self._runner.run(self._config_cmd + args, **kwargs)
return out['stdout'], out['stderr']
# all other calls are modifications
if '--file' in args:
# all paths we are passing are absolute
custom_file = Path(args[args.index('--file') + 1])
custom_file.parent.mkdir(exist_ok=True)
lockfile = None
if self._repo_dot_git and ('--local' in args or '--file' in args):
# modification of config in a dataset
lockfile = self._repo_dot_git / 'config.dataladlock'
else:
# follow pattern in downloaders for lockfile location
lockfile = Path(self.obtain('datalad.locations.locks')) \
/ 'gitconfig.lck'
with ConfigManager._run_lock, InterProcessLock(lockfile, logger=lgr):
out = self._runner.run(self._config_cmd + args, **kwargs)
if reload:
self.reload()
return out['stdout'], out['stderr']
def _get_location_args(self, where, args=None):
if args is None:
args = []
cfg_labels = ('dataset', 'local', 'global', 'override')
if where not in cfg_labels:
raise ValueError(
"unknown configuration label '{}' (not in {})".format(
where, cfg_labels))
if where == 'dataset':
if not self._repo_pathobj:
raise ValueError(
'ConfigManager cannot store configuration to dataset, '
'none specified')
dataset_cfgfile = self._repo_pathobj / DATASET_CONFIG_FILE
args.extend(['--file', str(dataset_cfgfile)])
elif where == 'global':
args.append('--global')
elif where == 'local':
args.append('--local')
return args
@_where_reload
def add(self, var, value, where='dataset', reload=True):
"""Add a configuration variable and value
Parameters
----------
var : str
Variable name including any section like `git config` expects them, e.g.
'core.editor'
value : str
Variable value
%s"""
if where == 'override':
from datalad.utils import ensure_list
val = ensure_list(self.overrides.pop(var, None))
val.append(value)
self.overrides[var] = val[0] if len(val) == 1 else val
if reload:
self.reload(force=True)
return
self._run(['--add', var, value], where=where, reload=reload,
protocol=StdOutErrCapture)
@_where_reload
def set(self, var, value, where='dataset', reload=True, force=False):
"""Set a variable to a value.
        Unlike `add`, this replaces the value of `var` if there is
one already.
Parameters
----------
var : str
Variable name including any section like `git config` expects them, e.g.
'core.editor'
value : str
Variable value
force: bool
if set, replaces all occurrences of `var` by a single one with the
given `value`. Otherwise raise if multiple entries for `var` exist
already
%s"""
if where == 'override':
self.overrides[var] = value
if reload:
self.reload(force=True)
return
from datalad.support.gitrepo import to_options
self._run(to_options(replace_all=force) + [var, value],
where=where, reload=reload, protocol=StdOutErrCapture)
@_where_reload
def rename_section(self, old, new, where='dataset', reload=True):
"""Rename a configuration section
Parameters
----------
old : str
Name of the section to rename.
new : str
Name of the section to rename to.
%s"""
if where == 'override':
self.overrides = {
(new + k[len(old):]) if k.startswith(old + '.') else k: v
for k, v in self.overrides.items()
}
if reload:
self.reload(force=True)
return
self._run(['--rename-section', old, new], where=where, reload=reload)
@_where_reload
def remove_section(self, sec, where='dataset', reload=True):
"""Rename a configuration section
Parameters
----------
sec : str
Name of the section to remove.
%s"""
if where == 'override':
self.overrides = {
k: v
for k, v in self.overrides.items()
if not k.startswith(sec + '.')
}
if reload:
self.reload(force=True)
return
self._run(['--remove-section', sec], where=where, reload=reload)
@_where_reload
def unset(self, var, where='dataset', reload=True):
"""Remove all occurrences of a variable
Parameters
----------
var : str
Name of the variable to remove
%s"""
if where == 'override':
self.overrides.pop(var, None)
if reload:
self.reload(force=True)
return
# use unset all as it is simpler for now
self._run(['--unset-all', var], where=where, reload=reload)
def rewrite_url(cfg, url):
"""Any matching 'url.<base>.insteadOf' configuration is applied
Any URL that starts with such a configuration will be rewritten
to start, instead, with <base>. When more than one insteadOf
    string matches a given URL, the longest match is used.
Parameters
----------
cfg : ConfigManager or dict
dict-like with configuration variable name/value-pairs.
url : str
URL to be rewritten, if matching configuration is found.
Returns
-------
str
Rewritten or unmodified URL.
"""
insteadof = {
# only leave the base url
k[4:-10]: v
for k, v in cfg.items()
if k.startswith('url.') and k.endswith('.insteadof')
}
# all config that applies
matches = {
key: v
for key, val in insteadof.items()
for v in (val if isinstance(val, tuple) else (val,))
if url.startswith(v)
}
# find longest match, like Git does
if matches:
rewrite_base, match = sorted(
matches.items(),
key=lambda x: len(x[1]),
reverse=True,
)[0]
if sum(match == v for v in matches.values()) > 1:
lgr.warning(
"Ignoring URL rewrite configuration for '%s', "
"multiple conflicting definitions exists: %s",
match,
['url.{}.insteadof'.format(k)
for k, v in matches.items()
if v == match]
)
else:
url = '{}{}'.format(rewrite_base, url[len(match):])
return url
# for convenience, bind to class too
ConfigManager.rewrite_url = rewrite_url
#
# Helpers for bypassing git-config when _writing_ config items,
# mostly useful when a large number of changes needs to be made
# and directly file manipulation without a safety net is worth
# the risk for performance reasons.
#
def quote_config(v):
"""Helper to perform minimal quoting of config keys/value parts
Parameters
----------
v : str
To-be-quoted string
"""
white = (' ', '\t')
# backslashes need to be quoted in any case
v = v.replace('\\', '\\\\')
# must not have additional unquoted quotes
v = v.replace('"', '\\"')
if v[0] in white or v[-1] in white:
# quoting the value due to leading/trailing whitespace
v = '"{}"'.format(v)
return v
def write_config_section(fobj, suite, name, props):
"""Write a config section with (multiple) settings.
Parameters
----------
fobj : File
Opened target file
suite : str
First item of the section name, e.g. 'submodule', or
'datalad'
name : str
Remainder of the section name
props : dict
Keys are configuration setting names within the section
      context (i.e. not duplicating `suite` and/or `name`); values
are configuration setting values.
"""
fmt = '[{_suite_} {_q_}{_name_}{_q_}]\n'
for p in props:
fmt += '\t{p} = {{{p}}}\n'.format(p=p)
quoted_name = quote_config(name)
fobj.write(
fmt.format(
_suite_=suite,
_q_='' if quoted_name.startswith('"') else '"',
_name_=quoted_name,
**{k: quote_config(v) for k, v in props.items()}))
|
py | b411853af7f991a71fc5cf8d81f4632b412db5e7 | """Add modulemd column to ModuleBuild
Revision ID: 9c3fbfc5f9d2
Revises: dbd98058e57f
Create Date: 2019-03-15 19:42:59.987965+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9c3fbfc5f9d2'
down_revision = 'dbd98058e57f'
branch_labels = None
depends_on = None
def upgrade():
op.execute('DELETE from flatpak_update_builds')
op.execute('DELETE from flatpak_updates')
op.execute('DELETE from flatpak_build_package_builds')
op.execute('DELETE from flatpak_build_module_builds')
op.execute('DELETE from flatpak_builds')
op.execute('DELETE from module_build_package_builds')
op.execute('DELETE from module_builds')
op.execute('DELETE from update_cache_items WHERE content_type = "flatpak"')
op.execute('DELETE from build_cache_items')
with op.batch_alter_table('module_builds', recreate='always') as batch_op:
batch_op.add_column(sa.Column('modulemd', sa.String(), nullable=False))
def downgrade():
with op.batch_alter_table('module_builds', recreate='always') as batch_op:
batch_op.drop_column('modulemd')
|
py | b411858f917d1ec697f139403a4cdf4ae38d6b03 | # -*- coding: utf-8 -*-
import re
from ._compatibility.builtins import *
from ._compatibility import abc
from ._utils import regex_types
from .difference import BaseDifference
class MatcherBase(abc.ABC):
"""Base class for objects that implement rich predicate matching."""
@abc.abstractmethod
def __repr__(self):
return super(MatcherBase, self).__repr__()
class MatcherObject(MatcherBase):
"""Wrapper to call *function* when evaluating the '==' operator."""
def __init__(self, function, repr_string):
self._func = function
self._repr = repr_string
def __eq__(self, other):
return self._func(other)
def __ne__(self, other): # <- For Python 2.x compatibility.
return not self.__eq__(other)
def __repr__(self):
return self._repr
class MatcherTuple(MatcherBase, tuple):
"""Wrapper to mark tuples that contain one or more MatcherObject
instances.
"""
pass
def _check_type(type_, value):
"""Return true if *value* is an instance of the specified type
or if *value* is the specified type.
"""
return value is type_ or isinstance(value, type_)
def _check_callable(func, value):
"""Return true if func(value) returns is true or if *func* is
*value*.
"""
return value is func or func(value)
def _check_wildcard(value):
"""Always returns true."""
return True
def _check_truthy(value):
"""Return true if *value* is truthy."""
return bool(value)
def _check_falsy(value):
"""Return true if *value* is falsy."""
return not bool(value)
def _check_regex(regex, value):
"""Return true if *value* matches regex."""
try:
return regex.search(value) is not None
except TypeError:
if value is regex:
return True # <- EXIT!
value_repr = repr(value)
if len(value_repr) > 45:
value_repr = value_repr[:42] + '...'
msg = 'expected string or bytes-like object, got {0}: {1}'
exc = TypeError(msg.format(value.__class__.__name__, value_repr))
exc.__cause__ = None
raise exc
def _check_set(set_, value):
"""Return true if *value* is a member of the given set or if
the *value* is equal to the given set."""
return value in set_ or value == set_
def _get_matcher_parts(obj):
"""Return a 2-tuple containing a handler function (to check for
matches) and a string (to use for displaying a user-readable
value). Return None if *obj* can be matched with the "==" operator
and requires no other special handling.
"""
if isinstance(obj, type):
pred_handler = lambda x: _check_type(obj, x)
repr_string = getattr(obj, '__name__', repr(obj))
elif callable(obj):
pred_handler = lambda x: _check_callable(obj, x)
repr_string = getattr(obj, '__name__', repr(obj))
elif obj is Ellipsis:
pred_handler = _check_wildcard # <- Matches everything.
repr_string = '...'
elif obj is True:
pred_handler = _check_truthy
repr_string = 'True'
elif obj is False:
pred_handler = _check_falsy
repr_string = 'False'
elif isinstance(obj, regex_types):
pred_handler = lambda x: _check_regex(obj, x)
repr_string = 're.compile({0!r})'.format(obj.pattern)
elif isinstance(obj, set):
pred_handler = lambda x: _check_set(obj, x)
repr_string = repr(obj)
else:
return None
return pred_handler, repr_string
def _get_matcher_or_original(obj):
parts = _get_matcher_parts(obj)
if parts:
return MatcherObject(*parts)
return obj
def get_matcher(obj):
"""Return an object suitable for comparing against other objects
using the "==" operator.
If special comparison handling is implemented, a MatcherObject or
MatcherTuple will be returned. If the object is already suitable
for this purpose, the original object will be returned unchanged.
"""
if isinstance(obj, MatcherBase):
return obj # <- EXIT!
if isinstance(obj, Predicate):
return obj.matcher # <- EXIT!
if isinstance(obj, tuple):
matcher = tuple(_get_matcher_or_original(x) for x in obj)
for x in matcher:
if isinstance(x, MatcherBase):
return MatcherTuple(matcher) # <- Wrapper.
        return obj  # <- Original reference.
return _get_matcher_or_original(obj)
class Predicate(object):
"""A Predicate is used like a function of one argument that
returns ``True`` when applied to a matching value and ``False``
when applied to a non-matching value. The criteria for matching
is determined by the *obj* type used to define the predicate:
+-------------------------+-----------------------------------+
| *obj* type | matches when |
+=========================+===================================+
| set | value is a member of the set |
+-------------------------+-----------------------------------+
| function | the result of ``function(value)`` |
| | tests as True |
+-------------------------+-----------------------------------+
| type | value is an instance of the type |
+-------------------------+-----------------------------------+
| ``re.compile(pattern)`` | value matches the regular |
| | expression pattern |
+-------------------------+-----------------------------------+
| str or non-container | value is equal to the object |
+-------------------------+-----------------------------------+
| tuple of predicates | tuple of values satisfies |
| | corresponding tuple of |
| | predicates---each according |
| | to their type |
+-------------------------+-----------------------------------+
| ``True`` | ``bool(value)`` returns True |
| | (value is truthy) |
+-------------------------+-----------------------------------+
| ``False`` | ``bool(value)`` returns False |
| | (value is falsy) |
+-------------------------+-----------------------------------+
| ``...`` (Ellipsis | (used as a wildcard, matches |
| literal) | any value) |
+-------------------------+-----------------------------------+
Example matches:
+---------------------------+----------------+---------+
| *obj* example | value | matches |
+===========================+================+=========+
| .. code-block:: python | ``'A'`` | Yes |
| +----------------+---------+
| {'A', 'B'} | ``'C'`` | No |
+---------------------------+----------------+---------+
| .. code-block:: python | ``4`` | Yes |
| +----------------+---------+
| def iseven(x): | ``9`` | No |
| return x % 2 == 0 | | |
+---------------------------+----------------+---------+
| .. code-block:: python | ``1.0`` | Yes |
| +----------------+---------+
| float | ``1`` | No |
+---------------------------+----------------+---------+
| .. code-block:: python | ``'bake'`` | Yes |
| +----------------+---------+
| re.compile('[bc]ake') | ``'cake'`` | Yes |
| +----------------+---------+
| | ``'fake'`` | No |
+---------------------------+----------------+---------+
| .. code-block:: python | ``'foo'`` | Yes |
| +----------------+---------+
| 'foo' | ``'bar'`` | No |
+---------------------------+----------------+---------+
| .. code-block:: python | ``'x'`` | Yes |
| +----------------+---------+
| True | ``''`` | No |
+---------------------------+----------------+---------+
| .. code-block:: python | ``''`` | Yes |
| +----------------+---------+
| False | ``'x'`` | No |
+---------------------------+----------------+---------+
| .. code-block:: python | ``('A', 'X')`` | Yes |
| +----------------+---------+
| ('A', ...) | ``('A', 'Y')`` | Yes |
| +----------------+---------+
| Uses ellipsis wildcard. | ``('B', 'X')`` | No |
+---------------------------+----------------+---------+
Example code::
>>> pred = Predicate({'A', 'B'})
>>> pred('A')
True
>>> pred('C')
False
Predicate matching behavior can also be inverted with the inversion
operator (``~``). Inverted Predicates return ``False`` when applied
to a matching value and ``True`` when applied to a non-matching
value::
>>> pred = ~Predicate({'A', 'B'})
>>> pred('A')
False
>>> pred('C')
True
"""
def __init__(self, obj):
if isinstance(obj, Predicate):
self.obj = obj.obj
self.matcher = obj.matcher
self._inverted = obj._inverted
else:
self.obj = obj
self.matcher = get_matcher(obj)
self._inverted = False
def __call__(self, other):
if self._inverted:
return not self.matcher == other # <- Do not change to "!=".
return self.matcher == other
def __invert__(self):
new_pred = self.__class__(self)
new_pred._inverted = not self._inverted
return new_pred
def __repr__(self):
cls_name = self.__class__.__name__
inverted = '~' if self._inverted else ''
return '{0}{1}({2})'.format(inverted, cls_name, repr(self.matcher))
def __str__(self):
inverted = 'not ' if self._inverted else ''
return '{0}{1}'.format(inverted, repr(self.matcher))
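# Illustrative example (not part of the original module): tuple predicates are
# evaluated element-wise, with the Ellipsis literal acting as a wildcard, as
# described in the table above:
#
#     >>> pred = Predicate(('A', ...))
#     >>> pred(('A', 'X'))
#     True
#     >>> pred(('B', 'X'))
#     False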
|
py | b41186ae0c1a9678ad9275bb200ba40582beb9d0 | # Copyright 2016 IBM, Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
etcd models
"""
import etcd
from oslo_serialization import jsonutils as json
from zun.common import exception
import zun.db.etcd as db
from zun import objects
class Base(object):
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def get(self, key):
return getattr(self, key)
def etcd_path(self, sub_path):
return self.path + '/' + sub_path
def as_dict(self):
d = {}
for f in self._fields:
d[f] = getattr(self, f, None)
return d
def path_already_exist(self, client, path):
try:
client.read(path)
except etcd.EtcdKeyNotFound:
return False
return True
def update(self, values):
"""Make the model object behave like a dict."""
for k, v in values.items():
setattr(self, k, v)
def save(self, session=None):
if session is None:
session = db.api.get_backend()
client = session.client
path = self.etcd_path(self.uuid)
if self.path_already_exist(client, path):
raise exception.ResourceExists(name=getattr(self, '__class__'))
client.write(path, json.dump_as_bytes(self.as_dict()))
return
def items(self):
"""Make the model object behave like a dict."""
return self.as_dict().items()
def iteritems(self):
"""Make the model object behave like a dict."""
return self.as_dict().items()
def keys(self):
"""Make the model object behave like a dict."""
return [key for key, value in self.iteritems()]
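# Illustrative note (not part of the original module): the dict-style helpers
# above mean any model can be treated like a mapping, e.g.
# ``dict(model.items())`` reproduces the same payload that ``save()``
# serializes via ``json.dump_as_bytes(self.as_dict())``.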
class ZunService(Base):
"""Represents health status of various zun services"""
_path = '/zun_services'
_fields = objects.ZunService.fields.keys()
def __init__(self, service_data):
self.path = ZunService.path()
for f in ZunService.fields():
setattr(self, f, None)
self.id = 1
self.disabled = False
self.forced_down = False
self.report_count = 0
self.update(service_data)
@classmethod
def path(cls):
return cls._path
@classmethod
def fields(cls):
return cls._fields
def save(self, session=None):
if session is None:
session = db.api.get_backend()
client = session.client
path = self.etcd_path(self.host + '_' + self.binary)
if self.path_already_exist(client, path):
raise exception.ZunServiceAlreadyExists(host=self.host,
binary=self.binary)
client.write(path, json.dump_as_bytes(self.as_dict()))
return
class Container(Base):
"""Represents a container."""
_path = '/containers'
_fields = objects.Container.fields.keys()
def __init__(self, container_data):
self.path = Container.path()
for f in Container.fields():
setattr(self, f, None)
self.id = 1
self.disk = 0
self.auto_remove = False
self.interactive = False
self.auto_heal = False
self.privileged = False
self.update(container_data)
@classmethod
def path(cls):
return cls._path
@classmethod
def fields(cls):
return cls._fields
class Image(Base):
"""Represents a container image."""
_path = '/images'
_fields = objects.Image.fields.keys()
def __init__(self, image_data):
self.path = Image.path()
for f in Image.fields():
setattr(self, f, None)
self.id = 1
self.update(image_data)
@classmethod
def path(cls):
return cls._path
@classmethod
def fields(cls):
return cls._fields
class ResourceClass(Base):
"""Represents a resource class."""
_path = '/resource_classes'
_fields = objects.ResourceClass.fields.keys()
def __init__(self, resource_class_data):
self.path = ResourceClass.path()
for f in ResourceClass.fields():
setattr(self, f, None)
self.id = 1
self.update(resource_class_data)
@classmethod
def path(cls):
return cls._path
@classmethod
def fields(cls):
return cls._fields
class Capsule(Base):
"""Represents a capsule."""
_path = '/capsules'
_fields = objects.Capsule.fields.keys()
def __init__(self, capsule_data):
self.path = Capsule.path()
for f in Capsule.fields():
setattr(self, f, None)
self.id = 1
self.update(capsule_data)
@classmethod
def path(cls):
return cls._path
@classmethod
def fields(cls):
return cls._fields
class ComputeNode(Base):
"""Represents a compute node. """
_path = '/compute_nodes'
# NOTE(kiennt): Use list(fields) instead of fields.keys()
# because in Python 3, the dict.keys() method
# returns a dictionary view object, which acts
# as a set. To do the replacement, _fields should
# be a list.
_fields = list(objects.ComputeNode.fields)
def __init__(self, compute_node_data):
self.path = ComputeNode.path()
for f in ComputeNode.fields():
setattr(self, f, None)
self.cpus = 0
self.cpu_used = 0
self.mem_used = 0
self.mem_total = 0
self.mem_free = 0
self.mem_available = 0
self.total_containers = 0
self.stopped_containers = 0
self.paused_containers = 0
self.running_containers = 0
self.disk_used = 0
self.disk_total = 0
self.disk_quota_supported = False
self.update(compute_node_data)
@classmethod
def path(cls):
return cls._path
@classmethod
def fields(cls):
# NOTE(kiennt): The pci_device_pools field in object maps to the
# pci_stats field in the database. Therefore, need
# replace these fields.
for index, value in enumerate(cls._fields):
if value == 'pci_device_pools':
cls._fields.pop(index)
cls._fields.insert(index, 'pci_stats')
break
return cls._fields
def save(self, session=None):
if session is None:
session = db.api.get_backend()
client = session.client
path = self.etcd_path(self.uuid)
if self.path_already_exist(client, path):
raise exception.ComputeNodeAlreadyExists(
field='UUID', value=self.uuid)
client.write(path, json.dump_as_bytes(self.as_dict()))
return
class PciDevice(Base):
"""Represents a PciDevice. """
_path = '/pcidevices'
_fields = objects.PciDevice.fields.keys()
def __init__(self, pci_data):
self.path = PciDevice.path()
for f in PciDevice.fields():
setattr(self, f, None)
self.id = 1
self.numa_node = 0
self.update(pci_data)
@classmethod
def path(cls):
return cls._path
@classmethod
def fields(cls):
return cls._fields
class VolumeMapping(Base):
"""Represents a VolumeMapping."""
_path = '/volume_mapping'
_fields = objects.VolumeMapping.fields.keys()
def __init__(self, volume_mapping_data):
self.path = VolumeMapping.path()
for f in VolumeMapping.fields():
setattr(self, f, None)
self.id = 1
self.auto_remove = False
self.update(volume_mapping_data)
@classmethod
def path(cls):
return cls._path
@classmethod
def fields(cls):
return cls._fields
class ContainerAction(Base):
"""Represents a container action.
    The intention is that there will only be one of these per user request. A
    lookup by (container_uuid, request_id) should always return a single result.
"""
_path = '/container_actions'
_fields = list(objects.ContainerAction.fields) + ['uuid']
def __init__(self, action_data):
self.path = ContainerAction.path(action_data['container_uuid'])
for f in ContainerAction.fields():
setattr(self, f, None)
self.id = 1
self.update(action_data)
@classmethod
def path(cls, container_uuid):
return cls._path + '/' + container_uuid
@classmethod
def fields(cls):
return cls._fields
class ContainerActionEvent(Base):
"""Track events that occur during an ContainerAction."""
_path = '/container_actions_events'
_fields = list(objects.ContainerActionEvent.fields) + ['action_uuid',
'uuid']
def __init__(self, event_data):
self.path = ContainerActionEvent.path(event_data['action_uuid'])
for f in ContainerActionEvent.fields():
setattr(self, f, None)
self.id = 1
self.action_id = 0
self.update(event_data)
@classmethod
def path(cls, action_uuid):
return cls._path + '/' + action_uuid
@classmethod
def fields(cls):
return cls._fields
class Quota(Base):
"""Represents a Quota."""
_path = '/quotas'
_fields = list(objects.Quota.fields) + ['uuid']
def __init__(self, quota_data):
self.path = Quota.path(project_id=quota_data.get('class_name'),
resource=quota_data.get('resource'))
for f in Quota.fields():
setattr(self, f, None)
self.id = 1
self.update(quota_data)
@classmethod
def path(cls, project_id, resource=None):
if resource is not None:
path = '{}/{}/{}' . format(cls._path, project_id, resource)
else:
path = '{}/{}' . format(cls._path, project_id)
return path
@classmethod
def fields(cls):
return cls._fields
class QuotaClass(Base):
"""Represents a QuotaClass."""
_path = '/quota_classes'
_fields = list(objects.QuotaClass.fields) + ['uuid']
def __init__(self, quota_class_data):
self.path = QuotaClass.path(
class_name=quota_class_data.get('class_name'),
resource=quota_class_data.get('resource'))
for f in QuotaClass.fields():
setattr(self, f, None)
self.id = 1
self.update(quota_class_data)
@classmethod
def path(cls, class_name, resource=None):
if resource is not None:
path = '{}/{}/{}' . format(cls._path, class_name, resource)
else:
path = '{}/{}' . format(cls._path, class_name)
return path
@classmethod
def fields(cls):
return cls._fields
class QuotaUsage(Base):
"""Represents the current usage for a given resource."""
_path = '/quota_usages'
_fields = ['id', 'project_id', 'resource', 'in_use', 'reserved']
def __init__(self, quota_usage_data):
self.path = QuotaUsage.path(
project_id=quota_usage_data['project_id'],
resource=quota_usage_data['resource'])
for f in QuotaUsage.fields():
setattr(self, f, None)
self.id = 1
self.update(quota_usage_data)
@classmethod
def path(cls, project_id, resource):
return '{}/{}/{}' . format(cls._path, project_id, resource)
@classmethod
def fields(cls):
return cls._fields
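# Illustrative example (not part of the original module): the path()
# classmethods simply join the class prefix with the identifying keys, e.g.
#
#     QuotaUsage.path('proj-123', 'containers')  # -> '/quota_usages/proj-123/containers'
#     Quota.path('proj-123')                     # -> '/quotas/proj-123'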
|
py | b41186cab3d777d1b3f6deea4852d5203576ebc3 | #!/usr/bin/env python3
import os
import hashlib
import struct
import pyaudio
from picovoice import Picovoice
import pigpio
import requests
import asyncio
from kasa import SmartBulb
import time
gpio = pigpio.pi()
gpio.set_mode(27, pigpio.OUTPUT)
gpio.set_mode(17, pigpio.OUTPUT)
gpio.set_mode(22, pigpio.OUTPUT)
lights = SmartBulb('192.168.0.18')
colours = {
'black': (0, 0, 0),
'blue': (0, 0, 10),
'green': (0, 10, 0),
'orange': (10, 5, 0),
'pink': (10, 2, 2),
'purple': (10, 0, 10),
'red': (10, 0, 0),
'white': (10, 10, 10),
'yellow': (10, 10, 0),
'warm': (10, 5, 2),
'cold': (8, 8, 10)
}
ledsColour = 'black'
def setColor(red, green, blue):
gpio.set_PWM_dutycycle(27, red)
gpio.set_PWM_dutycycle(17, green)
gpio.set_PWM_dutycycle(22, blue)
keyword_path = 'computer_raspberry-pi.ppn'
context_path = 'Sanctum_raspberry-pi_2021-02-15-utc_v1_6_0.rhn'
def wake_word_callback():
print('Hotword Detected')
setColor(*colours['blue'])
play('computerbeep_10.mp3')
#say('Yes you sef')
def inference_callback(inference):
global colours
global ledsColour
if not inference.is_understood:
say("Sorry, I didn't understand that.")
elif inference.intent == 'tellJoke':
joke()
elif inference.intent == 'lightsDim':
print(inference)
elif inference.intent == 'lightsMax':
print(inference)
elif inference.intent == 'lightsBrightness':
print(inference)
elif inference.intent == 'lightsColor':
if (not ('which' in inference.slots)) or inference.slots['which'] == 'window':
ledsColour = inference.slots['color']
elif inference.intent == 'lightsState':
if (not ('which' in inference.slots)) or inference.slots['which'] == 'main':
if inference.slots['state'] == 'on':
asyncio.run(lights.turn_on())
else:
asyncio.run(lights.turn_off())
if (not ('which' in inference.slots)) or inference.slots['which'] == 'window':
if inference.slots['state'] == 'on':
ledsColour = 'warm'
else:
ledsColour = 'black'
elif inference.intent == 'redAlert':
setColor(*colours['red'])
        play('tos_red_alert_3.mp3')
elif inference.intent == 'lightFold':
print(inference)
elif inference.intent == 'sayTime':
say(time.strftime('%H:%M'))
print(inference)
setColor(*colours[ledsColour])
def play(sound):
os.system('play sfx/' + sound)
def say(text):
hash = hashlib.md5(text.encode()).hexdigest()
file = 'speech-cache/{}.wav'.format(hash)
cmd = 'play {}'.format(file)
if not os.path.isfile(file):
cmd = 'pico2wave -w {} "{}" && {}'.format(file, text, cmd)
os.system(cmd)
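# Note (illustrative, not part of the original script): say() caches each
# synthesized phrase as speech-cache/<md5-of-text>.wav, so pico2wave only runs
# the first time a phrase is spoken; later calls just replay the cached WAV
# with `play`.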
def joke():
j = requests.get('https://v2.jokeapi.dev/joke/Any?format=txt').text
print(j)
say(j)
handle = Picovoice(
keyword_path=keyword_path,
wake_word_callback=wake_word_callback,
context_path=context_path,
inference_callback=inference_callback)
pa = pyaudio.PyAudio()
audio_stream = pa.open(
rate=16000,
channels=1,
format=pyaudio.paInt16,
input=True,
frames_per_buffer=512,
input_device_index=6
)
while True:
pcm = audio_stream.read(512, exception_on_overflow = False)
pcm = struct.unpack_from("h" * 512, pcm)
handle.process(pcm)
#finally:
# if porcupine is not None:
# porcupine.delete()
#
# if audio_stream is not None:
# audio_stream.close()
#
# if pa is not None:
# pa.terminate()
|
py | b41187d9614b8429ca2eafc1f927883c1da809da | # Copyright 2014 Baidu, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
"""
This module provides authentication functions for bce services.
"""
from __future__ import absolute_import
from builtins import str
from builtins import bytes
import hashlib
import hmac
import logging
from bsamcli.lib.baidubce.http import http_headers
from bsamcli.lib.baidubce import utils
from bsamcli.lib.baidubce import compat
_logger = logging.getLogger(__name__)
def _get_canonical_headers(headers, headers_to_sign=None):
headers = headers or {}
if headers_to_sign is None or len(headers_to_sign) == 0:
headers_to_sign = set([b"host",
b"content-md5",
b"content-length",
b"content-type"])
result = []
for k in headers:
k_lower = k.strip().lower()
value = utils.convert_to_standard_string(headers[k]).strip()
if k_lower.startswith(http_headers.BCE_PREFIX) \
or k_lower in headers_to_sign:
str_tmp = b"%s:%s" % (utils.normalize_string(k_lower), utils.normalize_string(value))
result.append(str_tmp)
result.sort()
return (b'\n').join(result)
def sign(credentials, http_method, path, headers, params,
timestamp=0, expiration_in_seconds=1800, headers_to_sign=None):
"""
Create the authorization
"""
_logger.debug('Sign params: %s %s %s %s %d %d %s' % (
http_method, path, headers, params, timestamp, expiration_in_seconds, headers_to_sign))
headers = headers or {}
params = params or {}
sign_key_info = b'bce-auth-v1/%s/%s/%d' % (
credentials.access_key_id,
utils.get_canonical_time(timestamp),
expiration_in_seconds)
sign_key = hmac.new(
credentials.secret_access_key,
sign_key_info,
hashlib.sha256).hexdigest()
canonical_uri = path
canonical_querystring = utils.get_canonical_querystring(params, True)
canonical_headers = _get_canonical_headers(headers, headers_to_sign)
string_to_sign = (b'\n').join([
http_method, canonical_uri,
canonical_querystring, canonical_headers
])
sign_result = hmac.new(compat.convert_to_bytes(sign_key), string_to_sign, hashlib.sha256).hexdigest()
# convert to bytes
sign_result = compat.convert_to_bytes(sign_result)
if headers_to_sign:
result = b'%s/%s/%s' % (sign_key_info, (b';').join(headers_to_sign), sign_result)
else:
result = b'%s//%s' % (sign_key_info, sign_result)
_logger.debug('sign_key=[%s] sign_string=[%d bytes][ %s ]' %
(sign_key, len(string_to_sign), string_to_sign))
_logger.debug('result=%s' % result)
return result
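# Illustrative usage sketch (not part of the original module; the credentials
# argument is any object exposing byte-string ``access_key_id`` and
# ``secret_access_key`` attributes, which is what the real bsamcli credentials
# class provides):
#
#     authorization = sign(credentials,
#                          b'GET', b'/v1/functions',
#                          headers={b'host': b'cfc.bj.baidubce.com'},
#                          params={},
#                          timestamp=1500000000)
#
# The result is a byte string of the form
# b'bce-auth-v1/<access-key>/<utc-time>/<expiration>//<hex hmac-sha256 digest>'.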
|
py | b4118814d2b664544658223e673d36815c92fe03 | import functools
import sys
import math
import numpy.core.numeric as _nx
from numpy.core.numeric import (
asarray, ScalarType, array, alltrue, cumprod, arange, ndim
)
from numpy.core.numerictypes import find_common_type, issubdtype
import numpy.matrixlib as matrixlib
from .function_base import diff
from numpy.core.multiarray import ravel_multi_index, unravel_index
from numpy.core.overrides import set_module
from numpy.core import overrides, linspace
from numpy.lib.stride_tricks import as_strided
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
__all__ = [
'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',
's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',
'diag_indices', 'diag_indices_from'
]
def _ix__dispatcher(*args):
return args
@array_function_dispatch(_ix__dispatcher)
def ix_(*args):
"""
Construct an open mesh from multiple sequences.
This function takes N 1-D sequences and returns N outputs with N
dimensions each, such that the shape is 1 in all but one dimension
and the dimension with the non-unit shape value cycles through all
N dimensions.
Using `ix_` one can quickly construct index arrays that will index
the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
Parameters
----------
args : 1-D sequences
Each sequence should be of integer or boolean type.
Boolean sequences will be interpreted as boolean masks for the
corresponding dimension (equivalent to passing in
``np.nonzero(boolean_sequence)``).
Returns
-------
out : tuple of ndarrays
N arrays with N dimensions each, with N the number of input
sequences. Together these arrays form an open mesh.
See Also
--------
ogrid, mgrid, meshgrid
Examples
--------
>>> a = np.arange(10).reshape(2, 5)
>>> a
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> ixgrid = np.ix_([0, 1], [2, 4])
>>> ixgrid
(array([[0],
[1]]), array([[2, 4]]))
>>> ixgrid[0].shape, ixgrid[1].shape
((2, 1), (1, 2))
>>> a[ixgrid]
array([[2, 4],
[7, 9]])
>>> ixgrid = np.ix_([True, True], [2, 4])
>>> a[ixgrid]
array([[2, 4],
[7, 9]])
>>> ixgrid = np.ix_([True, True], [False, False, True, False, True])
>>> a[ixgrid]
array([[2, 4],
[7, 9]])
"""
out = []
nd = len(args)
for k, new in enumerate(args):
if not isinstance(new, _nx.ndarray):
new = asarray(new)
if new.size == 0:
# Explicitly type empty arrays to avoid float default
new = new.astype(_nx.intp)
if new.ndim != 1:
raise ValueError("Cross index must be 1 dimensional")
if issubdtype(new.dtype, _nx.bool_):
new, = new.nonzero()
new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1))
out.append(new)
return tuple(out)
class nd_grid:
"""
Construct a multi-dimensional "meshgrid".
``grid = nd_grid()`` creates an instance which will return a mesh-grid
when indexed. The dimension and number of the output arrays are equal
to the number of indexing dimensions. If the step length is not a
complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then the
integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
If instantiated with an argument of ``sparse=True``, the mesh-grid is
open (or not fleshed out) so that only one-dimension of each returned
argument is greater than 1.
Parameters
----------
sparse : bool, optional
Whether the grid is sparse or not. Default is False.
Notes
-----
Two instances of `nd_grid` are made available in the NumPy namespace,
`mgrid` and `ogrid`, approximately defined as::
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
Users should use these pre-defined instances instead of using `nd_grid`
directly.
"""
def __init__(self, sparse=False):
self.sparse = sparse
def __getitem__(self, key):
try:
size = []
typ = int
for k in range(len(key)):
step = key[k].step
start = key[k].start
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
size.append(int(abs(step)))
typ = float
else:
size.append(
int(math.ceil((key[k].stop - start)/(step*1.0))))
if (isinstance(step, float) or
isinstance(start, float) or
isinstance(key[k].stop, float)):
typ = float
if self.sparse:
nn = [_nx.arange(_x, dtype=_t)
for _x, _t in zip(size, (typ,)*len(size))]
else:
nn = _nx.indices(size, typ)
for k in range(len(size)):
step = key[k].step
start = key[k].start
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
step = int(abs(step))
if step != 1:
step = (key[k].stop - start)/float(step-1)
nn[k] = (nn[k]*step+start)
if self.sparse:
slobj = [_nx.newaxis]*len(size)
for k in range(len(size)):
slobj[k] = slice(None, None)
nn[k] = nn[k][tuple(slobj)]
slobj[k] = _nx.newaxis
return nn
except (IndexError, TypeError):
step = key.step
stop = key.stop
start = key.start
if start is None:
start = 0
if isinstance(step, complex):
step = abs(step)
length = int(step)
if step != 1:
step = (key.stop-start)/float(step-1)
stop = key.stop + step
return _nx.arange(0, length, 1, float)*step + start
else:
return _nx.arange(start, stop, step)
class MGridClass(nd_grid):
"""
`nd_grid` instance which returns a dense multi-dimensional "meshgrid".
    An instance of `numpy.lib.index_tricks.nd_grid` which returns a dense
(or fleshed out) mesh-grid when indexed, so that each returned argument
has the same shape. The dimensions and number of the output arrays are
equal to the number of indexing dimensions. If the step length is not a
complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
Returns
    -------
mesh-grid `ndarrays` all of the same dimensions
See Also
--------
numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
ogrid : like mgrid but returns open (not fleshed out) mesh grids
r_ : array concatenator
Examples
--------
>>> np.mgrid[0:5,0:5]
array([[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]],
[[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]])
>>> np.mgrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
"""
def __init__(self):
super(MGridClass, self).__init__(sparse=False)
mgrid = MGridClass()
class OGridClass(nd_grid):
"""
`nd_grid` instance which returns an open multi-dimensional "meshgrid".
An instance of `numpy.lib.index_tricks.nd_grid` which returns an open
(i.e. not fleshed out) mesh-grid when indexed, so that only one dimension
of each returned array is greater than 1. The dimension and number of the
output arrays are equal to the number of indexing dimensions. If the step
length is not a complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
Returns
-------
mesh-grid
`ndarrays` with only one dimension not equal to 1
See Also
--------
np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
r_ : array concatenator
Examples
--------
>>> from numpy import ogrid
>>> ogrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
>>> ogrid[0:5,0:5]
[array([[0],
[1],
[2],
[3],
[4]]), array([[0, 1, 2, 3, 4]])]
"""
def __init__(self):
super(OGridClass, self).__init__(sparse=True)
ogrid = OGridClass()
class AxisConcatenator:
"""
Translates slice objects to concatenation along an axis.
For detailed documentation on usage, see `r_`.
"""
# allow ma.mr_ to override this
concatenate = staticmethod(_nx.concatenate)
makemat = staticmethod(matrixlib.matrix)
def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
self.axis = axis
self.matrix = matrix
self.trans1d = trans1d
self.ndmin = ndmin
def __getitem__(self, key):
# handle matrix builder syntax
if isinstance(key, str):
frame = sys._getframe().f_back
mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals)
return mymat
if not isinstance(key, tuple):
key = (key,)
# copy attributes, since they can be overridden in the first argument
trans1d = self.trans1d
ndmin = self.ndmin
matrix = self.matrix
axis = self.axis
objs = []
scalars = []
arraytypes = []
scalartypes = []
for k, item in enumerate(key):
scalar = False
if isinstance(item, slice):
step = item.step
start = item.start
stop = item.stop
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
size = int(abs(step))
newobj = linspace(start, stop, num=size)
else:
newobj = _nx.arange(start, stop, step)
if ndmin > 1:
newobj = array(newobj, copy=False, ndmin=ndmin)
if trans1d != -1:
newobj = newobj.swapaxes(-1, trans1d)
elif isinstance(item, str):
if k != 0:
raise ValueError("special directives must be the "
"first entry.")
if item in ('r', 'c'):
matrix = True
col = (item == 'c')
continue
if ',' in item:
vec = item.split(',')
try:
axis, ndmin = [int(x) for x in vec[:2]]
if len(vec) == 3:
trans1d = int(vec[2])
continue
except Exception:
raise ValueError("unknown special directive")
try:
axis = int(item)
continue
except (ValueError, TypeError):
raise ValueError("unknown special directive")
elif type(item) in ScalarType:
newobj = array(item, ndmin=ndmin)
scalars.append(len(objs))
scalar = True
scalartypes.append(newobj.dtype)
else:
item_ndim = ndim(item)
newobj = array(item, copy=False, subok=True, ndmin=ndmin)
if trans1d != -1 and item_ndim < ndmin:
k2 = ndmin - item_ndim
k1 = trans1d
if k1 < 0:
k1 += k2 + 1
defaxes = list(range(ndmin))
axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2]
newobj = newobj.transpose(axes)
objs.append(newobj)
if not scalar and isinstance(newobj, _nx.ndarray):
arraytypes.append(newobj.dtype)
# Ensure that scalars won't up-cast unless warranted
final_dtype = find_common_type(arraytypes, scalartypes)
if final_dtype is not None:
for k in scalars:
objs[k] = objs[k].astype(final_dtype)
res = self.concatenate(tuple(objs), axis=axis)
if matrix:
oldndim = res.ndim
res = self.makemat(res)
if oldndim == 1 and col:
res = res.T
return res
def __len__(self):
return 0
# separate classes are used here instead of just making r_ = concatenator(0),
# etc. because otherwise we couldn't get the doc string to come out right
# in help(r_)
class RClass(AxisConcatenator):
"""
Translates slice objects to concatenation along the first axis.
This is a simple way to build up arrays quickly. There are two use cases.
1. If the index expression contains comma separated arrays, then stack
them along their first axis.
2. If the index expression contains slice notation or scalars then create
a 1-D array with a range indicated by the slice notation.
If slice notation is used, the syntax ``start:stop:step`` is equivalent
to ``np.arange(start, stop, step)`` inside of the brackets. However, if
``step`` is an imaginary number (i.e. 100j) then its integer portion is
interpreted as a number-of-points desired and the start and stop are
inclusive. In other words ``start:stop:stepj`` is interpreted as
``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets.
After expansion of slice notation, all comma separated sequences are
concatenated together.
Optional character strings placed as the first element of the index
expression can be used to change the output. The strings 'r' or 'c' result
in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row)
matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1
(column) matrix is produced. If the result is 2-D then both provide the
same matrix result.
A string integer specifies which axis to stack multiple comma separated
arrays along. A string of two comma-separated integers allows indication
of the minimum number of dimensions to force each entry into as the
second integer (the axis to concatenate along is still the first integer).
A string with three comma-separated integers allows specification of the
axis to concatenate along, the minimum number of dimensions to force the
entries to, and which axis should contain the start of the arrays which
are less than the specified number of dimensions. In other words the third
integer allows you to specify where the 1's should be placed in the shape
of the arrays that have their shapes upgraded. By default, they are placed
in the front of the shape tuple. The third argument allows you to specify
where the start of the array should be instead. Thus, a third argument of
'0' would place the 1's at the end of the array shape. Negative integers
specify where in the new shape tuple the last dimension of upgraded arrays
should be placed, so the default is '-1'.
Parameters
----------
Not a function, so takes no parameters
Returns
-------
A concatenated ndarray or matrix.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
c_ : Translates slice objects to concatenation along the second axis.
Examples
--------
>>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
array([1, 2, 3, ..., 4, 5, 6])
>>> np.r_[-1:1:6j, [0]*3, 5, 6]
array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ])
String integers specify the axis to concatenate along or the minimum
number of dimensions to force entries into.
>>> a = np.array([[0, 1, 2], [3, 4, 5]])
>>> np.r_['-1', a, a] # concatenate along last axis
array([[0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5]])
>>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2
array([[1, 2, 3],
[4, 5, 6]])
>>> np.r_['0,2,0', [1,2,3], [4,5,6]]
array([[1],
[2],
[3],
[4],
[5],
[6]])
>>> np.r_['1,2,0', [1,2,3], [4,5,6]]
array([[1, 4],
[2, 5],
[3, 6]])
Using 'r' or 'c' as a first string argument creates a matrix.
>>> np.r_['r',[1,2,3], [4,5,6]]
matrix([[1, 2, 3, 4, 5, 6]])
"""
def __init__(self):
AxisConcatenator.__init__(self, 0)
r_ = RClass()
class CClass(AxisConcatenator):
"""
Translates slice objects to concatenation along the second axis.
This is short-hand for ``np.r_['-1,2,0', index expression]``, which is
useful because of its common occurrence. In particular, arrays will be
stacked along their last axis after being upgraded to at least 2-D with
1's post-pended to the shape (column vectors made out of 1-D arrays).
See Also
--------
column_stack : Stack 1-D arrays as columns into a 2-D array.
r_ : For more detailed documentation.
Examples
--------
>>> np.c_[np.array([1,2,3]), np.array([4,5,6])]
array([[1, 4],
[2, 5],
[3, 6]])
>>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
array([[1, 2, 3, ..., 4, 5, 6]])
"""
def __init__(self):
AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
c_ = CClass()
@set_module('numpy')
class ndenumerate:
"""
Multidimensional index iterator.
Return an iterator yielding pairs of array coordinates and values.
Parameters
----------
arr : ndarray
Input array.
See Also
--------
ndindex, flatiter
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> for index, x in np.ndenumerate(a):
... print(index, x)
(0, 0) 1
(0, 1) 2
(1, 0) 3
(1, 1) 4
"""
def __init__(self, arr):
self.iter = asarray(arr).flat
def __next__(self):
"""
Standard iterator method, returns the index tuple and array value.
Returns
-------
coords : tuple of ints
The indices of the current iteration.
val : scalar
The array element of the current iteration.
"""
return self.iter.coords, next(self.iter)
def __iter__(self):
return self
@set_module('numpy')
class ndindex:
"""
An N-dimensional iterator object to index arrays.
Given the shape of an array, an `ndindex` instance iterates over
the N-dimensional index of the array. At each iteration a tuple
of indices is returned, the last dimension is iterated over first.
Parameters
----------
`*args` : ints
The size of each dimension of the array.
See Also
--------
ndenumerate, flatiter
Examples
--------
>>> for index in np.ndindex(3, 2, 1):
... print(index)
(0, 0, 0)
(0, 1, 0)
(1, 0, 0)
(1, 1, 0)
(2, 0, 0)
(2, 1, 0)
"""
def __init__(self, *shape):
if len(shape) == 1 and isinstance(shape[0], tuple):
shape = shape[0]
x = as_strided(_nx.zeros(1), shape=shape,
strides=_nx.zeros_like(shape))
self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'],
order='C')
def __iter__(self):
return self
def ndincr(self):
"""
Increment the multi-dimensional index by one.
This method is for backward compatibility only: do not use.
"""
next(self)
def __next__(self):
"""
Standard iterator method, updates the index and returns the index
tuple.
Returns
-------
val : tuple of ints
Returns a tuple containing the indices of the current
iteration.
"""
next(self._it)
return self._it.multi_index
# You can do all this with slice() plus a few special objects,
# but there's a lot to remember. This version is simpler because
# it uses the standard array indexing syntax.
#
# Written by Konrad Hinsen <[email protected]>
# last revision: 1999-7-23
#
# Cosmetic changes by T. Oliphant 2001
#
#
class IndexExpression:
"""
A nicer way to build up index tuples for arrays.
.. note::
Use one of the two predefined instances `index_exp` or `s_`
rather than directly using `IndexExpression`.
For any index combination, including slicing and axis insertion,
``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any
array `a`. However, ``np.index_exp[indices]`` can be used anywhere
in Python code and returns a tuple of slice objects that can be
used in the construction of complex index expressions.
Parameters
----------
maketuple : bool
If True, always returns a tuple.
See Also
--------
index_exp : Predefined instance that always returns a tuple:
`index_exp = IndexExpression(maketuple=True)`.
s_ : Predefined instance without tuple conversion:
`s_ = IndexExpression(maketuple=False)`.
Notes
-----
You can do all this with `slice()` plus a few special objects,
but there's a lot to remember and this version is simpler because
it uses the standard array indexing syntax.
Examples
--------
>>> np.s_[2::2]
slice(2, None, 2)
>>> np.index_exp[2::2]
(slice(2, None, 2),)
>>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]
array([2, 4])
"""
def __init__(self, maketuple):
self.maketuple = maketuple
def __getitem__(self, item):
if self.maketuple and not isinstance(item, tuple):
return (item,)
else:
return item
index_exp = IndexExpression(maketuple=True)
s_ = IndexExpression(maketuple=False)
# End contribution from Konrad.
# The following functions complement those in twodim_base, but are
# applicable to N-dimensions.
def _fill_diagonal_dispatcher(a, val, wrap=None):
return (a,)
@array_function_dispatch(_fill_diagonal_dispatcher)
def fill_diagonal(a, val, wrap=False):
"""Fill the main diagonal of the given array of any dimensionality.
For an array `a` with ``a.ndim >= 2``, the diagonal is the list of
locations with indices ``a[i, ..., i]`` all identical. This function
modifies the input array in-place, it does not return a value.
Parameters
----------
a : array, at least 2-D.
Array whose diagonal is to be filled, it gets modified in-place.
val : scalar
Value to be written on the diagonal, its type must be compatible with
that of the array a.
wrap : bool
For tall matrices in NumPy version up to 1.6.2, the
diagonal "wrapped" after N columns. You can have this behavior
with this option. This affects only tall matrices.
See also
--------
diag_indices, diag_indices_from
Notes
-----
.. versionadded:: 1.4.0
This functionality can be obtained via `diag_indices`, but internally
this version uses a much faster implementation that never constructs the
indices and uses simple slicing.
Examples
--------
>>> a = np.zeros((3, 3), int)
>>> np.fill_diagonal(a, 5)
>>> a
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]])
The same function can operate on a 4-D array:
>>> a = np.zeros((3, 3, 3, 3), int)
>>> np.fill_diagonal(a, 4)
We only show a few blocks for clarity:
>>> a[0, 0]
array([[4, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> a[1, 1]
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 0]])
>>> a[2, 2]
array([[0, 0, 0],
[0, 0, 0],
[0, 0, 4]])
The wrap option affects only tall matrices:
>>> # tall matrices no wrap
>>> a = np.zeros((5, 3), int)
>>> np.fill_diagonal(a, 4)
>>> a
array([[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
[0, 0, 0],
[0, 0, 0]])
>>> # tall matrices wrap
>>> a = np.zeros((5, 3), int)
>>> np.fill_diagonal(a, 4, wrap=True)
>>> a
array([[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
[0, 0, 0],
[4, 0, 0]])
>>> # wide matrices
>>> a = np.zeros((3, 5), int)
>>> np.fill_diagonal(a, 4, wrap=True)
>>> a
array([[4, 0, 0, 0, 0],
[0, 4, 0, 0, 0],
[0, 0, 4, 0, 0]])
The anti-diagonal can be filled by reversing the order of elements
using either `numpy.flipud` or `numpy.fliplr`.
>>> a = np.zeros((3, 3), int);
>>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip
>>> a
array([[0, 0, 1],
[0, 2, 0],
[3, 0, 0]])
>>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip
>>> a
array([[0, 0, 3],
[0, 2, 0],
[1, 0, 0]])
Note that the order in which the diagonal is filled varies depending
on the flip function.
"""
if a.ndim < 2:
raise ValueError("array must be at least 2-d")
end = None
if a.ndim == 2:
# Explicit, fast formula for the common case. For 2-d arrays, we
# accept rectangular ones.
step = a.shape[1] + 1
        # This is needed so that tall matrices don't have the diagonal wrap.
if not wrap:
end = a.shape[1] * a.shape[1]
else:
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
if not alltrue(diff(a.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
step = 1 + (cumprod(a.shape[:-1])).sum()
# Write the value out into the diagonal.
a.flat[:end:step] = val
@set_module('numpy')
def diag_indices(n, ndim=2):
"""
Return the indices to access the main diagonal of an array.
This returns a tuple of indices that can be used to access the main
diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape
(n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for
``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``
for ``i = [0..n-1]``.
Parameters
----------
n : int
The size, along each dimension, of the arrays for which the returned
indices can be used.
ndim : int, optional
The number of dimensions.
See also
--------
diag_indices_from
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Create a set of indices to access the diagonal of a (4, 4) array:
>>> di = np.diag_indices(4)
>>> di
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> a[di] = 100
>>> a
array([[100, 1, 2, 3],
[ 4, 100, 6, 7],
[ 8, 9, 100, 11],
[ 12, 13, 14, 100]])
Now, we create indices to manipulate a 3-D array:
>>> d3 = np.diag_indices(2, 3)
>>> d3
(array([0, 1]), array([0, 1]), array([0, 1]))
And use it to set the diagonal of an array of zeros to 1:
>>> a = np.zeros((2, 2, 2), dtype=int)
>>> a[d3] = 1
>>> a
array([[[1, 0],
[0, 0]],
[[0, 0],
[0, 1]]])
"""
idx = arange(n)
return (idx,) * ndim
def _diag_indices_from(arr):
return (arr,)
@array_function_dispatch(_diag_indices_from)
def diag_indices_from(arr):
"""
Return the indices to access the main diagonal of an n-dimensional array.
See `diag_indices` for full details.
Parameters
----------
arr : array, at least 2-D
See Also
--------
diag_indices
Notes
-----
.. versionadded:: 1.4.0
"""
if not arr.ndim >= 2:
raise ValueError("input array must be at least 2-d")
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
if not alltrue(diff(arr.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
return diag_indices(arr.shape[0], arr.ndim)
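# Illustrative example (not part of the original module): for a square array the
# result is just ``diag_indices(n, ndim)`` derived from the array itself::
#
#     >>> a = np.arange(9).reshape(3, 3)
#     >>> np.diag_indices_from(a)
#     (array([0, 1, 2]), array([0, 1, 2]))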
|
py | b411883bb599c9d2183bd8ceb623fa9c361e7e5c | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Defun decorator for defining graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import itertools
import pprint
import threading
import types as types_lib
import weakref
import numpy as np
import six
from six.moves import map
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import backprop_util
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import forwardprop_util
from tensorflow.python.eager import monitoring
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph as func_graph_module
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import default_gradient
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_util
from tensorflow.python.ops import handle_data_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.profiler import trace
from tensorflow.python.saved_model import save_context
from tensorflow.python.types import core
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util import compat
from tensorflow.python.util import function_utils
from tensorflow.python.util import lazy_loader
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
# Loaded lazily due to a circular dependency (roughly
# tf.function->autograph->->dataset->tf.function).
# TODO(b/133251390): Use a regular import.
ag_ctx = lazy_loader.LazyLoader(
"ag_ctx", globals(),
"tensorflow.python.autograph.core.ag_ctx")
np_arrays = lazy_loader.LazyLoader(
"np_arrays", globals(),
"tensorflow.python.ops.numpy_ops.np_arrays")
FORWARD_FUNCTION_ATTRIBUTE_NAME = "forward_function_name"
BACKWARD_FUNCTION_ATTRIBUTE_NAME = "backward_function_name"
IMPLEMENTS_ATTRIBUTE_NAME = "_implements"
SHARED_RENDEZVOUS_ATTRIBUTE_NAME = "shared_rendezvous"
# A temporary flag. Turning this on will allow tf.function to aggressively avoid
# retracing ResourceVariable inputs. This feature will change tf.function's
# Variable tracing behavior, hence we want to limit the potential blockers that
# are not detected by Global TAP.
# TODO(jiaweix): remove this flag and related args (b/198782192)
ENCODE_VARIABLES_BY_RESOURCE_ID = True
_graph_building_time_counter = monitoring.Counter(
"/tensorflow/core/tf_function/graph_building_time_usecs",
"Time for tf.function to build a graph (us).")
# TODO(b/195985838): cleanup this function.
def _make_input_signature_hashable(elem):
"""Rewrite input signature to be hashable.
We replace nested variables in the input signature with TensorSpec in order to
be hashable.
Args:
elem: Input signature element
Returns:
A hashable object for the requested input signature
"""
try:
hash(elem)
except TypeError:
# TODO(slebedev): consider using nest.
if isinstance(elem, tuple):
return tuple(map(_make_input_signature_hashable, elem))
# TFE_Py_EncodeArg weakrefs arguments it does not recognize, and we expect
# all recognized types to be hashable.
assert isinstance(elem, weakref.ReferenceType)
v = elem()
if resource_variable_ops.is_resource_variable(v):
# We special case variables here to use unique_id as the cache key. This
# ensures we have to retrace whenever a different variable is passed in.
# This is needed to support cases where the user may use the id of a
# variable in the function perhaps as a lookup in a dictionary.
#
# This choice leads to more retracing when we could have possibly used the
# shape and dtype instead. However, we expect the number of variables in a
# program to be bounded, and correspondingly the number of retraces.
#
# Note we also include the class name to avoid collisions with strings.
return v.__class__, v._unique_id # pylint: disable=protected-access
if _is_ndarray(v):
# Numpy arrays are not hashable, but when calling functions we treat them
# in the same way as tf.Tensors.
if not hasattr(v, "shape") or not hasattr(v, "dtype"):
# TODO(tomhennigan) De-dup with _as_ndarray in _convert_numpy_inputs.
v = _as_ndarray(v)
return tensor_spec.TensorSpec(v.shape, v.dtype)
raise ValueError("Arguments to a tf.function must be a nested structure of "
"Tensors, Variables, NumPy arrays, or hashable Python "
f"objects, got {type(v)}.")
return elem
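# Illustrative example (not part of the original module): values that are
# already hashable are returned unchanged, e.g.
# _make_input_signature_hashable((1, "a")) is just (1, "a"); nested unhashable
# tuples are rebuilt element-by-element, and everything else is expected to be
# a weakref to a variable or ndarray.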
CacheKey = collections.namedtuple("CacheKey", [
"input_signature",
"parent_graph",
"device_functions",
"colocation_stack",
"in_cross_replica_context",
"variable_policy",
"xla_context_id",
])
def _type_spec_for(x):
"""Returns a TypeSpec for `x`, or `None` if `x` doesn't have a TensorSpec."""
if isinstance(x, ops.Tensor):
return tensor_spec.TensorSpec.from_tensor(x)
elif isinstance(x, type_spec.TypeSpec):
return x
elif isinstance(x, composite_tensor.CompositeTensor):
return x._type_spec # pylint: disable=protected-access
else:
return None
def _is_type_subset(a, b):
"""Returns true if TypeSpec `b` is a subset of type `a` (or if a is None.)"""
if a is None:
return True
else:
return a.most_specific_compatible_type(b) == a
def _shape_relaxed_type_for_composite_tensor(x):
"""Returns a shape-relaxed TypeSpec for x (if composite) or x (if not)."""
if isinstance(x, composite_tensor.CompositeTensor):
# pylint: disable=protected-access
return x._type_spec._with_tensor_ranks_only()
else:
return x
def common_shape(x, y):
"""Find a `TensorShape` that is compatible with both `x` and `y`."""
if x is None != y is None:
raise RuntimeError(
"Cannot find a common shape when LHS shape is None but RHS shape "
f"is not (or vice versa): {x} vs. {y}.")
if x is None:
return None # The associated input was not a Tensor, no shape generated.
if not isinstance(x, tensor_shape.TensorShape):
raise TypeError(f"`x` must be a TensorShape, got type {type(x)}.")
if not isinstance(y, tensor_shape.TensorShape):
raise TypeError(f"`y` must be a TensorShape, got type {type(y)}.")
if x.rank != y.rank or x.rank is None:
return tensor_shape.TensorShape(None)
dims = []
for dim_x, dim_y in zip(x.dims, y.dims):
if (dim_x != dim_y
or tensor_shape.dimension_value(dim_x) is None
or tensor_shape.dimension_value(dim_y) is None):
dims.append(None)
else:
dims.append(tensor_shape.dimension_value(dim_x))
return tensor_shape.TensorShape(dims)
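# Illustrative example (not part of the original module): ranks must match, and
# any dimension that differs or is unknown collapses to None:
#
#   common_shape(tensor_shape.TensorShape([None, 3]),
#                tensor_shape.TensorShape([2, 3]))
#   # -> TensorShape([None, 3])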
def is_same_structure(structure1,
structure2,
check_values=False):
"""Check two structures for equality, optionally of types and of values."""
try:
nest.assert_same_structure(structure1, structure2, expand_composites=True)
except (ValueError, TypeError):
return False
if check_values:
flattened1 = nest.flatten(structure1, expand_composites=True)
flattened2 = nest.flatten(structure2, expand_composites=True)
# First check the types to avoid AttributeErrors.
if any(type(f1) != type(f2) for f1, f2 in zip(flattened1, flattened2)):
return False
return flattened1 == flattened2
return True
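# Illustrative example (not part of the original module): structure is compared
# first, values only when requested:
#
#   is_same_structure({'a': 1}, {'a': 2})                     # -> True
#   is_same_structure({'a': 1}, {'a': 2}, check_values=True)  # -> False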
def _parse_func_attrs(attributes):
"""Convert the keyword arguments into function_def attributes.
Currently only support primitive types: bool, int, float and string.
Args:
attributes: the dictionary of attributes.
Returns:
A dict of attributes where the key is the name of attribute and the value
is the AttrValue proto.
Raises:
ValueError: If the kwargs contains unallowlisted name or unsupported value
types.
"""
attrs = {}
for key, value in attributes.items():
if isinstance(value, attr_value_pb2.AttrValue):
attrs[key] = value
# bool type check has to happen before int since bool is a subclass of int.
elif isinstance(value, bool):
attrs[key] = attr_value_pb2.AttrValue(b=value)
elif isinstance(value, int):
attrs[key] = attr_value_pb2.AttrValue(i=value)
elif isinstance(value, float):
attrs[key] = attr_value_pb2.AttrValue(f=value)
elif isinstance(value, (str, bytes, six.text_type)):
attrs[key] = attr_value_pb2.AttrValue(s=compat.as_bytes(value))
else:
raise ValueError(f"Attribute {key} must be bool, int, float, string, or "
f"AttrValue. Got {type(value)}.")
return attrs
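# Illustrative example (not part of the original module): each primitive value
# is boxed into the matching AttrValue field (bool is tested before int above
# because bool is a subclass of int):
#
#   _parse_func_attrs({"api_implements": "matmul", "_noinline": True})
#   # -> {"api_implements": AttrValue(s=b"matmul"), "_noinline": AttrValue(b=True)}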
class _InterpolateFunctionError(object):
"""Context Manager that interpolates the exception from 'top_level_func'."""
__slots__ = ["_func"]
def __init__(self, top_level_func):
self._func = top_level_func
def __enter__(self):
pass
def __exit__(self, typ, exc, tb):
if not exc or not isinstance(exc, errors.OpError):
return False
message = compat.as_text(exc.message)
_, tags = error_interpolation.parse_message(message)
g = None
func_stack = []
for t in tags:
if t.type == "function_node":
# TODO(mdan): Tests should cover this.
if t.name == compat.as_str(self._func.name):
g = self._func.graph
elif g:
next_func = g._get_function(t.name) # pylint: disable=protected-access
if next_func is not None and isinstance(next_func,
_EagerDefinedFunction):
g = next_func.graph
if g:
func_stack.append(g.name)
else:
func_stack.append("<unknown>")
if g:
message = error_interpolation.interpolate(message, g)
if len(func_stack) >= 2:
message += "\n\nFunction call stack:\n"
message += " -> ".join(func_stack)
message += "\n"
exc._message = message # pylint: disable=protected-access
return False
_function_callbacks = set()
def add_function_callback(function_callback):
"""Add a callback function for Function creation.
The callback function has the signature:
`def function_callback(function, name, graph, inputs, outputs):`
where:
- `function`: _EagerDefinedFunction being created before finalizing the graph.
Do not modify the function directly but instead modify the graph.
- `name`: name of the function.
- `graph`: Graph of the function.
- `inputs`: `tuple` of tensors used as inputs to the function.
- `outputs`: `tuple` of tensors used as outputs from the function.
The callback is at the top of the `_EagerDefinedFunction` construction, giving
callback an opportunity to make the last edits to the graph. Do not make
changes to `graph, inputs`, and `outputs` manually, but, instead, set the
`graph` as the default then define ops.
Repeated registration of the same callback function is idempotent.
After a callback is added, it can be removed with the
`remove_function_callback()` method.
Args:
function_callback: The callback to add.
"""
_function_callbacks.add(function_callback)
def remove_function_callback(function_callback):
"""Remove an already-added function callback.
See the doc string of `add_function_callback()` for more information.
Args:
function_callback: The callback to remove.
"""
_function_callbacks.remove(function_callback)
def clear_function_callbacks():
"""Clear all function callbacks, if any have been regisered."""
_function_callbacks.clear()
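# Illustrative usage sketch (not part of the original module): a callback that
# only logs what is being traced. `_example_function_callback_usage` is a
# hypothetical wrapper that is never invoked here; it just shows the
# register/clean-up pattern described in the docstrings above.
def _example_function_callback_usage():
  def _log_new_function(function, name, graph, inputs, outputs):
    del function, graph  # Only the name and arity are logged in this sketch.
    logging.info("Tracing function %s with %d inputs and %d outputs",
                 name, len(inputs), len(outputs))

  add_function_callback(_log_new_function)
  try:
    pass  # ... build and call some tf.functions here ...
  finally:
    remove_function_callback(_log_new_function)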
_FORWARD_PREFIX = "__forward_"
_BACKWARD_PREFIX = "__backward_"
_INFERENCE_PREFIX = "__inference_"
def _forward_name(n):
"""The name of a generated forward defun named n."""
return "%s%s_%s" % (_FORWARD_PREFIX, n, ops.uid())
def _backward_name(n):
"""The name of a generated backward defun named n."""
return "%s%s_%s" % (_BACKWARD_PREFIX, n, ops.uid())
def _inference_name(n):
"""The name of a forward-but-no-gradient defun named n."""
return "%s%s_%s" % (_INFERENCE_PREFIX, n, ops.uid())
def _enclosing_xla_context():
"""Returns the XLAControlFlowContext, which exists inside a tpu.rewrite()."""
graph = ops.get_default_graph()
while graph is not None:
# pylint: disable=protected-access
context_ = graph._get_control_flow_context()
# pylint: enable=protected-access
while context_ is not None:
if isinstance(context_, control_flow_ops.XLAControlFlowContext):
return context_
context_ = context_.outer_context
# This may be a FuncGraph due to defuns or v2 control flow. We need to
# find the original graph with the XLAControlFlowContext.
graph = getattr(graph, "outer_graph", None)
return None
class _EagerDefinedFunctionDeleter(object):
"""Unregister function from eager context."""
__slots__ = ["name"]
def __init__(self, name):
self.name = name
def __del__(self):
try:
context.remove_function(self.name)
except TypeError:
# Suppress some exceptions, mainly for the case when we're running on
# module deletion. Things that can go wrong include the context module
# already being unloaded, self._handle._handle_data no longer being
# valid, and so on. Printing warnings in these cases is silly
# (exceptions raised from __del__ are printed as warnings to stderr).
pass # 'NoneType' object is not callable when the handle has been
# partially unloaded.
except AttributeError:
pass # 'NoneType' object has no attribute 'eager_mode' when context has
# been unloaded. Will catch other module unloads as well.
class FunctionAlreadyGarbageCollectedError(Exception):
def __init__(self, function_name):
super(FunctionAlreadyGarbageCollectedError, self).__init__(
"{} has already been garbage collected and cannot be called.".format(
function_name))
# TODO(apassos) get rid of this by splitting framework.function._DefinedFunction
# so it doesn't have the definition-generating logic and is just a container for
# an already-defined function.
class _EagerDefinedFunction(object):
"""Callable with the interface of `framework.function._DefinedFunction`.
`_EagerDefinedFunction` encapsulates a function definition and its properties,
and it provides a method for calling the encapsulated function. Some Ops
take functions as attributes, which have type `func`; an instance of this
class may be provided as the value of these `func` attributes.
"""
def __init__(self, name, graph, inputs, outputs, attrs):
"""Initializes an eager defined function.
Args:
name: str, the name for the created function.
      graph: Graph, the graph containing the operations in the function.
      inputs: the tensors in the graph to be used as inputs to the function.
      outputs: the tensors in the graph which will be outputs from the function.
      attrs: dict mapping names of attributes to their AttrValue values.
"""
for function_callback in _function_callbacks:
function_callback(self, name, graph, tuple(inputs), tuple(outputs))
input_ops = set(arg.op for arg in inputs)
operations = [op for op in graph.get_operations() if op not in input_ops]
graph_output_names = graph._output_names # pylint: disable=protected-access
if (graph_output_names is not None and
all(ops.tensor_id(t) in graph_output_names for t in outputs)):
output_names = [
compat.as_bytes(graph_output_names[ops.tensor_id(t)]) for t in outputs
]
if len(set(output_names)) != len(output_names):
# There are duplicate names for some reason, probably an invalid
# signature. Revert to auto-naming.
output_names = []
else:
output_names = []
fn = pywrap_tf_session.TF_GraphToFunction_wrapper(
graph._c_graph, # pylint: disable=protected-access
compat.as_str(name),
False,
[o._c_op for o in operations], # pylint: disable=protected-access
[t._as_tf_output() for t in inputs], # pylint: disable=protected-access
[t._as_tf_output() for t in outputs], # pylint: disable=protected-access
output_names,
[o._c_op for o in graph.control_outputs], # pylint: disable=protected-access
[], # control_output_names
None,
compat.as_str(""))
for name, attr_value in attrs.items():
serialized = attr_value.SerializeToString()
# TODO(iga): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use status.
pywrap_tf_session.TF_FunctionSetAttrValueProto(fn, compat.as_str(name),
serialized)
    # TODO(apassos) avoid creating a FunctionDef (especially to grab the
    # signature), but in general it's nice not to depend on it.
with c_api_util.tf_buffer() as buffer_:
pywrap_tf_session.TF_FunctionToFunctionDef(fn, buffer_)
proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)
function_def = function_pb2.FunctionDef()
function_def.ParseFromString(compat.as_bytes(proto_data))
self._name = compat.as_bytes(function_def.signature.name)
with ops.init_scope():
if context.executing_eagerly():
context.ensure_initialized()
context.add_function(fn)
self._function_deleter = _EagerDefinedFunctionDeleter(self.name)
self._registered_on_context = True
self.definition = function_def
self.signature = function_def.signature
self._num_outputs = len(self.signature.output_arg)
self._output_types = [o.type for o in self.signature.output_arg]
self._output_shapes = [o.shape for o in outputs]
self._control_captures = graph.control_captures
# Shallow copy outputs since ConcreteFunction may mutate it.
self._func_graph_outputs = list(outputs)
self.grad_func_name = None
self.python_grad_func = None
self._c_func = c_api_util.ScopedTFFunction(fn)
self._grad_func = None
self.graph = graph
self._stateful_ops = tuple(op for op in operations if op._is_stateful) # pylint: disable=protected-access
def add_to_graph(self, g=None):
"""Add the function to the current context or a graph, if supplied.
Args:
g: the graph to add the function to. If not supplied, the function will
be added to the current context.
"""
# pylint: disable=protected-access
if not g and context.executing_eagerly():
ctx = context.context()
if not ctx.has_function(self.name):
ctx.add_function_def(self.definition)
else:
if not g._is_function(self.name):
g._add_function(self)
for f in self.graph._functions.values():
if not g._is_function(f.name):
g._add_function(f)
# pylint: enable=protected-access
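  # Rough sketch of the two paths handled above (`g` is a hypothetical
  # tf.Graph; `fn` an _EagerDefinedFunction):
  #
  #   fn.add_to_graph()    # eager: registers the FunctionDef with the context
  #   fn.add_to_graph(g)   # graph building: adds this function, plus every
  #                        # function in self.graph._functions, to `g`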
@property
def name(self):
return self._name
@property
def stateful_ops(self):
return self._stateful_ops
def call(self, ctx, args, cancellation_manager=None):
"""Calls this function with `args` as inputs.
`ConcreteFunction` execution respects device annotations only if the
    function won't be compiled with XLA.
Args:
ctx: a Context object
args: a list of arguments to supply this function with.
cancellation_manager: a `CancellationManager` object that can be used to
cancel function execution.
Returns:
The outputs of the function call.
Raises:
ValueError: if the number of arguments is incorrect.
FunctionAlreadyGarbageCollectedError: if the function is no longer
available to be called because it has been garbage collected.
"""
if len(args) != len(self.signature.input_arg):
raise ValueError(
f"Signature specifies {len(list(self.signature.input_arg))} "
f"arguments, got: {len(args)}.")
# If the `ScopedTFFunction` (accessed via `_c_func`) has already been
# cleaned up as a part of garbage collection, this `_EagerDefinedFunction`
# should also be garbage and is likely being called as part of a `__del__`
# elsewhere. In that case, there's nothing we can do, so we raise an
# exception for the caller to handle.
if self._c_func.has_been_garbage_collected:
raise FunctionAlreadyGarbageCollectedError(self.name)
function_call_options = ctx.function_call_options
if function_call_options.config_proto_serialized is None:
config = function_utils.get_disabled_rewriter_config()
else:
config = function_call_options.config_proto_serialized
executor_type = function_call_options.executor_type or ""
executing_eagerly = ctx.executing_eagerly()
attrs = ("executor_type", executor_type, "config_proto", config)
if executing_eagerly:
with _InterpolateFunctionError(self):
if cancellation_manager is None:
outputs = execute.execute(
str(self.signature.name),
num_outputs=self._num_outputs,
inputs=args,
attrs=attrs,
ctx=ctx)
else:
outputs = execute.execute_with_cancellation(
str(self.signature.name),
num_outputs=self._num_outputs,
inputs=args,
attrs=attrs,
ctx=ctx,
cancellation_manager=cancellation_manager)
# Replace empty list with None
outputs = outputs or None
else:
# TODO(akshayka): Either remove this if the FunctionLibraryRuntime
# creates `PartitionedCallOp` kernels by default, or remove the previous
# branch if a TPU kernel is registered for `PartitionedCall`.
with _InterpolateFunctionError(self):
with ops.control_dependencies(self._control_captures):
# The caller must use record_operation to record this operation in the
# eager case, so we enforce the same requirement for the non-eager
# case by explicitly pausing recording. We don't have a gradient
# registered for PartitionedCall, so recording this operation confuses
# forwardprop code (GradientTape manages to ignore it).
with tape.stop_recording():
outputs = functional_ops.partitioned_call(
args=args,
f=self,
tout=self._output_types,
executing_eagerly=executing_eagerly,
config=config,
executor_type=executor_type)
for i, func_graph_output in enumerate(self._func_graph_outputs):
handle_data_util.copy_handle_data(func_graph_output, outputs[i])
if executing_eagerly:
return outputs
else:
# TODO(b/128924522): This additional set_shape should not be
# necessary. ShapeRefiner likely needs to inspect handle_data. Remove this
# once that's done.
for i, shape in enumerate(self._output_shapes):
outputs[i].set_shape(shape)
return outputs
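  # Condensed view of the dispatch in `call` above (both branches run the
  # registered function by its signature name; the names shown are
  # illustrative):
  #
  #   eager:          execute.execute("__inference_f_42", num_outputs=...,
  #                                   inputs=args, attrs=attrs, ctx=ctx)
  #   graph building: functional_ops.partitioned_call(args=args, f=self, ...)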
def _create_forward_backward_with_graph(attrs, forward_graph, backwards_graph):
"""Creates forward and backward functions from the function graphs."""
forward_function_name = _forward_name(forward_graph.name)
common_attributes = dict(attrs)
  # NB: the forward and backward functions need to drop the "_implements"
  # attribute, because their signatures contain all the intermediate tensors
  # that they compute. Thus they don't have a stable signature which can
  # be directly optimized downstream.
# See for more details:
# https://github.com/tensorflow/community/blob/master/rfcs/20190610-standardizing-composite_ops.md#appendix-future-support-for-optimizing-gradient-functions
common_attributes.pop(IMPLEMENTS_ATTRIBUTE_NAME, None)
backward_function_attr = _parse_func_attrs(
{FORWARD_FUNCTION_ATTRIBUTE_NAME: forward_function_name})
backward_function_attr.update(common_attributes)
backward_function = ConcreteFunction(
backwards_graph, attrs=backward_function_attr)
forward_function_attr = _parse_func_attrs({
BACKWARD_FUNCTION_ATTRIBUTE_NAME:
backward_function.name})
forward_function_attr.update(common_attributes)
forward_function = _EagerDefinedFunction(
forward_function_name, forward_graph, forward_graph.inputs,
forward_graph.outputs, forward_function_attr)
return forward_function, backward_function
class _DelayedRewriteGradientFunctions(object):
"""Caches forward/backward functions with a delayed forward rewrite."""
def __init__(self, func_graph, attrs, func_graph_deleter):
"""Construct an inference function and initialize caches."""
# A map from the number of forward function outputs with accepted gradients
# to forward and backward functions, used to cache non-tape backward
# function generation.
self._cached_function_pairs = {}
self._func_graph = func_graph
self._inference_function = _EagerDefinedFunction(
_inference_name(self._func_graph.name), self._func_graph,
self._func_graph.inputs, self._func_graph.outputs, attrs)
self._attrs = attrs
self._gradient_name = None
# Note that the FuncGraph is mutated later, so we need to inspect it now to
# figure out the user-specified outputs of the inference function.
self._num_inference_outputs = len(self._func_graph.outputs)
self._func_graph_deleter = func_graph_deleter
def forward_backward(self, num_doutputs=None):
"""A possibly-cached pair of forward and backward functions."""
if num_doutputs is None:
num_doutputs = self._num_inference_outputs
forward_backward = self._cached_function_pairs.get(num_doutputs)
if forward_backward is not None:
return forward_backward
forward, backward = self._construct_forward_backward(num_doutputs)
self._cached_function_pairs[num_doutputs] = (forward, backward)
return forward, backward
def _construct_forward_backward(self, num_doutputs):
"""Constructs a pair of forward and backward functions.
Args:
num_doutputs: The constructed backprop function will take output gradients
for the first `num_doutputs` outputs of the forward function. Defaults
to the number of outputs for the inference function, but when
higher-order gradients are computed this will increase to include side
outputs.
Returns:
A pair of (forward_function, backward_function):
forward_function: A re-generated inference function (an
_EagerDefinedFunction) to account for new side outputs, if any extra
were required when building the backward pass.
        backward_function: A ConcreteFunction that takes `num_doutputs`
arguments and returns gradients with respect to inputs of the forward
function.
"""
trainable_outputs = [
output for output in self._func_graph.outputs[:num_doutputs]
if backprop_util.IsTrainable(output)]
signature = []
for t in trainable_outputs:
signature.append(
tensor_spec.TensorSpec(*default_gradient.shape_and_dtype(t)))
def _backprop_function(*grad_ys):
with ops.device(None):
return gradients_util._GradientsHelper( # pylint: disable=protected-access
trainable_outputs,
self._func_graph.inputs,
grad_ys=grad_ys,
src_graph=self._func_graph)
with self._func_graph.as_default():
backwards_graph = func_graph_module.FuncGraph(
_backward_name(self._func_graph.name))
func_graph_module.func_graph_from_py_func(
name=backwards_graph.name,
python_func=_backprop_function,
args=[], kwargs={},
signature=signature,
func_graph=backwards_graph)
backwards_graph_captures = backwards_graph.external_captures
captures_from_forward = [
c for c in backwards_graph_captures if
not isinstance(c, ops.EagerTensor) and c.graph is self._func_graph]
existing_outputs = object_identity.ObjectIdentitySet(
self._func_graph.outputs)
for capture in captures_from_forward:
if capture not in existing_outputs:
existing_outputs.add(capture)
self._func_graph.outputs.append(capture)
forward_function, backward_function = _create_forward_backward_with_graph(
self._attrs, self._func_graph, backwards_graph)
return forward_function, backward_function
def _rewrite_forward_and_call_backward(self, op, *doutputs):
"""Add outputs to the forward call and feed them to the grad function."""
forward_function, backwards_function = self.forward_backward(len(doutputs))
if not backwards_function.outputs:
return backwards_function.structured_outputs
forward_function.add_to_graph(op.graph)
# pylint: disable=protected-access
# Rewrite an inference call op to be a forward call op
op._set_func_attr("f", forward_function.name)
op._set_type_list_attr("Tout", forward_function._output_types)
op._add_outputs(
forward_function._output_types[len(op.outputs):],
forward_function._output_shapes[len(op.outputs):])
for i in range(len(op.outputs)):
func_graph_output = forward_function._func_graph_outputs[i]
handle_data_util.copy_handle_data(func_graph_output, op.outputs[i])
# pylint: enable=protected-access
capture_mapping = dict(
zip((ops.tensor_id(t) for t in self._func_graph.outputs), op.outputs))
remapped_captures = [
capture_mapping.get(ops.tensor_id(capture), capture)
for capture in backwards_function.captured_inputs
]
# Replace Nones with zeros since we're calling a graph function which
# expects numeric inputs.
cleaned_doutputs = []
for doutput, placeholder in zip(doutputs, self._func_graph.outputs):
if backprop_util.IsTrainable(placeholder):
if isinstance(doutput, ops.IndexedSlices):
# Gradient passed to a backward ConcreteFunction must be tf.Tensor,
# so we convert tf.IndexedSlices to tf.Tensor.
cleaned_doutputs.append(ops.convert_to_tensor(doutput))
elif doutput is not None:
cleaned_doutputs.append(doutput)
else:
cleaned_doutputs.append(default_gradient.zeros_like(placeholder))
# Compute the gradients using the side outputs
return backwards_function._call_flat( # pylint: disable=protected-access
cleaned_doutputs, remapped_captures)
def get_gradient_function(self):
"""Returns gradient function.
The gradient rewrites an inference call op to a forward call op, but does
not modify a pre-existing forward call op. It then computes the gradient
from the output's gradients and the side outputs of the forward op.
"""
return self._rewrite_forward_and_call_backward
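  # A minimal sketch of the code path this class serves: symbolic gradients of
  # a traced function while graph building, with no tape active (`f`, `x`, `y`,
  # and `g` are hypothetical; tf.function and tf.gradients are public APIs).
  #
  #   @tf.function
  #   def f(x):
  #     return x * x
  #
  #   with tf.Graph().as_default():
  #     x = tf.constant(3.0)
  #     y = f(x)                 # emits an inference (PartitionedCall) op
  #     g = tf.gradients(y, x)   # rewrites that op into a forward call op and
  #                              # invokes the delayed-rewrite backward function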
def forward(self, inference_args=None, input_tangents=None):
"""A forward function with only user-specified outputs.
The call operation for the returned inference function can be rewritten into
a forward function. This only happens if the backward function (from the
`backward` method) ends up being used to compute gradients.
This approach avoids constructing unnecessary graphs, but it only works if
we are calling this function when not executing eagerly.
Args:
inference_args: A flat list of Tensors, arguments to the inference
function. Unused, but taken for compatibility with
_TapeGradientFunctions.
input_tangents: A flat list of Tensors, jvps associated with
`inference_args`. Unused; if required, tape functions must be used
instead.
Returns:
An _EagerDefinedFunction.
"""
del inference_args # unused
if input_tangents:
# This class does not support special-cased forwardprop. The arguments are
# here for compatibility with _TapeGradientFunctions.
raise errors.InternalError("unexpectedly got forwardprop information in "
"a class that does not support forwardprop.")
return self._inference_function
def _backward(self, outputs):
"""Fetch a backward function for `outputs` from the forward function."""
def _backward_function(*args):
call_op = outputs[0].op
return self._rewrite_forward_and_call_backward(call_op, *args)
return _backward_function, outputs
def record(self, flat_outputs, inference_args, input_tangents):
"""Record the function call operation.
_DelayedRewriteGradientFunctions supports only first-order backprop tape
gradients (and then only when graph building). It does not work with
higher-order tape gradients or forward autodiff, but does work with
higher-order symbolic gradients (tf.gradients).
Args:
flat_outputs: The result of running `forward`.
inference_args: A flat list of Tensors with inference inputs to the
operation.
input_tangents: A flat list of Tensors with input tangents consumed by the
operation.
"""
backward_function, to_record = self._backward(flat_outputs)
tape.record_operation(self._inference_function.signature.name,
to_record, inference_args + input_tangents,
backward_function)
# Contains information about a forward function wrapped to compute jvps.
_ForwardWrapper = collections.namedtuple(
"_ForwardWrapper", (
# The wrapper Graph.
"graph",
# A flat list of non-tangent Tensor outputs from the wrapped forward
# function.
"outputs",
# Indices for output tangents, same format as
# forwardprop_util.pack_tangents.
"output_indices",
# A flat list of tangents for `outputs`.
"output_tangents"))
class _TapeGradientFunctions(object):
"""Caches forward and backward functions compatible with eager gradients.
In contrast to the delayed-rewrite approach in
`_DelayedRewriteGradientFunctions` which only works with delayed execution,
the forward function generated by this class has a fixed set of outputs which
may be preserved by a tape in order to compute gradients later.
This class is abstract; its child classes differ in how many side outputs of
the forward function their backward function accepts gradients for, which
determines whether higher-order tape gradients are possible.
"""
def __init__(self, func_graph, attrs, func_graph_deleter,
forwardprop_input_indices, delayed_rewrite_functions,
need_gradients_for_jvps):
self._func_graph = func_graph
self._forward_graph = None
self._attrs = attrs
self._forward = None
self._backward = None
self._num_outputs = len(func_graph.outputs)
self._func_graph_deleter = func_graph_deleter
self._forwardprop_input_indices = forwardprop_input_indices
self._forwardprop_output_indices = None
self._num_forwardprop_outputs = 0
self._num_inference_outputs = len(func_graph.outputs)
self._num_trainable_inference_outputs = len(
[t for t in func_graph.outputs if backprop_util.IsTrainable(t)])
self._delayed_rewrite_functions = delayed_rewrite_functions
self._need_gradients_for_jvps = need_gradients_for_jvps
def _build_functions_for_outputs(
self, outputs, inference_args, input_tangents):
"""Forward+backward functions where the backward function sees `outputs`."""
# First figure out which of `outputs` are trainable. We'll accept gradients
# for each of these in the backward function.
handles_to_variables = self._func_graph.variable_captures
trainable_outputs = []
trainable_indices = []
for index, output in enumerate(outputs):
if backprop_util.IsTrainable(output):
# Swap in the Variable object for resource handles if we can so
# sparse gradients work.
output = handles_to_variables.get(id(output), output)
trainable_outputs.append(output)
trainable_indices.append(index)
backwards_graph = func_graph_module.FuncGraph(
_backward_name(self._func_graph.name))
with backwards_graph.as_default():
gradients_wrt_outputs = []
for output in trainable_outputs:
gradient_shape, gradient_dtype = default_gradient.shape_and_dtype(
output)
gradient_placeholder = graph_placeholder(gradient_dtype, gradient_shape)
handle_data_util.copy_handle_data(output, gradient_placeholder)
gradients_wrt_outputs.append(gradient_placeholder)
with ops.device(None):
gradients_wrt_inputs = gradients_util._GradientsHelper( # pylint: disable=protected-access
trainable_outputs,
self._func_graph.inputs,
grad_ys=gradients_wrt_outputs,
src_graph=self._func_graph)
if input_tangents:
# Convert IndexedSlices to dense tensors (as we do elsewhere for
# function gradients). Our C++ bindings don't know how to handle them
# currently.
gradients_wrt_inputs = nest.map_structure(
lambda x: ops.convert_to_tensor(x) if x is not None else None,
gradients_wrt_inputs)
captures_from_forward = [
c for c in backwards_graph.external_captures
if not isinstance(c, ops.EagerTensor) and c.graph is self._func_graph
]
existing_outputs = object_identity.ObjectIdentitySet(
self._func_graph.outputs)
for capture in captures_from_forward:
if capture not in existing_outputs:
existing_outputs.add(capture)
self._func_graph.outputs.append(capture)
# The ordering of `backwards_graph.inputs` is important: inputs of
# `backward_function` correspond to outputs (including
# side outputs) of `self._tape_forward_function`.
backwards_graph.inputs = (
gradients_wrt_outputs + backwards_graph.internal_captures)
backwards_graph.outputs.extend(
grad
for grad in nest.flatten(gradients_wrt_inputs, expand_composites=True)
if grad is not None)
backwards_graph.structured_outputs = gradients_wrt_inputs
forward_function, backward_function = _create_forward_backward_with_graph(
self._attrs, self._func_graph, backwards_graph)
if not input_tangents:
# There is no need to special-case forwardprop, so we can return the
# forward+backward pair we've created without further wrapping.
return (forward_function, self._func_graph, backward_function,
# No forwardprop outputs.
None, 0)
forward_wrapper = self._wrap_forward_function_with_jvps(
forward_function, backward_function, inference_args, input_tangents)
(wrapped_backwards_graph,
forward_wrapper) = self._wrap_backward_function_with_jvp_backprop(
backward_function, gradients_wrt_outputs, forward_wrapper)
# Now that we've added new captures, we need to make sure forward outputs
# are in the same order the backward function expects them to be in:
# [inference outputs] + [jvps] + [side outputs] + [captures].
forward_wrapper = self._shuffle_forward_outputs(forward_wrapper)
(wrapped_forward_function,
wrapped_backward_function) = _create_forward_backward_with_graph(
self._attrs, forward_wrapper.graph, wrapped_backwards_graph)
if (len(inference_args) + len(input_tangents)
!= len(forward_wrapper.graph.inputs)):
raise errors.InternalError(
f"The forward graph had {len(forward_wrapper.graph.inputs)} inputs, "
f"but we expected {len(inference_args) + len(input_tangents)} "
f"({len(inference_args)} inference inputs and "
f"{len(input_tangents)} input tangents).")
return (wrapped_forward_function, forward_wrapper.graph,
wrapped_backward_function, forward_wrapper.output_indices,
len(forward_wrapper.output_tangents))
def _wrap_forward_function_with_jvps(
self, forward_function, backward_function,
inference_args, input_tangents):
"""Adds inline JVP computation to a forward function."""
forward_wrapper_graph = func_graph_module.FuncGraph(
_forward_name(self._func_graph.name))
with forward_wrapper_graph.as_default():
# Tell forward accumulators to free up space for new JVP computations,
# since one may be in the process of computing a JVP (if that computation
# triggered this function building).
#
# We'll make symbolic versions of input JVPs, run the forward function
# under forward accumulators to get symbolic output JVPs, then set those
# as outputs of the new wrapped forward function.
with forwardprop_util.push_forwardprop_state():
forward_captures = {
ops.tensor_id(internal): external
for external, internal in self._func_graph.captures}
for input_index, real_input in enumerate(self._func_graph.inputs):
# This loop is more or less equivalent to running tf.identity on each
# of self._func_graph.inputs. However, doing that also captures jvps
# for resource handles, which confuses the jvp capturing code below
# (since primal inputs are interwoven with jvp inputs).
input_placeholder = array_ops.placeholder(
dtype=real_input.dtype,
shape=real_input.shape)
capture = forward_captures.get(ops.tensor_id(real_input))
if capture is not None:
forward_wrapper_graph.add_capture(capture, input_placeholder)
if capture.dtype == dtypes.resource:
handle_data_util.copy_handle_data(capture, input_placeholder)
else:
forward_wrapper_graph.inputs.append(input_placeholder)
for inp, arg in zip(forward_wrapper_graph.inputs, inference_args):
tape.record_operation(
"captured_value", [inp], [arg],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
num_inference_inputs = len(inference_args)
for tape_indices in self._forwardprop_input_indices:
for input_index, jvp_index in tape_indices:
input_placeholder = forward_wrapper_graph.inputs[input_index]
if len(forward_wrapper_graph.inputs) != jvp_index:
raise errors.InternalError(
f"Expected {jvp_index} forward graph inputs, "
f"got {len(forward_wrapper_graph.inputs)}.")
gradient_shape, gradient_dtype = default_gradient.shape_and_dtype(
input_placeholder)
jvp_placeholder = graph_placeholder(gradient_dtype, gradient_shape)
external_jvp = input_tangents[jvp_index - num_inference_inputs]
forward_wrapper_graph.add_capture(external_jvp, jvp_placeholder)
tensor_shape.TensorShape(
external_jvp.shape).assert_is_compatible_with(
jvp_placeholder.shape)
tape.record_operation(
"captured_value",
[jvp_placeholder],
[external_jvp],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
forward_inputs = forward_wrapper_graph.inputs[:num_inference_inputs]
gradient_function = (
self._delayed_rewrite_functions._rewrite_forward_and_call_backward) # pylint: disable=protected-access
with ops.get_default_graph()._override_gradient_function( # pylint: disable=protected-access
{"PartitionedCall": gradient_function,
"StatefulPartitionedCall": gradient_function}):
forward_outputs = forward_function.call(context.context(),
forward_inputs)
if isinstance(forward_outputs, ops.Operation):
# _wrapped_backward_function expects a list, but if the function has
# no outputs its call() returns an Operation. We need to undo that
# so we don't cause problems later.
forward_outputs = []
py_backward, _ = self._wrap_backward_function(
self._func_graph, backward_function, forward_outputs)
# We will never request backward tape gradients for this operation
# directly since we're wrapping the call; forwardprop will call the
# backward function (and nested forward accumulators may build
# higher-order gradients), but any watching GradientTapes should ignore
# it.
#
# TODO(allenl): It might be better to explicitly stop backward recording
# so we don't use the second-order tape cases unnecessarily.
tape.record_operation_forwardprop_only(
forward_function.signature.name,
forward_outputs, forward_inputs, py_backward, None)
output_indices, output_tangents = (
pywrap_tfe.TFE_Py_PackJVPs(forward_outputs))
output_tangents = [forward_wrapper_graph.capture(t)
for t in output_tangents]
return _ForwardWrapper(
graph=forward_wrapper_graph, outputs=forward_outputs,
output_indices=output_indices, output_tangents=output_tangents)
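  # A short sketch of where input tangents come from in practice. The public
  # forward-mode API is tf.autodiff.ForwardAccumulator; `f`, `x`, and `acc`
  # are hypothetical names.
  #
  #   @tf.function
  #   def f(x):
  #     return tf.sin(x)
  #
  #   x = tf.constant(1.0)
  #   with tf.autodiff.ForwardAccumulator(primals=x,
  #                                       tangents=tf.constant(1.0)) as acc:
  #     y = f(x)
  #   acc.jvp(y)   # cos(1.0); produced via the JVP-wrapped forward function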
def _wrap_backward_function_with_jvp_backprop(
self, backward_function, gradients_wrt_outputs, forward_wrapper):
"""Wraps `backward_function` to include gradients for JVPs."""
wrapped_backwards_graph = func_graph_module.FuncGraph(
_backward_name(self._func_graph.name))
with wrapped_backwards_graph.as_default():
py_backward, recorded_outputs = self._wrap_backward_function(
self._func_graph, backward_function, forward_wrapper.outputs)
trainable_index = 0
forward_doutputs = []
doutput_args = []
for output in recorded_outputs:
if backprop_util.IsTrainable(output):
doutput = gradients_wrt_outputs[trainable_index]
doutput_placeholder = graph_placeholder(doutput.dtype, doutput.shape)
doutput_args.append(doutput_placeholder)
forward_doutputs.append(doutput_placeholder)
trainable_index += 1
else:
doutput_args.append(None)
dinputs = py_backward(*doutput_args)
existing_outputs = object_identity.ObjectIdentitySet(
forward_wrapper.outputs + forward_wrapper.output_tangents)
num_processed_output_tangents = 0
gradients_wrt_output_tangents = []
tangent_doutputs = []
output_tangents = forward_wrapper.output_tangents
output_indices = forward_wrapper.output_indices
if self._need_gradients_for_jvps:
# TODO(allenl): Consider using a throwaway graph to avoid extra gradient
# evaluations; gradients for jvps may have common subgraphs.
while num_processed_output_tangents != len(output_tangents):
for output in output_tangents[num_processed_output_tangents:]:
gradient_shape, gradient_dtype = default_gradient.shape_and_dtype(
output)
placeholder = graph_placeholder(gradient_dtype, gradient_shape)
gradients_wrt_output_tangents.append(placeholder)
tangent_doutputs.append(placeholder)
num_processed_output_tangents = len(output_tangents)
with ops.device(None):
gradients_wrt_inputs = gradients_util._GradientsHelper( # pylint: disable=protected-access
output_tangents,
forward_wrapper.graph.inputs,
grad_ys=gradients_wrt_output_tangents,
src_graph=forward_wrapper.graph)
dinputs = [
backprop.aggregate_indexed_slices_gradients((existing, new))
for existing, new in zip(dinputs, gradients_wrt_inputs)
if existing is not None or new is not None]
dinputs.extend(gradients_wrt_inputs[len(dinputs):])
captures_from_forward = [
c for c in wrapped_backwards_graph.external_captures
if (not isinstance(c, ops.EagerTensor)
and c.graph is forward_wrapper.graph)]
for capture in captures_from_forward:
if capture not in existing_outputs:
existing_outputs.add(capture)
forward_wrapper.outputs.append(capture)
output_indices, output_tangents = (
forwardprop_util.pack_tangents(forward_wrapper.outputs))
output_tangents = [forward_wrapper.graph.capture(t)
for t in output_tangents]
for t in output_tangents:
existing_outputs.add(t)
wrapped_backwards_graph.inputs = (
forward_doutputs[:self._num_trainable_inference_outputs]
+ tangent_doutputs
+ forward_doutputs[self._num_trainable_inference_outputs:]
+ wrapped_backwards_graph.internal_captures)
wrapped_backwards_graph.structured_outputs = dinputs
wrapped_backwards_graph.outputs = [t for t in dinputs if t is not None]
return (wrapped_backwards_graph,
forward_wrapper._replace(output_indices=output_indices,
output_tangents=output_tangents))
def _shuffle_forward_outputs(self, forward_wrapper):
"""Reorders function outputs so captures are last."""
def _index_map(original):
if original < self._num_inference_outputs:
return original
if original >= len(forward_wrapper.outputs):
return (original - len(forward_wrapper.outputs)
+ self._num_inference_outputs)
return original + len(forward_wrapper.output_tangents)
output_indices = nest.map_structure(
_index_map, forward_wrapper.output_indices)
forward_wrapper.graph.outputs = (
forward_wrapper.outputs[:self._num_inference_outputs]
+ forward_wrapper.output_tangents
+ forward_wrapper.outputs[self._num_inference_outputs:])
return forward_wrapper._replace(output_indices=output_indices)
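  # Worked example of the reordering above, assuming 2 inference outputs, 1
  # side output, and 2 output tangents (all values illustrative):
  #
  #   before shuffle: [out0, out1, side0, jvp0, jvp1]   (indices 0..4)
  #   after shuffle:  [out0, out1, jvp0, jvp1, side0]
  #
  #   _index_map(0) -> 0, _index_map(1) -> 1       (inference outputs stay put)
  #   _index_map(3) -> 2, _index_map(4) -> 3       (tangents move forward)
  #   _index_map(2) -> 4                           (side output moves last)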
def forward(self, inference_args, input_tangents):
"""Construct or fetch a forward function with side-outputs.
When graph building without a tape active, symbolic gradients rely on
regenerating the backward function for higher-order gradients (to account
for new side outputs of the rewritten forward function call). Thus there is
no fixed backward function for this case. However, when a tape is active
(eager or graph building), we generate fixed backward and forward functions
at forward function call time.
This difference between the tape and non-tape cases is to avoid building
unneeded backward functions while graph building (where we may or may not
eventually need gradients).
Args:
inference_args: A flat list of Tensors, arguments to the inference
function.
input_tangents: A flat list of Tensors, jvps associated with
`inference_args`.
Returns:
A forward _EagerDefinedFunction.
"""
if self._forward is None:
(self._forward, self._forward_graph, self._backward,
self._forwardprop_output_indices, self._num_forwardprop_outputs) = (
self._forward_and_backward_functions(inference_args, input_tangents))
return self._forward
def _wrap_backward_function(self, forward_graph, backward, outputs):
"""Create a backward function given `outputs` from the forward function."""
capture_mapping = dict(
zip((ops.tensor_id(t) for t in forward_graph.outputs), outputs))
captured_inputs = backward.captured_inputs
remapped_captures = [
capture_mapping.get(ops.tensor_id(capture), capture)
for capture in captured_inputs
]
if any(t.graph is forward_graph for t in remapped_captures
if not isinstance(t, ops.EagerTensor)):
incorrect_mapping = [t for t in remapped_captures
if (not isinstance(t, ops.EagerTensor) and
t.graph is not forward_graph)]
raise errors.InternalError("Failed to map all backward graph captures to "
"the forward graph. Incorrectly mapped: "
f"{incorrect_mapping}.")
# We may need to use zeros_like to get a zero for variant Tensors with
# unconnected gradients. We do that in advance so we don't have to hold on
# to the outputs themselves, which may not be needed otherwise.
variant_zeros_like = {}
backward_function_inputs = (len(backward.inputs) - len(captured_inputs))
recorded_outputs = []
trainable_recorded_outputs = 0
skip_positions = []
if self._num_forwardprop_outputs and not self._need_gradients_for_jvps:
relevant_outputs = (
outputs[:self._num_inference_outputs]
+ outputs[self._num_inference_outputs
+ self._num_forwardprop_outputs:])
else:
relevant_outputs = outputs
for output_index, output in enumerate(relevant_outputs):
if trainable_recorded_outputs < backward_function_inputs:
recorded_outputs.append(output)
if backprop_util.IsTrainable(output):
trainable_recorded_outputs += 1
else:
skip_positions.append(output_index)
if output.dtype == dtypes.variant:
variant_zeros_like[output_index] = default_gradient.zeros_like(output)
def _backward_function_wrapper(*args):
"""Process output gradients and call the backward function."""
if not backward.outputs:
return backward.structured_outputs
processed_args = []
input_index = 0
for output_index, arg in enumerate(args):
# Convert IndexedSlices to dense tensors. The IndexedSlices optimization
# is only really effective when doing tf.gather(variable) as the
# adjoint functions for most operations are unlikely to preserve the
# sparsity in IndexedSlices.
if isinstance(arg, ops.IndexedSlices):
arg = ops.convert_to_tensor(arg)
if output_index in skip_positions:
continue
if arg is None:
# We're calling a (non-polymorphic) ConcreteFunction, so we need to
# have a Tensor value for each Tensor we thought would be trainable
# based on its dtype, even if it ended up being unconnected.
input_placeholder = backward.inputs[
input_index]
if input_placeholder.dtype == dtypes.variant:
arg = variant_zeros_like[output_index]
else:
arg = array_ops.zeros(
*default_gradient.shape_and_dtype(input_placeholder))
processed_args.append(arg)
input_index += 1
if input_index >= backward_function_inputs:
break
return backward._call_flat( # pylint: disable=protected-access
processed_args, remapped_captures)
return _backward_function_wrapper, recorded_outputs
def record(self, flat_outputs, inference_args, input_tangents):
"""Record the function call operation.
For backprop, indicates the backward function to use and which new Tensors
must be watched. For forwardprop from eager, the function call itself will
have produced tangents which need to be recorded.
Args:
flat_outputs: The result of running `forward`.
inference_args: A flat list of Tensors with inference inputs to the
operation.
input_tangents: A flat list of Tensors with input tangents consumed by the
operation.
"""
backward_function, to_record = self._wrap_backward_function(
self._forward_graph, self._backward, flat_outputs)
if self._forwardprop_output_indices:
tape.record_operation_backprop_only(
self._forward.signature.name,
to_record, inference_args,
backward_function)
tape.record_operation_forwardprop_only(
self._forward.signature.name,
flat_outputs, inference_args + input_tangents,
backward_function,
self._forwardprop_output_indices)
else:
tape.record_operation(self._forward.signature.name,
to_record, inference_args + input_tangents,
backward_function)
class _FirstOrderTapeGradientFunctions(_TapeGradientFunctions):
"""Caches tape-friendly functions for first-order gradients."""
def __init__(self, func_graph, attrs, func_graph_deleter,
forwardprop_input_indices, delayed_rewrite_functions,
need_gradients_for_jvps):
super(_FirstOrderTapeGradientFunctions, self).__init__(
func_graph, attrs, func_graph_deleter, forwardprop_input_indices,
delayed_rewrite_functions, need_gradients_for_jvps)
self._func_graph_deleter = func_graph_deleter
self._forwardprop_input_indices = forwardprop_input_indices
def _forward_and_backward_functions(self, inference_args, input_tangents):
"""Shortcut for when only first-order gradients are required.
The returned backward function does not accept gradients with respect to
    side outputs of forward_function. This is fine as long as the user can't
    possibly request second-order tape gradients, as when they've used a single
non-persistent GradientTape. Since we don't need the backward function to
take gradients with respect to side outputs, we can skip some potentially
slow graph building.
Args:
inference_args: A flat list of Tensors, arguments to the inference
function.
input_tangents: A flat list of Tensors, jvps associated with
`inference_args`.
Returns:
A tuple of (forward_function, backward_function):
forward_function: Takes the same inputs as the inference function, but
returns side outputs used by backward_function in addition to the
inference function's outputs.
backward_function: Takes side outputs from forward_function and
gradients with respect to the "real" outputs of forward_function and
returns gradients with respect to the inputs.
"""
outputs = self._func_graph.outputs[:self._num_inference_outputs]
return self._build_functions_for_outputs(
outputs, inference_args, input_tangents)
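  # A minimal sketch of the situation this shortcut targets: a single
  # non-persistent GradientTape, so only first-order gradients can ever be
  # requested (`f` and `x` are hypothetical names):
  #
  #   @tf.function
  #   def f(x):
  #     return x * x
  #
  #   x = tf.constant(3.0)
  #   with tf.GradientTape() as tape:
  #     tape.watch(x)
  #     y = f(x)
  #   tape.gradient(y, x)   # 6.0; the backward function never needs gradients
  #                         # with respect to forward side outputs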
class _HigherOrderTapeGradientFunctions(_TapeGradientFunctions):
"""Caches tape-friendly functions for higher-order gradients."""
# TODO(b/136189779): Cond/while under a tape may need similar logic. Consider
# generalizing if so.
def _forward_and_backward_functions(self, inference_args, input_tangents):
"""Forward and backward functions suitable for higher-order gradients.
Unlike in `_FirstOrderTapeGradientFunctions`, the backward function built by
this method accepts gradients for all of the outputs of the returned forward
function, including side outputs.
Args:
inference_args: A flat list of Tensors, arguments to the inference
function.
input_tangents: A flat list of Tensors, jvps associated with
`inference_args`.
Returns:
A tuple of (forward_function, backward_function):
forward_function: Takes the same inputs as the inference function, but
returns side outputs used by backward_function in addition to the
inference function's outputs.
backward_function: Takes side outputs from forward_function and
gradients with respect to all of its outputs, real and side. Returns
gradients with respect to the inputs.
"""
outputs = []
iteration_count = 0
# First we need to figure out how many side outputs from the forward pass
# will be required. We do this in a temporary graph to avoid actually
# running multiple copies of the backward pass (one per _GradientsHelper
# call).
#
# While computing gradients, the backward function captures Tensors from
# the forward function. We add these as side outputs of the original
# function. However, we then need to accept output gradients with respect
# to these side outputs for higher order gradients to work. Thus we loop
# until the number of outputs of the function stabilizes. Note that this
# is only required for tape gradients, where we need to declare in advance
# all of the forward op's outputs: symbolic gradients with tf.gradients
# instead rely on regenerating backward functions when higher-order
# gradients are requested.
while (len(outputs) < len(self._func_graph.outputs)
# It's possible for gradient generation to add new ops to the forward
# pass. If all of the new outputs are non-trainable, there's no
# reason to continue.
and any(backprop_util.IsTrainable(output)
for output in self._func_graph.outputs[len(outputs):])):
iteration_count += 1
if iteration_count >= 20 and iteration_count % 5 == 0:
new_op_with_trainable_output = None
num_new_trainable_outputs = 0
for output in self._func_graph.outputs[len(outputs):]:
if backprop_util.IsTrainable(output):
num_new_trainable_outputs += 1
new_op_with_trainable_output = output.op
logging.warning(
("Determining side outputs for the function '{}' is taking longer "
"than expected ({} iterations, typically this converges in 5 or "
"so). This could indicate that a gradient registration is adding "
"new ops to the forward pass every time gradients are generated. "
"{} new trainable output(s) were added this iteration, one from "
"the following op:\n {}\nThis may indicate a TensorFlow bug, or "
"an issue in a tf.custom_gradient.")
.format(
self._func_graph.name, iteration_count,
num_new_trainable_outputs, new_op_with_trainable_output))
outputs = list(self._func_graph.outputs)
self._build_functions_for_outputs(
outputs, inference_args, input_tangents)
(forward_function, forward_graph,
backward_function, output_indices, num_output_tangents) = (
self._build_functions_for_outputs(
outputs, inference_args, input_tangents))
if (len(self._func_graph.outputs) > len(outputs)
and any(backprop_util.IsTrainable(output)
for output in self._func_graph.outputs[len(outputs):])):
raise errors.InternalError(
"Unexpectedly added new outputs to the forward function when "
"building the backward function: "
f"{self._func_graph.outputs[len(outputs):]}.")
return (forward_function, forward_graph, backward_function, output_indices,
num_output_tangents)
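  # A minimal sketch of a use that needs this variant: nested GradientTapes
  # requesting higher-order gradients of a tf.function (`f` and `x` are
  # hypothetical names):
  #
  #   @tf.function
  #   def f(x):
  #     return x ** 3
  #
  #   x = tf.constant(2.0)
  #   with tf.GradientTape() as outer:
  #     outer.watch(x)
  #     with tf.GradientTape() as inner:
  #       inner.watch(x)
  #       y = f(x)
  #     dy = inner.gradient(y, x)    # 3 * x**2 = 12.0
  #   outer.gradient(dy, x)          # 6 * x = 12.0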
class _ForwardBackwardCall(object):
"""Holds the state of a function call between execution and recording."""
__slots__ = [
"_functions", "_inference_args", "_input_tangents", "_tape_watching"
]
def __init__(self, functions, inference_args, input_tangents, tape_watching):
"""Collects information about the function call.
Args:
functions: An object which produces forward and backward functions, either
a _DelayedRewriteGradientFunctions or a _TapeGradientFunctions object.
inference_args: A flat list of Tensors, arguments to the inference
function.
input_tangents: A flat list of Tensors, jvps associated with
`inference_args`.
tape_watching: Boolean, with True indicating that recording is necessary.
"""
self._functions = functions
self._inference_args = inference_args
self._input_tangents = input_tangents
self._tape_watching = tape_watching
def forward(self):
"""Builds or retrieves a forward function for this call."""
forward_function = self._functions.forward(
self._inference_args, self._input_tangents)
return forward_function, self._inference_args + self._input_tangents
def record(self, flat_outputs):
"""Given outputs from the execution of `forward`, records the operation."""
if (self._tape_watching
and not isinstance(flat_outputs, ops.Operation)
and flat_outputs is not None):
# We only record function calls which have outputs, and then only when a
# tape is watching.
self._functions.record(
flat_outputs, self._inference_args, self._input_tangents)
# Sentinel value used by ConcreteFunction's structured signature to
# indicate that a non-tensor parameter should use the value that was
# specified when the concrete function was created.
_BOUND_VALUE = object()
class ConcreteFunction(core.ConcreteFunction):
"""A `tf.types.experimental.ConcreteFunction` created from `tf.function`."""
def __init__(self,
func_graph,
attrs=None,
shared_func_graph=True,
function_spec=None):
"""Initialize a `ConcreteFunction`.
Args:
func_graph: An instance of FuncGraph: the function body to wrap.
attrs: (optional) dict mapping names of attributes to their AttrValue
values. Attributes in `attrs` will be included in this function's
definition.
shared_func_graph: If False, the ConcreteFunction takes ownership of
`func_graph` and will break reference cycles when it is deleted. This
makes the FuncGraph inoperable.
function_spec: FunctionSpec for the original function. If not specified,
then this ConcreteFunction may only be called using the flat signature.
Raises:
ValueError: If number of input_placeholders is not equal to the number
of function inputs.
"""
# _arg_keywords and _num_positional_args define the flat signature. They
# are assigned after construction.
self._arg_keywords = None
self._num_positional_args = None
self._func_graph = func_graph
self._captured_inputs = self._func_graph.external_captures
self._captured_closures = self._func_graph.deferred_external_captures
# function_spec defines the structured signature.
self._set_function_spec(function_spec)
if attrs and IMPLEMENTS_ATTRIBUTE_NAME in attrs:
      # The alternative is to silently drop the "implements" tag,
      # but that seems likely to lead to hard-to-catch bugs.
      # Another alternative is to make func_body preserve the order
      # of arguments if variables are present. Yet another option
      # is to automatically replace variables passed as arguments to functions
      # with v.read_value() whenever the "implements" tag is present.
      # Any time we annotate an existing function we probably want to wrap
      # it with a safe read_value for backward compatibility.
has_resource_vars = any(inp.dtype == dtypes.resource
for inp in self.inputs)
assert not any(
(has_resource_vars, self._captured_inputs, self._captured_closures)
      ), ('Function {name} has the "{attr}={value}" attribute and thus cannot '
"depend on any tensors outside of its signature or modify variables. "
"\n\nNote: variables are always captured and cause function "
"re-tracing for every variable called.\n"
" inputs: {inputs}\n captures: {captured}\n"
" closures: {closures}.\n\n"
"To pass a variable to such function use "
"use variable.read_value().".format(
name=func_graph.name,
attr=IMPLEMENTS_ATTRIBUTE_NAME,
value=attrs[IMPLEMENTS_ATTRIBUTE_NAME],
inputs=self.inputs,
captured=self._captured_inputs,
closures=self._captured_closures))
self._output_shapes = tuple(
output.shape for output in self._func_graph.outputs)
self._attrs = _parse_func_attrs(attrs or {})
if shared_func_graph:
self._garbage_collector = None
else:
self._garbage_collector = ConcreteFunctionGarbageCollector(func_graph)
# Pairs of forward and backward functions used for computing gradients.
#
# These each get a reference to the FuncGraph deleter since they use the
# FuncGraph directly.
self._delayed_rewrite_functions = _DelayedRewriteGradientFunctions(
func_graph, self._attrs, self._garbage_collector)
self._first_order_tape_functions = {}
self._higher_order_tape_functions = {}
# Cache the inference function to avoid a (Python) function call when not
# building gradients.
self._inference_function = self._delayed_rewrite_functions.forward()
def _set_function_spec(self, function_spec):
"""Enables the structured signature by supplying a function_spec."""
self._function_spec = None
self._pre_initialized_function_spec = function_spec
# Note: when ConcreteFunctions are built by recreate_function() in
# function_deserialization.py, they don't have a structured_input_signature
# yet. In that case, _initialize_function_spec() gets called by
# _setup_functions_structures() in load.py.
if (function_spec is not None and
self.structured_input_signature is not None):
self._initialize_function_spec()
def _initialize_function_spec(self):
"""Updates `self._function_spec` to include varargs and bound variables.
Adds new positional arguments for any varargs (i.e., for args that are
in `structured_input_signature`, but not in the original fullargspec.args).
Replaces `defaults` and `kwonlydefaults` with the `_BOUND_VALUE`, for
all args and kwargs in `structured_input_signature`.
Sets `varkw` and `varargs` to None.
"""
if self._pre_initialized_function_spec is None:
return # e.g., SavedBareConcreteFunction doesn't have function_spec yet.
assert not self._function_spec, "already initialized"
function_spec = self._pre_initialized_function_spec
args = function_spec.fullargspec.args
arg_specs, kwarg_specs = self.structured_input_signature
vararg_indices = range(len(function_spec.arg_names), len(arg_specs))
fullargspec = tf_inspect.FullArgSpec(
args=list(args) + ["<arg{}>".format(i + 1) for i in vararg_indices],
varargs=None,
varkw=None,
defaults=[_BOUND_VALUE] * len(arg_specs),
kwonlyargs=list(sorted(kwarg_specs)),
kwonlydefaults=dict((k, _BOUND_VALUE) for k in kwarg_specs),
annotations=function_spec.fullargspec.annotations)
self._function_spec = FunctionSpec(
fullargspec,
function_spec.is_method,
function_spec.input_signature,
function_spec.is_pure,
name=self._func_graph.name)
@property
def variables(self):
"""Sequence of variables for this function."""
return tuple(self._func_graph.variables)
@property
def trainable_variables(self):
"""Sequence of trainable variables for this function."""
return tuple(self._func_graph.trainable_variables)
def __call__(self, *args, **kwargs):
"""Executes the wrapped function.
ConcreteFunctions have two signatures:
* The signature of the original function wrapped by this ConcreteFunction.
* A flat signature, where each argument accepts a single Tensor.
The original function signature is generally preferred, but the flat input
signature is supported for backward compatibility.
### Original Function Signature
When calling a ConcreteFunction with the signature of the original function,
each argument must match the type or value that was used when the
ConcreteFunction's graph was traced. In particular:
* Tensor arguments (including CompositeTensors, such as RaggedTensor) must
have matching `TypeSpec`s.
* Non-Tensor arguments (such as booleans or ints) must have equal values.
* Nested arguments (such as lists, tuples, or dictionaries) must have the
same nesting structure; and each nested value must have a matching type
or value.
The default value for any arguments that were traced with non-Tensor values
is the value that was used in the trace. Arguments that were traced with
tensor arguments do not have a default value (even if the original function
had a default value for that argument).
### Flat Signature
When calling a ConcreteFunction with the flat signature, the arguments
correspond to the flattened component tensors of the arguments that were
used to construct the ConcreteFunction. Parameter names are assigned based
on `TensorSpec.name` (when specified) or the original argument names (with
suffixes automatically added for nested arguments or composite tensors with
multiple components).
Args:
*args: Positional arguments to the concrete function.
**kwargs: Keyword arguments to the concrete function.
Returns:
The result of applying the TF function on the given Tensors.
Raises:
AssertionError: If this `ConcreteFunction` was not created through
`get_concrete_function`.
TypeError: If the arguments do not match the function's signature.
"""
return self._call_impl(args, kwargs)
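  # A brief sketch of the two calling conventions described in the docstring
  # above (`f`, `cf`, and the example values are hypothetical; tf.function,
  # get_concrete_function, and tf.TensorSpec are public APIs):
  #
  #   @tf.function
  #   def f(x, n):
  #     return x * n
  #
  #   cf = f.get_concrete_function(tf.TensorSpec([None], tf.float32), n=3)
  #
  #   # Structured signature: Tensor args must match the traced TypeSpecs;
  #   # the non-Tensor arg `n` defaults to the traced value 3.
  #   cf(tf.constant([1.0, 2.0]))     # -> [3.0, 6.0]
  #
  #   # Flat signature (used as a fallback): one Tensor per flattened
  #   # component, keyed by argument name.
  #   cf(x=tf.constant([1.0, 2.0]))   # -> [3.0, 6.0]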
def _call_impl(self, args, kwargs, cancellation_manager=None):
"""See `__call__` for details."""
with trace.Trace(self._func_graph.name, tf_function_call="concrete"):
# Construct the list of input tensors: check if the structured signature
# applies first; and if not, then use the flat signature.
if self._function_spec is not None:
try:
return self._call_with_structured_signature(args, kwargs,
cancellation_manager)
except TypeError as structured_err:
try:
return self._call_with_flat_signature(args, kwargs,
cancellation_manager)
except TypeError:
raise structured_err
return self._call_with_flat_signature(args, kwargs, cancellation_manager)
def _call_with_flat_signature(self, args, kwargs, cancellation_manager):
"""Executes the wrapped function with the flat signature.
Args:
args: Positional arguments to the concrete function.
kwargs: Keyword arguments to the concrete function.
cancellation_manager: A `CancellationManager` that can be used to cancel
function invocation.
Returns:
The result of applying the function on the Tensors/Variables contained in
`args` and `kwargs`.
Raises:
TypeError: if `args` and `kwargs` do not match the flat signature of this
`ConcreteFunction`.
"""
if len(args) > self._num_positional_args:
raise TypeError(
f"{self._flat_signature_summary()} takes {self._num_positional_args} "
f"positional arguments, got {len(args)}.")
args = list(args)
kwargs = dict(kwargs)
for keyword in self._arg_keywords[len(args):]:
try:
args.append(kwargs.pop(compat.as_str(keyword)))
except KeyError:
specified_keywords = (
list(self._arg_keywords[:len(args)]) + list(kwargs.keys()))
missing_required_args = sorted(
set(self._arg_keywords) - set(specified_keywords))
raise TypeError(f"{self._flat_signature_summary()} missing required "
f"arguments: {', '.join(missing_required_args)}.")
if kwargs:
positional_arg_keywords = set(self._arg_keywords[:len(args)])
for unused_key in kwargs:
if unused_key in positional_arg_keywords:
raise TypeError(f"{self._flat_signature_summary()} got two values "
f"for '{unused_key}'.")
raise TypeError(f"{self._flat_signature_summary()} got unexpected "
f"keyword arguments: {', '.join(sorted(kwargs))}.")
for i, arg in enumerate(args):
if not isinstance(
arg, (ops.Tensor, resource_variable_ops.BaseResourceVariable)):
raise TypeError(f"{self._flat_signature_summary()}: expected argument "
f"#{i}(zero-based) to be a Tensor; "
f"got {type(arg).__name__} ({arg}).")
return self._call_flat(args, self.captured_inputs, cancellation_manager)
def _call_with_structured_signature(self, args, kwargs, cancellation_manager):
"""Executes the wrapped function with the structured signature.
Args:
args: Positional arguments to the concrete function.
kwargs: Keyword arguments to the concrete function.
cancellation_manager: A `CancellationManager` that can be used to cancel
function invocation.
Returns:
The result of applying the function on the Tensors/Variables contained in
`args` and `kwargs`.
Raises:
TypeError: if `args` and `kwargs` do not match the structured signature
of this `ConcreteFunction`.
"""
args, kwargs, _, filtered_flat_args = \
self._function_spec.canonicalize_function_inputs(*args, **kwargs)
self._structured_signature_check_missing_args(args, kwargs)
self._structured_signature_check_unexpected_args(args, kwargs)
self._structured_signature_check_arg_types(args, kwargs)
return self._call_flat(
filtered_flat_args,
captured_inputs=self.captured_inputs,
cancellation_manager=cancellation_manager)
def _structured_signature_check_missing_args(self, args, kwargs):
"""Raises a TypeError if any args are missing."""
arg_specs, kwarg_specs = self.structured_input_signature
missing_arguments = []
for i, (arg, spec) in enumerate(zip(args, arg_specs)):
if arg is _BOUND_VALUE and _contains_type_spec(spec):
missing_arguments.append(self._function_spec.arg_names[i])
for (name, arg) in kwargs.items():
if arg is _BOUND_VALUE and _contains_type_spec(kwarg_specs[name]):
missing_arguments.append(name)
if missing_arguments:
raise TypeError(f"{self._structured_signature_summary()} missing "
"required arguments: "
f"{', '.join(sorted(missing_arguments))}.")
def _structured_signature_check_unexpected_args(self, args, kwargs):
"""Raises a TypeError if there are any extra args."""
arg_specs, kwarg_specs = self.structured_input_signature
if len(args) > len(arg_specs):
raise TypeError(
f"{self._structured_signature_summary()} takes "
f"{len(self._function_spec.arg_names)} positional arguments but got "
f"{len(args)}.")
if len(kwargs) > len(kwarg_specs):
extra_args = set(kwargs) - set(kwarg_specs)
raise TypeError(f"{self._structured_signature_summary()} got unexpected "
f"keyword arguments: {', '.join(extra_args)}.")
def _structured_signature_check_arg_types(self, args, kwargs):
"""Raises a TypeError if any args have the wrong type."""
# Check argument types
arg_specs, kwarg_specs = self.structured_input_signature
for i, (arg, spec) in enumerate(zip(args, arg_specs)):
name = self._function_spec.arg_names[i]
self._structured_signature_check_arg_type(arg, spec, name)
for (name, arg) in kwargs.items():
self._structured_signature_check_arg_type(arg, kwarg_specs[name], name)
def _structured_signature_check_arg_type(self, arg, spec, name):
"""Raise TypeError if `arg`'s type doesn't match `spec`."""
if arg is _BOUND_VALUE:
return
# Check the overall nested structure of the argument.
try:
nest.assert_same_structure(arg, spec, expand_composites=True)
except (ValueError, TypeError):
try:
nest.assert_same_structure(arg, spec, expand_composites=False)
expected, got = spec, arg
except (ValueError, TypeError):
expected, got = _structure_summary(spec), _structure_summary(arg)
raise TypeError(f"{self._structured_signature_summary()}: argument "
f"{name} had incorrect type\n"
f" expected: {expected}\n"
f" got: {got}")
# Check the type for each leaf in the nested structure.
arg_pieces = nest.flatten(arg, expand_composites=True)
spec_pieces = nest.flatten(spec, expand_composites=True)
for (arg_piece, spec_piece) in zip(arg_pieces, spec_pieces):
# TODO(mdan): Use consistent error messages.
if isinstance(spec_piece, tensor_spec.DenseSpec):
# TODO(edloper): Consider calling convert_to_tensor on non-tensor
# values here. That would match the behavior of
# _call_concrete_function() in function_deserialization.py. If
# we do, then we need to change the nest assert_same_structure and
# flatten calls above to use shallow variants.
tensor_types = (ops.Tensor, resource_variable_ops.BaseResourceVariable)
if not isinstance(arg_piece, tensor_types):
raise TypeError(f"{self._structured_signature_summary()} expected a "
f"Tensor in {name}, but got "
f"{type(arg_piece).__name__} value {arg_piece}.")
elif arg_piece is not _BOUND_VALUE:
try:
arg_matches_spec = bool(arg_piece == spec_piece)
except (ValueError, TypeError):
logging.vlog(1, "Error matching value with spec", exc_info=True)
arg_matches_spec = False
if not arg_matches_spec:
raise TypeError(
f"ConcreteFunction {self._structured_signature_summary()} was "
f"constructed with {type(spec_piece).__name__} value "
f"{spec_piece} in {name}, but was called with "
f"{type(arg_piece).__name__} value {arg_piece}.")
def _call_flat(self, args, captured_inputs, cancellation_manager=None):
"""Executes the wrapped function.
Args:
args: a list of Tensors or Variables. Arguments from the Python function
should be filtered before calling this method: objects aside from
Tensors, CompositeTensors, and Variables are ignored. Any
CompositeTensors should be expanded before calling this method.
captured_inputs: the captured inputs that are also part of the input args
to the actual execution. By default, it should be self._captured_inputs.
cancellation_manager: (Optional.) A `CancellationManager` that can be
used to cancel function invocation.
Returns:
The result of applying the TF function to `args`.
Raises:
ValueError: If `args` contains anything other than Tensors or Variables.
"""
ctx = context.context()
executing_eagerly = ctx.executing_eagerly()
# Copy saveable status of function's graph to current FuncGraph.
default_graph = ops.get_default_graph()
if default_graph.building_function and not self._func_graph.saveable:
default_graph.mark_as_unsaveable(self._func_graph.saving_errors)
if (tape.could_possibly_record() or
hasattr(default_graph, "watch_variable")):
for v in self._func_graph.variables:
resource_variable_ops.variable_accessed(v)
tensor_inputs = []
variables_used = set([])
for i, arg in enumerate(args):
if isinstance(arg, resource_variable_ops.BaseResourceVariable):
# We can pass a variable more than once, and in this case we need to
# pass its handle only once.
if id(arg.handle) in variables_used:
continue
resource_variable_ops.variable_accessed(arg)
tensor_inputs.append(arg.handle)
variables_used.add(id(arg.handle))
elif isinstance(arg, ops.Tensor):
tensor_inputs.append(arg)
if not executing_eagerly:
# If we're graph building, shape inference is on. We check for input
# compatibility up front to avoid hard to debug incompatibilities
# later.
graph_input_shape = tensor_shape.TensorShape(
self._func_graph.inputs[i].shape)
if not graph_input_shape.is_compatible_with(arg.shape):
if self._arg_keywords:
arg_name = "'{}'".format(self._arg_keywords[i])
else:
arg_name = "with index {}".format(i)
raise ValueError(
f"The argument {arg_name} (value {arg}) is not compatible with "
"the shape this function was traced with. Expected shape "
f"{self._func_graph.inputs[i].shape}, but got shape "
f"{arg.shape}.\n\nIf you called get_concrete_function, you may "
"need to pass a tf.TensorSpec(..., shape=...) with a less "
"specific shape, having None on axes which can vary.")
else:
raise ValueError(f"{i:d}-th input {arg} must be a Tensor, got "
f"{type(arg)} when calling {self._func_graph.name}.")
args = tensor_inputs + captured_inputs
possible_gradient_type = gradients_util.PossibleTapeGradientTypes(args)
if (possible_gradient_type == gradients_util.POSSIBLE_GRADIENT_TYPES_NONE
and executing_eagerly):
# No tape is watching; skip to running the function.
return self._build_call_outputs(self._inference_function.call(
ctx, args, cancellation_manager=cancellation_manager))
forward_backward = self._select_forward_and_backward_functions(
args,
possible_gradient_type,
executing_eagerly)
forward_function, args_with_tangents = forward_backward.forward()
if executing_eagerly:
flat_outputs = forward_function.call(
ctx, args_with_tangents, cancellation_manager=cancellation_manager)
else:
with default_graph._override_gradient_function( # pylint: disable=protected-access
{"PartitionedCall": self._get_gradient_function(),
"StatefulPartitionedCall": self._get_gradient_function()}):
flat_outputs = forward_function.call(ctx, args_with_tangents)
forward_backward.record(flat_outputs)
return self._build_call_outputs(flat_outputs)
def _experimental_with_cancellation_manager(self, cancellation_manager):
"""Returns a callable that invokes a cancellable version of this function.
Args:
cancellation_manager: A `CancellationManager` object that can be used to
cancel function invocation.
Returns:
A callable with the same signature as this concrete function.
"""
def cancellable_call(*args, **kwargs):
return self._call_impl(
args, kwargs, cancellation_manager=cancellation_manager)
return cancellable_call
@property
def name(self):
"""`ConcreteFunction` name."""
return self._delayed_rewrite_functions.forward().name
@property
def graph(self):
"""Returns the graph from which this function was constructed."""
return self._func_graph
@property
def inputs(self):
"""Returns tensors in `self.graph` corresponding to arguments."""
return self._func_graph.inputs
@property
def structured_input_signature(self):
"""Returns structured signature for this concrete function.
Returns:
A tuple `(args, kwargs)`, where:
* `args` is a tuple that specifies the expected type or value for each
positional argument.
* `kwargs` is a dictionary that specifies the expected type or value
for each keyword-only argument.
The type or value for each argument is specified using one of the
following:
* A `tf.TypeSpec`, indicating that a Tensor or other TensorFlow-native
value is expected.
* A Python value, such as an integer, indicating that an equal value
is expected.
* A nested structure of `tf.TypeSpec`s and Python values, indicating
that a corresponding nested structure is expected.
"""
return self._func_graph.structured_input_signature
@property
def outputs(self):
"""Returns tensors in `self.graph` corresponding to returned tensors."""
return self._func_graph.outputs
@property
def structured_outputs(self):
"""Returns outputs in `self.graph` as returned by the original function."""
return self._func_graph.structured_outputs
@property
def captured_inputs(self):
"""Returns external Tensors captured by this function.
self.__call__(*args) passes `args + self.captured_inputs` to the function.
"""
from_closures = nest.flatten([x() for x in self._captured_closures],
expand_composites=True)
return self._captured_inputs + from_closures
@property
def function_def(self):
"""Returns a `FunctionDef` object representing this function."""
return self._delayed_rewrite_functions.forward().definition
@property
def output_shapes(self):
"""The function's output shapes."""
return nest.map_structure(
lambda x: getattr(x, "shape", tensor_shape.TensorShape(None)),
composite_tensor.replace_composites_with_components(
self._func_graph.structured_outputs),
expand_composites=False)
@property
def output_dtypes(self):
# TODO(akshayka): Consider removing this.
return nest.map_structure(
lambda x: x.dtype if x is not None else None,
composite_tensor.replace_composites_with_components(
self._func_graph.structured_outputs),
expand_composites=False)
def add_to_graph(self, g=None):
"""Registers the function, adds it to the graph g or default graph.
Args:
g: If specified, registers the function with this graph. Defaults to the
current context (either the default graph or the eager context).
"""
# If we are not executing eagerly, this adds the function to the default
# graph when no graph is specified.
# In case of eager execution, function definition gets added to context
# during construction itself.
if not context.executing_eagerly() and not g:
g = ops.get_default_graph()
self._delayed_rewrite_functions.forward().add_to_graph(g)
def add_gradient_functions_to_graph(self, g=None):
"""Add forward/backward functions to graph `g` or the current context."""
if not context.executing_eagerly() and not g:
g = ops.get_default_graph()
self._delayed_rewrite_functions.forward().add_to_graph(g)
forward_function, backward_function = (
self._delayed_rewrite_functions.forward_backward())
forward_function.add_to_graph(g)
backward_function.add_to_graph(g)
def _get_gradient_function(self):
"""Returns gradient function. It will be lazily created at first call."""
return self._delayed_rewrite_functions._rewrite_forward_and_call_backward # pylint: disable=protected-access
def _select_forward_and_backward_functions(
self, args, possible_gradient_type, executing_eagerly):
"""Selects forward and backward functions based on the calling context.
The forward function computes the "real" function outputs, `self._outputs`,
and any extra values needed by the corresponding backward function.
Args:
args: A flat list of Tensors with all of the inputs to the forward
function (including user-specified and captured inputs).
possible_gradient_type: One of gradients_util.POSSIBLE_GRADIENT_TYPES_*.
executing_eagerly: Boolean, the value of context.executing_eagerly().
Returns:
An object with a `forward` method returning a tuple of (forward_function :
_EagerDefinedFunction, augmented_arguments : List), and a corresponding
`record` method which takes outputs from the forward function and records
the operation. forward_function should be called with augmented_arguments.
"""
if executing_eagerly:
input_tangents = forwardprop_util.pack_tangents(args)
else:
input_tangents = forwardprop_util.TangentInfo()
need_gradients_for_jvps = tape.should_record_backprop(
input_tangents.tangents)
# Allows re-use of forward and backward function pairs depending on the
# tapes and forward accumulators watching its inputs.
cache_key = (need_gradients_for_jvps, input_tangents.indices)
if (possible_gradient_type
== gradients_util.POSSIBLE_GRADIENT_TYPES_FIRST_ORDER):
if input_tangents.indices or executing_eagerly:
# There is a single non-persistent tape active, so the user can only
# request first-order gradients from a tape. We can spend less time
# graph building since we know this.
#
# We may still end up computing higher-order gradients, but that'd be
# through `tf.gradients`, which can re-write the forward pass and so
# needs no preparation here.
functions = self._first_order_tape_functions.get(cache_key, None)
if functions is None:
functions = _FirstOrderTapeGradientFunctions(
self._func_graph, self._attrs, self._garbage_collector,
forwardprop_input_indices=input_tangents.indices,
delayed_rewrite_functions=self._delayed_rewrite_functions,
need_gradients_for_jvps=need_gradients_for_jvps)
self._first_order_tape_functions[cache_key] = functions
return _ForwardBackwardCall(
functions, args, input_tangents.tangents, tape_watching=True)
else:
# We can avoid computing second-order gradients in some cases by doing a
# delayed rewrite when graph building. Since we know we'll only compute
# first-order tape gradients, the delayed rewrite is safe: we won't need
# to tell the tape about side outputs.
#
# TODO(allenl): This case is really dirty. It would be better if we
# could temporarily pop all of the current tapes to avoid
# accidentally taking second-order gradients.
return _ForwardBackwardCall(
self._delayed_rewrite_functions, args, input_tangents.tangents,
tape_watching=True)
elif (possible_gradient_type
== gradients_util.POSSIBLE_GRADIENT_TYPES_HIGHER_ORDER):
# Either there's a persistent tape watching, or there are multiple nested
# tapes. Either way, the user may request higher-order gradients. We'll
# spend a bit more time and make sure higher-order gradients are correct.
functions = self._higher_order_tape_functions.get(
cache_key, None)
if functions is None:
functions = _HigherOrderTapeGradientFunctions(
self._func_graph, self._attrs, self._garbage_collector,
forwardprop_input_indices=input_tangents.indices,
delayed_rewrite_functions=self._delayed_rewrite_functions,
need_gradients_for_jvps=need_gradients_for_jvps)
self._higher_order_tape_functions[cache_key] = functions
return _ForwardBackwardCall(functions, args, input_tangents.tangents,
tape_watching=True)
# else possible_gradient_type == POSSIBLE_GRADIENT_TYPES_NONE, meaning no
# tape is recording.
return _ForwardBackwardCall(
self._delayed_rewrite_functions, args, input_tangents.tangents,
tape_watching=False)
def _build_call_outputs(self, result):
"""Maps the fdef output list to actual output structure.
Args:
result: Output lists defined by FunctionDef.
Returns:
The actual call output.
"""
# TODO(jlchu): call C++ version in function.cc when speed is improved
if self._func_graph.structured_outputs is None:
return result
# Replace outputs with results, skipping over any 'None' values.
outputs_list = nest.flatten(
self._func_graph.structured_outputs, expand_composites=True)
j = 0
for i, o in enumerate(outputs_list):
if o is not None:
handle_data_util.copy_handle_data(self.outputs[j], result[j])
outputs_list[i] = result[j]
j += 1
ret = nest.pack_sequence_as(self._func_graph.structured_outputs,
outputs_list, expand_composites=True)
return ret
@property
def _as_name_attr_list(self):
"""Returns a `NameAttrList` representing this function."""
ret = attr_value_pb2.NameAttrList(name=self.name)
for name, value in self._attrs.items():
ret.attr[name].CopyFrom(value)
return ret
def _structured_signature_summary(self, default_values=False):
"""Returns a string summarizing this function's structured signature.
Args:
default_values: If true, then include default values in the signature.
Returns:
A `string`.
"""
# Note: we can't just use self._function_spec.signature_summary(), because
# that would show "_BOUND_VALUE" as the default value for all arguments.
assert self._function_spec is not None
arg_specs, kwarg_specs = self.structured_input_signature
arg_names = list(self._function_spec.arg_names)
# If an explicit input_signature is provided to @tf.function, then any
# arguments with defaults that are not covered by that explicit signature
# are simply dropped from the signature.
# TODO(b/159639913) Look into whether dropping arguments with default values
# from the signature is the right thing to do.
arg_names = arg_names[:len(arg_specs)]
if default_values:
for i in range(len(arg_names)):
if not _contains_type_spec(arg_specs[i]):
arg_names[i] += "={}".format(arg_specs[i])
if kwarg_specs:
arg_names.append("*")
for name, spec in kwarg_specs.items():
arg_names.append(name)
if default_values and not _contains_type_spec(spec):
arg_names[-1] += "={}".format(spec)
signature = f"{self._func_graph.name}({', '.join(arg_names)})"
return signature
def _flat_signature_summary(self):
"""Returns a string summarizing this function's flat signature."""
assert self._arg_keywords is not None
assert self._num_positional_args is not None
arg_names = self._arg_keywords
if self._num_positional_args > len(arg_names):
arg_names.extend(
"<arg{}>".format(i + 1)
for i in range(len(arg_names), self._num_positional_args))
return f"{self._func_graph.name}({', '.join(arg_names)})"
def pretty_printed_signature(self, verbose=True):
"""Returns a string summarizing the signature of this concrete function."""
if not verbose:
return self._structured_signature_summary(default_values=True)
def pretty_print_spec(spec):
"""Returns a string describing the spec for a single argument."""
if isinstance(spec, tensor_spec.TensorSpec):
return "{} Tensor, shape={}".format(spec.dtype.name, spec.shape)
elif nest.is_sequence(spec):
pieces = nest.flatten(spec, expand_composites=False)
markers = [_Marker("<{}>".format(i + 1)) for i in range(len(pieces))]
structure = nest.pack_sequence_as(spec, markers)
# Ensure dictionaries are sorted by key (for determinism)
result = pprint.pformat(structure, width=10000)
for (marker, piece) in zip(markers, pieces):
result += "\n {}: {}".format(marker, pretty_print_spec(piece))
return result
else:
return repr(spec)
lines = [self._structured_signature_summary(default_values=True)]
arg_specs, kwarg_specs = self.structured_input_signature
names = list(self._function_spec.arg_names)
# If an explicit input_signature is provided to @tf.function, then any
# arguments with defaults that are not covered by that explicit signature
# are simply dropped from the signature.
# TODO(b/159639913) Look into whether dropping arguments with default values
# from the signature is the right thing to do.
# Note: we can skip bound args, since we already displayed their bound
# value in the signature summary.
arg_details = []
for (name, spec) in zip(names[:len(arg_specs)], list(arg_specs)):
if _contains_type_spec(spec):
arg_details.append(" {}: {}".format(name, pretty_print_spec(spec)))
if kwarg_specs:
for kwarg in sorted(kwarg_specs):
spec = kwarg_specs[kwarg]
if _contains_type_spec(spec):
arg_details.append(" {}: {}".format(
kwarg, pretty_print_spec(spec)))
if arg_details:
lines.append(" Args:")
lines.extend(arg_details)
lines.append(" Returns:")
def spec_from_value(value):
# For loaded function, structured_outputs are already specs.
if isinstance(value, type_spec.TypeSpec):
return value
return type_spec.type_spec_from_value(value)
lines.append(" {}".format(
pretty_print_spec(
nest.map_structure(spec_from_value, self.structured_outputs))))
return "\n".join(lines)
def __repr__(self):
if self._function_spec is not None:
return "<ConcreteFunction {} at 0x{:X}>".format(
self.pretty_printed_signature(verbose=False), id(self))
elif not (self._num_positional_args is None or self._arg_keywords is None):
return "<ConcreteFunction {} at 0x{:X}>".format(
self._flat_signature_summary(), id(self))
else:
return object.__repr__(self)
def __str__(self):
if self._function_spec is not None:
return "ConcreteFunction {}".format(self.pretty_printed_signature())
else:
return self.__repr__()
_pywrap_utils.RegisterType("Tensor", ops.Tensor)
_pywrap_utils.RegisterType("EagerTensor", ops.EagerTensor)
_pywrap_utils.RegisterType("IndexedSlices", ops.IndexedSlices)
def _deterministic_dict_values(dictionary):
return tuple(dictionary[key] for key in sorted(dictionary))
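# Tiny illustration (hypothetical helper, documentation only) of the sorted-key
# ordering that _deterministic_dict_values gives keyword arguments, which keeps
# canonicalization independent of the order in which the caller supplied them.
def _example_deterministic_dict_values():
  assert _deterministic_dict_values({"b": 2, "a": 1, "c": 3}) == (1, 2, 3)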
class FunctionSpec(object):
"""Specification of how to bind arguments to a function."""
@staticmethod
def from_function_and_signature(python_function,
input_signature,
is_pure=False,
experimental_follow_type_hints=False,
jit_compile=None):
"""Create a FunctionSpec instance given a python function and signature.
Args:
python_function: a function to inspect
input_signature: a signature of the function (None, if variable)
is_pure: if True all input arguments (including variables and constants)
will be converted to tensors and no variable changes allowed.
experimental_follow_type_hints: see `tf.function`
jit_compile: see `tf.function`
Returns:
instance of FunctionSpec
"""
fullargspec = tf_inspect.getfullargspec(python_function)
if (input_signature is not None and
set(fullargspec.kwonlyargs) - set(fullargspec.kwonlydefaults or ())):
nodefault_kwonlyargs = set(fullargspec.kwonlyargs)
if fullargspec.kwonlydefaults is not None:
nodefault_kwonlyargs -= set(fullargspec.kwonlydefaults)
raise ValueError("Cannot build TF function from "
f"{python_function.__name__}: keyword-only arguments "
"must have default values when input_signature is "
"provided. Got keyword-only arguments without default "
f"values: {sorted(nodefault_kwonlyargs)}.")
# Checks if the `fullargspec` contains self or cls as its first argument.
is_method = tf_inspect.isanytargetmethod(python_function)
# Treat a wrapped partial function as a special case. For all arguments that
# were overridden with keywords in the partial:
# - remove the corresponding arguments,
# - remove the corresponding keywords.
_, unwrapped = tf_decorator.unwrap(python_function)
if isinstance(unwrapped, functools.partial):
# Also consider the Python3 case with kwonlydefaults.
if fullargspec.defaults or fullargspec.kwonlydefaults:
new_defaults = fullargspec.defaults
new_args = fullargspec.args
if fullargspec.defaults:
# To be able to canonicalize the function properly, we want to ignore
# default values that are overridden via a partial kwarg. For example:
#
# def func(a, b, c, d=5, e=7):
# return a, b, c, d, e
# p_func = tf.function(functools.partial(func, 10, e=9))
#
# Here we want to drop from the defaults the parameter `e`. If we
# forwarded the call to the partial function with a default for `e`
# we would get an error for passing two values for one parameter.
#
# Note that this has a limitation: we can only override parameters at
# the end of the parameter list.
#
# In this case we want to end up with 3 arguments (b, c, d) and 1
# default value (5). We do this by constructing a mask where 0 stands
# for a value that was overridden by a partial kwarg. The seemingly
# complicated logic below does just that - for arguments (b, c, d, e)
# we would get a mask (1, 1, 1, 0).
old_args = fullargspec.args
old_defaults = fullargspec.defaults
no_default = object()
num_args_without_defaults = len(old_args) - len(old_defaults)
left_padding = tuple([no_default] * num_args_without_defaults)
args_with_defaults = zip(old_args, left_padding + old_defaults)
# Create a mask where 0 stands for args that had a partial kwarg
# defined.
non_keyword_defaults_mask = [
0 if key in unwrapped.keywords else 1 for key in old_args
]
# Keep only arguments and defaults that were not kwargs of partial.
new_args_with_defaults = list(
itertools.compress(args_with_defaults, non_keyword_defaults_mask))
# Keep all args.
new_args = [arg for arg, _ in new_args_with_defaults]
# Keep only real default values.
new_defaults = [
default for _, default in new_args_with_defaults
if default is not no_default
]
fullargspec = tf_inspect.FullArgSpec(
args=new_args,
varargs=fullargspec.varargs,
varkw=fullargspec.varkw,
defaults=new_defaults,
kwonlyargs=[],
kwonlydefaults={},
annotations=fullargspec.annotations)
# Get the function's name. Remove functools.partial wrappers if necessary.
while isinstance(python_function, functools.partial):
python_function = python_function.func
name = getattr(python_function, "__name__", "f")
return FunctionSpec(
fullargspec,
is_method,
input_signature,
is_pure=is_pure,
jit_compile=jit_compile,
experimental_follow_type_hints=experimental_follow_type_hints,
name=name)
def __init__(self,
fullargspec,
is_method,
input_signature,
is_pure=False,
experimental_follow_type_hints=False,
name=None,
jit_compile=None):
"""Constructs a FunctionSpec describing a python function.
Args:
fullargspec: `tf_inspect.FullArgSpec` object describing the function.
is_method: True if the function is a method.
input_signature: a signature of the function (None, if variable)
is_pure: if True all input arguments (including variables and constants)
will be converted to tensors and no variable changes allowed.
experimental_follow_type_hints: see `tf.function`.
name: Name of the function
jit_compile: see `tf.function`.
"""
self._fullargspec = fullargspec
self._is_method = is_method
self._is_pure = is_pure
self._jit_compile = jit_compile
self._experimental_follow_type_hints = experimental_follow_type_hints
# TODO(edloper): Include name when serializing for SavedModel?
self._name = name or "f"
if self._is_method:
# Remove `self`: default arguments shouldn't be matched to it.
# TODO(b/127938157): Should this error out if there is no arg to
# be removed?
args = fullargspec.args[1:]
else:
args = fullargspec.args
# A cache mapping from argument name to index, for canonicalizing
# arguments that are called in a keyword-like fashion.
self._args_to_indices = {arg: i for i, arg in enumerate(args)}
self._arg_names = args
# A cache mapping from arg index to default value, for canonicalization.
default_values = fullargspec.defaults
offset = len(args) - len(default_values or [])
self._arg_indices_to_default_values = {
offset + index: default
for index, default in enumerate(default_values or [])
}
self._arg_indices_no_default_values = set(range(len(args))) - set(
self._arg_indices_to_default_values)
if input_signature is None:
self._input_signature = None
else:
self._input_signature = tuple(input_signature)
self._flat_input_signature = tuple(nest.flatten(input_signature,
expand_composites=True))
@property
def fullargspec(self):
return self._fullargspec
@property
def is_method(self):
return self._is_method
@property
def args_to_indices(self):
return self._args_to_indices
@property
def kwargs_to_include(self):
return self._kwargs_to_include
@property
def input_signature(self):
return self._input_signature
@property
def flat_input_signature(self):
return self._flat_input_signature
@property
def is_pure(self):
return self._is_pure
@property
def jit_compile(self):
return self._jit_compile
@property
def arg_names(self):
return self._arg_names
@property
def vararg_name(self):
return self._fullargspec.varargs
@property
def varkw_name(self):
return self._fullargspec.varkw
def signature_summary(self, default_values=False):
"""Returns a string summarizing this function's signature.
Args:
default_values: If true, then include default values in the signature.
Returns:
A `string`.
"""
args = list(self._arg_names)
if default_values:
for (i, default) in self._arg_indices_to_default_values.items():
args[i] += "={}".format(default)
if self._fullargspec.kwonlyargs:
args.append("*")
for arg_name in self._fullargspec.kwonlyargs:
args.append(arg_name)
if default_values and arg_name in self._fullargspec.kwonlydefaults:
args[-1] += "={}".format(self._fullargspec.kwonlydefaults[arg_name])
return f"{self._name}({', '.join(args)})"
def _to_tensor_or_tensor_spec(self, x):
return (x if isinstance(x, (ops.Tensor, tensor_spec.TensorSpec))
else ops.convert_to_tensor(x))
def _convert_variables_to_tensors(self, args, kwargs):
args = [self._to_tensor_or_tensor_spec(x) for x in args]
kwargs = {kw: self._to_tensor_or_tensor_spec(x)
for kw, x in kwargs.items()}
return tuple(args), kwargs
def _convert_annotated_args_to_tensors(self, args, kwargs):
"""Attempts to autobox arguments annotated as tf.Tensor."""
if self.input_signature is not None:
return
args = list(args)
for i, arg in enumerate(args):
# See
# https://docs.python.org/3/library/inspect.html#inspect.getfullargspec
if i < len(self._fullargspec.args):
annotation_key = self._fullargspec.args[i]
else:
annotation_key = self._fullargspec.varargs
arg_annotation = self._fullargspec.annotations.get(annotation_key, None)
# TODO(rahulkamat): Change to TensorLike (here and below)
if arg_annotation == ops.Tensor:
args[i] = self._to_tensor_or_tensor_spec(arg)
for kw, v in kwargs.items():
if kw in self._fullargspec.kwonlyargs or kw in self._fullargspec.args:
annotation_key = kw
else:
annotation_key = self._fullargspec.varkw
kwarg_annotation = self._fullargspec.annotations.get(annotation_key, None)
if kwarg_annotation == ops.Tensor:
kwargs[kw] = self._to_tensor_or_tensor_spec(v)
return tuple(args), kwargs
def _validate_inputs(self, flat_inputs):
"""Raises an error if inputs contain illegal values."""
for inp in flat_inputs:
# TODO(b/183107079): Allow these once they're handled properly.
if isinstance(inp, weakref.ref):
raise ValueError(
f"weakref input {inp} not supported for function {self._name}")
def canonicalize_function_inputs(self, *args, **kwargs):
"""Canonicalizes `args` and `kwargs`.
Canonicalize the inputs to the Python function using a `FunctionSpec`
instance. In particular, we parse the varargs and kwargs that the
original function was called with into a tuple corresponding to the
Python function's positional (named) arguments and a dictionary
corresponding to its kwargs. Missing default arguments are added.
If this `FunctionSpec` has an input signature, then it is used to convert
arguments to tensors; otherwise, any inputs containing numpy arrays are
converted to tensors.
Args:
*args: The varargs this object was called with.
**kwargs: The keyword args this function was called with.
Returns:
A canonicalized ordering of the inputs, as well as full and filtered
(Tensors and Variables only) versions of their concatenated flattened
representations, represented by a tuple in the form (args, kwargs,
flat_args, filtered_flat_args). Here: `args` is a full list of bound
arguments, and `kwargs` contains only true keyword arguments, as opposed
to named arguments called in a keyword-like fashion.
Raises:
ValueError: If a keyword in `kwargs` cannot be matched with a positional
argument when an input signature is specified, or when the inputs
do not conform to the input signature.
"""
if self._is_pure:
args, kwargs = self._convert_variables_to_tensors(args, kwargs)
if self._experimental_follow_type_hints:
args, kwargs = self._convert_annotated_args_to_tensors(args, kwargs)
# Pre-calculate to reduce overhead
arglen = len(args)
if self._input_signature is not None:
if arglen > len(self._input_signature):
raise TypeError(f"{self.signature_summary()} specifies "
f"{len(self._input_signature)} positional arguments, "
f"but got {arglen}.")
for arg in six.iterkeys(kwargs):
index = self._args_to_indices.get(arg, None)
if index is None:
raise TypeError(f"{self.signature_summary()} got unexpected keyword "
f"argument `{arg}`.")
if index >= len(self._input_signature):
raise TypeError(
f"{self.signature_summary()} got keyword argument `{arg}` that "
"was not included in input_signature.")
if not kwargs:
inputs = args
if self._arg_indices_to_default_values:
try:
inputs += tuple(self._arg_indices_to_default_values[i]
for i in range(arglen, len(self._arg_names)))
except KeyError:
missing_args = [
self._arg_names[i]
for i in range(arglen, len(self._arg_names))
if i not in self._arg_indices_to_default_values
]
raise TypeError(f"{self.signature_summary()} missing required "
f"arguments: {', '.join(missing_args)}.")
if self._fullargspec.kwonlydefaults:
kwargs.update(self._fullargspec.kwonlydefaults)
else:
# Maps from index of arg to its corresponding value, according to `args`
# and `kwargs`; seeded with the default values for the named args that
# aren't in `args`.
arg_indices_to_values = {
index: default for index, default in six.iteritems(
self._arg_indices_to_default_values) if index >= arglen
}
consumed_args = []
missing_arg_indices = self._arg_indices_no_default_values - set(
range(arglen))
for arg, value in six.iteritems(kwargs):
index = self._args_to_indices.get(arg, None)
if index is not None:
if index < arglen:
raise TypeError(f"{self.signature_summary()} got two values for "
f"{arg!r}.")
arg_indices_to_values[index] = value
# These arguments in 'kwargs' might also belong to
# positional arguments
missing_arg_indices.discard(index)
consumed_args.append(arg)
for arg in consumed_args:
# After this loop, `kwargs` will only contain keyword_only arguments,
# and all positional_or_keyword arguments have been moved to `inputs`.
kwargs.pop(arg)
inputs = args + _deterministic_dict_values(arg_indices_to_values)
# Exclude positional args with values
if missing_arg_indices:
missing_args = [self._arg_names[i] for i in sorted(missing_arg_indices)]
if len(missing_args) == 1:
raise TypeError(f"{self.signature_summary()} missing 1 required "
f"argument: {missing_args[0]}.")
else:
raise TypeError(f"{self.signature_summary()} missing required "
f"arguments: {', '.join(missing_args)}.")
if kwargs and self._input_signature is not None:
raise TypeError("Keyword arguments are not supported when "
"input_signature is provided. Signature: "
f"{self.signature_summary()}. Keyword arguments: "
f"{kwargs}.")
if self._fullargspec.kwonlydefaults:
for (kwarg, default) in self._fullargspec.kwonlydefaults.items():
kwargs.setdefault(kwarg, default)
if self._input_signature is None:
inputs, flat_inputs, filtered_flat_inputs = _convert_numpy_inputs(inputs)
kwargs, flat_kwargs, filtered_flat_kwargs = _convert_numpy_inputs(kwargs)
flat_inputs += flat_kwargs
filtered_flat_inputs += filtered_flat_kwargs
else:
inputs, flat_inputs, filtered_flat_inputs = _convert_inputs_to_signature(
inputs, self._input_signature, self._flat_input_signature)
self._validate_inputs(flat_inputs)
return inputs, kwargs, flat_inputs, filtered_flat_inputs
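# Minimal, hedged sketch (hypothetical helper, not part of this module's API,
# never executed at import time) of how a FunctionSpec canonicalizes a call;
# this mirrors what Function.__call__ does before consulting its trace cache.
def _example_function_spec_canonicalization():
  def fn(a, b=5):
    return a + b

  spec = FunctionSpec.from_function_and_signature(fn, input_signature=None)
  # The missing positional argument is filled from its default, and non-Tensor
  # inputs pass through without entering the "filtered" list.
  inputs, kwargs, flat_inputs, filtered = spec.canonicalize_function_inputs(1)
  assert inputs == (1, 5) and kwargs == {}
  assert flat_inputs == [1, 5] and filtered == []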
def _as_ndarray(value):
"""Converts value to an ndarray, assumes _is_ndarray(value)."""
# TODO(tomhennigan) Support __array_interface__ too.
return value.__array__()
def _is_ndarray(value):
"""Tests whether the given value is an ndarray (and not a TF tensor/var)."""
# TODO(tomhennigan) Support __array_interface__ too.
return hasattr(value, "__array__") and not (
isinstance(value, ops.Tensor)
or isinstance(value, resource_variable_ops.BaseResourceVariable)
or hasattr(value, "_should_act_as_resource_variable")
# For legacy reasons we do not automatically promote Numpy strings.
or isinstance(value, np.str_)
# NumPy dtypes have __array__ as unbound methods.
or isinstance(value, type)
# CompositeTensors should be flattened instead.
or isinstance(value, composite_tensor.CompositeTensor))
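# Quick illustration (hypothetical helper, documentation only) of the
# _is_ndarray predicate above: genuine NumPy arrays qualify, while NumPy
# string scalars and dtype classes are deliberately excluded.
def _example_is_ndarray():
  assert _is_ndarray(np.array([1, 2, 3]))
  assert not _is_ndarray(np.str_("not promoted"))  # legacy: strings skipped
  assert not _is_ndarray(np.float32)  # dtype classes are types, not arrays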
def _convert_numpy_inputs(inputs):
"""Convert numpy array inputs to tensors."""
# We assume that any CompositeTensors have already converted their components
# from numpy arrays to Tensors, so we don't need to expand composites here for
# the numpy array conversion. Instead, we do so because the flattened inputs
# are eventually passed to ConcreteFunction()._call_flat, which requires
# expanded composites.
flat_inputs = nest.flatten(inputs, expand_composites=True)
# Check for NumPy arrays in arguments and convert them to Tensors.
# TODO(nareshmodi): Skip ndarray conversion to tensor altogether, perhaps
# finding a way to store them directly in the cache key (currently not
# possible since ndarrays are not hashable).
need_packing = False
filtered_flat_inputs = []
for index, value in enumerate(flat_inputs):
if isinstance(value,
(ops.Tensor, resource_variable_ops.BaseResourceVariable)):
filtered_flat_inputs.append(value)
elif hasattr(value, "__array__") and not (
hasattr(value, "_should_act_as_resource_variable") or
isinstance(value, (np.str_, type, composite_tensor.CompositeTensor))):
# This case is equivalent to _is_ndarray(value) == True
a = _as_ndarray(value)
if not isinstance(a, np.ndarray):
raise TypeError(f"The output of __array__ must be an np.ndarray, "
f"got {type(a)} from {value}.")
flat_inputs[index] = constant_op.constant(a)
filtered_flat_inputs.append(flat_inputs[index])
need_packing = True
if need_packing:
return (nest.pack_sequence_as(
structure=inputs, flat_sequence=flat_inputs,
expand_composites=True), flat_inputs, filtered_flat_inputs)
else:
return inputs, flat_inputs, filtered_flat_inputs
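# Hedged sketch (hypothetical helper, assumes eager execution, not run at
# import time) of _convert_numpy_inputs: ndarray leaves become EagerTensors,
# other Python values pass through, and only Tensors land in the "filtered"
# list that is later handed to ConcreteFunction._call_flat.
def _example_convert_numpy_inputs():
  inputs = {"x": np.ones([2]), "label": "keep-me"}
  packed, flat_inputs, filtered = _convert_numpy_inputs(inputs)
  assert isinstance(packed["x"], ops.Tensor)  # ndarray was converted
  assert packed["label"] == "keep-me"         # non-array value untouched
  assert len(filtered) == 1                   # only the Tensor is "filtered"
  return flat_inputs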
def _convert_inputs_to_signature(inputs, input_signature, flat_input_signature):
"""Convert inputs to pass into a function with an explicit signature."""
def format_error_message(inputs, input_signature):
return (" inputs: (\n" + " " + ",\n ".join(str(i) for i in inputs) +
")\n" + " input_signature: (\n" + " " +
",\n ".join(str(i) for i in input_signature) + ")")
try:
flatten_inputs = nest.flatten_up_to(
input_signature,
inputs[:len(input_signature)],
expand_composites=True,
check_types=False)  # lists are converted to tuples for `tf.data`.
except ValueError:
raise ValueError("Structure of Python function inputs does not match "
"input_signature:\n"
f"{format_error_message(inputs, input_signature)}.")
need_packing = False
for index, (value, spec) in enumerate(zip(flatten_inputs,
flat_input_signature)):
if (isinstance(spec, tensor_spec.TensorSpec) and
not _pywrap_utils.IsTensor(value)):
try:
flatten_inputs[index] = ops.convert_to_tensor(
value, dtype_hint=spec.dtype)
need_packing = True
except ValueError:
raise ValueError("When input_signature is provided, all inputs to "
"the Python function must be convertible to "
"tensors:\n"
f"{format_error_message(inputs, input_signature)}.")
if any(not spec.is_compatible_with(other) for spec, other in zip(
flat_input_signature,
flatten_inputs)):
raise ValueError("Python inputs incompatible with input_signature:\n"
f"{format_error_message(inputs, input_signature)}.")
if need_packing:
inputs = nest.pack_sequence_as(
structure=input_signature,
flat_sequence=flatten_inputs,
expand_composites=True)
flat_inputs = nest.flatten(inputs, expand_composites=True)
return (inputs, flat_inputs, [
t for t in flat_inputs
if isinstance(t, (ops.Tensor, resource_variable_ops.BaseResourceVariable))
])
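# Hedged sketch (hypothetical helper, assumes eager execution) of
# _convert_inputs_to_signature promoting plain Python values to Tensors that
# match an explicit input_signature.
def _example_convert_inputs_to_signature():
  signature = (tensor_spec.TensorSpec([2]),)  # float32 by default
  flat_signature = tuple(nest.flatten(signature, expand_composites=True))
  inputs, flat_inputs, filtered = _convert_inputs_to_signature(
      ([1.0, 2.0],), signature, flat_signature)
  assert isinstance(inputs[0], ops.Tensor)  # the list became a float32 Tensor
  assert len(flat_inputs) == len(filtered) == 1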
class FunctionCache(object):
"""A lightweight container for cached functions.
"""
__slots__ = [
"missed", "primary", "arg_relaxed_specs", "arg_relaxed",
"_garbage_collectors"
]
def __init__(self):
# The set of functions that have been missed; entries are CacheKey with
# input_signature `None` (e.g. a "call context key")
self.missed = set()
# The primary cache, mapping a fully shaped CacheKey to a function.
self.primary = collections.OrderedDict()
# A cache key lookup, mapping a CacheKey generated without shape info to a
# flat list of `TypeSpec`s with relaxed shapes (one for each flattened
# argument). Arguments that are not Tensors or `CompositeTensor`s contain a
# `None` for the corresponding relaxed spec.
self.arg_relaxed_specs = collections.OrderedDict()
# The secondary cache, mapping a CacheKey generated without shape info to a
# function.
self.arg_relaxed = collections.OrderedDict()
# All OrderedDicts require manual garbage collection.
self._garbage_collectors = [
_FunctionGarbageCollector(self.primary),
_FunctionGarbageCollector(self.arg_relaxed),
_FunctionGarbageCollector(self.arg_relaxed_specs)]
def all_values(self):
"""A list of all `ConcreteFunction` instances held by this cache."""
# We need to simultaneously make sure our returned concrete functions are
# unique *and* make sure they are returned in a deterministic order for
# serialization.
#
# TODO(b/174215821): It's likely that we ultimately would just prefer to
# choose the most specific concrete function shape given a set of
# arguments. If and when that is implemented, this logic can be revisited.
primary_functions = set(self.primary.values())
return list(self.primary.values()) + [
v for v in self.arg_relaxed.values() if v not in primary_functions]
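# Small illustration (hypothetical helper; plain strings stand in for real
# CacheKeys and ConcreteFunctions) of FunctionCache.all_values: results are
# deduplicated against the primary cache and returned in a stable order.
def _example_function_cache_all_values():
  cache = FunctionCache()
  cache.primary["key-a"] = "concrete-a"
  cache.arg_relaxed["key-b"] = "concrete-a"  # duplicate of a primary entry
  cache.arg_relaxed["key-c"] = "concrete-b"
  assert cache.all_values() == ["concrete-a", "concrete-b"]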
# TODO(mdan): Refactor this and clarify relationship with def_function.Function.
# Right now, def_function.Function is the higher level implementation.
class Function(object):
"""Wrapper class for the graph functions defined for a Python function.
See the documentation for `defun` for more information on the semantics of
defined functions.
The `Function` class is thread-compatible, meaning that minimal usage of defuns
(defining and calling) is thread-safe, but if users call other methods or
invoke the base `python_function` themselves, external synchronization is
necessary.
In addition, Function is not reentrant, so recursive functions need to call
the wrapped function, not the wrapper.
"""
def __init__(self,
python_function,
name,
input_signature=None,
attributes=None,
autograph=True,
autograph_options=None,
experimental_relax_shapes=False,
capture_by_value=None,
jit_compile=None,
experimental_follow_type_hints=False):
"""Initializes a `Function`.
Args:
python_function: the function to be wrapped.
name: the name given to it.
input_signature: a possibly nested sequence of `TensorSpec` objects
specifying the input signature of this function. If `None`, a separate
function is instantiated for each inferred input signature.
attributes: dict, extra keyword arguments that will be added as attribute
of the function.
autograph: whether to use autograph to compile
`python_function`. See https://www.tensorflow.org/guide/autograph for
more information.
autograph_options: Experimental knobs to control behavior when
`autograph=True`. See https://www.tensorflow.org/guide/autograph
for more information.
experimental_relax_shapes: When true, argument shapes may be relaxed to
avoid unnecessary retracing.
capture_by_value: Experimental. Whether to capture resource variables by
value or reference. If None, will inherit from a parent context or
default to False.
jit_compile: Force-compile the function with XLA, cf.
def_function.Function doc on jit_compile.
experimental_follow_type_hints: See the documentation for `tf.function`.
Raises:
ValueError: if `input_signature` is not None and the `python_function`'s
argspec has keyword arguments.
"""
self._python_function = python_function
pure_function = attributes and IMPLEMENTS_ATTRIBUTE_NAME in attributes
self._function_spec = FunctionSpec.from_function_and_signature(
python_function,
input_signature,
is_pure=pure_function,
experimental_follow_type_hints=experimental_follow_type_hints)
self._name = name
self._autograph = autograph
self._autograph_options = autograph_options
self._experimental_relax_shapes = experimental_relax_shapes
self._function_cache = FunctionCache()
self._function_attributes = attributes or {}
self._capture_by_value = capture_by_value
self.tracing_count = 0
if self.input_signature is not None:
self._hashable_input_signature = _make_input_signature_hashable(
self.flat_input_signature)
self._lock = threading.Lock()
# _descriptor_cache is a mapping from an instance of a class to an
# instance-specific `Function`, used to make sure defun-decorated methods
# create different functions for each instance.
self._descriptor_cache = weakref.WeakKeyDictionary()
self._jit_compile = jit_compile
self._experimental_follow_type_hints = experimental_follow_type_hints
def __call__(self, *args, **kwargs):
"""Calls a graph function specialized to the inputs."""
with self._lock:
(graph_function,
filtered_flat_args) = self._maybe_define_function(args, kwargs)
return graph_function._call_flat(
filtered_flat_args, captured_inputs=graph_function.captured_inputs) # pylint: disable=protected-access
@property
def python_function(self):
"""Returns the wrapped Python function."""
return self._python_function # pylint: disable=protected-access
@property
def function_spec(self):
return self._function_spec
@property
def input_signature(self):
"""Returns the input signature."""
return self._function_spec.input_signature
@property
def flat_input_signature(self):
"""Returns the flattened input signature."""
return self._function_spec.flat_input_signature
def _get_concrete_function_internal_garbage_collected(self, *args, **kwargs):
"""Returns a concrete function which cleans up its graph function."""
if self.input_signature:
args, kwargs = None, None
with self._lock:
graph_function, _ = self._maybe_define_function(args, kwargs)
return graph_function
def _get_concrete_function_internal(self, *args, **kwargs):
"""Bypasses error checking when getting a graph function."""
graph_function = self._get_concrete_function_internal_garbage_collected(
*args, **kwargs)
# We're returning this concrete function to someone, and they may keep a
# reference to the FuncGraph without keeping a reference to the
# ConcreteFunction object. So we won't clean up the reference cycles
# manually and instead will leave them to Python's garbage collector.
graph_function._garbage_collector.release() # pylint: disable=protected-access
return graph_function
def _get_concrete_function_garbage_collected(self, *args, **kwargs):
"""Returns a `ConcreteFunction` specialized to inputs and execution context.
Unlike `get_concrete_function(...)`, the graph will be deleted when the
returned function is deleted. It's useful to avoid creating a reference
cycle when you know for sure that the graph will be no longer used without
the returned function.
Args:
*args: inputs to specialize on.
**kwargs: inputs to specialize on.
"""
if self.input_signature:
if kwargs:
raise ValueError("Cannot define a TensorFlow function from a Python "
"function with keyword arguments when "
"input_signature is provided, got keyword arguments "
f"({kwargs}) with input_signature "
f"({self.input_signature}).")
if args:
# If args are provided, they must match the input signature.
if not is_same_structure(self.input_signature, args):
raise ValueError("Structure of Python function inputs does not match "
f"input_signature: inputs ({args}), "
f"input_signature ({self.input_signature}).")
flat_inputs = nest.flatten(args, expand_composites=True)
if any(not isinstance(arg, (ops.Tensor, tensor_spec.DenseSpec,
resource_variable_ops.BaseResourceVariable))
for arg in flat_inputs):
raise ValueError("When input_signature is provided, all inputs to "
"the Python function must be Tensors, Variables, "
"tf.TensorSpec or tf.VariableSpec objects.")
if any(not spec.is_compatible_with(other)
for spec, other in zip(self.flat_input_signature, flat_inputs)):
raise ValueError("Python inputs incompatible with input_signature: "
f"inputs ({args}), input_signature "
f"({self.input_signature}).")
args, kwargs = None, None
with self._lock:
graph_function, _ = self._maybe_define_function(args, kwargs)
seen_names = set()
captured = object_identity.ObjectIdentitySet(
graph_function.graph.internal_captures)
# pylint: disable=protected-access
graph_function._arg_keywords = []
prefix_counts = {}
# pylint: enable=protected-access
num_positional = 0
for arg in graph_function.graph.inputs:
if arg in captured:
break
num_positional += 1
user_arg_name = compat.as_str(arg.op.get_attr("_user_specified_name"))
proposal = user_arg_name
while proposal in seen_names:
index = prefix_counts.get(user_arg_name, 1)
proposal = "{}_{}".format(user_arg_name, index)
prefix_counts[user_arg_name] = index + 1
seen_names.add(proposal)
graph_function._arg_keywords.append(proposal) # pylint: disable=protected-access
# Anything can be a positional argument, in the same order as .inputs
graph_function._num_positional_args = num_positional # pylint: disable=protected-access
return graph_function
def get_concrete_function(self, *args, **kwargs):
"""Returns a `ConcreteFunction` specialized to inputs and execution context.
Args:
*args: inputs to specialize on. Can be concrete values (e.g. 1)
or `tf.Tensor` or `tf.TensorSpec`.
**kwargs: keyword inputs to specialize on. Concrete values (e.g. 1)
or `tf.Tensor` or `tf.TensorSpec`.
"""
graph_function = self._get_concrete_function_garbage_collected(
*args, **kwargs)
graph_function._garbage_collector.release() # pylint: disable=protected-access
return graph_function
def __get__(self, instance, owner):
"""Makes it possible to defun instance methods."""
del owner
# `instance` here is the instance that this `Function` was accessed through
# e.g., for
#
# class Foo(object):
#
# @function.defun
# def bar(self):
# ...
#
# foo = Foo()
# foo.bar() # `foo.bar` is a `Function` instance
#
# then `instance` will be `foo` (and `owner` will be `Foo`). We create a
# new instance of `Function` here to allow different instances each
# to create variables once, thereby allowing methods to be decorated with
# defun. Keeps a cache to avoid retracing the function every time the
# descriptor is accessed.
if instance not in self._descriptor_cache:
if instance is None:
return self
# If there is no instance-specific `Function` in the cache, we construct
# an instance-specific `Function` that uses a weak reference to the
# instance (so that the instance will be correctly gc'd).
# And finally add the wrapped function to the description cache
self._descriptor_cache[instance] = class_method_to_instance_method(
self, instance)
# Return the cached `Function` for the instance
return self._descriptor_cache[instance]
def _cache_key(self,
args,
kwargs,
cache_key_context,
include_tensor_ranks_only=False):
"""Computes the cache key given inputs and execution context."""
if self.input_signature is None:
# We always use both args and kwargs to form input even if one is empty.
# This reduces ambiguity, for example, when args contains a dict and
# kwargs is empty.
inputs = (args, kwargs)
input_signature = pywrap_tfe.TFE_Py_EncodeArg(
inputs, include_tensor_ranks_only, ENCODE_VARIABLES_BY_RESOURCE_ID)
hashable_input_signature = _make_input_signature_hashable(input_signature)
else:
del args, kwargs
assert not include_tensor_ranks_only
hashable_input_signature = self._hashable_input_signature
(parent_graph, device_functions, colocation_stack, in_cross_replica_context,
variable_policy, xla_context_id) = cache_key_context
return CacheKey(hashable_input_signature, parent_graph, device_functions,
colocation_stack, in_cross_replica_context, variable_policy,
xla_context_id)
def _cache_key_context(self):
"""Returns execution context."""
ctx = context.context()
# Don't need to open an init_scope if the _cache_key call is in eager mode
# already.
executing_eagerly = ctx.executing_eagerly()
parent_graph = None
xla_context_id = 0
if not executing_eagerly:
# We want to force function retracing for each different
# XLAControlFlowContext, so add `xla_context_id` to the cache key.
xla_context = _enclosing_xla_context()
if xla_context is not None and \
xla_context.RequiresUniqueFunctionRetracing():
xla_context_id = id(xla_context)
with ops.init_scope():
# The graph, or whether we're executing eagerly, should be a part of the
# cache key so we don't improperly capture tensors such as variables.
executing_eagerly = ctx.executing_eagerly()
parent_graph = None if executing_eagerly else ops.get_default_graph()
# pylint: disable=protected-access
default_graph = ops.get_default_graph()
# TODO(b/117617952): The current distribution strategy will affect graph
# building (e.g. accessing different variables from different devices) and
# so requires retracing for each device.
strategy_stack = default_graph._distribution_strategy_stack
uses_distribution_strategy = (
strategy_stack and
strategy_stack[-1].strategy.extended._retrace_functions_for_each_device
)
if executing_eagerly:
colocation_stack = ()
if uses_distribution_strategy:
device_functions = (pydev.merge_device(ctx.device_name),)
else:
device_functions = ()
else:
colocation_stack = tuple(default_graph._colocation_stack.peek_objs())
if (uses_distribution_strategy
or func_graph_module.device_stack_has_callable(
default_graph._device_function_stack)):
# Putting the device in the cache key ensures that call-site device
# annotations are respected.
device_functions = tuple(default_graph._device_functions_outer_to_inner)
else:
device_functions = ()
in_cross_replica_context = False
try:
in_cross_replica_context = (strategy_stack[-1].replica_context is None) # pylint: disable=protected-access
except (AttributeError, IndexError):
pass
if save_context.in_save_context():
variable_policy = (
save_context.get_save_options().experimental_variable_policy)
else:
variable_policy = None
return (parent_graph, device_functions, colocation_stack,
in_cross_replica_context, variable_policy, xla_context_id)
def _create_graph_function(self, args, kwargs, override_flat_arg_shapes=None):
"""Create a `ConcreteFunction` from `args` and `kwargs`."""
self.tracing_count += 1
if self.input_signature is None:
arglen = len(args)
else:
arglen = len(self.input_signature)
base_arg_names = self._function_spec.arg_names[:arglen]
num_missing_args = arglen - len(self._function_spec.arg_names)
missing_arg_names = [self._function_spec.vararg_name] * num_missing_args
# Produce a list of missing args of the form ["arg_0", "arg_1", ...],
# where arg is based on the self._function_spec.vararg_name.
missing_arg_names = [
"%s_%d" % (arg, i) for i, arg in enumerate(missing_arg_names)
]
arg_names = base_arg_names + missing_arg_names
graph_function = ConcreteFunction(
func_graph_module.func_graph_from_py_func(
self._name,
self._python_function,
args,
kwargs,
self.input_signature,
autograph=self._autograph,
autograph_options=self._autograph_options,
arg_names=arg_names,
override_flat_arg_shapes=override_flat_arg_shapes,
capture_by_value=self._capture_by_value),
self._function_attributes,
function_spec=self.function_spec,
# Tell the ConcreteFunction to clean up its graph once it goes out of
# scope. This is not the default behavior since it gets used in some
# places (like Keras) where the FuncGraph lives longer than the
# ConcreteFunction.
shared_func_graph=False)
return graph_function
def _define_function_with_shape_relaxation(self, args, kwargs, flat_args,
filtered_flat_args,
cache_key_context):
"""Define a function, relaxing arg shapes to avoid unnecessary retracing."""
flat_no_comp = nest.flatten((args, kwargs), expand_composites=False)
any_composite_args = any(
isinstance(x, composite_tensor.CompositeTensor) for x in flat_no_comp)
# Build a cache key where TensorShapes include only rank information (and
# not information about the size of each dimension).
if not any_composite_args:
rank_only_cache_key = self._cache_key(
args, kwargs, cache_key_context, include_tensor_ranks_only=True)
else:
# For the rank-only cache key, replace any composite tensors with
# shape-relaxed TypeSpecs.
(cache_key_args, cache_key_kwargs) = nest.map_structure(
_shape_relaxed_type_for_composite_tensor, (args, kwargs))
rank_only_cache_key = self._cache_key(
cache_key_args,
cache_key_kwargs,
cache_key_context,
include_tensor_ranks_only=True)
arg_specs = [_type_spec_for(x) for x in flat_no_comp]
relaxed_arg_specs = self._function_cache.arg_relaxed_specs.get(
rank_only_cache_key, None)
relaxed_arg_function = self._function_cache.arg_relaxed.get(
rank_only_cache_key, None)
if (relaxed_arg_function is not None
and all(_is_type_subset(x, y) for (x, y) in
zip(relaxed_arg_specs, arg_specs))):
return relaxed_arg_function, filtered_flat_args
if relaxed_arg_specs is None:
relaxed_arg_specs = arg_specs
else:
if len(arg_specs) != len(relaxed_arg_specs):
raise RuntimeError("Expected arg_specs len to match relaxed_arg_specs "
f"len: {len(arg_specs):d} vs. "
f"{len(relaxed_arg_specs):d}.")
relaxed_arg_specs = [
x if x is None else x.most_specific_compatible_type(y)
for (x, y) in zip(arg_specs, relaxed_arg_specs)]
self._function_cache.arg_relaxed_specs[rank_only_cache_key] = (
relaxed_arg_specs)
relaxed_arg_shapes = [
x if x is None else x.shape
for x in nest.flatten(relaxed_arg_specs, expand_composites=True)]
if any_composite_args:
# Rebuild composite tensors with the relaxed TypeSpecs. For example,
# if a tf.data iterator is passed as an argument, then we need to relax
# the TensorShapes in its element_spec.
(relaxed_arg_specs, relaxed_kwarg_specs) = nest.pack_sequence_as(
(args, kwargs), relaxed_arg_specs, expand_composites=False)
(args, kwargs) = nest.pack_sequence_as(
(relaxed_arg_specs, relaxed_kwarg_specs),
flat_args,
expand_composites=True)
graph_function = self._create_graph_function(
args, kwargs, override_flat_arg_shapes=relaxed_arg_shapes)
self._function_cache.arg_relaxed[rank_only_cache_key] = graph_function
return (graph_function, [
t for t in nest.flatten((args, kwargs), expand_composites=True)
if isinstance(t, (ops.Tensor,
resource_variable_ops.BaseResourceVariable))
])
def _maybe_define_function(self, args, kwargs):
"""Gets a function for these inputs, defining it if necessary.
`args` and `kwargs` can be None if this `Function` was created with an
`input_signature`.
Caller must hold self._lock.
Args:
args: The varargs for the Python function.
kwargs: The keyword args for the Python function.
Returns:
A graph function corresponding to the input signature implied by args and
kwargs, as well as filtered flattened inputs (only Tensors and Variables)
that the object should be called with.
Raises:
ValueError: If inputs are incompatible with the input signature.
TypeError: If the function inputs include non-hashable objects.
RuntimeError: If there's an internal bug (inconsistency) in handling
shape relaxation retracing.
"""
if self.input_signature is None or args is not None or kwargs is not None:
args, kwargs, flat_args, filtered_flat_args = \
self._function_spec.canonicalize_function_inputs(*args, **kwargs)
else:
flat_args, filtered_flat_args = [None], []
cache_key_context = self._cache_key_context()
cache_key = self._cache_key(args, kwargs, cache_key_context)
try:
hash(cache_key)
except TypeError as e:
raise TypeError(
"Arguments supplied to `defun`-generated functions must be "
f"hashable. Original error: {e}.")
graph_function = self._function_cache.primary.get(cache_key, None)
if graph_function is not None:
return graph_function, filtered_flat_args
with monitoring.MonitoredTimer(_graph_building_time_counter.get_cell()):
with trace.Trace("tf.function-graph_building"):
logging.vlog(1,
"Creating new FuncGraph for Python function %r (key: %r)",
self._python_function, cache_key)
logging.vlog(2, "Python function signature [args: %s] [kwargs: %s]",
args, kwargs)
# pylint: disable=protected-access
call_context_key = cache_key._replace(input_signature=None)
# pylint: disable=protected-access
ag_status = (
ag_ctx.Status.ENABLED
if self._autograph else ag_ctx.Status.DISABLED)
with ag_ctx.ControlStatusCtx(
status=ag_status, options=self._autograph_options):
# Build a function with shape relaxation retracing if:
# 1. shape relaxation is explicitly enabled
# and 2. there's no provided input signature
# and 3. there's been a cache miss for this calling context
if (self._experimental_relax_shapes and
self.input_signature is None and
call_context_key in self._function_cache.missed):
return self._define_function_with_shape_relaxation(
args, kwargs, flat_args, filtered_flat_args, cache_key_context)
self._function_cache.missed.add(call_context_key)
graph_function = self._create_graph_function(args, kwargs)
self._function_cache.primary[cache_key] = graph_function
return graph_function, filtered_flat_args
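# Hedged usage sketch (hypothetical helper, assumes eager execution, never run
# at import time) of the Function wrapper above: the first call traces a
# ConcreteFunction, and a second call with the same dtype/shape signature
# reuses the cached trace instead of retracing.
def _example_function_tracing_cache():
  traced = Function(lambda x: x * 2.0, name="double")
  first = traced(constant_op.constant(3.0))
  second = traced(constant_op.constant(4.0))  # same signature: cache hit
  assert traced.tracing_count == 1
  return first, second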
def register(func, *args, **kwargs):
"""Register a specialization of a `Function` into the graph.
  This won't actually call the function with the inputs; it only puts the
  function definition into the graph. Registering the function with different
  input parameters will result in multiple versions of the function being
  registered in the graph.
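  For example (an illustrative sketch; `add` is a hypothetical `defun`-wrapped
  function and the surrounding graph context is assumed):
  ```python
  add = tf.contrib.eager.defun(lambda x, y: x + y)
  with tf.Graph().as_default():
    # Traces `add` for this signature and adds the definition to the graph.
    concrete = register(add, tf.constant(1.0), tf.constant(2.0))
  ```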
Args:
func: the `Function` instance that generated by a @defun
*args: input arguments for the Python function.
**kwargs: input keyword arguments for the Python function.
Returns:
a `ConcreteFunction` object specialized to inputs and execution context.
Raises:
ValueError: When the input function is not a defun wrapped python function.
"""
if not isinstance(func, Function):
raise ValueError("Only defun function is allowed to be registered. "
f"Got {func} with type {type(func)}.")
concrete_func = func.get_concrete_function(*args, **kwargs)
concrete_func.add_to_graph()
concrete_func.add_gradient_functions_to_graph()
return concrete_func
def validate_signature(signature):
if not isinstance(signature, (tuple, list)):
raise TypeError("input_signature must be either a tuple or a list, got "
f"{type(signature)}.")
if any(not isinstance(arg, tensor_spec.DenseSpec)
for arg in nest.flatten(signature, expand_composites=True)):
bad_args = [arg for arg in nest.flatten(signature, expand_composites=True)
if not isinstance(arg, tensor_spec.DenseSpec)]
raise TypeError("input_signature must be a possibly nested sequence of "
f"TensorSpec objects, got invalid args {bad_args} with "
f"types {list(map(type, bad_args))}.")
def validate_python_function(python_function):
if not callable(python_function):
raise TypeError(f"{python_function} is not a callable object.")
def defun(func=None,
input_signature=None,
autograph=True,
experimental_autograph_options=None,
experimental_relax_shapes=False):
"""Compiles a Python function into a callable TensorFlow graph.
`defun` (short for "define function") compiles a Python function
composed of TensorFlow operations into a callable that executes a `tf.Graph`
containing those operations. The callable produced by `defun` contains only
the subgraph of TensorFlow operations that were executed when the Python
function was called with a particular input signature, defined as a list
of the shapes and dtypes of the Python function's Tensor-valued arguments and
the values of its non-Tensor Python objects.
When eager execution is enabled, the ability to create graphs from Python
functions makes it possible to incrementally trade off debuggability and
interactivity for performance. Functions compiled with `defun` cannot be
inspected with `pdb`; however, executing a graph
generated by `defun` sometimes takes less time and memory than eagerly
executing the corresponding Python function, since specifying computations as
graphs allows for optimizations like automatic buffer reuse and
parallelization among ops. Note that executing a `defun`-compiled function
incurs a small constant overhead, so eagerly executing sufficiently small
Python functions might take less time than executing their corresponding
`defun`-generated graphs.
For a Python function to be compatible with `defun`, all of its arguments must
be hashable Python objects or lists thereof. The function itself may not
modify the list/map structure of its arguments. Additionally, it must return
zero or more `tf.Tensor` objects. If the Python function returns
a `tf.Variable`, its compiled version will return the value of that variable
as a `tf.Tensor`.
Executing a graph generated by `defun` respects device annotations (i.e.,
all `with tf.device` directives present in a Python function will also be
present in its corresponding graph), but it is not yet possible to execute the
generated graphs across multiple machines.
_Example Usage_
```python
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
# A simple example.
def f(x, y):
return tf.reduce_mean(tf.multiply(x ** 2, 3) + y)
g = tf.contrib.eager.defun(f)
x = tf.constant([[2.0, 3.0]])
y = tf.constant([[3.0, -2.0]])
# `f` and `g` will return the same value, but `g` will be executed as a
# TensorFlow graph.
assert f(x, y).numpy() == g(x, y).numpy()
# `defun` is capable of compiling Python functions that close over Python
# objects, including Tensors and Variables.
@tf.contrib.eager.defun
def h():
return f(x, y)
assert (h().numpy() == f(x, y).numpy()).all()
# `defun` automatically lifts variables out of the graphs it creates,
# allowing you to compile the `call` methods of `tf.keras.layers.Layer` and
# `tf.keras.Model` objects.
class MyModel(tf.keras.Model):
def __init__(self, keep_probability=0.2):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
self.keep_probability = keep_probability
@tf.contrib.eager.defun
def call(self, inputs, training=True):
x = self.dense2(self.dense1(inputs))
if training:
return tf.nn.dropout(x, self.keep_probability)
else:
return x
model = MyModel()
model(x, training=True) # executes a graph, with dropout
model(x, training=False) # executes a graph, without dropout
# `defun`-compiled functions are differentiable.
optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.01)
with tf.GradientTape() as tape:
outputs = model(x)
gradient = tape.gradient(outputs, model.trainable_variables)
optimizer.apply_gradients((grad, var) for grad, var in zip(gradient,
model.trainable_variables))
```
When using `defun`, there are subtleties regarding inputs, Python control
flow, and variable creation that one should be aware of. For concreteness, let
`f` be a Python function that returns zero or more `tf.Tensor` objects and
let `F = defun(f)`. `F` builds a graph for each unique input signature it
sees, Python control flow is baked into graphs, and operations related to
variable initialization are automatically lifted out of the graphs that `F`
generates and placed in the eager context if executing eagerly or into an
outer graph otherwise.
_Input Signatures_
By default, `F = tf.contrib.eager.defun(f)` instantiates a separate graph
for every unique sequence of the shapes and dtypes of Tensor arguments and
the values of Python objects it is invoked with. For example, calling
  `F(tf.random.uniform([2]))` will execute a different graph than
  `F(tf.random.uniform([3]))` because the two inputs have different shapes.
The first time that `F(*args, **kwargs)` is called with a particular sequence
of Tensor shapes and dtypes and Python values, it constructs a graph by
tracing the execution of `f(*args, **kwargs)`; this graph is bound to an
input signature inferred from `(*args, **kwargs)` and cached for future reuse.
NumPy arrays passed as inputs to `F` are converted to `tf.Tensor` objects
before being passed to `f`, and are treated as Tensors for caching. This
allows a function to be called multiple times with NumPy arrays having
different values but the same shape and dtype without re-tracing each time.
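  For example (an illustrative sketch; `F` is a hypothetical compiled function):
  ```python
  import numpy as np
  F = tf.contrib.eager.defun(lambda x: x * 2)
  F(np.array([1.0, 2.0]))  # Traces a graph for this shape and dtype.
  F(np.array([3.0, 4.0]))  # Same shape and dtype: the cached graph is reused.
  ```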
`tf.contrib.eager.defun` caches graphs for your convenience, letting you
define TensorFlow functions without explicitly specifying their signatures.
However, this policy is conservative and potentially expensive; for example,
when different invocations of your function have differently-shaped Tensor
inputs, this policy might generate more graph functions than necessary. To
eliminate such costs, `tf.contrib.eager.defun` allows you to supply an
optional `input_signature` argument specifying the shapes and dtypes of the
inputs. In particular, the shapes may be partially unspecified, with `None`s
in the unknown dimensions. When an input signature is provided,
`tf.contrib.eager.defun` will only instantiate a single graph for the
decorated Python function. The following is an example:
```python
import tensorflow as tf
# The first `TensorSpec` below describes the shape and dtype of `words`,
# and the second describes the shape and dtype of `another_tensor`. Note that
# the last dimension of the `words` `TensorSpec` is left unspecified.
@tf.contrib.eager.defun(input_signature=[
tf.contrib.eager.TensorSpec(shape=[50, 300, None], dtype=tf.float32),
tf.contrib.eager.TensorSpec(shape=[300, 100], dtype=tf.float32)
])
def my_sequence_model(words, another_tensor):
...
# Note how the third dimension of the first input can vary freely.
  words = tf.random.uniform([50, 300, 10])
second_input = tf.random.uniform([300, 100])
my_sequence_model(words, second_input)
  words = tf.random.uniform([50, 300, 20])
my_sequence_model(words, second_input)
# Passing an input with an incompatible shape will raise an error.
  words = tf.random.uniform([50, 100, 20])
my_sequence_model(words, second_input) # <---- This will raise an error.
```
Python functions that are compiled with an `input_signature` must only accept
Tensors as arguments and must not take unnamed keyword arguments (**kwargs).
_Tracing_
Be aware that because `F` only logs TensorFlow operations, all the other
Python code that `f` executes will only shape the _construction_ of the graphs
that `F` executes: the Python code won't be executed when the graphs
themselves are executed, though it will be executed every time the Python
function is traced (and a given Python function might be traced multiple
times, once for each input signature it is invoked with). For example, whereas
the Python function
```python
import tensorflow as tf
import numpy as np
tf.compat.v1.enable_eager_execution()
def add_noise():
return tf.eye(5) + np.random.randn(5, 5)
```
  will return a different output every time it is invoked, the compiled function
`compiled = tf.contrib.eager.defun(add_noise)` will return the same value
every time it is called, since a particular random offset generated by NumPy
will be inserted into the graph as a TensorFlow constant. The solution is to
replace the call to `np.random.randn` with `tf.random.normal((5, 5))`.
_Python Side-Effects_
A corollary of the previous discussion on tracing is the following: If a
Python function `f` has Python side-effects, then executing `f` multiple times
will not necessarily be semantically equivalent to executing `F =
tf.contrib.eager.defun(f)` multiple times; this difference is due to the fact
that `defun` only captures the subgraph of TensorFlow operations that is
constructed when `f` is called in a graph-building context.
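  For example (an illustrative sketch; the list-based counter is a hypothetical
  way to observe tracing):
  ```python
  trace_count = [0]
  def counted(x):
    trace_count[0] += 1  # Python side-effect: runs only while tracing.
    return x + 1
  F = tf.contrib.eager.defun(counted)
  F(tf.constant(1.0))
  F(tf.constant(2.0))  # Same input signature: no retracing, no side-effect.
  # trace_count[0] is 1: the Python body ran only during the single trace.
  ```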
_Python Control Flow_
  The structure of many machine learning computations depends upon whether one is
training or validating, and it is common to nest specialized logic under `if
training:` blocks. By mapping each input signature to a unique graph, `defun`
lets users transparently compile such code, as the following code snippet
demonstrates:
```python
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
@tf.contrib.eager.defun
def lossy_matmul(W, x, training=True):
outputs = tf.matmul(W, x)
if training:
outputs = tf.nn.dropout(outputs, keep_probability=0.2)
return outputs
W = tf.random.normal((3, 5))
x = tf.random.normal((5, 1))
# Executes a graph that applies dropout.
lossy_outputs = lossy_matmul(W, x, training=True)
# Executes a graph that does not apply dropout.
exact_outputs = lossy_matmul(W, x, training=False)
```
_TensorFlow Control Flow_
When `autograph` is `True`, data-dependent control flow is allowed as well.
Control flow statements that depend on `Tensor` values are staged into
corresponding TensorFlow ops. For example, the following code will work as
expected:
```python
@tf.contrib.eager.defun
def dynamic_rnn_loop(cell, seq):
state, output = cell.zero_state()
for input in seq:
state, output = cell(input, state)
return output
```
For more information see `tf.autograph`.
_Variables_
TensorFlow operations related to variable creation and initialization are
automatically lifted out of the graphs generated by `defun`. In practice, this
implies that variable creation and initialization only happen the first time
`F` is called, and that variables are reused every time thereafter. Many
TensorFlow APIs, like `tf.keras.layers.Layer` objects, create variables the
first time they are called and reuse them thereafter. Automatic variable
lifting makes it possible to compile these APIs without extra effort, at the
cost of introducing a discrepancy between the semantics of executing Python
functions and their corresponding compiled functions. For example:
```python
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
def fn():
x = tf.Variable(0.0)
x.assign_add(1.0)
return x.read_value()
# `fn` is a Python function, so x is created, initialized, and destroyed upon
# every invocation
assert fn().numpy() == fn().numpy() == 1.0
compiled = tf.contrib.eager.defun(fn)
# Compiling `fn` with `defun` hoists all variables outside of the generated
# graph, so initialization happens exactly once.
assert compiled().numpy() == 1.0
assert compiled().numpy() == 2.0
```
Finally, because each input signature is bound to a unique graph, if your
Python function constructs `tf.Variable` objects, then each graph constructed
for that Python function will reference a unique set of variables. To
circumvent this problem, we recommend against compiling Python functions that
create `tf.Variable` objects. Instead, Python functions should either
lexically close over `tf.Variable` objects or accept them as arguments,
preferably encapsulated in an object-oriented container. If you must create
variables inside your Python function and you want each graph generated for it
to reference the same set of variables, add logic to your Python function that
ensures that variables are only created the first time it is called and are
reused for every subsequent invocation; note that this is precisely what
`tf.keras.layers.Layer` objects do, so we recommend using them to represent
variable-bearing computations whenever possible.
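  For example (an illustrative sketch):
  ```python
  v = tf.Variable(1.0)
  @tf.contrib.eager.defun
  def scale(x):
    return v * x  # Lexically closes over `v`; every traced graph shares it.
  ```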
Args:
func: function to be compiled. If `func` is None, returns a
decorator that can be invoked with a single argument - `func`. The
end result is equivalent to providing all the arguments up front.
In other words, defun(input_signature=...)(func) is equivalent to
defun(func, input_signature=...). The former allows
the following use case:
@tf.contrib.eager.defun(input_signature=...)
def foo(...):
...
input_signature: A possibly nested sequence of
`tf.contrib.eager.TensorSpec` objects specifying the shapes and dtypes of
the Tensors that will be supplied to this function. If `None`, a separate
function is instantiated for each inferred input signature. If a
signature is specified, every input to `func` must be a `Tensor`, and
`func` cannot accept `**kwargs`.
autograph: Whether `func` should be compiled before
constructing the graph. See https://www.tensorflow.org/guide/autograph
for more information.
experimental_autograph_options: Experimental knobs (in the form of a tuple
of tensorflow.autograph.Feature values) to control behavior when
autograph=True.
experimental_relax_shapes: When true, argument shapes may be relaxed to
avoid unnecessary retracing.
Returns:
If `func` is not None, returns a callable that will execute the compiled
function (and return zero or more `tf.Tensor` objects).
If `func` is None, returns a decorator that, when invoked with a single
`func` argument, returns a callable equivalent to the case above.
Raises:
TypeError: If `input_signature` is neither `None` nor a sequence of
`tf.contrib.eager.TensorSpec` objects.
"""
return defun_with_attributes(
func=func,
input_signature=input_signature,
autograph=autograph,
experimental_autograph_options=experimental_autograph_options,
experimental_relax_shapes=experimental_relax_shapes)
@tf_export("__internal__.function.defun_with_attributes", v1=[])
def defun_with_attributes(func=None,
input_signature=None,
attributes=None,
autograph=True,
experimental_autograph_options=None,
jit_compile=None,
experimental_relax_shapes=False,
experimental_follow_type_hints=False):
"""Compiles a Python function into a callable TensorFlow graph.
This function supports adding extra function attributes. See detailed
  documentation in defun(). Currently this is not exposed in the public API
  since we don't expect users to use attributes directly, and attributes won't
  work by themselves. This assumption might change in the future.
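  For example (an illustrative sketch; the attribute value is hypothetical, and
  `func_name` is one of the allowlisted attributes described below):
  ```python
  compiled = defun_with_attributes(
      lambda x: x + 1,
      attributes={"func_name": "increment_v1"})
  ```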
Args:
func: function to be compiled.
input_signature: same as defun()'s input_signature.
    attributes: A dictionary of arguments which will be added to the function
      def as attributes. Currently only primitive types are supported as values,
      and only allowlisted attribute names are allowed. An unallowlisted
      attribute name or unsupported value will result in a ValueError.
      `func_name` is also one of the allowlisted arguments; it is a Python
      string and sets the name for this `ConcreteFunction` in the graph.
autograph: same as defun()'s autograph.
experimental_autograph_options: same as defun()'s
experimental_autograph_options.
jit_compile: same as defun()'s jit_compile.
experimental_relax_shapes: same as defun()'s experimental_relax_shapes
experimental_follow_type_hints: see `tf.function`.
Returns:
Same as the return value of defun, with attributes added to the function in
graph.
"""
if input_signature is not None:
validate_signature(input_signature)
# TODO(apassos): deal with captured global state. Deal with control flow.
def decorated(function):
try:
if attributes:
name = attributes.pop("func_name", function.__name__)
else:
name = function.__name__
except AttributeError:
name = "function"
return tf_decorator.make_decorator(
function,
Function(
function,
name,
input_signature=input_signature,
attributes=attributes,
autograph=autograph,
autograph_options=experimental_autograph_options,
jit_compile=jit_compile,
experimental_relax_shapes=experimental_relax_shapes,
experimental_follow_type_hints=experimental_follow_type_hints))
# This code path is for the `foo = tfe.defun(foo, ...)` use case
if func is not None:
return decorated(func)
# This code path is for the
#
# @tfe.defun(...)
# def foo(...):
# ...
#
# use case, which is equivalent to `foo = tfe.defun(...)(foo)`
return decorated
# When a method is bound to objects of this type, it allows AutoGraph to
# recover a weak reference the original method's self pointer, so that it can
# execute it consistent with class_method_to_instance_method's
# bound_method_wrapper.
# TODO(b/119246461): This is not pretty. Use a descriptor instead?
class TfMethodTarget(object):
"""Binding target for methods replaced by function and defun."""
__slots__ = ("weakrefself_target__", "weakrefself_func__")
def __init__(self, target, original_python_function):
self.weakrefself_target__ = target
self.weakrefself_func__ = weakref.ref(original_python_function)
@property
def target(self):
return self.weakrefself_target__()
@property
def target_class(self):
true_self = self.weakrefself_target__()
if tf_inspect.isclass(true_self):
# Class method
return true_self
else:
return true_self.__class__
def call(self, args, kwargs):
wrapped_fn = self.weakrefself_func__()
if tf_inspect.ismethod(wrapped_fn):
wrapped_fn = six.get_unbound_function(wrapped_fn)
return wrapped_fn(self.weakrefself_target__(), *args, **kwargs)
def class_method_to_instance_method(original_function, instance):
"""Constructs a new `Function` with `self` bound."""
weak_instance = weakref.ref(instance)
# Note: while we could bind to a weakref proxy instead, that causes the
# bound method to be unhashable.
bound_method = types_lib.MethodType(
original_function.python_function,
TfMethodTarget(weak_instance, original_function.python_function))
# original_function is expected to be of one of the two `Function` types
# (defined either in function.py or def_function.py).
assert hasattr(original_function, "_name")
assert hasattr(original_function, "_autograph")
assert hasattr(original_function, "_function_spec")
assert hasattr(original_function, "python_function")
weak_bound_method_wrapper = None
def bound_method_wrapper(*args, **kwargs):
"""Wraps either a dummy MethodType or a converted AutoGraph function."""
# __wrapped__ allows AutoGraph to swap in a converted function.
strong_bound_method_wrapper = weak_bound_method_wrapper()
wrapped_fn = strong_bound_method_wrapper.__wrapped__
if wrapped_fn is strong_bound_method_wrapper.__original_wrapped__:
# If __wrapped__ was not replaced, then call original_function.
# TODO(mdan): For better consistency, use the wrapper's call().
wrapped_fn = original_function.python_function
if tf_inspect.ismethod(wrapped_fn):
wrapped_fn = six.get_unbound_function(wrapped_fn)
return wrapped_fn(weak_instance(), *args, **kwargs)
# If __wrapped__ was replaced, then it is always an unbound function.
# However, the replacer is still responsible for attaching self properly.
# TODO(mdan): Is it possible to do it here instead?
return wrapped_fn(*args, **kwargs)
weak_bound_method_wrapper = weakref.ref(bound_method_wrapper)
# pylint: disable=protected-access
# We make a dummy MethodType object to generate the correct bound method
# signature. The actual call is to a function with a weak reference to
# `instance`.
instance_func = type(original_function)(
tf_decorator.make_decorator(bound_method, bound_method_wrapper),
name=original_function._name,
autograph=original_function._autograph,
input_signature=original_function.input_signature,
experimental_relax_shapes=original_function._experimental_relax_shapes,
jit_compile=original_function._jit_compile)
# pylint: enable=protected-access
  # We wrap the bound method with tf_decorator so inspection works correctly.
wrapped_instance_func = tf_decorator.make_decorator(bound_method,
instance_func)
return wrapped_instance_func
class _FunctionGarbageCollector(object):
"""Cleans up cycles when a defun goes out of scope."""
__slots__ = ["_cache"]
def __init__(self, cache):
self._cache = cache
def __del__(self):
if func_graph_module is None or memory is None:
return
try:
while self._cache:
self._cache.popitem()
memory.dismantle_ordered_dict(self._cache)
except: # pylint: disable=bare-except
pass
class ConcreteFunctionGarbageCollector(object):
"""Cleans up reference cycles when a `ConcreteFunction` goes out of scope."""
__slots__ = ["_func_graph"]
def __init__(self, func_graph):
self._func_graph = func_graph
def release(self):
"""Call off the FuncGraph deletion."""
self._func_graph = None
def __del__(self):
if func_graph_module is None or memory is None or self._func_graph is None:
return
try:
func_graph_module.dismantle_func_graph(self._func_graph)
except: # pylint: disable=bare-except
pass
class _Marker(object):
"""Markers used to pretty-print nested args in function signatures."""
__slots__ = ["_s"]
def __init__(self, s):
self._s = s
def __repr__(self):
return str(self._s)
def _structure_summary(structure):
"""Displays a summary of the nesting structure of the given value."""
def type_name(x):
if isinstance(x, type_spec.TypeSpec):
return x.value_type.__name__
else:
return type(x).__name__
markers = [_Marker(type_name(v)) for v in nest.flatten(structure)]
return str(nest.pack_sequence_as(structure, markers))
def _contains_type_spec(value):
return any(isinstance(x, type_spec.TypeSpec) for x in nest.flatten(value))
|
py | b411889ba2d39bbff18ea5cbd5a22c0a296697a2 | import pytest
from email_validator import EmailNotValidError
from mock import patch
from uaa_bot import config, notifier
from fixtures.config import SMTP_CONFIG, user_email, username
def test_render_account_expired_template():
template = "account_expired"
notification = notifier.Notifier(user_email)
rendered = notification.render_template(template, username=username)
assert type(rendered) == str
assert f"Hello," in rendered
assert "Your account has been" in rendered
assert "deactivated" in rendered
def test_render_account_expiration_10_days_template():
template = "account_expires_in_10_days"
notification = notifier.Notifier(user_email)
rendered = notification.render_template(template, username=username)
assert type(rendered) == str
assert f"Hello," in rendered
assert "Your account will be" in rendered
assert "deactivated" in rendered
assert "10 days" in rendered
def test_render_account_expiration_1_day_template():
template = "account_expires_in_1_day"
notification = notifier.Notifier(user_email)
rendered = notification.render_template(template, username=username)
assert type(rendered) == str
assert f"Hello," in rendered
assert "Your account will be" in rendered
assert "deactivated" in rendered
assert "1 day" in rendered
def test_uses_default_smtp_config_if_not_provided():
notification = notifier.Notifier(user_email)
assert type(notification.smtp_config) == dict
for key, value in notification.smtp_config.items():
assert value == config.smtp[key]
def test_uses_smtp_config_if_provided():
notification = notifier.Notifier(user_email, smtp_config=SMTP_CONFIG)
assert type(notification.smtp_config) == dict
for key, value in notification.smtp_config.items():
assert value == SMTP_CONFIG[key]
def test_raise_error_with_invalid_email():
with pytest.raises(EmailNotValidError):
invalid_email = "invalid email address"
notification = notifier.Notifier(invalid_email)
def test_raise_error_with_invalid_template():
with pytest.raises(Exception):
invalid_template = "invalid template name"
notification = notifier.Notifier(user_email)
rendered = notification.render_template(invalid_template, username=username)
def test_get_email_subject_from_template_name():
template = "account_expires_in_1_day"
expected = "Your cloud.gov account expires in 1 day"
notification = notifier.Notifier(user_email)
subject = notification.get_email_subject(template)
assert subject == expected
@patch("uaa_bot.notifier.smtplib")
def test_send_email_auth(smtp_connection):
"""IF SMTP_USER and SMTP_PASS are provided, smtp.login() is called"""
template = "account_expired"
notification = notifier.Notifier(user_email, smtp_config=SMTP_CONFIG)
response = notification.send_email(template, username=username)
assert response == True
smtp_connection.SMTP.assert_called_with(
SMTP_CONFIG["SMTP_HOST"], SMTP_CONFIG["SMTP_PORT"]
)
smtp_connection.SMTP().login.assert_called_with("user", SMTP_CONFIG["SMTP_PASS"])
|
py | b41189706a175c4eca72f8d1005962a7b895e2d4 | #!/usr/bin/env python
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <[email protected]>
#
import unittest
import logging
import tempfile
import warnings
import pathlib
import pickle
import platform
import glob
import os
import re
from textwrap import dedent
from xmlschema import XMLSchemaParseError, XMLSchemaIncludeWarning, XMLSchemaImportWarning
from xmlschema.names import XML_NAMESPACE, LOCATION_HINTS, SCHEMAS_DIR, XSD_ELEMENT, XSI_TYPE
from xmlschema.etree import etree_element
from xmlschema.validators import XMLSchemaBase, XMLSchema10, XMLSchema11, \
XsdGlobals, Xsd11Attribute
from xmlschema.testing import SKIP_REMOTE_TESTS, XsdValidatorTestCase
from xmlschema.validators.schemas import logger
class CustomXMLSchema(XMLSchema10):
pass
class TestXMLSchema10(XsdValidatorTestCase):
TEST_CASES_DIR = os.path.join(os.path.dirname(__file__), '../test_cases')
maxDiff = None
class CustomXMLSchema(XMLSchema10):
pass
def test_schema_validation(self):
schema = self.schema_class(self.vh_xsd_file)
self.assertEqual(schema.validation, 'strict')
schema = self.schema_class(self.vh_xsd_file, validation='lax')
self.assertEqual(schema.validation, 'lax')
schema = self.schema_class(self.vh_xsd_file, validation='skip')
self.assertEqual(schema.validation, 'skip')
with self.assertRaises(ValueError):
self.schema_class(self.vh_xsd_file, validation='none')
def test_schema_string_repr(self):
schema = self.schema_class(self.vh_xsd_file)
tmpl = "%s(name='vehicles.xsd', namespace='http://example.com/vehicles')"
self.assertEqual(str(schema), tmpl % self.schema_class.__name__)
def test_schema_copy(self):
schema = self.vh_schema.copy()
self.assertNotEqual(id(self.vh_schema), id(schema))
self.assertNotEqual(id(self.vh_schema.namespaces), id(schema.namespaces))
self.assertNotEqual(id(self.vh_schema.maps), id(schema.maps))
def test_schema_location_hints(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://xmlschema.test/ns schema.xsd">
<xs:element name="root" />
</xs:schema>"""))
self.assertEqual(schema.schema_location, [("http://xmlschema.test/ns", "schema.xsd")])
self.assertIsNone(schema.no_namespace_schema_location)
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:noNamespaceSchemaLocation="schema.xsd">
<xs:element name="root" />
</xs:schema>"""))
self.assertEqual(schema.schema_location, [])
self.assertEqual(schema.no_namespace_schema_location, 'schema.xsd')
def test_target_prefix(self):
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
targetNamespace="http://xmlschema.test/ns">
<xs:element name="root" />
</xs:schema>"""))
self.assertEqual(schema.target_prefix, '')
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:tns="http://xmlschema.test/ns"
targetNamespace="http://xmlschema.test/ns">
<xs:element name="root" />
</xs:schema>"""))
self.assertEqual(schema.target_prefix, 'tns')
def test_builtin_types(self):
self.assertIn('string', self.schema_class.builtin_types())
with self.assertRaises(RuntimeError):
self.schema_class.meta_schema.builtin_types()
def test_resolve_qname(self):
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<xs:element name="root" />
</xs:schema>"""))
self.assertEqual(schema.resolve_qname('xs:element'), XSD_ELEMENT)
self.assertEqual(schema.resolve_qname('xsi:type'), XSI_TYPE)
self.assertEqual(schema.resolve_qname(XSI_TYPE), XSI_TYPE)
self.assertEqual(schema.resolve_qname('element'), 'element')
self.assertRaises(ValueError, schema.resolve_qname, '')
self.assertRaises(ValueError, schema.resolve_qname, 'xsi:a type ')
self.assertRaises(ValueError, schema.resolve_qname, 'xml::lang')
def test_global_group_definitions(self):
schema = self.check_schema("""
<xs:group name="wrong_child">
<xs:element name="foo"/>
</xs:group>""", validation='lax')
self.assertEqual(len(schema.errors), 1)
self.check_schema('<xs:group name="empty" />', XMLSchemaParseError)
self.check_schema('<xs:group name="empty"><xs:annotation/></xs:group>', XMLSchemaParseError)
def test_wrong_includes_and_imports(self):
with warnings.catch_warnings(record=True) as context:
warnings.simplefilter("always")
self.check_schema("""
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" targetNamespace="ns">
<xs:include schemaLocation="example.xsd" />
<xs:import schemaLocation="example.xsd" />
<xs:redefine schemaLocation="example.xsd"/>
<xs:import namespace="http://missing.example.test/" />
<xs:import/>
</xs:schema>
""")
self.assertEqual(len(context), 3, "Wrong number of include/import warnings")
self.assertEqual(context[0].category, XMLSchemaIncludeWarning)
self.assertEqual(context[1].category, XMLSchemaIncludeWarning)
self.assertEqual(context[2].category, XMLSchemaImportWarning)
self.assertTrue(str(context[0].message).startswith("Include"))
self.assertTrue(str(context[1].message).startswith("Redefine"))
self.assertTrue(str(context[2].message).startswith("Import of namespace"))
def test_wrong_references(self):
# Wrong namespace for element type's reference
self.check_schema("""
<xs:element name="dimension" type="xs:dimensionType"/>
<xs:simpleType name="dimensionType">
<xs:restriction base="xs:short"/>
</xs:simpleType>
""", XMLSchemaParseError)
def test_annotations(self):
schema = self.check_schema("""
<xs:element name='foo'>
<xs:annotation />
</xs:element>""")
xsd_element = schema.elements['foo']
self.assertIsNone(xsd_element._annotation) # lazy annotation
self.assertIsNotNone(xsd_element.annotation)
self.assertIs(xsd_element.annotation, xsd_element._annotation)
self.check_schema("""
<xs:simpleType name='Magic'>
<xs:annotation />
<xs:annotation />
<xs:restriction base='xs:string'>
<xs:enumeration value='A'/>
</xs:restriction>
</xs:simpleType>""", XMLSchemaParseError)
schema = self.check_schema("""
<xs:simpleType name='Magic'>
<xs:annotation>
<xs:documentation> stuff </xs:documentation>
</xs:annotation>
<xs:restriction base='xs:string'>
<xs:enumeration value='A'/>
</xs:restriction>
</xs:simpleType>""")
xsd_type = schema.types["Magic"]
self.assertIsNotNone(xsd_type._annotation) # xs:simpleType annotations are not lazy parsed
self.assertEqual(str(xsd_type.annotation), ' stuff ')
def test_annotation_string(self):
schema = self.check_schema("""
<xs:element name='A'>
<xs:annotation>
<xs:documentation>A element info</xs:documentation>
</xs:annotation>
</xs:element>
<xs:element name='B'>
<xs:annotation>
<xs:documentation>B element extended info, line1</xs:documentation>
<xs:documentation>B element extended info, line2</xs:documentation>
</xs:annotation>
</xs:element>""")
xsd_element = schema.elements['A']
self.assertEqual(str(xsd_element.annotation), 'A element info')
self.assertEqual(repr(xsd_element.annotation), "XsdAnnotation('A element info')")
xsd_element = schema.elements['B']
self.assertEqual(str(xsd_element.annotation),
'B element extended info, line1\nB element extended info, line2')
self.assertEqual(repr(xsd_element.annotation),
"XsdAnnotation('B element extended info, line1\\nB element')")
def test_schema_annotations(self):
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="root"/>
</xs:schema>"""))
self.assertIsNone(schema._annotations)
annotations = schema.annotations
self.assertListEqual(annotations, [])
self.assertIs(annotations, schema.annotations)
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:annotation>
<xs:documentation>First annotation</xs:documentation>
</xs:annotation>
<xs:annotation>
<xs:documentation>Second annotation</xs:documentation>
</xs:annotation>
<xs:element name="root"/>
<xs:annotation>
<xs:documentation>Third annotation</xs:documentation>
</xs:annotation>
</xs:schema>"""))
self.assertIsNone(schema._annotations)
annotations = schema.annotations
self.assertEqual(len(annotations), 3)
self.assertEqual(repr(annotations[0]), "XsdAnnotation('First annotation')")
self.assertEqual(repr(annotations[1]), "XsdAnnotation('Second annotation')")
self.assertEqual(repr(annotations[2]), "XsdAnnotation('Third annotation')")
self.assertIs(annotations, schema.annotations)
def test_base_schemas(self):
xsd_file = os.path.join(SCHEMAS_DIR, 'XML/xml_minimal.xsd')
schema = self.schema_class(xsd_file)
self.assertEqual(schema.target_namespace, XML_NAMESPACE)
def test_root_elements(self):
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"/>"""))
self.assertEqual(schema.root_elements, [])
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="root" />
</xs:schema>"""))
self.assertEqual(schema.root_elements, [schema.elements['root']])
# Test issue #107 fix
schema = self.schema_class(dedent("""\
<?xml version="1.0" encoding="utf-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="root1" type="root"/>
<xs:element name="root2" type="root"/>
<xs:complexType name="root">
<xs:sequence>
<xs:element name="elementWithNoType"/>
</xs:sequence>
</xs:complexType>
</xs:schema>"""))
self.assertEqual(set(schema.root_elements),
{schema.elements['root1'], schema.elements['root2']})
def test_simple_types(self):
self.assertListEqual(self.vh_schema.simple_types, [])
self.assertGreater(len(self.st_schema.simple_types), 20)
def test_complex_types(self):
self.assertListEqual(self.vh_schema.complex_types,
[self.vh_schema.types['vehicleType']])
def test_is_restriction_method(self):
# Test issue #111 fix
schema = self.schema_class(source=self.casepath('issues/issue_111/issue_111.xsd'))
extended_header_def = schema.types['extendedHeaderDef']
self.assertTrue(extended_header_def.is_derived(schema.types['blockDef']))
@unittest.skipIf(SKIP_REMOTE_TESTS, "Remote networks are not accessible.")
def test_remote_schemas_loading(self):
col_schema = self.schema_class("https://raw.githubusercontent.com/brunato/xmlschema/master/"
"tests/test_cases/examples/collection/collection.xsd",
timeout=300)
self.assertTrue(isinstance(col_schema, self.schema_class))
vh_schema = self.schema_class("https://raw.githubusercontent.com/brunato/xmlschema/master/"
"tests/test_cases/examples/vehicles/vehicles.xsd",
timeout=300)
self.assertTrue(isinstance(vh_schema, self.schema_class))
def test_schema_defuse(self):
vh_schema = self.schema_class(self.vh_xsd_file, defuse='always')
self.assertIsInstance(vh_schema.root, etree_element)
for schema in vh_schema.maps.iter_schemas():
self.assertIsInstance(schema.root, etree_element)
def test_logging(self):
self.schema_class(self.vh_xsd_file, loglevel=logging.ERROR)
self.assertEqual(logger.level, logging.WARNING)
with self.assertLogs('xmlschema', level='INFO') as ctx:
self.schema_class(self.vh_xsd_file, loglevel=logging.INFO)
self.assertEqual(logger.level, logging.WARNING)
self.assertEqual(len(ctx.output), 7)
self.assertIn("INFO:xmlschema:Include schema from 'types.xsd'", ctx.output)
self.assertIn("INFO:xmlschema:Resource 'types.xsd' is already loaded", ctx.output)
with self.assertLogs('xmlschema', level='DEBUG') as ctx:
self.schema_class(self.vh_xsd_file, loglevel=logging.DEBUG)
self.assertEqual(logger.level, logging.WARNING)
self.assertEqual(len(ctx.output), 19)
self.assertIn("INFO:xmlschema:Include schema from 'cars.xsd'", ctx.output)
self.assertIn("INFO:xmlschema:Resource 'cars.xsd' is already loaded", ctx.output)
self.assertIn("DEBUG:xmlschema:Schema targetNamespace is "
"'http://example.com/vehicles'", ctx.output)
self.assertIn("INFO:xmlschema:Resource 'cars.xsd' is already loaded", ctx.output)
# With string argument
with self.assertRaises(ValueError) as ctx:
self.schema_class(self.vh_xsd_file, loglevel='all')
self.assertEqual(str(ctx.exception), "'all' is not a valid loglevel")
with self.assertLogs('xmlschema', level='INFO') as ctx:
self.schema_class(self.vh_xsd_file, loglevel='INFO')
self.assertEqual(len(ctx.output), 7)
with self.assertLogs('xmlschema', level='INFO') as ctx:
self.schema_class(self.vh_xsd_file, loglevel=' Info ')
self.assertEqual(len(ctx.output), 7)
def test_target_namespace(self):
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
targetNamespace="http://xmlschema.test/ns">
<xs:element name="root"/>
</xs:schema>"""))
self.assertEqual(schema.target_namespace, 'http://xmlschema.test/ns')
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="root"/>
</xs:schema>"""))
self.assertEqual(schema.target_namespace, '')
with self.assertRaises(XMLSchemaParseError) as ctx:
self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
targetNamespace="">
<xs:element name="root"/>
</xs:schema>"""))
self.assertEqual(ctx.exception.message,
"the attribute 'targetNamespace' cannot be an empty string")
def test_block_default(self):
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
blockDefault="extension restriction ">
<xs:element name="root"/>
</xs:schema>"""))
self.assertEqual(schema.block_default, 'extension restriction ')
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
blockDefault="#all">
<xs:element name="root"/>
</xs:schema>"""))
self.assertEqual(set(schema.block_default.split()),
{'substitution', 'extension', 'restriction'})
with self.assertRaises(XMLSchemaParseError) as ctx:
self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
blockDefault="all">>
<xs:element name="root"/>
</xs:schema>"""))
self.assertEqual(ctx.exception.message,
"wrong value 'all' for attribute 'blockDefault'")
with self.assertRaises(XMLSchemaParseError) as ctx:
self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
blockDefault="#all restriction">>
<xs:element name="root"/>
</xs:schema>"""))
self.assertEqual(ctx.exception.message,
"wrong value '#all restriction' for attribute 'blockDefault'")
def test_final_default(self):
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
finalDefault="extension restriction ">
<xs:element name="root"/>
</xs:schema>"""))
self.assertEqual(schema.final_default, 'extension restriction ')
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
finalDefault="#all">
<xs:element name="root"/>
</xs:schema>"""))
self.assertEqual(set(schema.final_default.split()),
{'list', 'union', 'extension', 'restriction'})
with self.assertRaises(XMLSchemaParseError) as ctx:
self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
finalDefault="all">>
<xs:element name="root"/>
</xs:schema>"""))
self.assertEqual(ctx.exception.message,
"wrong value 'all' for attribute 'finalDefault'")
def test_use_fallback(self):
source = dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="root"/>
</xs:schema>""")
schema = self.schema_class(source)
self.assertEqual(schema.fallback_locations, LOCATION_HINTS)
schema = self.schema_class(source, use_fallback=False)
self.assertEqual(schema.fallback_locations, {})
def test_global_maps(self):
source = dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="root"/>
</xs:schema>""")
col_schema = self.schema_class(self.col_xsd_file)
with self.assertRaises(TypeError) as ctx:
self.schema_class(self.col_schema, global_maps=col_schema)
self.assertIn("'global_maps' argument must be", str(ctx.exception))
schema = self.schema_class(source, global_maps=col_schema.maps)
self.assertIs(col_schema.maps, schema.maps)
def test_version_control(self):
schema = self.schema_class(dedent("""
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="root">
<xs:complexType>
<xs:attribute name="a" use="required"/>
<xs:assert test="@a > 300" vc:minVersion="1.1"
xmlns:vc="http://www.w3.org/2007/XMLSchema-versioning"/>
</xs:complexType>
</xs:element>
</xs:schema>"""))
self.assertEqual(len(schema.root[0][0]), 1 if schema.XSD_VERSION == '1.0' else 2)
schema = self.schema_class(dedent("""
<xs:schema vc:minVersion="1.1" elementFormDefault="qualified"
xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:vc="http://www.w3.org/2007/XMLSchema-versioning">
<xs:element name="root"/>
</xs:schema>"""))
self.assertEqual(len(schema.root), 0 if schema.XSD_VERSION == '1.0' else 1)
def test_xsd_version_compatibility_property(self):
self.assertEqual(self.vh_schema.xsd_version, self.vh_schema.XSD_VERSION)
def test_explicit_locations(self):
source = dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="root"/>
</xs:schema>""")
locations = {'http://example.com/vehicles': self.vh_xsd_file}
schema = self.schema_class(source, locations=locations)
self.assertEqual(len(schema.maps.namespaces['http://example.com/vehicles']), 4)
def test_use_meta_property(self):
self.assertTrue(self.vh_schema.use_meta)
self.assertTrue(self.col_schema.use_meta)
meta_schema = self.schema_class.meta_schema
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="foo"/>
</xs:schema>"""), use_meta=False)
self.assertIsNot(meta_schema, schema.meta_schema)
self.assertFalse(schema.use_meta)
def test_other_schema_root_attributes(self):
self.assertIsNone(self.vh_schema.id)
self.assertIsNone(self.vh_schema.version)
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" id="foo" version="2.0">
<xs:element name="foo"/>
</xs:schema>"""))
self.assertEqual(schema.id, 'foo')
self.assertEqual(schema.version, '2.0')
def test_change_maps_attribute(self):
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="root"/>
</xs:schema>"""))
with self.assertRaises(ValueError) as ctx:
schema.meta_schema.maps = XsdGlobals(schema, schema.validation)
self.assertEqual(str(ctx.exception),
"cannot change the global maps instance of a meta-schema")
self.assertTrue(schema.built)
maps, schema.maps = schema.maps, XsdGlobals(schema, schema.validation)
self.assertIsNot(maps, schema.maps)
self.assertFalse(schema.built)
schema.maps = maps
self.assertTrue(schema.built)
def test_listed_and_reversed_elements(self):
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="elem1"/>
<xs:element name="elem2"/>
<xs:element name="elem3"/>
</xs:schema>"""))
elements = list(schema)
self.assertListEqual(elements, [schema.elements['elem1'],
schema.elements['elem2'],
schema.elements['elem3']])
elements.reverse()
self.assertListEqual(elements, list(reversed(schema)))
def test_multi_schema_initialization(self):
source1 = dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="elem1"/>
</xs:schema>""")
source2 = dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="elem2"/>
</xs:schema>""")
source3 = dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="elem3"/>
</xs:schema>""")
schema = self.schema_class([source1, source2, source3])
self.assertEqual(len(schema.elements), 3)
self.assertEqual(len(schema.maps.namespaces['']), 3)
self.assertIs(schema.elements['elem1'].schema, schema)
self.assertIs(schema.elements['elem2'].schema, schema.maps.namespaces[''][1])
self.assertIs(schema.elements['elem3'].schema, schema.maps.namespaces[''][2])
with self.assertRaises(XMLSchemaParseError) as ec:
self.schema_class([source1, source2, source2])
self.assertIn("global element with name='elem2' is already defined", str(ec.exception))
source1 = dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
targetNamespace="http://xmlschema.test/ns">
<xs:element name="elem1"/>
</xs:schema>""")
schema = self.schema_class([source1, source2])
self.assertEqual(len(schema.elements), 2)
self.assertEqual(len(schema.maps.namespaces['http://xmlschema.test/ns']), 2)
self.assertIs(schema.elements['elem1'].schema, schema)
self.assertIs(schema.elements['elem2'].schema,
schema.maps.namespaces['http://xmlschema.test/ns'][1])
def test_add_schema(self):
source1 = dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
targetNamespace="http://xmlschema.test/ns">
<xs:element name="elem1"/>
</xs:schema>""")
source2 = dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="elem2"/>
</xs:schema>""")
source3 = dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
targetNamespace="http://xmlschema.test/ns1">
<xs:element name="elem3"/>
</xs:schema>""")
schema = self.schema_class(source1)
schema.add_schema(source2, build=True)
self.assertEqual(len(schema.elements), 1)
self.assertEqual(len(schema.maps.namespaces['http://xmlschema.test/ns']), 1)
self.assertEqual(len(schema.maps.namespaces['']), 1)
        # Fewer checks on duplicate objects for schemas added after the build
schema.add_schema(source2, build=True)
self.assertEqual(len(schema.maps.namespaces['']), 2)
self.assertTrue(schema.maps.built)
with self.assertRaises(XMLSchemaParseError) as ec:
schema.maps.clear()
schema.build()
self.assertIn("global element with name='elem2' is already defined", str(ec.exception))
schema = self.schema_class(source1)
schema.add_schema(source2, namespace='http://xmlschema.test/ns', build=True)
self.assertEqual(len(schema.maps.namespaces['http://xmlschema.test/ns']), 2)
# Need a rebuild to add elem2 from added schema ...
self.assertEqual(len(schema.elements), 1)
schema.maps.clear()
schema.build()
self.assertEqual(len(schema.elements), 2)
        # ... so it is better to build after adding all the sources
schema = self.schema_class(source1, build=False)
schema.add_schema(source2, namespace='http://xmlschema.test/ns')
schema.build()
self.assertEqual(len(schema.elements), 2)
        # Adding other namespaces does not require a rebuild
schema3 = schema.add_schema(source3, build=True)
self.assertEqual(len(schema.maps.namespaces['http://xmlschema.test/ns1']), 1)
self.assertEqual(len(schema3.elements), 1)
def test_export_errors__issue_187(self):
with self.assertRaises(ValueError) as ctx:
self.vh_schema.export(target=self.vh_dir)
self.assertIn("target directory", str(ctx.exception))
self.assertIn("is not empty", str(ctx.exception))
with self.assertRaises(ValueError) as ctx:
self.vh_schema.export(target=self.vh_xsd_file)
self.assertIn("target", str(ctx.exception))
self.assertIn("is not a directory", str(ctx.exception))
with self.assertRaises(ValueError) as ctx:
self.vh_schema.export(target=self.vh_xsd_file + '/target')
self.assertIn("target parent", str(ctx.exception))
self.assertIn("is not a directory", str(ctx.exception))
with tempfile.TemporaryDirectory() as dirname:
with self.assertRaises(ValueError) as ctx:
self.vh_schema.export(target=dirname + 'subdir/target')
self.assertIn("target parent directory", str(ctx.exception))
self.assertIn("does not exist", str(ctx.exception))
def test_export_same_directory__issue_187(self):
with tempfile.TemporaryDirectory() as dirname:
self.vh_schema.export(target=dirname)
for filename in os.listdir(dirname):
with pathlib.Path(dirname).joinpath(filename).open() as fp:
exported_schema = fp.read()
with pathlib.Path(self.vh_dir).joinpath(filename).open() as fp:
original_schema = fp.read()
if platform.system() == 'Windows':
exported_schema = re.sub(r'\s+', '', exported_schema)
original_schema = re.sub(r'\s+', '', original_schema)
self.assertEqual(exported_schema, original_schema)
self.assertFalse(os.path.isdir(dirname))
def test_export_another_directory__issue_187(self):
vh_schema_file = self.casepath('issues/issue_187/issue_187_1.xsd')
vh_schema = self.schema_class(vh_schema_file)
with tempfile.TemporaryDirectory() as dirname:
vh_schema.export(target=dirname)
path = pathlib.Path(dirname).joinpath('examples/vehicles/*.xsd')
for filename in glob.iglob(pathname=str(path)):
with pathlib.Path(dirname).joinpath(filename).open() as fp:
exported_schema = fp.read()
basename = os.path.basename(filename)
with pathlib.Path(self.vh_dir).joinpath(basename).open() as fp:
original_schema = fp.read()
if platform.system() == 'Windows':
exported_schema = re.sub(r'\s+', '', exported_schema)
original_schema = re.sub(r'\s+', '', original_schema)
self.assertEqual(exported_schema, original_schema)
with pathlib.Path(dirname).joinpath('issue_187_1.xsd').open() as fp:
exported_schema = fp.read()
with open(vh_schema_file) as fp:
original_schema = fp.read()
if platform.system() == 'Windows':
exported_schema = re.sub(r'\s+', '', exported_schema)
original_schema = re.sub(r'\s+', '', original_schema)
self.assertNotEqual(exported_schema, original_schema)
self.assertEqual(
exported_schema,
original_schema.replace('../..', dirname.replace('\\', '/'))
)
self.assertFalse(os.path.isdir(dirname))
@unittest.skipIf(SKIP_REMOTE_TESTS, "Remote networks are not accessible.")
def test_export_remote__issue_187(self):
vh_schema_file = self.casepath('issues/issue_187/issue_187_2.xsd')
vh_schema = self.schema_class(vh_schema_file)
with tempfile.TemporaryDirectory() as dirname:
vh_schema.export(target=dirname)
with pathlib.Path(dirname).joinpath('issue_187_2.xsd').open() as fp:
exported_schema = fp.read()
with open(vh_schema_file) as fp:
original_schema = fp.read()
if platform.system() == 'Windows':
exported_schema = re.sub(r'\s+', '', exported_schema)
original_schema = re.sub(r'\s+', '', original_schema)
self.assertEqual(exported_schema, original_schema)
self.assertFalse(os.path.isdir(dirname))
with tempfile.TemporaryDirectory() as dirname:
vh_schema.export(target=dirname, save_remote=True)
path = pathlib.Path(dirname).joinpath('brunato/xmlschema/master/tests/test_cases/'
'examples/vehicles/*.xsd')
for filename in glob.iglob(pathname=str(path)):
with pathlib.Path(dirname).joinpath(filename).open() as fp:
exported_schema = fp.read()
basename = os.path.basename(filename)
with pathlib.Path(self.vh_dir).joinpath(basename).open() as fp:
original_schema = fp.read()
self.assertEqual(exported_schema, original_schema)
with pathlib.Path(dirname).joinpath('issue_187_2.xsd').open() as fp:
exported_schema = fp.read()
with open(vh_schema_file) as fp:
original_schema = fp.read()
if platform.system() == 'Windows':
exported_schema = re.sub(r'\s+', '', exported_schema)
original_schema = re.sub(r'\s+', '', original_schema)
self.assertNotEqual(exported_schema, original_schema)
self.assertEqual(
exported_schema,
original_schema.replace('https://raw.githubusercontent.com',
dirname.replace('\\', '/') + '/raw.githubusercontent.com')
)
self.assertFalse(os.path.isdir(dirname))
def test_pickling_subclassed_schema__issue_263(self):
cases_dir = pathlib.Path(__file__).parent.parent
schema_file = cases_dir.joinpath('test_cases/examples/vehicles/vehicles.xsd')
xml_file = cases_dir.joinpath('test_cases/examples/vehicles/vehicles.xml')
schema = self.CustomXMLSchema(str(schema_file))
self.assertTrue(schema.is_valid(str(xml_file)))
self.assertIs(self.schema_class.meta_schema, schema.meta_schema)
self.assertNotIn(schema.meta_schema.__class__.__name__, globals())
s = pickle.dumps(schema)
_schema = pickle.loads(s)
self.assertTrue(_schema.is_valid(str(xml_file)))
class CustomLocalXMLSchema(self.schema_class):
pass
schema = CustomLocalXMLSchema(str(schema_file))
self.assertTrue(schema.is_valid(str(xml_file)))
with self.assertRaises((pickle.PicklingError, AttributeError)) as ec:
pickle.dumps(schema)
self.assertIn("Can't pickle", str(ec.exception))
def test_old_subclassing_attribute(self):
with warnings.catch_warnings(record=True) as ctx:
warnings.simplefilter("always")
class OldXMLSchema10(XMLSchema10):
BUILDERS = {
'attribute_class': Xsd11Attribute,
}
self.assertEqual(len(ctx), 1, "Expected one import warning")
self.assertIn("'BUILDERS' will be removed in v2.0", str(ctx[0].message))
self.assertIs(OldXMLSchema10.xsd_attribute_class, Xsd11Attribute)
name = OldXMLSchema10.meta_schema.__class__.__name__
self.assertEqual(name, 'MetaXMLSchema10')
self.assertNotIn(name, globals())
def test_default_namespace_mapping__issue_266(self):
schema_file = self.casepath('issues/issue_266/issue_266b-1.xsd')
with self.assertRaises(XMLSchemaParseError) as ec:
self.schema_class(schema_file)
error_message = str(ec.exception)
self.assertIn("the QName 'testAttribute3' is mapped to no namespace", error_message)
self.assertIn("requires that there is an xs:import statement", error_message)
class TestXMLSchema11(TestXMLSchema10):
schema_class = XMLSchema11
class CustomXMLSchema(XMLSchema11):
pass
def test_default_attributes(self):
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
defaultAttributes="attrs">
<xs:element name="root"/>
<xs:attributeGroup name="attrs">
<xs:attribute name="a"/>
</xs:attributeGroup>
</xs:schema>"""))
self.assertIs(schema.default_attributes, schema.attribute_groups['attrs'])
with self.assertRaises(XMLSchemaParseError) as ctx:
self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
defaultAttributes="attrs">
<xs:element name="root"/>
</xs:schema>"""))
self.assertIn("'attrs' doesn't match any attribute group", ctx.exception.message)
with self.assertRaises(XMLSchemaParseError) as ctx:
self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
defaultAttributes="x:attrs">
<xs:element name="root"/>
</xs:schema>"""))
self.assertEqual("prefix 'x' not found in namespace map", ctx.exception.message)
class TestXMLSchemaMeta(unittest.TestCase):
def test_wrong_version(self):
with self.assertRaises(ValueError) as ctx:
class XMLSchema12(XMLSchemaBase):
XSD_VERSION = '1.2'
meta_schema = os.path.join(SCHEMAS_DIR, 'XSD_1.1/XMLSchema.xsd')
assert issubclass(XMLSchema12, XMLSchemaBase)
self.assertEqual(str(ctx.exception), "XSD_VERSION must be '1.0' or '1.1'")
def test_from_schema_class(self):
class XMLSchema11Bis(XMLSchema11):
pass
self.assertTrue(issubclass(XMLSchema11Bis, XMLSchemaBase))
def test_dummy_validator_class(self):
class DummySchema(XMLSchemaBase):
XSD_VERSION = '1.1'
meta_schema = os.path.join(SCHEMAS_DIR, 'XSD_1.1/XMLSchema.xsd')
self.assertTrue(issubclass(DummySchema, XMLSchemaBase))
def test_subclass_but_no_replace_meta_schema(self):
class CustomXMLSchema10(XMLSchema10):
pass
self.assertIsInstance(CustomXMLSchema10.meta_schema, XMLSchemaBase)
self.assertIs(CustomXMLSchema10.meta_schema, XMLSchema10.meta_schema)
name = CustomXMLSchema10.meta_schema.__class__.__name__
self.assertEqual(name, 'MetaXMLSchema10')
self.assertNotIn(name, globals())
def test_subclass_and_replace_meta_schema(self):
class CustomXMLSchema10(XMLSchema10):
meta_schema = os.path.join(SCHEMAS_DIR, 'XSD_1.0/XMLSchema.xsd')
self.assertIsInstance(CustomXMLSchema10.meta_schema, XMLSchemaBase)
self.assertIsNot(CustomXMLSchema10.meta_schema, XMLSchema10.meta_schema)
name = CustomXMLSchema10.meta_schema.__class__.__name__
self.assertEqual(name, 'MetaCustomXMLSchema10')
self.assertIn(name, globals())
bases = CustomXMLSchema10.meta_schema.__class__.__bases__
self.assertEqual(bases, (XMLSchema10.meta_schema.__class__,))
def test_subclass_and_create_base_meta_schema(self):
class CustomXMLSchema10(XMLSchemaBase):
meta_schema = os.path.join(SCHEMAS_DIR, 'XSD_1.0/XMLSchema.xsd')
self.assertIsInstance(CustomXMLSchema10.meta_schema, XMLSchemaBase)
self.assertIsNot(CustomXMLSchema10.meta_schema, XMLSchema10.meta_schema)
name = CustomXMLSchema10.meta_schema.__class__.__name__
self.assertEqual(name, 'MetaCustomXMLSchema10')
self.assertIn(name, globals())
bases = CustomXMLSchema10.meta_schema.__class__.__bases__
self.assertEqual(bases, (XMLSchemaBase,))
if __name__ == '__main__':
header_template = "Test xmlschema's schema classes with Python {} on {}"
header = header_template.format(platform.python_version(), platform.platform())
print('{0}\n{1}\n{0}'.format("*" * len(header), header))
unittest.main()
|
py | b41189d5c745f03fe1bc68b75a1c0fc8ad422675 | # coding=utf-8
from seafileapi.repo import Repo
from seafileapi.utils import raise_does_not_exist
from seafileapi.files import SeafDir
class Repos(object):
def __init__(self, client):
self.client = client
def create_repo(self, name, password=None):
data = {'name': name}
if password:
data['passwd'] = password
repo_json = self.client.post('/api2/repos/', data=data).json()
return self.get_repo(repo_json['repo_id'])
@staticmethod
def normalize_repo_name(repo_name):
"""
Remove characters from a given repo name that would be considered restricted on Windows platforms
https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
"""
remove_chars = ['<', '>', ':', '"', '/', '\\', '|', '?', '*']
for char in remove_chars:
repo_name = repo_name.replace(char, '')
# Remove leading and trailing '.'
repo_name = repo_name.strip('.')
# Replace em-dash with standard dash
repo_name = repo_name.replace('—', '-')
# Replace smart-quotes with dumb-quotes
return repo_name.replace('“', '"').replace('”', '"').replace('‘', '\'').replace('’', '\'')
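    # A worked example of the normalization above (hypothetical input, shown
    # purely for illustration):
    #   Repos.normalize_repo_name('My “Report”: 2021/Q1—draft')
    #   -> 'My "Report" 2021Q1-draft'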
@raise_does_not_exist('The requested library does not exist')
def get_repo(self, repo_id):
"""Get the repo which has the id `repo_id`.
Raises :exc:`DoesNotExist` if no such repo exists.
"""
repo_json = self.client.get('/api2/repos/' + repo_id).json()
return Repo.from_json(self.client, repo_json)
def list_repos(self, type=None):
params = {}
if type is not None:
params['type'] = type
repos_json = self.client.get('/api2/repos/', params=params).json()
return [Repo.from_json(self.client, j) for j in repos_json]
@raise_does_not_exist('The requested library does not exist')
    def get_repo_by_name(self, name):
        '''
        Get the repo with the given name.
        :param name: [string]
        :return: [Repo|None]
        '''
        # Important: if multiple repos share the same name, only the first match is returned.
repos_list = self.list_repos()
for repo in repos_list:
repo_name = repo.get_name()#.decode()
if repo_name == name:
return repo
return None
    def list_shared_folders(self, shared_email=None):
        '''
        List shared folders.
        :param shared_email: [string|None] only return folders whose 'user_email' matches this email; if None, no filter is applied.
        :return: [list(SeafDir)]
        '''
repos_json = self.client.get('/api/v2.1/shared-folders/').json()
shared_folders = []
for t_folder in repos_json:
seaf_dir_obj = SeafDir.create_from_shared_folder(t_folder,self.client)
t_user_email = t_folder.get("user_email",None)
if shared_email:
if t_user_email == shared_email:
shared_folders.append(seaf_dir_obj)
else:
shared_folders.append(seaf_dir_obj)
return shared_folders
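# Illustrative usage sketch (not part of this module; assumes a connected
# seafileapi client object has been constructed elsewhere):
#
#   repos = Repos(client)
#   repo = repos.create_repo('My Library')
#   same_repo = repos.get_repo_by_name('My Library')
#   folders = repos.list_shared_folders(shared_email='user@example.com')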
|
py | b4118ac2bed5174e8e6c9518f2db70561bcc6171 | import numpy as np
import lattice_symmetries as ls
import pytest
import math
ls.enable_logging()
import systems
def test_empty():
with pytest.raises(ls.LatticeSymmetriesException):
ls.SpinBasis(ls.Group([]), number_spins=0)
def test_huge():
with pytest.raises(ls.LatticeSymmetriesException):
ls.SpinBasis(ls.Group([]), number_spins=1000)
def test_1_spin():
basis = ls.SpinBasis(ls.Group([]), number_spins=1)
basis.build()
assert basis.states.tolist() == [0, 1]
assert basis.state_info(0) == (0, 1.0, 1.0)
assert basis.state_info(1) == (1, 1.0, 1.0)
basis = ls.SpinBasis(ls.Group([]), number_spins=1, hamming_weight=0)
basis.build()
assert basis.states.tolist() == [0]
assert basis.state_info(0) == (0, 1.0, 1.0)
basis = ls.SpinBasis(ls.Group([]), number_spins=1, spin_inversion=-1)
basis.build()
assert basis.states.tolist() == [0]
assert basis.state_info(0) == (0, 1.0, pytest.approx(1 / math.sqrt(2)))
assert basis.state_info(1) == (0, -1.0, pytest.approx(1 / math.sqrt(2)))
def test_2_spins():
basis = ls.SpinBasis(ls.Group([]), number_spins=2)
basis.build()
assert basis.states.tolist() == [0, 1, 2, 3]
with pytest.raises(ls.LatticeSymmetriesException):
ls.SpinBasis(ls.Group([]), number_spins=2, hamming_weight=2, spin_inversion=1)
with pytest.raises(ls.LatticeSymmetriesException):
ls.SpinBasis(ls.Group([]), number_spins=2, hamming_weight=2, spin_inversion=-1)
basis = ls.SpinBasis(ls.Group([ls.Symmetry([1, 0], sector=1)]), number_spins=2)
basis.build()
assert basis.states.tolist() == [1]
assert basis.state_info(0) == (0, 1.0, 0.0)
assert basis.state_info(1) == (1, 1.0, pytest.approx(1 / math.sqrt(2)))
assert basis.state_info(2) == (1, -1.0, pytest.approx(1 / math.sqrt(2)))
assert basis.state_info(3) == (3, 1.0, 0.0)
def test_4_spins():
# fmt: off
matrix = np.array([[1, 0, 0, 0],
[0, -1, 2, 0],
[0, 2, -1, 0],
[0, 0, 0, 1]])
# fmt: on
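    # The matrix above is the two-site Pauli exchange sigma_i . sigma_j (i.e. 4 S_i.S_j);
    # for the 4-site periodic Heisenberg chain the ground-state energy of
    # sum_i sigma_i . sigma_{i+1} is -8, which the assertions below check in
    # progressively more symmetry-restricted bases.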
number_spins = 4
edges = [(i, (i + 1) % number_spins) for i in range(number_spins)]
basis = ls.SpinBasis(ls.Group([]), number_spins=4, hamming_weight=2)
basis.build()
assert basis.number_states == 6
operator = ls.Operator(basis, [ls.Interaction(matrix, edges)])
assert np.isclose(ls.diagonalize(operator, k=1)[0], -8)
basis = ls.SpinBasis(ls.Group([]), number_spins=4, hamming_weight=2, spin_inversion=1)
basis.build()
assert basis.number_states == 3
operator = ls.Operator(basis, [ls.Interaction(matrix, edges)])
assert np.isclose(ls.diagonalize(operator, k=1)[0], -8)
T = ls.Symmetry([1, 2, 3, 0], sector=0)
basis = ls.SpinBasis(ls.Group([T]), number_spins=4, hamming_weight=2, spin_inversion=1)
basis.build()
assert basis.number_states == 2
operator = ls.Operator(basis, [ls.Interaction(matrix, edges)])
assert np.isclose(ls.diagonalize(operator, k=1)[0], -8)
def test_index():
L_x, L_y = (4, 6)
backend = "ls"
symmetries = systems.square_lattice_symmetries(L_x, L_y)
nearest, _ = systems.square_lattice_edges(L_x, L_y)
basis = systems.make_basis(
symmetries,
backend=backend,
number_spins=L_x * L_y,
hamming_weight=(L_x * L_y) // 2,
)
# print(basis.number_states)
hamiltonian = systems.make_heisenberg(basis, nearest, backend=backend)
# indices = ls.batched_index(basis, basis.states)
# assert np.all(indices == np.arange(basis.number_states, dtype=np.uint64))
for i in range(basis.number_states):
index = basis.index(basis.states[i])
assert index == i
assert np.all(basis.batched_index(basis.states) == np.arange(basis.number_states))
spins = np.zeros((10000, 8), dtype=np.uint64)
spins[:, 0] = basis.states[:10000]
basis.batched_state_info(spins)
# evals, evecs = hamiltonian.eigsh(k=1, which='SA')
# evals, evecs = ls.diagonalize(hamiltonian)
# print(evals)
def notest_construction():
symmetries = [
# ls.Symmetry([18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17], sector=5),
# ls.Symmetry([19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0], sector=0)
]
basis = ls.SpinBasis(
ls.Group(symmetries), number_spins=20, hamming_weight=10, spin_inversion=None
)
basis.build()
# fmt: off
interactions = [
        ls.Interaction([[0.25, 0, 0, 0], [0, -0.25, 0.5, 0], [0, 0.5, -0.25, 0.], [0., 0., 0., 0.25]],
                       [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13], [14, 15],
                        [16, 17], [18, 19]]),
ls.Interaction([[ 0.25, 0. , 0. , 0. ],
[ 0. , -0.25, 0.5 , 0. ],
[ 0. , 0.5 , -0.25, 0. ],
[ 0. , 0. , 0. , 0.25]],
[[0, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7], [6, 8], [7, 9], [8, 10], [9, 11], [10, 12], [11, 13], [12, 14], [13, 15], [14, 16], [15, 17], [16, 18], [17, 19], [18, 0], [19, 1]]
),
ls.Interaction([[-0.0625, -0. , -0. , -0. ],
[-0. , 0.0625, -0.125 , -0. ],
[-0. , -0.125 , 0.0625, -0. ],
[-0. , -0. , -0. , -0.0625]],
[[0, 4], [1, 5], [2, 6], [3, 7], [4, 8], [5, 9], [6, 10], [7, 11], [8, 12], [9, 13], [10, 14], [11, 15], [12, 16], [13, 17], [14, 18], [15, 19], [16, 0], [17, 1], [18, 2], [19, 3]]
),
ls.Interaction([[ 0.02777778, 0. , 0. , 0. ],
[ 0. , -0.02777778, 0.05555556, 0. ],
[ 0. , 0.05555556, -0.02777778, 0. ],
[ 0. , 0. , 0. , 0.02777778]],
[[0, 6], [1, 7], [2, 8], [3, 9], [4, 10], [5, 11], [6, 12], [7, 13], [8, 14], [9, 15],
[10, 16], [11, 17], [12, 18], [13, 19], [14, 0], [15, 1], [16, 2], [17, 3], [18, 4],
[19, 5]]),
ls.Interaction([[-0.015625, -0. , -0. , -0. ],
[-0. , 0.015625, -0.03125 , -0. ],
[-0. , -0.03125 , 0.015625, -0. ],
[-0. , -0. , -0. , -0.015625]],
[[0, 8], [1, 9], [2, 10], [3, 11], [4, 12], [5, 13], [6, 14], [7, 15], [8, 16], [9, 17],
[10, 18], [11, 19], [12, 0], [13, 1], [14, 2], [15, 3], [16, 4], [17, 5], [18, 6],
[19, 7]]),
ls.Interaction([[ 0.01, 0. , 0. , 0. ],
[ 0. , -0.01, 0.02, 0. ],
[ 0. , 0.02, -0.01, 0. ],
[ 0. , 0. , 0. , 0.01]],
[[0, 10], [1, 11], [2, 12], [3, 13], [4, 14], [5, 15], [6, 16], [7, 17], [8, 18], [9,
19], [10, 0], [11, 1], [12, 2], [13, 3], [14, 4], [15, 5], [16, 6], [17, 7], [18,
8], [19, 9]]),
ls.Interaction([[-0.00694444, -0. , -0. , -0. ],
[-0. , 0.00694444, -0.01388889, -0. ],
[-0. , -0.01388889, 0.00694444, -0. ],
[-0. , -0. , -0. , -0.00694444]],
[[0, 12], [1, 13], [2, 14], [3, 15], [4, 16], [5, 17], [6, 18], [7, 19], [8, 0], [9, 1],
[10, 2], [11, 3], [12, 4], [13, 5], [14, 6], [15, 7], [16, 8], [17, 9], [18, 10],
[19, 11]]),
ls.Interaction([[ 0.00510204, 0. , 0. , 0. ],
[ 0. , -0.00510204, 0.01020408, 0. ],
[ 0. , 0.01020408, -0.00510204, 0. ],
[ 0. , 0. , 0. , 0.00510204]],
[[0, 14], [1, 15], [2, 16], [3, 17], [4, 18], [5, 19], [6, 0], [7, 1], [8, 2], [9, 3],
[10, 4], [11, 5], [12, 6], [13, 7], [14, 8], [15, 9], [16, 10], [17, 11], [18, 12],
[19, 13]]),
ls.Interaction([[-0.00390625, -0. , -0. , -0. ],
[-0. , 0.00390625, -0.0078125 , -0. ],
[-0. , -0.0078125 , 0.00390625, -0. ],
[-0. , -0. , -0. , -0.00390625]],
[[0, 16], [1, 17], [2, 18], [3, 19], [4, 0], [5, 1], [6, 2], [7, 3], [8, 4], [9, 5],
[10, 6], [11, 7], [12, 8], [13, 9], [14, 10], [15, 11], [16, 12], [17, 13], [18,
14], [19, 15]]),
ls.Interaction([[ 0.00308642, 0. , 0. , 0. ],
[ 0. , -0.00308642, 0.00617284, 0. ],
[ 0. , 0.00617284, -0.00308642, 0. ],
[ 0. , 0. , 0. , 0.00308642]],
[[0, 18], [1, 19], [2, 0], [3, 1], [4, 2], [5, 3], [6, 4], [7, 5], [8, 6], [9, 7], [10,
8], [11, 9], [12, 10], [13, 11], [14, 12], [15, 13], [16, 14], [17, 15], [18, 16],
[19, 17]])
]
# fmt: on
operator = ls.Operator(basis, interactions)
e, _ = ls.diagonalize(operator, k=5)
print(e)
def test_construct_flat_basis():
basis = ls.SpinBasis(ls.Group([]), number_spins=4, hamming_weight=2)
flat_basis = ls.FlatSpinBasis(basis)
assert flat_basis.number_spins == 4
assert flat_basis.hamming_weight == 2
assert flat_basis.spin_inversion is None
basis = ls.SpinBasis(ls.Group([ls.Symmetry([1, 2, 3, 0], sector=1)]), number_spins=4, hamming_weight=2)
flat_basis = ls.FlatSpinBasis(basis)
assert flat_basis.number_spins == 4
assert flat_basis.hamming_weight == 2
assert flat_basis.spin_inversion is None
# print(flat_basis.serialize())
buf = flat_basis.serialize()
other_basis = ls.FlatSpinBasis.deserialize(buf)
assert other_basis.number_spins == 4
assert other_basis.hamming_weight == 2
assert other_basis.spin_inversion is None
assert np.all(other_basis.serialize() == buf)
def test_state_info_flat_basis():
basis = ls.SpinBasis(ls.Group([ls.Symmetry(list(range(1, 20)) + [0], sector=1)]), number_spins=20)
basis.build()
flat = ls.FlatSpinBasis(basis)
full = ls.SpinBasis(ls.Group([]), number_spins=20)
full.build()
r1, e1, n1 = basis.batched_state_info(
np.hstack((full.states.reshape(-1, 1), np.zeros((full.number_states, 7), dtype=np.uint64)))
)
r2, e2, n2 = flat.state_info(full.states)
assert np.all(r1[:, 0] == r2)
assert np.all(n1 == n2)
assert np.all(e1 == e2)
is_r2, n2 = flat.is_representative(full.states)
assert np.all(basis.states == full.states[is_r2.view(np.bool_)])
# assert np.app(n1 == n2)
# if not np.all(e1 == e2):
# for i in range(e1.shape[0]):
# if e1[i] != e2[i]:
# print(i, full.states[i], r1[i], n1[i], ":", e1[i], e2[i])
# # assert False
# test_construct_flat_basis()
test_state_info_flat_basis()
# test_index()
# test_4_spins()
# test_construction()
|
py | b4118afd7c43fc651d666330ef251b8449925f49 | # Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tuning costs to prevent undesirable behavior if there are non-binding
constraints, problem is degenerate, etc.
Import_Carbon_Emissions_Tons must be non-negative and greater than the flow
on the line times the emissions intensity. In the case where this constraint
is non-binding and there is no tuning cost, the optimization is allowed to
set Import_Carbon_Emissions_Tons higher than the product of flow and
emissions rate. Adding a small tuning cost prevents that behavior, as it
pushes the emissions variable down until it equals that product.
"""
from builtins import next
import csv
import os.path
from pyomo.environ import Param, Expression
from gridpath.auxiliary.dynamic_components import cost_components
def add_model_components(m, d, scenario_directory, subproblem, stage):
"""
:param m:
:param d:
:return:
"""
m.import_carbon_tuning_cost_per_ton = Param(default=0)
def total_import_carbon_tuning_cost_rule(mod):
"""
        Carbon import tuning costs across all carbon-constrained transmission lines and timepoints
:param mod:
:return:
"""
return sum(
mod.Import_Carbon_Emissions_Tons[tx, tmp]
* mod.import_carbon_tuning_cost_per_ton
* mod.hrs_in_tmp[tmp]
* mod.tmp_weight[tmp]
* mod.number_years_represented[mod.period[tmp]]
* mod.discount_factor[mod.period[tmp]]
for (tx, tmp) in mod.CRB_TX_OPR_TMPS
)
m.Total_Import_Carbon_Tuning_Cost = Expression(
rule=total_import_carbon_tuning_cost_rule
)
record_dynamic_components(dynamic_components=d)
def record_dynamic_components(dynamic_components):
"""
:param dynamic_components:
    Add carbon import tuning costs to the cost components
"""
getattr(dynamic_components, cost_components).append(
"Total_Import_Carbon_Tuning_Cost"
)
def load_model_data(m, d, data_portal, scenario_directory, subproblem, stage):
"""
Get tuning param value from file if file exists
:param m:
:param d:
:param data_portal:
:param scenario_directory:
:param subproblem:
:param stage:
:return:
"""
tuning_param_file = os.path.join(
scenario_directory, subproblem, stage, "inputs", "tuning_params.tab"
)
if os.path.exists(tuning_param_file):
data_portal.load(
filename=tuning_param_file,
select=("import_carbon_tuning_cost_per_ton",),
param=m.import_carbon_tuning_cost_per_ton,
)
else:
pass
def get_inputs_from_database(scenario_id, subscenarios, subproblem, stage, conn):
"""
:param subscenarios: SubScenarios object with all subscenario info
:param subproblem:
:param stage:
:param conn: database connection
:return:
"""
subproblem = 1 if subproblem == "" else subproblem
stage = 1 if stage == "" else stage
c = conn.cursor()
import_carbon_tuning_cost = c.execute(
"""SELECT import_carbon_tuning_cost_per_ton
FROM inputs_tuning
WHERE tuning_scenario_id = {}""".format(
subscenarios.TUNING_SCENARIO_ID
)
).fetchone()[0]
# TODO: remove the fetch out of this function?
return import_carbon_tuning_cost
def validate_inputs(scenario_id, subscenarios, subproblem, stage, conn):
"""
Get inputs from database and validate the inputs
:param subscenarios: SubScenarios object with all subscenario info
:param subproblem:
:param stage:
:param conn: database connection
:return:
"""
pass
# Validation to be added
# import_carbon_tuning_cost = get_inputs_from_database(
# scenario_id, subscenarios, subproblem, stage, conn)
def write_model_inputs(
scenario_directory, scenario_id, subscenarios, subproblem, stage, conn
):
"""
Get inputs from database and write out the model input
tuning_params.tab file.
:param scenario_directory: string, the scenario directory
:param subscenarios: SubScenarios object with all subscenario info
:param subproblem:
:param stage:
:param conn: database connection
:return:
"""
import_carbon_tuning_cost = get_inputs_from_database(
scenario_id, subscenarios, subproblem, stage, conn
)
# If tuning params file exists, add column to file, else create file and
    # write the header and tuning param value
if os.path.isfile(
os.path.join(
scenario_directory,
str(subproblem),
str(stage),
"inputs",
"tuning_params.tab",
)
):
with open(
os.path.join(
scenario_directory,
str(subproblem),
str(stage),
"inputs",
"tuning_params.tab",
),
"r",
) as projects_file_in:
reader = csv.reader(projects_file_in, delimiter="\t", lineterminator="\n")
new_rows = list()
# Append column header
header = next(reader)
header.append("import_carbon_tuning_cost_per_ton")
new_rows.append(header)
# Append tuning param value
param_value = next(reader)
param_value.append(import_carbon_tuning_cost)
new_rows.append(param_value)
with open(
os.path.join(
scenario_directory,
str(subproblem),
str(stage),
"inputs",
"tuning_params.tab",
),
"w",
newline="",
) as tuning_params_file_out:
writer = csv.writer(
tuning_params_file_out, delimiter="\t", lineterminator="\n"
)
writer.writerows(new_rows)
else:
with open(
os.path.join(
scenario_directory,
str(subproblem),
str(stage),
"inputs",
"tuning_params.tab",
),
"w",
newline="",
) as tuning_params_file_out:
writer = csv.writer(
tuning_params_file_out, delimiter="\t", lineterminator="\n"
)
writer.writerows(["import_carbon_tuning_cost_per_ton"])
writer.writerows([import_carbon_tuning_cost])
|
py | b4118b89b3fcb3341c469d8f7759b1e688bdeb16 | #!/bin/env python3
import time
import gex
with gex.Client(gex.TrxSerialThread(port='/dev/ttyACM0')) as client:
pass
|
py | b4118ba36fb283d8ecdc4fcf11dce21f6618eb13 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from ironic.conf import CONF
def basedir_def(*args):
"""Return an uninterpolated path relative to $pybasedir."""
return os.path.join('$pybasedir', *args)
def bindir_def(*args):
"""Return an uninterpolated path relative to $bindir."""
return os.path.join('$bindir', *args)
def state_path_def(*args):
"""Return an uninterpolated path relative to $state_path."""
return os.path.join('$state_path', *args)
def basedir_rel(*args):
"""Return a path relative to $pybasedir."""
return os.path.join(CONF.pybasedir, *args)
def bindir_rel(*args):
"""Return a path relative to $bindir."""
return os.path.join(CONF.bindir, *args)
def state_path_rel(*args):
"""Return a path relative to $state_path."""
return os.path.join(CONF.state_path, *args)
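# Illustrative examples (hedged; resolved values depend on configuration):
#   basedir_def('tftpboot') -> '$pybasedir/tftpboot' (left uninterpolated)
#   state_path_rel('tftpboot') -> os.path.join(CONF.state_path, 'tftpboot'),
#   i.e. the configured state path with the suffix appended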
|
py | b4118ba87cca05fd3f3201ecae1119e48a9d2708 | from __future__ import division
from __future__ import print_function
from scipy.stats import norm
import numpy as np
import pandas as pd
from . import common_args
from ..util import read_param_file, compute_groups_matrix, ResultDict
from types import MethodType
from multiprocessing import Pool, cpu_count
from functools import partial
from itertools import combinations, zip_longest
def analyze(problem, Y, calc_second_order=True, num_resamples=100,
conf_level=0.95, print_to_console=False, parallel=False,
n_processors=None, seed=None):
"""Perform Sobol Analysis on model outputs.
Returns a dictionary with keys 'S1', 'S1_conf', 'ST', and 'ST_conf', where
each entry is a list of size D (the number of parameters) containing the
indices in the same order as the parameter file. If calc_second_order is
True, the dictionary also contains keys 'S2' and 'S2_conf'.
Parameters
----------
problem : dict
The problem definition
Y : numpy.array
A NumPy array containing the model outputs
calc_second_order : bool
Calculate second-order sensitivities (default True)
num_resamples : int
The number of resamples (default 100)
conf_level : float
The confidence interval level (default 0.95)
print_to_console : bool
Print results directly to console (default False)
References
----------
.. [1] Sobol, I. M. (2001). "Global sensitivity indices for nonlinear
mathematical models and their Monte Carlo estimates." Mathematics
and Computers in Simulation, 55(1-3):271-280,
doi:10.1016/S0378-4754(00)00270-6.
.. [2] Saltelli, A. (2002). "Making best use of model evaluations to
compute sensitivity indices." Computer Physics Communications,
145(2):280-297, doi:10.1016/S0010-4655(02)00280-1.
.. [3] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
S. Tarantola (2010). "Variance based sensitivity analysis of model
output. Design and estimator for the total sensitivity index."
Computer Physics Communications, 181(2):259-270,
doi:10.1016/j.cpc.2009.09.018.
Examples
--------
>>> X = saltelli.sample(problem, 1000)
>>> Y = Ishigami.evaluate(X)
>>> Si = sobol.analyze(problem, Y, print_to_console=True)
"""
if seed:
np.random.seed(seed)
# determining if groups are defined and adjusting the number
# of rows in the cross-sampled matrix accordingly
if not problem.get('groups'):
D = problem['num_vars']
else:
D = len(set(problem['groups']))
if calc_second_order and Y.size % (2 * D + 2) == 0:
N = int(Y.size / (2 * D + 2))
elif not calc_second_order and Y.size % (D + 2) == 0:
N = int(Y.size / (D + 2))
else:
raise RuntimeError("""
Incorrect number of samples in model output file.
Confirm that calc_second_order matches option used during sampling.""")
if conf_level < 0 or conf_level > 1:
raise RuntimeError("Confidence level must be between 0-1.")
# normalize the model output
Y = (Y - Y.mean()) / Y.std()
A, B, AB, BA = separate_output_values(Y, D, N, calc_second_order)
r = np.random.randint(N, size=(N, num_resamples))
Z = norm.ppf(0.5 + conf_level / 2)
if not parallel:
S = create_Si_dict(D, calc_second_order)
for j in range(D):
S['S1'][j] = first_order(A, AB[:, j], B)
S['S1_conf'][j] = Z * first_order(A[r], AB[r, j], B[r]).std(ddof=1)
S['ST'][j] = total_order(A, AB[:, j], B)
S['ST_conf'][j] = Z * total_order(A[r], AB[r, j], B[r]).std(ddof=1)
# Second order (+conf.)
if calc_second_order:
for j in range(D):
for k in range(j + 1, D):
S['S2'][j, k] = second_order(
A, AB[:, j], AB[:, k], BA[:, j], B)
S['S2_conf'][j, k] = Z * second_order(A[r], AB[r, j],
AB[r, k], BA[r, j],
B[r]).std(ddof=1)
else:
tasks, n_processors = create_task_list(
D, calc_second_order, n_processors)
func = partial(sobol_parallel, Z, A, AB, BA, B, r)
pool = Pool(n_processors)
S_list = pool.map_async(func, tasks)
pool.close()
pool.join()
S = Si_list_to_dict(S_list.get(), D, calc_second_order)
# Print results to console
if print_to_console:
print_indices(S, problem, calc_second_order)
# Add problem context and override conversion method for special case
S.problem = problem
S.to_df = MethodType(to_df, S)
return S
def first_order(A, AB, B):
# First order estimator following Saltelli et al. 2010 CPC, normalized by
# sample variance
return np.mean(B * (AB - A), axis=0) / np.var(np.r_[A, B], axis=0)
def total_order(A, AB, B):
# Total order estimator following Saltelli et al. 2010 CPC, normalized by
# sample variance
return 0.5 * np.mean((A - AB) ** 2, axis=0) / np.var(np.r_[A, B], axis=0)
def second_order(A, ABj, ABk, BAj, B):
# Second order estimator following Saltelli 2002
Vjk = np.mean(BAj * ABk - A * B, axis=0) / np.var(np.r_[A, B], axis=0)
Sj = first_order(A, ABj, B)
Sk = first_order(A, ABk, B)
return Vjk - Sj - Sk
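# Illustrative sanity check (not part of SALib; a hedged sketch for exposition).
# With standardized outputs, taking AB equal to B makes the j-th parameter fully
# determine Y, so the first-order estimate should approach 1; taking AB equal to
# A removes any effect of that parameter, so it should approach 0.
def _example_first_order_sanity_check(n=100000, seed=0):
    rng = np.random.RandomState(seed)
    A = rng.normal(size=n)
    B = rng.normal(size=n)
    return first_order(A, B, B), first_order(A, A, B)  # roughly (1.0, 0.0)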
def create_Si_dict(D, calc_second_order):
# initialize empty dict to store sensitivity indices
S = ResultDict((k, np.zeros(D))
for k in ('S1', 'S1_conf', 'ST', 'ST_conf'))
if calc_second_order:
S['S2'] = np.zeros((D, D))
S['S2'][:] = np.nan
S['S2_conf'] = np.zeros((D, D))
S['S2_conf'][:] = np.nan
return S
def separate_output_values(Y, D, N, calc_second_order):
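    # Layout note (derived from the slicing below): with calc_second_order and
    # D parameters, each consecutive block of 2*D + 2 rows of Y is ordered
    # [A, AB_1 .. AB_D, BA_1 .. BA_D, B]; without second order the block is
    # [A, AB_1 .. AB_D, B] of length D + 2.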
AB = np.zeros((N, D))
BA = np.zeros((N, D)) if calc_second_order else None
step = 2 * D + 2 if calc_second_order else D + 2
A = Y[0:Y.size:step]
B = Y[(step - 1):Y.size:step]
for j in range(D):
AB[:, j] = Y[(j + 1):Y.size:step]
if calc_second_order:
BA[:, j] = Y[(j + 1 + D):Y.size:step]
return A, B, AB, BA
def sobol_parallel(Z, A, AB, BA, B, r, tasks):
sobol_indices = []
for d, j, k in tasks:
if d == 'S1':
s = first_order(A, AB[:, j], B)
elif d == 'S1_conf':
s = Z * first_order(A[r], AB[r, j], B[r]).std(ddof=1)
elif d == 'ST':
s = total_order(A, AB[:, j], B)
elif d == 'ST_conf':
s = Z * total_order(A[r], AB[r, j], B[r]).std(ddof=1)
elif d == 'S2':
s = second_order(A, AB[:, j], AB[:, k], BA[:, j], B)
elif d == 'S2_conf':
s = Z * second_order(A[r], AB[r, j], AB[r, k],
BA[r, j], B[r]).std(ddof=1)
sobol_indices.append([d, j, k, s])
return sobol_indices
def create_task_list(D, calc_second_order, n_processors):
# Create list with one entry (key, parameter 1, parameter 2) per sobol
# index (+conf.). This is used to supply parallel tasks to
# multiprocessing.Pool
tasks_first_order = [[d, j, None] for j in range(
D) for d in ('S1', 'S1_conf', 'ST', 'ST_conf')]
# Add second order (+conf.) to tasks
tasks_second_order = []
if calc_second_order:
tasks_second_order = [[d, j, k] for j in range(D) for k in
range(j + 1, D) for d in ('S2', 'S2_conf')]
if n_processors is None:
n_processors = min(cpu_count(), len(
tasks_first_order) + len(tasks_second_order))
if not calc_second_order:
tasks = np.array_split(tasks_first_order, n_processors)
else:
# merges both lists alternating its elements and splits the
# resulting lists into n_processors sublists
tasks = np.array_split([v for v in sum(
zip_longest(tasks_first_order[::-1], tasks_second_order), ())
if v is not None], n_processors)
return tasks, n_processors
def Si_list_to_dict(S_list, D, calc_second_order):
# Convert the parallel output into the regular dict format for
# printing/returning
S = create_Si_dict(D, calc_second_order)
L = []
for l in S_list: # first reformat to flatten
L += l
for s in L: # First order (+conf.)
if s[2] is None:
S[s[0]][s[1]] = s[3]
else:
S[s[0]][s[1], s[2]] = s[3]
return S
def Si_to_pandas_dict(S_dict):
"""Convert Si information into Pandas DataFrame compatible dict.
Parameters
----------
S_dict : ResultDict
Sobol sensitivity indices
See Also
----------
Si_list_to_dict
Returns
----------
tuple : of total, first, and second order sensitivities.
Total and first order are dicts.
Second order sensitivities contain a tuple of parameter name
combinations for use as the DataFrame index and second order
sensitivities.
If no second order indices found, then returns tuple of
(None, None)
Examples
--------
>>> X = saltelli.sample(problem, 1000)
>>> Y = Ishigami.evaluate(X)
>>> Si = sobol.analyze(problem, Y, print_to_console=True)
>>> T_Si, first_Si, (idx, second_Si) = sobol.Si_to_pandas_dict(Si, problem)
"""
problem = S_dict.problem
total_order = {
'ST': S_dict['ST'],
'ST_conf': S_dict['ST_conf']
}
first_order = {
'S1': S_dict['S1'],
'S1_conf': S_dict['S1_conf']
}
idx = None
second_order = None
if 'S2' in S_dict:
names = problem['names']
idx = list(combinations(names, 2))
second_order = {
'S2': [S_dict['S2'][names.index(i[0]), names.index(i[1])]
for i in idx],
'S2_conf': [S_dict['S2_conf'][names.index(i[0]), names.index(i[1])]
for i in idx]
}
return total_order, first_order, (idx, second_order)
def to_df(self):
'''Conversion method to Pandas DataFrame. To be attached to ResultDict.
Returns
========
List : of Pandas DataFrames in order of Total, First, Second
'''
total, first, (idx, second) = Si_to_pandas_dict(self)
names = self.problem['names']
ret = [pd.DataFrame(total, index=names),
pd.DataFrame(first, index=names)]
if second:
ret += [pd.DataFrame(second, index=idx)]
return ret
def print_indices(S, problem, calc_second_order):
# Output to console
if not problem.get('groups'):
title = 'Parameter'
names = problem['names']
D = problem['num_vars']
else:
title = 'Group'
_, names = compute_groups_matrix(problem['groups'])
D = len(names)
print('%s S1 S1_conf ST ST_conf' % title)
for j in range(D):
print('%s %f %f %f %f' % (names[j], S['S1'][
j], S['S1_conf'][j], S['ST'][j], S['ST_conf'][j]))
if calc_second_order:
print('\n%s_1 %s_2 S2 S2_conf' % (title, title))
for j in range(D):
for k in range(j + 1, D):
print("%s %s %f %f" % (names[j], names[k],
S['S2'][j, k], S['S2_conf'][j, k]))
def cli_parse(parser):
parser.add_argument('--max-order', type=int, required=False, default=2,
choices=[1, 2],
help='Maximum order of sensitivity indices to '
'calculate')
parser.add_argument('-r', '--resamples', type=int, required=False,
default=1000,
help='Number of bootstrap resamples for Sobol '
'confidence intervals')
parser.add_argument('--parallel', action='store_true', help='Makes '
'use of parallelization.',
dest='parallel')
parser.add_argument('--processors', type=int, required=False,
default=None,
help='Number of processors to be used with the ' +
'parallel option.', dest='n_processors')
return parser
def cli_action(args):
problem = read_param_file(args.paramfile)
Y = np.loadtxt(args.model_output_file, delimiter=args.delimiter,
usecols=(args.column,))
analyze(problem, Y, (args.max_order == 2),
num_resamples=args.resamples, print_to_console=True,
parallel=args.parallel, n_processors=args.n_processors,
seed=args.seed)
if __name__ == "__main__":
common_args.run_cli(cli_parse, cli_action)
|
py | b4118bacef308bc77ea514f5466940b5cdd68888 | # coding=utf-8
# Copyleft 2019 project LXRT.
import argparse
import random
import numpy as np
import torch
def get_optimizer(optim):
# Bind the optimizer
if optim == 'rms':
print("Optimizer: Using RMSProp")
optimizer = torch.optim.RMSprop
elif optim == 'adam':
print("Optimizer: Using Adam")
optimizer = torch.optim.Adam
elif optim == 'adamax':
print("Optimizer: Using Adamax")
optimizer = torch.optim.Adamax
elif optim == 'sgd':
print("Optimizer: sgd")
optimizer = torch.optim.SGD
elif 'bert' in optim:
        optimizer = 'bert'  # The bert optimizer will be bound later.
else:
assert False, "Please add your optimizer %s in the list." % optim
return optimizer
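# Illustrative use (a hedged sketch; the binding to model parameters happens in
# the training code, and `model` here is hypothetical):
#   optim_cls = get_optimizer('adam')
#   optimizer = optim_cls(model.parameters(), lr=1e-4)
# For 'bert' the string is returned unchanged and the BERT-specific optimizer
# is constructed later.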
def parse_args():
parser = argparse.ArgumentParser()
# Data Splits
parser.add_argument("--train", default='train')
parser.add_argument("--valid", default='valid')
parser.add_argument("--test", default=None)
# Training Hyper-parameters
parser.add_argument('--batchSize', dest='batch_size', type=int, default=256)
parser.add_argument('--optim', default='bert')
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--dropout', type=float, default=0.1)
parser.add_argument('--seed', type=int, default=9595, help='random seed')
# Debugging
parser.add_argument('--output', type=str, default='snap/test')
parser.add_argument("--fast", action='store_const', default=False, const=True)
parser.add_argument("--tiny", action='store_const', default=False, const=True)
parser.add_argument("--tqdm", action='store_const', default=False, const=True)
# Model Loading
parser.add_argument('--load', type=str, default=None,
help='Load the model (usually the fine-tuned model).')
parser.add_argument('--loadLXMERT', dest='load_lxmert', type=str, default=None,
help='Load the pre-trained LXMERT model.')
parser.add_argument('--loadLXMERTQA', dest='load_lxmert_qa', type=str, default=None,
help='Load the pre-trained LXMERT model with QA answer head.')
parser.add_argument("--fromScratch", dest='from_scratch', action='store_const', default=False, const=True,
help='If none of the --load, --loadLXMERT, --loadLXMERTQA is set, '
'the model would be trained from scratch. If --fromScratch is'
' not specified, the model would load BERT-pre-trained weights by'
' default. ')
# Optimization
parser.add_argument("--mceLoss", dest='mce_loss', action='store_const', default=False, const=True)
# LXRT Model Config
# Note: LXRT = L, X, R (three encoders), Transformer
parser.add_argument("--llayers", default=9, type=int, help='Number of Language layers')
parser.add_argument("--xlayers", default=5, type=int, help='Number of CROSS-modality layers.')
parser.add_argument("--rlayers", default=5, type=int, help='Number of object Relationship layers.')
# LXMERT Pre-training Config
parser.add_argument("--taskMatched", dest='task_matched', action='store_const', default=False, const=True)
parser.add_argument("--taskMaskLM", dest='task_mask_lm', action='store_const', default=False, const=True)
parser.add_argument("--taskObjPredict", dest='task_obj_predict', action='store_const', default=False, const=True)
parser.add_argument("--taskQA", dest='task_qa', action='store_const', default=False, const=True)
parser.add_argument("--visualLosses", dest='visual_losses', default='obj,attr,feat', type=str)
parser.add_argument("--qaSets", dest='qa_sets', default=None, type=str)
parser.add_argument("--wordMaskRate", dest='word_mask_rate', default=0.15, type=float)
parser.add_argument("--objMaskRate", dest='obj_mask_rate', default=0.15, type=float)
# Training configuration
parser.add_argument("--multiGPU", action='store_const', default=False, const=True)
parser.add_argument("--numWorkers", dest='num_workers', default=0)
# Parse the arguments.
args = parser.parse_args()
# Bind optimizer class.
args.optimizer = get_optimizer(args.optim)
# Set seeds
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
return args
args = parse_args()
|
py | b4118c05c0341561950b773904f26dbff61897be | import logging
from localstack import config
from localstack.services import install
from localstack.utils.aws import aws_stack
from localstack.constants import DEFAULT_PORT_STEPFUNCTIONS_BACKEND, TEST_AWS_ACCOUNT_ID, DEFAULT_REGION
from localstack.services.infra import get_service_protocol, start_proxy_for_service, do_run
LOG = logging.getLogger(__name__)
# max heap size allocated for the Java process
MAX_HEAP_SIZE = '256m'
def start_stepfunctions(port=None, asynchronous=False, update_listener=None):
port = port or config.PORT_STEPFUNCTIONS
install.install_stepfunctions_local()
backend_port = DEFAULT_PORT_STEPFUNCTIONS_BACKEND
# TODO: local port is currently hard coded in Step Functions Local :/
backend_port = 8083
lambda_endpoint = aws_stack.get_local_service_url('lambda')
dynamodb_endpoint = aws_stack.get_local_service_url('dynamodb')
sns_endpoint = aws_stack.get_local_service_url('sns')
sqs_endpoint = aws_stack.get_local_service_url('sqs')
cmd = ('cd %s; java -Dcom.amazonaws.sdk.disableCertChecking -Xmx%s -jar StepFunctionsLocal.jar '
'--lambda-endpoint %s --dynamodb-endpoint %s --sns-endpoint %s '
'--sqs-endpoint %s --aws-region %s --aws-account %s') % (
install.INSTALL_DIR_STEPFUNCTIONS, MAX_HEAP_SIZE, lambda_endpoint, dynamodb_endpoint,
sns_endpoint, sqs_endpoint, DEFAULT_REGION, TEST_AWS_ACCOUNT_ID)
print('Starting mock StepFunctions (%s port %s)...' % (get_service_protocol(), port))
start_proxy_for_service('stepfunctions', port, backend_port, update_listener)
return do_run(cmd, asynchronous)
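# Illustrative invocation (hedged; in LocalStack this is normally driven by the
# service startup plumbing rather than called directly):
#   start_stepfunctions(asynchronous=True)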
|
py | b4118ca959217bd004c6a53a6eac51e65b2cc97d | # -*- coding: utf-8 -*-
"""
Coxeter Groups implemented with Coxeter3
"""
#*****************************************************************************
# Copyright (C) 2009-2013 Mike Hansen <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.libs.coxeter3.coxeter import get_CoxGroup, CoxGroupElement
from sage.misc.cachefunc import cached_method
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.element_wrapper import ElementWrapper
from sage.structure.richcmp import richcmp
from sage.categories.all import CoxeterGroups
from sage.structure.parent import Parent
from sage.combinat.root_system.coxeter_matrix import CoxeterMatrix
from sage.rings.integer_ring import ZZ
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
class CoxeterGroup(UniqueRepresentation, Parent):
@staticmethod
def __classcall__(cls, cartan_type, *args, **options):
"""
TESTS::
sage: from sage.libs.coxeter3.coxeter_group import CoxeterGroup # optional - coxeter3
sage: CoxeterGroup(['B',2]) # optional - coxeter3
Coxeter group of type ['B', 2] implemented by Coxeter3
sage: CoxeterGroup(CartanType(['B', 3]).relabel({1: 3, 2: 2, 3: 1})) # optional - coxeter3
Coxeter group of type ['B', 3] relabelled by {1: 3, 2: 2, 3: 1} implemented by Coxeter3
"""
from sage.combinat.all import CartanType
ct = CartanType(cartan_type)
return super(CoxeterGroup, cls).__classcall__(cls, ct, *args, **options)
def __init__(self, cartan_type):
"""
TESTS::
sage: from sage.libs.coxeter3.coxeter_group import CoxeterGroup # optional - coxeter3
sage: CoxeterGroup(['A',2]) # optional - coxeter3
Coxeter group of type ['A', 2] implemented by Coxeter3
As degrees and codegrees are not implemented, they are skipped in the
testsuite::
sage: to_skip = ['_test_degrees', '_test_codegrees']
sage: TestSuite(CoxeterGroup(['A',2])).run(skip=to_skip) # optional - coxeter3
"""
category = CoxeterGroups()
if cartan_type.is_finite():
category = category.Finite()
Parent.__init__(self, category=category)
self._coxgroup = get_CoxGroup(cartan_type)
self._cartan_type = cartan_type
def _repr_(self):
"""
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3'); W # optional - coxeter3 # indirect doctest
Coxeter group of type ['A', 3] implemented by Coxeter3
sage: W = CoxeterGroup(['A', 3, 1], implementation='coxeter3'); W # optional - coxeter3
Coxeter group of type ['A', 3, 1] implemented by Coxeter3
"""
return "Coxeter group of type %s implemented by Coxeter3"%(self.cartan_type())
def __iter__(self):
"""
EXAMPLES::
sage: W = CoxeterGroup(['A', 2], implementation='coxeter3') # optional - coxeter3
sage: list(W) # optional - coxeter3
[[], [1], [2], [1, 2], [2, 1], [1, 2, 1]]
"""
for x in self._coxgroup:
yield CoxeterGroup.Element(self, x)
def cartan_type(self):
"""
Return the Cartan type for this Coxeter group.
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: W.cartan_type() # optional - coxeter3
['A', 3]
"""
return self._cartan_type
def index_set(self):
"""
Return the index set for the generators of this Coxeter group.
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: W.index_set() # optional - coxeter3
(1, 2, 3)
sage: C = CoxeterGroup(['A', 3,1], implementation='coxeter3') # optional - coxeter3
sage: C.index_set() # optional - coxeter3
(0, 1, 2, 3)
"""
return self.cartan_type().index_set()
def bruhat_interval(self, u, v):
"""
Return the Bruhat interval between ``u`` and ``v``.
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: W.bruhat_interval([1],[3,1,2,3]) # optional - coxeter3
[[1], [1, 2], [1, 3], [1, 2, 3], [1, 3, 2], [1, 2, 3, 2]]
"""
u, v = self(u), self(v)
return self._coxgroup.bruhat_interval(u.value, v.value)
def cardinality(self):
"""
Return the cardinality of this Coxeter group.
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: W.cardinality() # optional - coxeter3
24
"""
return self._coxgroup.order()
def one(self):
"""
Return the identity element of this Coxeter group.
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: W.one() # optional - coxeter3
[]
"""
return self.element_class(self, [])
def simple_reflections(self):
"""
Return the family of generators for this Coxeter group.
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: s = W.simple_reflections() # optional - coxeter3
sage: s[2]*s[1]*s[2] # optional - coxeter3
[2, 1, 2]
"""
from sage.combinat.family import Family
return Family(self.index_set(), lambda i: self.element_class(self, [i]))
gens = simple_reflections
def rank(self):
"""
Return the rank of this Coxeter group, that is, the number of generators.
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: W.rank() # optional - coxeter3
3
"""
return self._coxgroup.rank()
def is_finite(self):
"""
Return True if this is a finite Coxeter group.
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: W.is_finite() # optional - coxeter3
True
"""
return self._coxgroup.is_finite()
def length(self, x):
"""
Return the length of an element ``x`` in this Coxeter group.
This is just the length of a reduced word for ``x``.
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: W.length(W([1,2])) # optional - coxeter3
2
sage: W.length(W([1,1])) # optional - coxeter3
0
"""
return x.length()
@cached_method
def coxeter_matrix(self):
"""
Return the Coxeter matrix for this Coxeter group.
The columns and rows are ordered according to the result of
:meth:`index_set`.
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: m = W.coxeter_matrix(); m # optional - coxeter3
[1 3 2]
[3 1 3]
[2 3 1]
sage: m.index_set() == W.index_set() # optional - coxeter3
True
"""
return CoxeterMatrix(self._coxgroup.coxeter_matrix(), self.index_set())
def root_system(self):
"""
Return the root system associated with this Coxeter group.
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: R = W.root_system(); R # optional - coxeter3
Root system of type ['A', 3]
sage: alpha = R.root_space().basis() # optional - coxeter3
sage: alpha[2] + alpha[3] # optional - coxeter3
alpha[2] + alpha[3]
"""
return self.cartan_type().root_system()
def _an_element_(self):
"""
Return an element of this Coxeter group.
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: W._an_element_() # optional - coxeter3
[]
"""
return self.element_class(self, [])
def m(self, i, j):
r"""
This is deprecated, use ``self.coxeter_matrix()[i,j]`` instead.
TESTS::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: W.m(1, 1) # optional - coxeter3
doctest:warning...:
DeprecationWarning: the .m(i, j) method has been deprecated; use .coxeter_matrix()[i,j] instead.
See https://trac.sagemath.org/30237 for details.
1
"""
from sage.misc.superseded import deprecation
deprecation(30237, "the .m(i, j) method has been deprecated; use .coxeter_matrix()[i,j] instead.")
return self.coxeter_matrix()[i,j]
def kazhdan_lusztig_polynomial(self, u, v, constant_term_one=True):
r"""
Return the Kazhdan-Lusztig polynomial `P_{u,v}`.
INPUT:
- ``u``, ``v`` -- elements of the underlying Coxeter group
- ``constant_term_one`` -- (default: True) True uses the constant equals one convention,
False uses the Leclerc-Thibon convention
.. SEEALSO::
- :class:`~sage.combinat.kazhdan_lusztig.KazhdanLusztigPolynomial`
- :meth:`parabolic_kazhdan_lusztig_polynomial`
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: W.kazhdan_lusztig_polynomial([], [1,2, 1]) # optional - coxeter3
1
sage: W.kazhdan_lusztig_polynomial([1],[3,2]) # optional - coxeter3
0
sage: W = CoxeterGroup(['A',3],implementation='coxeter3') # optional - coxeter3
sage: W.kazhdan_lusztig_polynomial([2],[2,1,3,2]) # optional - coxeter3
q + 1
.. NOTE::
Coxeter3, as well as Sage's native implementation in
:class:`~sage.combinat.kazhdan_lusztig.KazhdanLusztigPolynomial`
use the convention under which Kazhdan-Lusztig
polynomials give the change of basis from the `(C_w)_{w\in W}`
            basis to the `(T_w)_{w\in W}` basis of the Hecke algebra of `W` with
            parameters `q` and `q^{-1}`:
            .. MATH:: C_w = \sum_u P_{u,w} T_u
In particular, `P_{u,u}=1`::
sage: all(W.kazhdan_lusztig_polynomial(u,u) == 1 for u in W) # optional - coxeter3
True
This convention differs from Theorem 2.7 in [LT1998]_ by:
.. MATH::
{}^{LT} P_{y,w}(q) = q^{\ell(w)-\ell(y)} P_{y,w}(q^{-2})
To access the Leclerc-Thibon convention use::
sage: W = CoxeterGroup(['A',3],implementation='coxeter3') # optional - coxeter3
sage: W.kazhdan_lusztig_polynomial([2],[2,1,3,2],constant_term_one=False) # optional - coxeter3
q^3 + q
TESTS:
We check that Coxeter3 and Sage's implementation give the same results::
sage: C = CoxeterGroup(['B', 3], implementation='coxeter3') # optional - coxeter3
sage: W = WeylGroup("B3",prefix="s")
sage: [s1,s2,s3] = W.simple_reflections()
sage: R.<q> = LaurentPolynomialRing(QQ)
sage: KL = KazhdanLusztigPolynomial(W,q)
sage: all(KL.P(1,w) == C.kazhdan_lusztig_polynomial([],w.reduced_word()) for w in W) # optional - coxeter3 # long (15s)
True
"""
u, v = self(u), self(v)
p = u.value.kazhdan_lusztig_polynomial(v.value)
if constant_term_one:
return p
ZZq = PolynomialRing(ZZ, 'q', sparse=True)
# This is the same as q**len_diff * p(q**(-2))
len_diff = v.length()-u.length()
d = {-2*deg+len_diff: coeff for deg,coeff in enumerate(p) if coeff != 0}
return ZZq(d)
def parabolic_kazhdan_lusztig_polynomial(self, u, v, J, constant_term_one=True):
r"""
Return the parabolic Kazhdan-Lusztig polynomial `P_{u,v}^{-,J}`.
INPUT:
- ``u``, ``v`` -- minimal length coset representatives of `W/W_J` for this Coxeter group `W`
- ``J`` -- a subset of the index set of ``self`` specifying the parabolic subgroup
This method implements the parabolic Kazhdan-Lusztig polynomials
`P^{-,J}_{u,v}` of [Deo1987b]_, which are defined as
`P^{-,J}_{u,v} = \sum_{z\in W_J} (-1)^{\ell(z)} P_{yz,w}(q)`
with the conventions in Sage.
As for :meth:`kazhdan_lusztig_polynomial` the convention
differs from Theorem 2.7 in [LT1998]_ by:
.. MATH::
{}^{LT} P_{y,w}^{-,J}(q) = q^{\ell(w)-\ell(y)} P_{y,w}^{-,J}(q^{-2})
EXAMPLES::
sage: W = CoxeterGroup(['A',3], implementation='coxeter3') # optional - coxeter3
sage: W.parabolic_kazhdan_lusztig_polynomial([],[3,2],[1,3]) # optional - coxeter3
0
sage: W.parabolic_kazhdan_lusztig_polynomial([2],[2,1,3,2],[1,3]) # optional - coxeter3
q
sage: C = CoxeterGroup(['A',3,1], implementation='coxeter3') # optional - coxeter3
sage: C.parabolic_kazhdan_lusztig_polynomial([],[1],[0]) # optional - coxeter3
1
sage: C.parabolic_kazhdan_lusztig_polynomial([],[1,2,1],[0]) # optional - coxeter3
1
sage: C.parabolic_kazhdan_lusztig_polynomial([],[0,1,0,1,2,1],[0]) # optional - coxeter3
q
sage: w=[1, 2, 1, 3, 0, 2, 1, 0, 3, 0, 2]
sage: v=[1, 2, 1, 3, 0, 1, 2, 1, 0, 3, 0, 2, 1, 0, 3, 0, 2]
sage: C.parabolic_kazhdan_lusztig_polynomial(w,v,[1,3]) # optional - coxeter3
q^2 + q
sage: C.parabolic_kazhdan_lusztig_polynomial(w,v,[1,3],constant_term_one=False) # optional - coxeter3
q^4 + q^2
TESTS::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: type(W.parabolic_kazhdan_lusztig_polynomial([2],[],[1])) # optional - coxeter3
<type 'sage.rings.polynomial.polynomial_integer_dense_flint.Polynomial_integer_dense_flint'>
"""
u = self(u)
v = self(v)
if any(d in J for d in u.descents()) or any(d in J for d in v.descents()):
raise ValueError("u and v have to be minimal coset representatives")
J_set = set(J)
WOI = self.weak_order_ideal(lambda x: J_set.issuperset(x.descents()))
if constant_term_one:
P = PolynomialRing(ZZ, 'q')
return P.sum((-1)**(z.length()) * self.kazhdan_lusztig_polynomial(u*z,v)
for z in WOI if (u*z).bruhat_le(v))
P = PolynomialRing(ZZ, 'q', sparse=True)
return P.sum((-1)**(z.length()) * self.kazhdan_lusztig_polynomial(u*z,v, constant_term_one=False).shift(z.length())
for z in WOI if (u*z).bruhat_le(v))
class Element(ElementWrapper):
wrapped_class = CoxGroupElement
def __init__(self, parent, x):
"""
TESTS::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: W([2,1,2]) # optional - coxeter3
[1, 2, 1]
"""
if not isinstance(x, CoxGroupElement):
x = CoxGroupElement(parent._coxgroup, x).reduced()
ElementWrapper.__init__(self, parent, x)
def __iter__(self):
"""
Return an iterator for the elements in the reduced word.
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: w = W([1,2,1]) # optional - coxeter3
sage: list(iter(w)) # optional - coxeter3
[1, 2, 1]
"""
return iter(self.value)
def coatoms(self):
"""
Return the coatoms (or co-covers) of this element in the Bruhat order.
EXAMPLES::
sage: W = CoxeterGroup(['B', 3], implementation='coxeter3') # optional - coxeter3
sage: w = W([1,2,3]) # optional - coxeter3
sage: w.coatoms() # optional - coxeter3
[[2, 3], [1, 3], [1, 2]]
"""
W = self.parent()
return [W(w) for w in self.value.coatoms()]
def _richcmp_(self, other, op):
"""
Return lexicographic comparison of ``self`` and ``other``.
EXAMPLES::
sage: W = CoxeterGroup(['B', 3], implementation='coxeter3') # optional - coxeter3
sage: w = W([1,2,3]) # optional - coxeter3
sage: v = W([3,1,2]) # optional - coxeter3
sage: v < w # optional - coxeter3
False
sage: w < v # optional - coxeter3
True
Some tests for equality::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: W([1,2,1]) == W([2,1,2]) # optional - coxeter3
True
sage: W([1,2,1]) == W([2,1]) # optional - coxeter3
False
"""
return richcmp(list(self), list(other), op)
def reduced_word(self):
"""
Return the reduced word of ``self``.
EXAMPLES::
sage: W = CoxeterGroup(['B', 3], implementation='coxeter3') # optional - coxeter3
sage: w = W([1,2,3]) # optional - coxeter3
sage: w.reduced_word() # optional - coxeter3
[1, 2, 3]
"""
return list(self)
def __invert__(self):
"""
Return the inverse of this Coxeter group element.
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: w = W([1,2,3]) # optional - coxeter3
sage: ~w # optional - coxeter3
[3, 2, 1]
"""
return self.__class__(self.parent(), ~self.value)
inverse = __invert__
def __getitem__(self, i):
"""
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: w0 = W([1,2,1]) # optional - coxeter3
sage: w0[0] # optional - coxeter3
1
sage: w0[1] # optional - coxeter3
2
"""
# Allow the error message to be raised by the underlying element
return self.value[i]
def _mul_(self, y):
"""
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: s = W.gens() # optional - coxeter3
sage: s[1]._mul_(s[1]) # optional - coxeter3
[]
sage: s[1]*s[2]*s[1] # optional - coxeter3
[1, 2, 1]
"""
return self.__class__(self.parent(), self.value * y.value)
def __len__(self):
"""
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: w = W([1,2,1]) # optional - coxeter3
sage: w.length() # optional - coxeter3
3
sage: len(w) # optional - coxeter3
3
"""
return len(self.value)
length = __len__
def bruhat_le(self, v):
r"""
Return whether ``self`` `\le` ``v`` in Bruhat order.
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: W([]).bruhat_le([1,2,1]) # optional - coxeter3
True
"""
v = self.parent()(v)
return self.value.bruhat_le(v.value)
def poincare_polynomial(self):
"""
Return the Poincaré polynomial associated with this element.
EXAMPLES::
sage: W = CoxeterGroup(['A', 2], implementation='coxeter3') # optional - coxeter3
sage: W.long_element().poincare_polynomial() # optional - coxeter3
t^3 + 2*t^2 + 2*t + 1
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: W([2,1,3,2]).poincare_polynomial() # optional - coxeter3
t^4 + 4*t^3 + 5*t^2 + 3*t + 1
sage: W([1,2,3,2,1]).poincare_polynomial() # optional - coxeter3
t^5 + 4*t^4 + 6*t^3 + 5*t^2 + 3*t + 1
sage: rw = sage.combinat.permutation.from_reduced_word # optional - coxeter3
sage: p = [w.poincare_polynomial() for w in W] # optional - coxeter3
sage: [rw(w.reduced_word()) for i,w in enumerate(W) if p[i] != p[i].reverse()] # optional - coxeter3
[[3, 4, 1, 2], [4, 2, 3, 1]]
"""
return self.value.poincare_polynomial()
def has_right_descent(self, i):
"""
Return whether ``i`` is a right descent of this element.
EXAMPLES::
sage: W = CoxeterGroup(['A', 4], implementation='coxeter3') # optional - coxeter3
sage: W([1,2]).has_right_descent(1) # optional - coxeter3
False
sage: W([1,2]).has_right_descent(2) # optional - coxeter3
True
"""
return i in self.value.right_descents()
def has_left_descent(self, i):
"""
Return True if ``i`` is a left descent of this element.
EXAMPLES::
sage: W = CoxeterGroup(['A', 4], implementation='coxeter3') # optional - coxeter3
sage: W([1,2]).has_left_descent(1) # optional - coxeter3
True
sage: W([1,2]).has_left_descent(2) # optional - coxeter3
False
"""
return i in self.value.left_descents()
def action(self, v):
"""
Return the action of this Coxeter group element on the root space.
INPUT:
- ``v`` -- an element of the root space associated with the Coxeter group for ``self``
EXAMPLES::
sage: W = CoxeterGroup(['B', 3], implementation='coxeter3') # optional - coxeter3
sage: R = W.root_system().root_space() # optional - coxeter3
sage: v = R.an_element(); v # optional - coxeter3
2*alpha[1] + 2*alpha[2] + 3*alpha[3]
sage: w = W([1,2,3]) # optional - coxeter3
sage: w.action(v) # optional - coxeter3
-alpha[1] + alpha[2] + alpha[3]
"""
#TODO: Find a better way to do this
W = self.parent().root_system().root_space().weyl_group()
w = W.from_reduced_word(list(self))
return w.action(v)
def action_on_rational_function(self, f):
r"""
Return the natural action of this Coxeter group element on a
polynomial considered as an element of `S(\mathfrak{h}^*)`.
.. NOTE::
Note that the number of variables in the polynomial
ring must correspond to the rank of this Coxeter
group. The ordering of the variables is assumed to
coincide with the result of :meth:`index_set`.
EXAMPLES::
sage: W = CoxeterGroup(['A', 3], implementation='coxeter3') # optional - coxeter3
sage: S = PolynomialRing(QQ, 'x,y,z').fraction_field() # optional - coxeter3
sage: x,y,z = S.gens() # optional - coxeter3
sage: W([1]).action_on_rational_function(x+y+z) # optional - coxeter3
(x^2*y + x*z + 1)/x
sage: W([2]).action_on_rational_function(x+y+z) # optional - coxeter3
(x*y^2 + y^2*z + 1)/y
sage: W([3]).action_on_rational_function(x+y+z) # optional - coxeter3
(y*z^2 + x*z + 1)/z
"""
Q = f.parent()
Q_gens = Q.gens()
W = self.parent()
R = W.root_system().root_space()
alpha = R.basis()
n = W.rank()
if Q.ngens() != n:
raise ValueError("the number of generators for the polynomial ring must be the same as the rank of the root system")
basis_elements = [alpha[i] for i in W.index_set()]
basis_to_order = {s: i for i, s in enumerate(W.index_set())}
results = []
for poly in [f.numerator(), f.denominator()]:
result = 0
exponents = poly.exponents()
for exponent in exponents:
#Construct something in the root lattice from the exponent vector
exponent = sum(e*b for e, b in zip(exponent, basis_elements))
exponent = self.action(exponent)
monomial = 1
for s, c in exponent.monomial_coefficients().items():
monomial *= Q_gens[basis_to_order[s]]**int(c)
result += monomial
results.append(result)
numerator, denominator = results
return numerator / denominator
|
py | b4118cf502b36b6cfa212e36134b00b863deb66d | """
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime as dt
import logging
import time
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from enum import Enum
from typing import Dict, List, Optional, Tuple, Union
import pandas as pd
from pydash import get
from gs_quant.api.gs.assets import GsAssetApi
from gs_quant.api.gs.carbon import CarbonCard, GsCarbonApi, CarbonTargetCoverageCategory, CarbonScope, \
CarbonEmissionsAllocationCategory, CarbonEmissionsIntensityType, CarbonCoverageCategory, CarbonEntityType, \
CarbonAnalyticsView
from gs_quant.api.gs.data import GsDataApi
from gs_quant.api.gs.esg import ESGMeasure, GsEsgApi, ESGCard
from gs_quant.api.gs.indices import GsIndexApi
from gs_quant.api.gs.portfolios import GsPortfolioApi
from gs_quant.api.gs.reports import GsReportApi
from gs_quant.api.gs.thematics import ThematicMeasure, GsThematicApi, Region
from gs_quant.common import DateLimit, PositionType, Currency
from gs_quant.data import DataCoordinate, DataFrequency, DataMeasure
from gs_quant.data.coordinate import DataDimensions
from gs_quant.entities.entitlements import Entitlements
from gs_quant.errors import MqError, MqValueError
from gs_quant.markets.indices_utils import BasketType, IndicesDatasets
from gs_quant.markets.position_set import PositionSet, Position
from gs_quant.markets.report import PerformanceReport, FactorRiskReport, Report, ThematicReport, \
flatten_results_into_df, get_thematic_breakdown_as_df
from gs_quant.models.risk_model import FactorRiskModel
from gs_quant.session import GsSession
from gs_quant.target.data import DataQuery
from gs_quant.target.reports import ReportStatus, ReportType
_logger = logging.getLogger(__name__)
class EntityType(Enum):
ASSET = 'asset'
BACKTEST = 'backtest'
COUNTRY = 'country'
HEDGE = 'hedge'
KPI = 'kpi'
PORTFOLIO = 'portfolio'
REPORT = 'report'
RISK_MODEL = 'risk_model'
SUBDIVISION = 'subdivision'
DATASET = 'dataset'
@dataclass
class EntityKey:
id_: str
entity_type: EntityType
class EntityIdentifier(Enum):
pass
class Entity(metaclass=ABCMeta):
"""Base class for any first-class entity"""
_entity_to_endpoint = {
EntityType.ASSET: 'assets',
EntityType.COUNTRY: 'countries',
EntityType.SUBDIVISION: 'countries/subdivisions',
EntityType.KPI: 'kpis',
EntityType.PORTFOLIO: 'portfolios',
EntityType.RISK_MODEL: 'risk/models',
EntityType.DATASET: 'data/datasets'
}
def __init__(self,
id_: str,
entity_type: EntityType,
entity: Optional[Dict] = None):
self.__id: str = id_
self.__entity_type: EntityType = entity_type
self.__entity: Dict = entity
@property
@abstractmethod
def data_dimension(self) -> str:
pass
@classmethod
@abstractmethod
def entity_type(cls) -> EntityType:
pass
@classmethod
def get(cls,
id_value: str,
id_type: Union[EntityIdentifier, str],
entity_type: Optional[Union[EntityType, str]] = None):
id_type = id_type.value if isinstance(id_type, Enum) else id_type
if entity_type is None:
entity_type = cls.entity_type()
endpoint = cls._entity_to_endpoint[entity_type]
else:
entity_type = entity_type.value if isinstance(entity_type, Enum) else entity_type
endpoint = cls._entity_to_endpoint[EntityType(entity_type)]
if entity_type == 'asset':
from gs_quant.markets.securities import SecurityMaster, AssetIdentifier
return SecurityMaster.get_asset(id_value, AssetIdentifier.MARQUEE_ID)
if id_type == 'MQID':
result = GsSession.current._get(f'/{endpoint}/{id_value}')
else:
result = get(GsSession.current._get(f'/{endpoint}?{id_type.lower()}={id_value}'), 'results.0')
if result:
return cls._get_entity_from_type(result, EntityType(entity_type))
@classmethod
def _get_entity_from_type(cls,
entity: Dict,
entity_type: EntityType = None):
id_ = entity.get('id')
entity_type = entity_type or cls.entity_type()
if entity_type == EntityType.COUNTRY:
return Country(id_, entity=entity)
if entity_type == EntityType.KPI:
return KPI(id_, entity=entity)
if entity_type == EntityType.SUBDIVISION:
return Subdivision(id_, entity=entity)
if entity_type == EntityType.RISK_MODEL:
return RiskModelEntity(id_, entity=entity)
def get_marquee_id(self) -> str:
return self.__id
def get_entity(self) -> Optional[Dict]:
return self.__entity
def get_unique_entity_key(self) -> EntityKey:
return EntityKey(self.__id, self.__entity_type)
def get_data_coordinate(self,
measure: Union[DataMeasure, str],
dimensions: Optional[DataDimensions] = None,
frequency: DataFrequency = DataFrequency.DAILY,
availability=None) -> DataCoordinate:
id_ = self.get_marquee_id()
dimensions = dimensions or {}
dimensions[self.data_dimension] = id_
measure = measure if isinstance(measure, str) else measure.value
available: Dict = GsDataApi.get_data_providers(id_, availability).get(measure, {})
if frequency == DataFrequency.DAILY:
daily_dataset_id = available.get(DataFrequency.DAILY)
return DataCoordinate(dataset_id=daily_dataset_id, measure=measure, dimensions=dimensions,
frequency=frequency)
if frequency == DataFrequency.REAL_TIME:
rt_dataset_id = available.get(DataFrequency.REAL_TIME)
return DataCoordinate(dataset_id=rt_dataset_id, measure=measure, dimensions=dimensions, frequency=frequency)
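    # Added usage sketch (illustrative; the measure string and the follow-up
    # get_series() call are assumptions, not taken from this module):
    #   coord = entity.get_data_coordinate('closePrice', frequency=DataFrequency.DAILY)
    #   series = coord.get_series()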
def get_entitlements(self):
entitlements_dict = self.get_entity().get('entitlements')
if entitlements_dict is None:
raise ValueError('This entity does not have entitlements.')
return Entitlements.from_dict(entitlements_dict)
class Country(Entity):
class Identifier(EntityIdentifier):
MARQUEE_ID = 'MQID'
NAME = 'name'
def __init__(self,
id_: str,
entity: Optional[Dict] = None):
super().__init__(id_, EntityType.COUNTRY, entity)
@property
def data_dimension(self) -> str:
return 'countryId'
@classmethod
def entity_type(cls) -> EntityType:
return EntityType.COUNTRY
@classmethod
def get_by_identifier(cls,
id_value: str,
id_type: Identifier) -> Optional['Entity']:
super().get(id_value, id_type)
def get_name(self) -> Optional[str]:
return get(self.get_entity(), 'name')
def get_region(self) -> Optional[str]:
return get(self.get_entity(), 'region')
def get_sub_region(self):
return get(self.get_entity(), 'subRegion')
def get_region_code(self):
return get(self.get_entity(), 'regionCode')
def get_sub_region_code(self):
return get(self.get_entity(), 'subRegionCode')
def get_alpha3(self):
return get(self.get_entity(), 'xref.alpha3')
def get_bbid(self):
return get(self.get_entity(), 'xref.bbid')
def get_alpha2(self):
return get(self.get_entity(), 'xref.alpha2')
def get_country_code(self):
return get(self.get_entity(), 'xref.countryCode')
class Subdivision(Entity):
class Identifier(EntityIdentifier):
MARQUEE_ID = 'MQID'
name = 'name'
def __init__(self,
id_: str,
entity: Optional[Dict] = None):
super().__init__(id_, EntityType.SUBDIVISION, entity)
@property
def data_dimension(self) -> str:
return 'subdivisionId'
@classmethod
def entity_type(cls) -> EntityType:
return EntityType.SUBDIVISION
@classmethod
def get_by_identifier(cls,
id_value: str,
id_type: Identifier) -> Optional['Entity']:
super().get(id_value, id_type)
def get_name(self) -> Optional[str]:
return get(self.get_entity(), 'name')
class KPI(Entity):
class Identifier(EntityIdentifier):
MARQUEE_ID = "MQID"
name = 'name'
def __init__(self,
id_: str,
entity: Optional[Dict] = None):
super().__init__(id_, EntityType.KPI, entity)
@property
def data_dimension(self) -> str:
return 'kpiId'
@classmethod
def entity_type(cls) -> EntityType:
return EntityType.KPI
@classmethod
def get_by_identifier(cls,
id_value: str,
id_type: Identifier) -> Optional['Entity']:
super().get(id_value, id_type)
def get_name(self) -> Optional[str]:
return get(self.get_entity(), 'name')
def get_category(self) -> Optional[str]:
return get(self.get_entity(), 'category')
def get_sub_category(self):
return get(self.get_entity(), 'subCategory')
class RiskModelEntity(Entity):
class Identifier(EntityIdentifier):
MARQUEE_ID = "MQID"
name = 'name'
def __init__(self,
id_: str,
entity: Optional[Dict] = None):
super().__init__(id_, EntityType.RISK_MODEL, entity)
@property
def data_dimension(self) -> str:
return 'riskModel'
@classmethod
def entity_type(cls) -> EntityType:
return EntityType.RISK_MODEL
@classmethod
def get_by_identifier(cls,
id_value: str,
id_type: Identifier) -> Optional['Entity']:
super().get(id_value, id_type)
def get_name(self) -> Optional[str]:
return get(self.get_entity(), 'name')
def get_coverage(self) -> Optional[str]:
return get(self.get_entity(), 'coverage')
def get_term(self) -> Optional[str]:
return get(self.get_entity(), 'term')
def get_vendor(self) -> Optional[str]:
return get(self.get_entity(), 'vendor')
class PositionedEntity(metaclass=ABCMeta):
def __init__(self, id_: str, entity_type: EntityType):
self.__id: str = id_
self.__entity_type: EntityType = entity_type
@property
def id(self) -> str:
return self.__id
@property
def positioned_entity_type(self) -> EntityType:
return self.__entity_type
def get_entitlements(self) -> Entitlements:
if self.positioned_entity_type == EntityType.PORTFOLIO:
response = GsPortfolioApi.get_portfolio(self.id)
elif self.positioned_entity_type == EntityType.ASSET:
response = GsAssetApi.get_asset(self.id)
else:
raise NotImplementedError
return Entitlements.from_target(response.entitlements)
def get_latest_position_set(self,
position_type: PositionType = PositionType.CLOSE) -> PositionSet:
if self.positioned_entity_type == EntityType.ASSET:
response = GsAssetApi.get_latest_positions(self.id, position_type)
return PositionSet.from_target(response)
if self.positioned_entity_type == EntityType.PORTFOLIO:
response = GsPortfolioApi.get_latest_positions(portfolio_id=self.id,
position_type=position_type.value)
return PositionSet.from_target(response)
raise NotImplementedError
def get_position_set_for_date(self,
date: dt.date,
position_type: PositionType = PositionType.CLOSE) -> PositionSet:
if self.positioned_entity_type == EntityType.ASSET:
response = GsAssetApi.get_asset_positions_for_date(self.id, date, position_type)[0]
return PositionSet.from_target(response)
if self.positioned_entity_type == EntityType.PORTFOLIO:
response = GsPortfolioApi.get_positions_for_date(portfolio_id=self.id,
position_date=date,
position_type=position_type.value)
return PositionSet.from_target(response) if response else None
raise NotImplementedError
def get_position_sets(self,
start: dt.date = DateLimit.LOW_LIMIT.value,
end: dt.date = dt.date.today(),
position_type: PositionType = PositionType.CLOSE) -> List[PositionSet]:
if self.positioned_entity_type == EntityType.ASSET:
response = GsAssetApi.get_asset_positions_for_dates(self.id, start, end, position_type)
return [PositionSet.from_target(position_set) for position_set in response]
if self.positioned_entity_type == EntityType.PORTFOLIO:
response = GsPortfolioApi.get_positions(portfolio_id=self.id,
start_date=start,
end_date=end)
return [PositionSet.from_target(position_set) for position_set in response]
raise NotImplementedError
def update_positions(self,
position_sets: List[PositionSet],
net_positions: bool = True):
if self.positioned_entity_type == EntityType.PORTFOLIO:
if not position_sets:
return
currency = GsPortfolioApi.get_portfolio(self.id).currency
new_sets = []
for pos_set in position_sets:
if pos_set.reference_notional is None:
incorrect_set = any([pos.quantity is None or pos.weight is not None for pos in pos_set.positions])
if incorrect_set:
raise MqValueError('If you would like to upload position sets without notionals, '
'every position must have a quantity and cannot have a weight.')
new_sets.append(pos_set)
else:
new_sets.append(self._convert_pos_set_with_weights(pos_set, currency))
GsPortfolioApi.update_positions(portfolio_id=self.id, position_sets=[p.to_target() for p in new_sets])
time.sleep(3)
else:
raise NotImplementedError
@staticmethod
def _convert_pos_set_with_weights(position_set: PositionSet, currency: Currency) -> PositionSet:
positions_to_price = []
for position in position_set.positions:
if position.weight is None:
raise MqValueError('If you are uploading a position set with a notional value, every position in that '
'set must have a weight')
if position.quantity is not None:
raise MqValueError('If you are uploading a position set with a notional value, no position in that '
'set can have a quantity')
positions_to_price.append({
'assetId': position.asset_id,
'weight': position.weight
})
payload = {
'positions': positions_to_price,
'parameters': {
'targetNotional': position_set.reference_notional,
'currency': currency.value,
'pricingDate': position_set.date.strftime('%Y-%m-%d'),
'assetDataSetId': 'GSEOD',
'notionalType': 'Gross'
}
}
try:
price_results = GsSession.current._post('/price/positions', payload)
except Exception as e:
raise MqValueError('There was an error pricing your positions. Please try uploading your positions as '
f'quantities instead: {e}')
positions = [Position(identifier=p['assetId'],
asset_id=p['assetId'],
quantity=p['quantity']) for p in price_results['positions']]
return PositionSet(date=position_set.date,
positions=positions)
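    # Added note (illustrative): update_positions() accepts two shapes of
    # PositionSet - quantity-based sets (no reference_notional; every Position has
    # a quantity and no weight) are uploaded as-is, while weight-based sets (a
    # reference_notional plus weights and no quantities) are first priced into
    # quantities by _convert_pos_set_with_weights() above.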
def get_positions_data(self,
start: dt.date = DateLimit.LOW_LIMIT.value,
end: dt.date = dt.date.today(),
fields: [str] = None,
position_type: PositionType = PositionType.CLOSE) -> List[Dict]:
if self.positioned_entity_type == EntityType.ASSET:
return GsIndexApi.get_positions_data(self.id, start, end, fields, position_type)
if self.positioned_entity_type == EntityType.PORTFOLIO:
return GsPortfolioApi.get_positions_data(self.id, start, end, fields, position_type)
raise NotImplementedError
def get_position_dates(self) -> Tuple[dt.date, ...]:
if self.positioned_entity_type == EntityType.PORTFOLIO:
return GsPortfolioApi.get_position_dates(portfolio_id=self.id)
if self.positioned_entity_type == EntityType.ASSET:
return GsAssetApi.get_position_dates(asset_id=self.id)
raise NotImplementedError
def get_reports(self) -> List[Report]:
if self.positioned_entity_type == EntityType.PORTFOLIO:
reports_as_target = GsPortfolioApi.get_reports(portfolio_id=self.id)
elif self.positioned_entity_type == EntityType.ASSET:
reports_as_target = GsAssetApi.get_reports(asset_id=self.id)
else:
raise NotImplementedError
report_objects = []
for report in reports_as_target:
if report.type == ReportType.Portfolio_Performance_Analytics:
report_objects.append(PerformanceReport.from_target(report))
elif report.type in [ReportType.Portfolio_Factor_Risk, ReportType.Asset_Factor_Risk]:
report_objects.append(FactorRiskReport.from_target(report))
else:
report_objects.append(Report.from_target(report))
return report_objects
def get_report_factor_risk_models(self) -> Tuple[FactorRiskModel, ...]:
all_reports = self.get_reports()
risk_model_ids = []
for report in all_reports:
if report.parameters.risk_model is not None and report.parameters.risk_model not in risk_model_ids:
risk_model_ids.append(report.parameters.risk_model)
if not len(risk_model_ids):
raise ValueError('No factor risk models available for ' + self.id)
risk_models = FactorRiskModel.get_many(risk_model_ids)
return risk_models
def get_status_of_reports(self) -> pd.DataFrame:
reports = self.get_reports()
reports_dict = {
'Name': [r.name for r in reports],
'ID': [r.id for r in reports],
'Latest Execution Time': [r.latest_execution_time for r in reports],
'Latest End Date': [r.latest_end_date for r in reports],
"Status": [r.status for r in reports],
'Percentage Complete': [r.percentage_complete for r in reports]
}
return pd.DataFrame.from_dict(reports_dict)
def get_factor_risk_reports(self, fx_hedged: bool = None) -> List[FactorRiskReport]:
if self.positioned_entity_type in [EntityType.PORTFOLIO, EntityType.ASSET]:
position_source_type = self.positioned_entity_type.value.capitalize()
reports = GsReportApi.get_reports(limit=100,
position_source_type=position_source_type,
position_source_id=self.id,
report_type=f'{position_source_type} Factor Risk')
if fx_hedged:
reports = [report for report in reports if report.parameters.fx_hedged == fx_hedged]
if len(reports) == 0:
raise MqError(f'This {position_source_type} has no factor risk reports that match your parameters.')
return [FactorRiskReport.from_target(report) for report in reports]
raise NotImplementedError
def get_factor_risk_report(self,
risk_model_id: str = None,
fx_hedged: bool = None) -> FactorRiskReport:
position_source_type = self.positioned_entity_type.value.capitalize()
reports = self.get_factor_risk_reports(fx_hedged=fx_hedged)
if risk_model_id:
reports = [report for report in reports if report.parameters.risk_model == risk_model_id]
if len(reports) > 1:
raise MqError(f'This {position_source_type} has more than one factor risk report that matches '
'your parameters. Please specify the risk model ID and fxHedged value in the '
'function parameters.')
return reports[0]
def get_thematic_report(self) -> ThematicReport:
if self.positioned_entity_type in [EntityType.PORTFOLIO, EntityType.ASSET]:
position_source_type = self.positioned_entity_type.value.capitalize()
reports = GsReportApi.get_reports(limit=100,
position_source_type=position_source_type,
position_source_id=self.id,
report_type=f'{position_source_type} Thematic Analytics')
if len(reports) == 0:
raise MqError(f'This {position_source_type} has no thematic analytics report.')
return ThematicReport.from_target(reports[0])
raise NotImplementedError
def poll_report(self, report_id: str, timeout: int = 600, step: int = 30) -> ReportStatus:
poll = True
timeout = 1800 if timeout > 1800 else timeout
step = 15 if step < 15 else step
end = dt.datetime.now() + dt.timedelta(seconds=timeout)
while poll and dt.datetime.now() <= end:
try:
status = Report.get(report_id).status
if status not in {ReportStatus.error, ReportStatus.cancelled, ReportStatus.done}:
_logger.info(f'Report is {status} as of {dt.datetime.now().isoformat()}')
time.sleep(step)
else:
poll = False
if status == ReportStatus.error:
raise MqError(f'Report {report_id} has failed for {self.id}. \
Please reach out to the Marquee team for assistance.')
elif status == ReportStatus.cancelled:
_logger.info(f'Report {report_id} has been cancelled. Please reach out to the \
Marquee team if you believe this is a mistake.')
return status
else:
_logger.info(f'Report {report_id} is now complete')
return status
except Exception as err:
raise MqError(f'Could not fetch report status with error {err}')
raise MqError('The report is taking longer than expected to complete. \
Please check again later or reach out to the Marquee team for assistance.')
def get_all_esg_data(self,
measures: List[ESGMeasure] = None,
cards: List[ESGCard] = None,
pricing_date: dt.date = None,
benchmark_id: str = None) -> Dict:
"""
Get all ESG Data
:param measures: list of ESG Measures to include in results
:param cards: list of ESG Cards to include in results
:param pricing_date: optional pricing date; defaults to last previous business day
:param benchmark_id: optional benchmark asset ID to include in results
:return: a dictionary of results
"""
return GsEsgApi.get_esg(entity_id=self.id,
pricing_date=pricing_date,
cards=cards if cards else [c for c in ESGCard],
measures=measures if measures else [m for m in ESGMeasure],
benchmark_id=benchmark_id)
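    # Added usage sketch (illustrative; the portfolio id and measure below are
    # placeholders, not taken from this module):
    #   pm = PortfolioManager('MPXXXXXXXXXX')      # any PositionedEntity subclass
    #   summary = pm.get_esg_summary()
    #   quintiles = pm.get_esg_quintiles(measure=ESGMeasure.ES_PERCENTILE)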
def get_esg_summary(self,
pricing_date: dt.date = None) -> pd.DataFrame:
summary_data = GsEsgApi.get_esg(entity_id=self.id,
pricing_date=pricing_date,
cards=[ESGCard.SUMMARY]).get('summary')
return pd.DataFrame(summary_data)
def get_esg_quintiles(self,
measure: ESGMeasure,
pricing_date: dt.date = None) -> pd.DataFrame:
"""
Get breakdown of entity by weight in each percentile quintile for requested ESG measure
:param measure: ESG Measure
:param pricing_date: optional pricing date; defaults to last previous business day
:return: a Pandas DataFrame with results
"""
quintile_data = GsEsgApi.get_esg(entity_id=self.id,
pricing_date=pricing_date,
cards=[ESGCard.QUINTILES],
measures=[measure]).get('quintiles')[0].get('results')
df = pd.DataFrame(quintile_data)
df = df.filter(items=['description', 'gross', 'long', 'short'])
return df.set_index('description')
def get_esg_by_sector(self,
measure: ESGMeasure,
pricing_date: dt.date = None) -> pd.DataFrame:
"""
Get breakdown of entity by sector, along with the weighted average score of the compositions in each sector
:param measure: ESG Measure
:param pricing_date: optional pricing date; defaults to last previous business day
:return: a Pandas DataFrame with results
"""
return self._get_esg_breakdown(ESGCard.MEASURES_BY_SECTOR, measure, pricing_date)
def get_esg_by_region(self,
measure: ESGMeasure,
pricing_date: dt.date = None) -> pd.DataFrame:
"""
Get breakdown of entity by region, along with the weighted average score of the compositions in each region
:param measure: ESG Measure
:param pricing_date: optional pricing date; defaults to last previous business day
:return: a Pandas DataFrame with results
"""
return self._get_esg_breakdown(ESGCard.MEASURES_BY_REGION, measure, pricing_date)
def get_esg_top_ten(self,
measure: ESGMeasure,
pricing_date: dt.date = None):
"""
Get entity constituents with the ten highest ESG percentile values
:param measure: ESG Measure
:param pricing_date: optional pricing date; defaults to last previous business day
:return: a Pandas DataFrame with results
"""
return self._get_esg_ranked_card(ESGCard.TOP_TEN_RANKED, measure, pricing_date)
def get_esg_bottom_ten(self,
measure: ESGMeasure,
pricing_date: dt.date = None) -> pd.DataFrame:
"""
Get entity constituents with the ten lowest ESG percentile values
:param measure: ESG Measure
:param pricing_date: optional pricing date; defaults to last previous business day
:return: a Pandas DataFrame with results
"""
return self._get_esg_ranked_card(ESGCard.BOTTOM_TEN_RANKED, measure, pricing_date)
def _get_esg_ranked_card(self,
card: ESGCard,
measure: ESGMeasure,
pricing_date: dt.date = None) -> pd.DataFrame:
data = GsEsgApi.get_esg(entity_id=self.id,
pricing_date=pricing_date,
cards=[card],
measures=[measure]).get(card.value)[0].get('results')
df = pd.DataFrame(data)
return df.set_index('assetId')
def _get_esg_breakdown(self,
card: ESGCard,
measure: ESGMeasure,
pricing_date: dt.date = None) -> pd.DataFrame:
sector_data = GsEsgApi.get_esg(entity_id=self.id,
pricing_date=pricing_date,
cards=[card],
measures=[measure]).get(card.value)[0].get('results')
df = pd.DataFrame(sector_data)
return df.set_index('name')
def get_carbon_analytics(self,
benchmark_id: str = None,
reporting_year: str = 'Latest',
currency: Currency = None,
include_estimates: bool = False,
use_historical_data: bool = False,
normalize_emissions: bool = False,
cards: List[CarbonCard] = [c for c in CarbonCard],
analytics_view: CarbonAnalyticsView = CarbonAnalyticsView.LONG) -> Dict:
return GsCarbonApi.get_carbon_analytics(entity_id=self.id,
benchmark_id=benchmark_id,
reporting_year=reporting_year,
currency=currency,
include_estimates=include_estimates,
use_historical_data=use_historical_data,
normalize_emissions=normalize_emissions,
cards=cards,
analytics_view=analytics_view)
def get_carbon_coverage(self,
reporting_year: str = 'Latest',
include_estimates: bool = False,
use_historical_data: bool = False,
coverage_category: CarbonCoverageCategory = CarbonCoverageCategory.WEIGHTS,
analytics_view: CarbonAnalyticsView = CarbonAnalyticsView.LONG) -> pd.DataFrame:
coverage = self.get_carbon_analytics(reporting_year=reporting_year,
include_estimates=include_estimates,
use_historical_data=use_historical_data,
cards=[CarbonCard.COVERAGE],
analytics_view=analytics_view).get(CarbonCard.COVERAGE.value).get(
coverage_category.value, {}).get(CarbonEntityType.PORTFOLIO.value, {})
return pd.DataFrame(coverage)
def get_carbon_sbti_netzero_coverage(self,
reporting_year: str = 'Latest',
include_estimates: bool = False,
use_historical_data: bool = False,
target_coverage_category: CarbonTargetCoverageCategory =
CarbonTargetCoverageCategory.PORTFOLIO_EMISSIONS,
analytics_view: CarbonAnalyticsView =
CarbonAnalyticsView.LONG) -> pd.DataFrame:
coverage = self.get_carbon_analytics(reporting_year=reporting_year,
include_estimates=include_estimates,
use_historical_data=use_historical_data,
cards=[CarbonCard.SBTI_AND_NET_ZERO_TARGETS],
analytics_view=analytics_view).get(
CarbonCard.SBTI_AND_NET_ZERO_TARGETS.value).get(target_coverage_category.value, {})
coverage = {target: target_coverage.get(CarbonEntityType.PORTFOLIO.value, {}) for target, target_coverage in
coverage.items()}
return pd.DataFrame(coverage)
def get_carbon_emissions(self,
currency: Currency = None,
include_estimates: bool = False,
use_historical_data: bool = False,
normalize_emissions: bool = False,
scope: CarbonScope = CarbonScope.TOTAL_GHG,
analytics_view: CarbonAnalyticsView = CarbonAnalyticsView.LONG) -> pd.DataFrame:
emissions = self.get_carbon_analytics(currency=currency,
include_estimates=include_estimates,
use_historical_data=use_historical_data,
normalize_emissions=normalize_emissions,
cards=[CarbonCard.EMISSIONS],
analytics_view=analytics_view).get(CarbonCard.EMISSIONS.value).get(
scope.value, {}).get(CarbonEntityType.PORTFOLIO.value, [])
return pd.DataFrame(emissions)
def get_carbon_emissions_allocation(self,
reporting_year: str = 'Latest',
currency: Currency = None,
include_estimates: bool = False,
use_historical_data: bool = False,
normalize_emissions: bool = False,
scope: CarbonScope = CarbonScope.TOTAL_GHG,
classification: CarbonEmissionsAllocationCategory =
CarbonEmissionsAllocationCategory.GICS_SECTOR,
analytics_view: CarbonAnalyticsView = CarbonAnalyticsView.LONG) -> pd.DataFrame:
allocation = self.get_carbon_analytics(reporting_year=reporting_year,
currency=currency,
include_estimates=include_estimates,
use_historical_data=use_historical_data,
normalize_emissions=normalize_emissions,
cards=[CarbonCard.ALLOCATIONS],
analytics_view=analytics_view).get(CarbonCard.ALLOCATIONS.value).get(
scope.value, {}).get(CarbonEntityType.PORTFOLIO.value, {}).get(classification.value)
return pd.DataFrame(allocation).rename(columns={'name': classification.value})
def get_carbon_attribution_table(self,
benchmark_id: str,
reporting_year: str = 'Latest',
currency: Currency = None,
include_estimates: bool = False,
use_historical_data: bool = False,
scope: CarbonScope = CarbonScope.TOTAL_GHG,
intensity_metric: CarbonEmissionsIntensityType =
CarbonEmissionsIntensityType.EI_ENTERPRISE_VALUE,
analytics_view: CarbonAnalyticsView = CarbonAnalyticsView.LONG) -> pd.DataFrame:
attribution = self.get_carbon_analytics(benchmark_id=benchmark_id,
reporting_year=reporting_year,
currency=currency,
include_estimates=include_estimates,
use_historical_data=use_historical_data,
cards=[CarbonCard.ATTRIBUTION],
analytics_view=analytics_view).get(CarbonCard.ATTRIBUTION.value).get(
scope.value, [])
attribution_table = []
for entry in attribution:
new_entry = {
'sector': entry.get('sector'),
'weightPortfolio': entry.get('weightPortfolio'),
'weightBenchmark': entry.get('weightBenchmark'),
'weightComparison': entry.get('weightComparison')
}
new_entry.update(entry.get(intensity_metric.value, {}))
attribution_table.append(new_entry)
return pd.DataFrame(attribution_table)
def get_thematic_exposure(self,
basket_identifier: str,
notional: int = 10000000,
start: dt.date = DateLimit.LOW_LIMIT.value,
end: dt.date = dt.date.today()) -> pd.DataFrame:
if not self.positioned_entity_type == EntityType.ASSET:
raise NotImplementedError
response = GsAssetApi.resolve_assets(identifier=[basket_identifier],
fields=['id', 'type'], limit=1)[basket_identifier]
_id, _type = get(response, '0.id'), get(response, '0.type')
if len(response) == 0 or _id is None:
raise MqValueError(f'Basket could not be found using identifier {basket_identifier}.')
if _type not in BasketType.to_list():
raise MqValueError(f'Asset {basket_identifier} of type {_type} is not a Custom or Research Basket.')
query = DataQuery(where={'assetId': self.id, 'basketId': _id}, start_date=start, end_date=end)
response = GsDataApi.query_data(query=query, dataset_id=IndicesDatasets.COMPOSITE_THEMATIC_BETAS.value)
df = []
for r in response:
df.append({'date': r['date'], 'assetId': r['assetId'], 'basketId': r['basketId'],
'thematicExposure': r['beta'] * notional})
df = pd.DataFrame(df)
return df.set_index('date')
def get_thematic_beta(self,
basket_identifier: str,
start: dt.date = DateLimit.LOW_LIMIT.value,
end: dt.date = dt.date.today()) -> pd.DataFrame:
if not self.positioned_entity_type == EntityType.ASSET:
raise NotImplementedError
response = GsAssetApi.resolve_assets(identifier=[basket_identifier],
fields=['id', 'type'], limit=1)[basket_identifier]
_id, _type = get(response, '0.id'), get(response, '0.type')
if len(response) == 0 or _id is None:
raise MqValueError(f'Basket could not be found using identifier {basket_identifier}.')
if _type not in BasketType.to_list():
raise MqValueError(f'Asset {basket_identifier} of type {_type} is not a Custom or Research Basket.')
query = DataQuery(where={'assetId': self.id, 'basketId': _id}, start_date=start, end_date=end)
response = GsDataApi.query_data(query=query, dataset_id=IndicesDatasets.COMPOSITE_THEMATIC_BETAS.value)
df = []
for r in response:
df.append({'date': r['date'], 'assetId': r['assetId'], 'basketId': r['basketId'],
'thematicBeta': r['beta']})
df = pd.DataFrame(df)
return df.set_index('date')
def get_all_thematic_exposures(self,
start_date: dt.date = None,
end_date: dt.date = None,
basket_ids: List[str] = None,
regions: List[Region] = None) -> pd.DataFrame:
if self.positioned_entity_type == EntityType.PORTFOLIO:
raise NotImplementedError
results = GsThematicApi.get_thematics(entity_id=self.id,
start_date=start_date,
end_date=end_date,
basket_ids=basket_ids,
regions=regions,
measures=[ThematicMeasure.ALL_THEMATIC_EXPOSURES])
return flatten_results_into_df(results)
def get_top_five_thematic_exposures(self,
start_date: dt.date = None,
end_date: dt.date = None,
basket_ids: List[str] = None,
regions: List[Region] = None) -> pd.DataFrame:
if self.positioned_entity_type == EntityType.PORTFOLIO:
raise NotImplementedError
results = GsThematicApi.get_thematics(entity_id=self.id,
start_date=start_date,
end_date=end_date,
basket_ids=basket_ids,
regions=regions,
measures=[ThematicMeasure.TOP_FIVE_THEMATIC_EXPOSURES])
return flatten_results_into_df(results)
def get_bottom_five_thematic_exposures(self,
start_date: dt.date = None,
end_date: dt.date = None,
basket_ids: List[str] = None,
regions: List[Region] = None) -> pd.DataFrame:
if self.positioned_entity_type == EntityType.PORTFOLIO:
raise NotImplementedError
results = GsThematicApi.get_thematics(entity_id=self.id,
start_date=start_date,
end_date=end_date,
basket_ids=basket_ids,
regions=regions,
measures=[ThematicMeasure.BOTTOM_FIVE_THEMATIC_EXPOSURES])
return flatten_results_into_df(results)
def get_thematic_breakdown(self,
date: dt.date,
basket_id: str) -> pd.DataFrame:
"""
Get a by-asset breakdown of a portfolio or basket's thematic exposure to a particular flagship basket on a
particular date
:param date: date
:param basket_id: GS flagship basket's unique Marquee ID
:return: a Pandas DataFrame with results
"""
if self.positioned_entity_type == EntityType.PORTFOLIO:
raise NotImplementedError
return get_thematic_breakdown_as_df(entity_id=self.id, date=date, basket_id=basket_id)
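    # Added usage sketch (illustrative; ids are placeholders): any PositionedEntity
    # subclass can combine the report and thematic helpers defined above, e.g.
    #   status = entity.get_status_of_reports()                        # DataFrame
    #   risk = entity.get_factor_risk_report(risk_model_id='AXUS4M')
    #   exposures = entity.get_all_thematic_exposures(basket_ids=['MPXXXXXXXXXX'])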
|
py | b4118d18a3aeba2710136f9212c4b935710736e5 | from ramda.curry import curry
map = curry(lambda f, xs: [f(x) for x in xs])
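# --- illustrative usage (added sketch, not part of the original module) ---
# Assumes ramda's `curry` supports both full and partial application styles.
if __name__ == "__main__":
    double = map(lambda x: x * 2)        # partially applied: still waiting for xs
    print(double([1, 2, 3]))             # expected: [2, 4, 6]
    print(map(lambda x: x + 1, [1, 2]))  # expected: [2, 3]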
|
py | b4118d4697e05ea86c699f73977b4f02331e84f9 | import time
from collections import defaultdict
from datetime import datetime
from django.shortcuts import render
from selfservice.aggregator_adapter import get_aggregator_adapter
from django.http import HttpResponse
from django.shortcuts import redirect
from django.core.exceptions import ObjectDoesNotExist
from .admin import ChoreAdmin
from .models import ChoreVolunteer, Chore
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.core.mail import EmailMessage
from django.template.loader import render_to_string, get_template
import logging
logger = logging.getLogger(__name__)
@login_required
def index(request):
current_user_id = request.user.id
aggregator_adapter = get_aggregator_adapter()
if not aggregator_adapter:
return HttpResponse("No aggregator configuration found", status=500, content_type="text/plain")
now = time.time()
volunteers_turns = ChoreVolunteer.objects.filter(timestamp__gte=now)
volunteers_by_key = defaultdict(list)
for turn in volunteers_turns:
key = f'{turn.chore.id}-{turn.timestamp}'
volunteers_by_key[key].append(turn.user)
data = aggregator_adapter.get_chores()
event_groups = []
ts = None
    if data is not None:
for event in data['events']:
event_ts = datetime.fromtimestamp(event['when']['timestamp'])
event_ts_str = event_ts.strftime('%d%m%Y')
event['time_str'] = event_ts.strftime('%H:%M')
chore_id = event['chore']['chore_id']
timestamp = event['when']['timestamp']
event['volunteers'] = volunteers_by_key[f'{chore_id}-{timestamp}']
num_missing_volunteers = event['chore']['min_required_people'] - len(event['volunteers'])
this_user_volunteered = current_user_id in [user.id for user in event['volunteers']]
if num_missing_volunteers > 0:
for idx in range(num_missing_volunteers):
if idx == 0 and not this_user_volunteered:
event['volunteers'].append('offer_volunteering')
else:
event['volunteers'].append(None)
if event_ts_str != ts:
ts = event_ts_str
event_groups.append({'ts_str': event_ts.strftime('%A %d/%m/%Y'), 'events': []})
event_groups[-1]['events'].append(event)
context = {
'title': 'Chores',
'event_groups': event_groups,
}
return render(request, 'chores.html', context)
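# Added note (illustrative): the aggregator payload consumed by index() is assumed
# to look roughly like
#   {"events": [{"when": {"timestamp": 1700000000},
#                "chore": {"chore_id": 1, "min_required_people": 2}}, ...]}
# with events ordered by timestamp; each event is enriched in place with
# 'time_str' and 'volunteers' before rendering.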
@login_required
def signup(request, chore_id, ts):
try:
chore = Chore.objects.get(pk=chore_id)
except ObjectDoesNotExist as e:
return HttpResponse("Chore not found", status=404, content_type="text/plain")
try:
ChoreVolunteer.objects.create(user=request.user, chore=chore, timestamp=ts)
except Exception as e:
logger.error("Something else went wrong during create: {0}".format(e))
raise e
try:
context = {
'chore': chore,
'volunteer': request.user
}
subject = render_to_string('notify_email.subject.txt', context).strip()
body = render_to_string('notify_email.txt', context)
EmailMessage(subject, body,
to=[request.user.email, settings.MAILINGLIST ],
from_email=settings.DEFAULT_FROM_EMAIL
).send()
except Exception as e:
logger.error("Something else went wrong during mail sent: {0}".format(e))
return redirect('chores')
@login_required
def remove_signup(request, chore_id, ts):
try:
chore = Chore.objects.get(pk=chore_id)
except ObjectDoesNotExist as e:
return HttpResponse("Chore not found", status=404, content_type="text/plain")
try:
ChoreVolunteer.objects.filter(user=request.user, chore=chore, timestamp=ts).delete()
except Exception as e:
logger.error("Something else went wrong during delete: {0}".format(e))
raise e
try:
context = {
'chore': chore,
'volunteer': request.user
}
subject = render_to_string('notify_email_nope.subject.txt', context).strip()
body = render_to_string('notify_email_nope.txt', context)
EmailMessage(subject, body,
to=[request.user.email, settings.MAILINGLIST ],
from_email=settings.DEFAULT_FROM_EMAIL
).send()
except Exception as e:
logger.error("Something else went wrong during remove mail sent: {0}".format(e))
return redirect('chores')
|
py | b4118d933828ec416d26e603446322445d798987 |
from pyhanlp import *
import zipfile
import os
from pyhanlp.static import download, remove_file, HANLP_DATA_PATH
def test_data_path():
"""
    Return the test data path, $root/data/test; the root directory is set in the configuration file.
:return:
"""
data_path = os.path.join(HANLP_DATA_PATH, 'test')
if not os.path.isdir(data_path):
os.mkdir(data_path)
return data_path
## Check that the corpus exists; if it is missing, download and unzip it automatically
def ensure_data(data_name, data_url):
root_path = test_data_path()
dest_path = os.path.join(root_path, data_name)
if os.path.exists(dest_path):
return dest_path
if data_url.endswith('.zip'):
dest_path += '.zip'
download(data_url, dest_path)
if data_url.endswith('.zip'):
with zipfile.ZipFile(dest_path, "r") as archive:
archive.extractall(root_path)
remove_file(dest_path)
dest_path = dest_path[:-len('.zip')]
return dest_path
## Point at the PKU corpus
PKU98 = ensure_data("pku98", "http://file.hankcs.com/corpus/pku98.zip")
PKU199801 = os.path.join(PKU98, '199801.txt')
PKU199801_TRAIN = os.path.join(PKU98, '199801-train.txt')
PKU199801_TEST = os.path.join(PKU98, '199801-test.txt')
POS_MODEL = os.path.join(PKU98, 'pos.bin')
NER_MODEL = os.path.join(PKU98, 'ner.bin')
## ===============================================
## Perceptron-based named entity recognition starts here
NERTrainer = JClass('com.hankcs.hanlp.model.perceptron.NERTrainer')
PerceptronNERecognizer = JClass('com.hankcs.hanlp.model.perceptron.PerceptronNERecognizer')
PerceptronSegmenter = JClass('com.hankcs.hanlp.model.perceptron.PerceptronSegmenter')
PerceptronPOSTagger = JClass('com.hankcs.hanlp.model.perceptron.PerceptronPOSTagger')
Sentence = JClass('com.hankcs.hanlp.corpus.document.sentence.Sentence')
AbstractLexicalAnalyzer = JClass('com.hankcs.hanlp.tokenizer.lexical.AbstractLexicalAnalyzer')
Utility = JClass('com.hankcs.hanlp.model.perceptron.utility.Utility')
PerceptronLexicalAnalyzer = JClass('com.hankcs.hanlp.model.perceptron.PerceptronLexicalAnalyzer')  # needed by the online-learning example below
def train(corpus, model):
trainer = NERTrainer()
return PerceptronNERecognizer(trainer.train(corpus, model).getModel())
def test(recognizer):
    # A lexical analyzer wrapping the perceptron segmenter and POS tagger
analyzer = AbstractLexicalAnalyzer(PerceptronSegmenter(), PerceptronPOSTagger(), recognizer)
print(analyzer.analyze("华北电力公司董事长谭旭光和秘书胡花蕊来到美国纽约现代艺术博物馆参观"))
scores = Utility.evaluateNER(recognizer, PKU199801_TEST)
Utility.printNERScore(scores)
if __name__ == '__main__':
recognizer = train(PKU199801_TRAIN, NER_MODEL)
test(recognizer)
    ## Online learning is supported
    # Create the perceptron lexical analyzer
analyzer = PerceptronLexicalAnalyzer(PerceptronSegmenter(), PerceptronPOSTagger(), recognizer) # ①
    # Build an equivalent Sentence object from the annotated sample's string form
sentence = Sentence.create("与/c 特朗普/nr 通/v 电话/n 讨论/v [太空/s 探索/vn 技术/n 公司/n]/nt") # ②
    # Check whether the analyzer reproduces the annotation; if not, keep learning online until the two agree.
while not analyzer.analyze(sentence.text()).equals(sentence): # ③
analyzer.learn(sentence)
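    # Added note (illustrative): the loop's exit condition guarantees that, once it
    # terminates, analyzer.analyze(sentence.text()) reproduces the gold annotation
    # "与/c 特朗普/nr 通/v 电话/n 讨论/v [太空/s 探索/vn 技术/n 公司/n]/nt" exactly.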
|
py | b4118da483a217221a9bc689cbfdaef38c4b11af | import json
def with_token_query_string(func):
def wrapper(self):
resp = self.simulate_post('/user')
token = resp.json.get('token')
return func(self, "token=%s" % token)
return wrapper
def create_waifu(test_case, token_qs):
    body = json.dumps({'name': 'foo', 'description': 'bar', 'pic': 'baz'})
    resp = test_case.simulate_post('/waifu', query_string=token_qs, body=body)
return resp.json.get('id'), body
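# --- illustrative usage (added sketch; the TestCase base class and route names are
# assumptions, not taken from this helper module) ---
#   class WaifuTests(testing.TestCase):
#       @with_token_query_string
#       def test_create(self, token_qs):
#           waifu_id, body = create_waifu(self, token_qs)
#           self.assertIsNotNone(waifu_id)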
|
py | b4118e2c716b328a984067a9706ba3b80a8ac3ab | #coding:utf-8
#################################
#Copyright(c) 2014 dtysky
#################################
import G2R
class BgSp(G2R.SpSyntax):
def Show(self,Flag,Attrs,US,UT,Tmp,FS):
sw=''
name,Attrs=self.Check(Flag,Attrs,UT,FS)
if Flag not in Tmp.Args:
Tmp.Args[Flag]={}
for tag in ['m','s','w']:
Tmp.Args[Flag][tag]=None
Changed=False
if Tmp.Args[Flag]['m']!=name:
Changed=True
for tag in ['s','w']:
if Tmp.Args[Flag][tag]!=Attrs[tag]:
Changed=True
if name!='Black' and Changed:
if Tmp.Args.get('date') and Tmp.Args['date']['Auto']=='On':
sw+=' hide screen '+US.Args['pathmode']['DateScreen']+'\n'
sw+=' scene bg Black01A with '+Attrs['t']+'\n'
sw+=' scene bg '+name+Attrs['s']+Attrs['w']+' at '+Attrs['l']+'\n'
sw+=' with '+Attrs['t']+'\n'
if Tmp.Args.get('date') and Tmp.Args['date']['Auto']=='On':
sw+=' show screen '+US.Args['pathmode']['DateScreen']+'\n'
Tmp.Args[Flag]['m']=name
for tag in ['s','w']:
Tmp.Args[Flag][tag]=Attrs[tag]
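        # Added note (illustrative): with placeholder attributes name='Street',
        # s='01', w='A', l='center', t='dissolve', the emitted Ren'Py snippet is
        #   scene bg Black01A with dissolve
        #   scene bg Street01A at center
        #   with dissolve
        # plus the date-screen hide/show lines when auto-dating is enabled.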
return sw |
py | b4118ed94e940745a34f0bc5cb0ac1887ccba664 | import pytest
from drf_yasg.openapi import ReferenceResolver
def test_basic():
scopes = ['s1', 's2']
rr = ReferenceResolver(*scopes)
assert scopes == rr.scopes == list(rr.keys()) == list(rr)
rr.set('o1', 1, scope='s1')
assert rr.has('o1', scope='s1')
assert rr.get('o1', scope='s1') == 1
rr.setdefault('o1', lambda: 2, scope='s1')
assert rr.get('o1', scope='s1') == 1
assert not rr.has('o1', scope='s2')
rr.setdefault('o3', lambda: 3, scope='s2')
assert rr.get('o3', scope='s2') == 3
assert rr['s1'] == {'o1': 1}
assert dict(rr) == {'s1': {'o1': 1}, 's2': {'o3': 3}}
assert str(rr) == str(dict(rr))
def test_scoped():
scopes = ['s1', 's2']
rr = ReferenceResolver(*scopes)
r1 = rr.with_scope('s1')
r2 = rr.with_scope('s2')
with pytest.raises(AssertionError):
rr.with_scope('bad')
assert r1.scopes == ['s1']
assert list(r1.keys()) == list(r1) == []
r2.set('o2', 2)
assert r2.scopes == ['s2']
assert list(r2.keys()) == list(r2) == ['o2']
assert r2['o2'] == 2
with pytest.raises(AssertionError):
r2.get('o2', scope='s1')
assert rr.get('o2', scope='s2') == 2
|
py | b41191311f3bc281fc123acc14b6ba0551b19485 | import os
import io
import subprocess
from django.core.management.base import BaseCommand
from instant.conf import CENTRIFUGO_PORT
class Command(BaseCommand):
help = "Run the Centrifugo websockets server"
def handle(self, *args, **options):
basepath = os.getcwd()
c = basepath + "/centrifugo/centrifugo"
conf = basepath + "/centrifugo/config.json"
cmd = [c, "--config", conf, "--port", str(CENTRIFUGO_PORT)]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
for line in io.TextIOWrapper(p.stdout, encoding="utf-8"): # type: ignore
msg = str(line).replace("b'", "")
msg = msg[0:-3]
print(msg)
p.wait()
|
py | b4119173846329409e2b283026f7894a9c3b98e3 | import logging
def getLogger(name):
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
fmt='%(asctime)s %(levelname)-8s %(module)-18s %(funcName)-10s %(lineno)4s: %(message)s'
))
log = logging.getLogger(name)
log.addHandler(handler)
log.setLevel(logging.DEBUG)
return log
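# --- illustrative usage (added sketch, not part of the original helper) ---
if __name__ == "__main__":
    log = getLogger("demo")
    # Emits a line carrying timestamp, level, module, function and line number,
    # matching the format string configured above.
    log.debug("handler and DEBUG level configured")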
|
py | b41191c6d2fb18d836e37819c25f5d47af84eb1c | """
Drives. Can accept input from joysticks or values [-1, 1].
"""
import helpers
import wpilib
import math
from networktables import NetworkTables
class Chassis(object):
def __init__(self, drive, gyro, encoderY):
self.drive = drive
self.gyro = gyro
self.encoderY = encoderY
self.jDeadband = 0.06
self.sd = NetworkTables.getTable('SmartDashboard')
# PID loop for angle
self.pidAngleDefault = {'p': 0.01, 'i': 0, 'd': 0.004}
self.sd.putNumber('pidAngleP', self.pidAngleDefault['p'])
self.sd.putNumber('pidAngleI', self.pidAngleDefault['i'])
self.sd.putNumber('pidAngleD', self.pidAngleDefault['d'])
self.pidAngle = wpilib.PIDController(self.pidAngleDefault['p'], self.pidAngleDefault['i'], self.pidAngleDefault['d'], self.gyro, self.updateAnglePID)
self.pidAngle.setAbsoluteTolerance(2)
self.pidRotateRate = 0
self.wasRotating = False
# PID loop for Cartesian Y direction
self.pidYDefault = {'p': 0.15, 'i': 0, 'd': 0.05}
self.sd.putNumber('pidYP', self.pidYDefault['p'])
self.sd.putNumber('pidYI', self.pidYDefault['i'])
self.sd.putNumber('pidYD', self.pidYDefault['d'])
self.pidY = wpilib.PIDController(self.pidYDefault['p'], self.pidYDefault['i'], self.pidYDefault['d'], self.encoderY.getDistance, self.updateYPID)
self.pidYRate = 0
self.toDistanceFirstCall = True
self.toAngleFirstCall = True
self.toTimeFirstCall = True
self.lastAngle = 0
self.timer = wpilib.Timer()
def run(self, x, y, rotation):
'''Intended for use in telelop. Use .cartesian() for auto.'''
# Map joystick values to curve
x = self.curve(helpers.deadband(x, 0.1))
y = self.curve(helpers.deadband(y, 0.1))
rotation = helpers.deadband(-rotation * 0.5, 0.1)
# write manipulated values to motors
self.cartesian(-x, y, rotation)
def cartesian(self, x, y, rotation):
# assign speeds
speeds = [0] * 4
speeds[0] = x + y + rotation # front left
speeds[1] = -x + y - rotation # front right
speeds[2] = -x + y + rotation # back left
speeds[3] = x + y - rotation # back right
# scales all speeds if one is in range
# (-inf, -1) U (1, inf)
maxSpeed = max(abs(x) for x in speeds)
if maxSpeed > 1.0:
for i in range(0, 4):
speeds[i] = speeds[i] / maxSpeed
# write speeds to controllers
for i in range(0, 4):
self.drive[i].set(speeds[i])
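    # Added worked example (illustrative): with x=1, y=0, rotation=0 the mixer in
    # cartesian() yields speeds = [1, -1, -1, 1] (FL, FR, BL, BR), a pure strafe;
    # x=0, y=1 gives [1, 1, 1, 1] (drive straight); the rotation term is added on
    # the left wheels and subtracted on the right wheels.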
def updateAnglePID(self, value):
self.pidAngle.setP(self.sd.getNumber('pidAngleP', self.pidAngleDefault['p']))
self.pidAngle.setI(self.sd.getNumber('pidAngleI', self.pidAngleDefault['i']))
self.pidAngle.setD(self.sd.getNumber('pidAngleD', self.pidAngleDefault['d']))
self.pidRotateRate = value
def updateYPID(self, value):
self.pidY.setP(self.sd.getNumber('pidYP', self.pidYDefault['p']))
self.pidY.setI(self.sd.getNumber('pidYI', self.pidYDefault['i']))
self.pidY.setD(self.sd.getNumber('pidYD', self.pidYDefault['d']))
self.pidYRate = value
def curve(self, value):
"""Because this divides by sin(1), an input
in range [-1, 1] will always have an output
range of [-1, 1]. """
value = helpers.deadband(helpers.raiseKeepSign(value, 1), self.jDeadband)
        return math.sin(value) / math.sin(1)
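    # Added worked check (illustrative, assuming helpers.raiseKeepSign(value, 1)
    # leaves the value unchanged): curve(1.0) = sin(1)/sin(1) = 1.0 and
    # curve(0.5) = sin(0.5)/sin(1) ~ 0.57, so [-1, 1] maps back onto [-1, 1].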
def toAngle(self, angle, reset=False):
"""Intended for use in auto."""
if (self.toAngleFirstCall and reset == True):
self.gyro.reset()
self.toAngleFirstCall = False
self.pidAngle.setSetpoint(angle)
self.pidAngle.enable()
print(self.pidAngle.getError())
if (self.pidAngle.getError() < 0.5):
self.pidAngle.disable()
self.toAngleFirstCall = True
self.lastAngle = angle
return True
else:
self.cartesian(0, 0, -self.pidRotateRate)
return False
def toDistance(self, distance):
"""Intended for use in auto."""
if (self.toDistanceFirstCall):
self.encoderY.reset()
self.toDistanceFirstCall = False
self.pidY.setContinuous(False)
self.pidY.setSetpoint(distance)
self.pidY.enable()
# simple P for rotation
rotation = helpers.remap((self.lastAngle - self.gyro.getAngle()), -180, 180, -1, 1)
rotation = rotation * 1
print(self.pidY.getError())
rotation = 0
if (self.pidY.getError() < 0.05):
self.pidY.disable()
self.cartesian(0, 0, 0)
self.toDistanceFirstCall = True
return True
else:
self.cartesian(0, -self.pidYRate, -rotation)
return False
def toTime(self, time, power):
if (self.toTimeFirstCall):
self.timer.start()
self.toTimeFirstCall = False
if (self.timer.hasPeriodPassed(time)):
self.cartesian(0, 0, 0)
return True
else:
self.cartesian(0, -power, 0)
return False
|
py | b4119372666b62f670b7112150f7a8cd65dde477 | """Lambda function to copy a binary from CarbonBlack into the BinaryAlert input S3 bucket."""
# Expects the following environment variables:
# CARBON_BLACK_URL: URL of the CarbonBlack server.
# ENCRYPTED_CARBON_BLACK_API_TOKEN: API token, encrypted with KMS.
# TARGET_S3_BUCKET: Name of the S3 bucket in which to save the copied binary.
import base64
import collections
import json
import logging
import os
import shutil
import subprocess
import tempfile
from typing import Any, Dict, Generator, List
import zipfile
import boto3
from botocore.exceptions import BotoCoreError
import cbapi
from cbapi.errors import ObjectNotFoundError, ServerError
from cbapi.response.models import Binary
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
logging.getLogger('backoff').addHandler(logging.StreamHandler()) # Enable backoff logger.
ENCRYPTED_TOKEN = os.environ['ENCRYPTED_CARBON_BLACK_API_TOKEN']
DECRYPTED_TOKEN = boto3.client('kms').decrypt(
CiphertextBlob=base64.b64decode(ENCRYPTED_TOKEN)
)['Plaintext']
# Establish boto3 and S3 clients at import time so Lambda can cache them for re-use.
CARBON_BLACK = cbapi.CbResponseAPI(
url=os.environ['CARBON_BLACK_URL'], token=DECRYPTED_TOKEN)
CLOUDWATCH = boto3.client('cloudwatch')
S3_BUCKET = boto3.resource('s3').Bucket(os.environ['TARGET_S3_BUCKET'])
SQS = boto3.resource('sqs')
# Each record in the invocation event is parsed into a tuple of MD5, SQS receipt, and receive count
DownloadRecord = collections.namedtuple('DownloadRecord', ['md5', 'sqs_receipt', 'receive_count'])
def _iter_download_records(event: Any) -> Generator[DownloadRecord, None, None]:
"""Generate DownloadRecords from the invocation event."""
for message in event['messages']:
try:
md5 = json.loads(message['body'])['md5']
yield DownloadRecord(md5, message['receipt'], message['receive_count'])
except (json.JSONDecodeError, KeyError, TypeError):
LOGGER.exception('Skipping invalid SQS record: %s', message)
continue
def _download_from_carbon_black(binary: Binary) -> str:
"""Download the binary from CarbonBlack into /tmp.
WARNING: CarbonBlack truncates binaries to 25MB. The MD5 will cover the entire file, but only
the first 25MB of the binary will be downloaded.
Args:
binary: CarbonBlack binary instance.
Returns:
Path where file was downloaded.
"""
download_path = os.path.join(tempfile.gettempdir(), 'carbonblack_{}'.format(binary.md5))
LOGGER.info('Downloading %s to %s', binary.webui_link, download_path)
with binary.file as cb_file, open(download_path, 'wb') as target_file:
shutil.copyfileobj(cb_file, target_file)
return download_path
def _build_metadata(binary: Binary) -> Dict[str, str]:
"""Return basic metadata to make it easier to triage YARA match alerts."""
return {
'carbon_black_group': (
','.join(binary.group) if isinstance(binary.group, list) else binary.group),
'carbon_black_last_seen': binary.last_seen,
'carbon_black_md5': binary.md5,
'carbon_black_os_type': binary.os_type,
'carbon_black_virustotal_score': str(binary.virustotal.score),
'carbon_black_webui_link': binary.webui_link,
'filepath': (
# Throw out any non-ascii characters (S3 metadata must be ascii).
binary.observed_filenames[0].encode('ascii', 'ignore').decode('ascii')
if binary.observed_filenames else '(unknown)'
)
}
def _upload_to_s3(md5: str, local_file_path: str, metadata: Dict[str, str]) -> None:
"""Upload the binary contents to S3 along with the given object metadata.
Args:
md5: CarbonBlack MD5 key (used as the S3 object key).
local_file_path: Path to the file to upload.
metadata: Binary metadata to attach to the S3 object.
Returns:
The newly added S3 object key (based on CarbonBlack's MD5).
"""
s3_object_key = 'carbonblack/{}'.format(md5)
LOGGER.info('Uploading to S3 with key %s', s3_object_key)
with open(local_file_path, 'rb') as target_file:
S3_BUCKET.put_object(Body=target_file, Key=s3_object_key, Metadata=metadata)
def _process_md5(md5: str) -> bool:
"""Download the given file from CarbonBlack and upload to S3, returning True if successful."""
download_path = None
try:
binary = CARBON_BLACK.select(Binary, md5)
download_path = _download_from_carbon_black(binary)
metadata = _build_metadata(binary)
_upload_to_s3(binary.md5, download_path, metadata)
return True
except (BotoCoreError, ObjectNotFoundError, ServerError, zipfile.BadZipFile):
LOGGER.exception('Error downloading %s', md5)
return False
finally:
if download_path:
# Shred downloaded file before exiting.
subprocess.check_call(['shred', '--remove', download_path])
def _delete_sqs_messages(queue_url: str, receipts: List[str]) -> None:
"""Mark a batch of SQS receipts as completed (removing them from the queue)."""
LOGGER.info('Deleting %d SQS receipt(s)', len(receipts))
SQS.Queue(queue_url).delete_messages(
Entries=[
{'Id': str(index), 'ReceiptHandle': receipt} for index, receipt in enumerate(receipts)
]
)
def _publish_metrics(receive_counts: List[int]) -> None:
"""Send a statistic summary of receive counts."""
LOGGER.info('Sending ReceiveCount metrics')
CLOUDWATCH.put_metric_data(
Namespace='BinaryAlert', MetricData=[{
'MetricName': 'DownloadQueueReceiveCount',
'StatisticValues': {
'Minimum': min(receive_counts),
'Maximum': max(receive_counts),
'SampleCount': len(receive_counts),
'Sum': sum(receive_counts)
},
'Unit': 'Count'
}]
)
def download_lambda_handler(event: Dict[str, Any], _: Any) -> None:
"""Lambda function entry point - copy a binary from CarbonBlack into the BinaryAlert S3 bucket.
Args:
event: SQS message batch sent by the dispatcher: {
'messages': [
{
'body': (str) '{"md5": "FILE_MD5"}',
'receipt': (str) SQS message receipt handle,
'receive_count': (int) Approximate number of times this has been received
},
...
],
'queue_url': (str) SQS queue url from which the message originated
}
_: Unused Lambda context
"""
receipts_to_delete = [] # SQS receipts which can be deleted.
receive_counts = [] # A list of message receive counts.
for record in _iter_download_records(event):
if _process_md5(record.md5):
# File was copied successfully - the receipt can be deleted
receipts_to_delete.append(record.sqs_receipt)
receive_counts.append(record.receive_count)
if receipts_to_delete:
_delete_sqs_messages(event['queue_url'], receipts_to_delete)
if receive_counts:
_publish_metrics(receive_counts)
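# Added illustrative invocation event (all values are placeholders):
#   {
#       "messages": [
#           {"body": "{\"md5\": \"0123456789abcdef0123456789abcdef\"}",
#            "receipt": "AQEB...",
#            "receive_count": 1}
#       ],
#       "queue_url": "https://sqs.us-east-1.amazonaws.com/123456789012/downloader_queue"
#   }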
|
py | b41193cbab2a34be1ab01adede89467c0752f02c | """
Module to handle data operations
"""
# limit what's imported when using `from finrl.data import *`
__all__ = [
'converter'
]
|
py | b41194e5a9207794914ec913c34531abf1882b6e | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.data import LanguagePairDataset
from fairseq import utils
from .translation import load_langpair_dataset, TranslationTask
from . import register_task
@register_task('translation_from_pretrained_bart')
class TranslationFromPretrainedBARTTask(TranslationTask):
"""
Translate from source language to target language with a model initialized with a multilingual pretrain.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
TranslationTask.add_args(parser)
parser.add_argument('--langs', required=True, metavar='LANG',
help='comma-separated list of monolingual language, '
'for example, "en,de,fr". These should match the '
'langs from pretraining (and be in the same order). '
'You should always add all pretraining language idx '
'during finetuning.')
parser.add_argument('--prepend-bos', action='store_true',
help='prepend bos token to each sentence, which matches '
'mBART pretraining')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args, src_dict, tgt_dict)
self.langs = args.langs.split(',')
for d in [src_dict, tgt_dict]:
for l in self.langs:
d.add_symbol('[{}]'.format(l))
d.add_symbol('<mask>')
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path, split, src, self.src_dict, tgt, self.tgt_dict,
combine=combine, dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=getattr(self.args, 'max_source_positions', 1024),
max_target_positions=getattr(self.args, 'max_target_positions', 1024),
load_alignments=self.args.load_alignments,
prepend_bos=getattr(self.args, 'prepend_bos', False),
append_source_id=True,
args=self.args,
)
def build_generator(self, models, args):
if getattr(args, 'score_reference', False):
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(
self.target_dictionary,
eos=self.tgt_dict.index('[{}]'.format(self.args.target_lang))
)
else:
from fairseq.sequence_generator import SequenceGenerator
return SequenceGenerator(
models,
self.target_dictionary,
beam_size=getattr(args, 'beam', 5),
max_len_a=getattr(args, 'max_len_a', 0),
max_len_b=getattr(args, 'max_len_b', 200),
min_len=getattr(args, 'min_len', 1),
normalize_scores=(not getattr(args, 'unnormalized', False)),
len_penalty=getattr(args, 'lenpen', 1),
unk_penalty=getattr(args, 'unkpen', 0),
temperature=getattr(args, 'temperature', 1.),
match_source_len=getattr(args, 'match_source_len', False),
no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0),
eos=self.tgt_dict.index('[{}]'.format(self.args.target_lang))
)
def build_dataset_for_inference(self, src_tokens, src_lengths):
src_lang_id = self.source_dictionary.index('[{}]'.format(self.args.source_lang))
source_tokens = []
for s_t in src_tokens:
s_t = torch.cat([s_t, s_t.new(1).fill_(src_lang_id)])
source_tokens.append(s_t)
dataset = LanguagePairDataset(source_tokens, src_lengths, self.source_dictionary)
return dataset
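# Added illustrative CLI usage (values are placeholders; --langs must reproduce the
# pretraining language list and ordering, and the usual model/optimization flags
# are omitted):
#   fairseq-train data-bin/wmt_en_de \
#       --task translation_from_pretrained_bart \
#       --source-lang en --target-lang de \
#       --langs en,de,fr --prepend-bos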
|
py | b411952b9319ec4301c687186ee154a4727ea176 | from uuid import uuid4
import pytest
from pydantic import ValidationError
from orchestrator.db import db
from orchestrator.types import SubscriptionLifecycle
def test_product_model_with_list_union_type_directly_below(
test_product_list_union,
test_product_type_list_union,
test_product_sub_block_two,
sub_two_subscription_1,
):
ProductListUnionInactive, _, ProductListUnion = test_product_type_list_union
_, _, SubBlockTwoForTest = test_product_sub_block_two
list_union_subscription_inactive = ProductListUnionInactive.from_product_id(
product_id=test_product_list_union, customer_id=uuid4()
)
with pytest.raises(ValidationError):
ProductListUnion.from_other_lifecycle(list_union_subscription_inactive, SubscriptionLifecycle.ACTIVE)
new_sub_block_1 = SubBlockTwoForTest.new(
subscription_id=list_union_subscription_inactive.subscription_id, int_field_2=1
)
new_sub_block_2 = SubBlockTwoForTest.new(
subscription_id=list_union_subscription_inactive.subscription_id, int_field_2=2
)
list_union_subscription_inactive.list_union_blocks = [new_sub_block_1, new_sub_block_2]
list_union_subscription_inactive.save()
assert (
list_union_subscription_inactive.diff_product_in_database(list_union_subscription_inactive.product.product_id)
== {}
)
list_union_subscription = ProductListUnion.from_other_lifecycle(
list_union_subscription_inactive, SubscriptionLifecycle.ACTIVE
)
list_union_subscription.save()
list_union_sub_from_database = ProductListUnion.from_subscription(list_union_subscription.subscription_id)
assert type(list_union_sub_from_database) == type(list_union_subscription)
sorted_db_list = sorted(
list_union_sub_from_database.list_union_blocks, key=lambda x: x.owner_subscription_id, reverse=True
)
sorted_sub_list = sorted(
list_union_subscription.list_union_blocks, key=lambda x: x.owner_subscription_id, reverse=True
)
assert sorted_db_list == sorted_sub_list
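    # Assigning a block owned by another subscription directly below this
    # subscription must be rejected when saving (asserted below).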
list_union_subscription.list_union_blocks = [sub_two_subscription_1.test_block]
with pytest.raises(ValueError) as exc:
list_union_subscription.save()
assert (
str(exc)
== "Attempting to save a Foreign `Subscription Instance` directly below a subscription. This is not allowed."
)
def test_product_model_with_list_union_type_directly_below_with_relation_overlap(
test_product_list_union_overlap,
test_product_type_list_union_overlap,
test_product_sub_block_one,
test_product_block_one,
sub_one_subscription_1,
):
ProductListUnionInactive, _, ProductListUnion = test_product_type_list_union_overlap
SubBlockOneForTestInactive, _, _ = test_product_sub_block_one
ProductBlockOneForTestInactive, _, _ = test_product_block_one
list_union_subscription_inactive = ProductListUnionInactive.from_product_id(
product_id=test_product_list_union_overlap, customer_id=uuid4()
)
list_union_subscription_inactive.test_block = ProductBlockOneForTestInactive.new(
subscription_id=list_union_subscription_inactive.subscription_id,
int_field=3,
str_field="",
list_field=[1],
sub_block=SubBlockOneForTestInactive.new(
subscription_id=list_union_subscription_inactive.subscription_id, int_field=3, str_field="2"
),
sub_block_2=SubBlockOneForTestInactive.new(
subscription_id=list_union_subscription_inactive.subscription_id, int_field=3, str_field="2"
),
)
with pytest.raises(ValidationError):
ProductListUnion.from_other_lifecycle(list_union_subscription_inactive, SubscriptionLifecycle.ACTIVE)
new_sub_block_1 = SubBlockOneForTestInactive.new(
subscription_id=list_union_subscription_inactive.subscription_id, int_field=11, str_field="111"
)
new_sub_block_2 = SubBlockOneForTestInactive.new(
subscription_id=list_union_subscription_inactive.subscription_id, int_field=12, str_field="121"
)
list_union_subscription_inactive.list_union_blocks = [new_sub_block_1, new_sub_block_2]
list_union_subscription_inactive.save()
assert (
list_union_subscription_inactive.diff_product_in_database(list_union_subscription_inactive.product.product_id)
== {}
)
list_union_subscription = ProductListUnion.from_other_lifecycle(
list_union_subscription_inactive, SubscriptionLifecycle.ACTIVE
)
list_union_subscription.save()
list_union_sub_from_database = ProductListUnion.from_subscription(list_union_subscription.subscription_id)
assert type(list_union_sub_from_database) == type(list_union_subscription)
assert list_union_sub_from_database.test_block == list_union_subscription.test_block
sorted_db_list_len = len(list_union_sub_from_database.list_union_blocks)
sorted_sub_list_len = len(list_union_subscription.list_union_blocks)
assert sorted_db_list_len != sorted_sub_list_len
    assert sorted_db_list_len == 5  # 3 blocks created via test_block are also included.
list_union_subscription.list_union_blocks = [sub_one_subscription_1.test_block]
with pytest.raises(ValueError) as exc:
list_union_subscription.save()
assert (
str(exc)
== "Attempting to save a Foreign `Subscription Instance` directly below a subscription. This is not allowed."
)
def test_list_union_product_block_as_sub(
test_product_sub_list_union,
test_product_type_sub_list_union,
test_product_block_with_list_union,
test_product_type_sub_one,
sub_one_subscription_1,
sub_two_subscription_1,
):
ProductSubListUnionInactive, _, ProductSubListUnion = test_product_type_sub_list_union
ProductListUnionBlockForTestInactive, _, _ = test_product_block_with_list_union
_, _, ProductSubOne = test_product_type_sub_one
list_union_subscription_inactive = ProductSubListUnionInactive.from_product_id(
product_id=test_product_sub_list_union, customer_id=uuid4()
)
list_union_subscription_inactive.test_block = ProductListUnionBlockForTestInactive.new(
subscription_id=list_union_subscription_inactive.subscription_id
)
list_union_subscription_inactive.save()
list_union_subscription_inactive.test_block.int_field = 1
list_union_subscription_inactive.test_block.str_field = "blah"
list_union_subscription_inactive.test_block.list_union_blocks = [
sub_one_subscription_1.test_block,
sub_two_subscription_1.test_block,
]
list_union_subscription_inactive.test_block.list_field = [2]
list_union_subscription = ProductSubListUnion.from_other_lifecycle(
list_union_subscription_inactive, status=SubscriptionLifecycle.ACTIVE
)
list_union_subscription.save()
    # This commit needs to happen in the test because it is using cached objects.
db.session.commit()
assert list_union_subscription.diff_product_in_database(test_product_sub_list_union) == {}
list_union_sub_from_database = ProductSubListUnion.from_subscription(list_union_subscription.subscription_id)
assert type(list_union_sub_from_database) == type(list_union_subscription)
assert list_union_sub_from_database.test_block.int_field == list_union_subscription.test_block.int_field
assert list_union_sub_from_database.test_block.str_field == list_union_subscription.test_block.str_field
sorted_db_list = sorted(
list_union_sub_from_database.test_block.list_union_blocks, key=lambda x: x.owner_subscription_id, reverse=True
)
sorted_sub_list = sorted(
list_union_subscription_inactive.test_block.list_union_blocks,
key=lambda x: x.owner_subscription_id,
reverse=True,
)
assert sorted_db_list == sorted_sub_list
# TODO #1321: uncomment test code below after SAFE_PARENT_TRANSITIONS_FOR_STATUS check has been re-done
# sub_one_subscription_terminated = ProductSubOne.from_other_lifecycle(
# sub_one_subscription_1, SubscriptionLifecycle.TERMINATED
# )
# # Do not allow subscriptions that have a parent make an unsafe transition.
# with pytest.raises(ValueError):
# sub_one_subscription_terminated.save()
|
py | b41195663a3d6015f9d76a8d3729edfdfff570b6 | # -*- coding: utf-8 -*-
"""
babel.messages.frontend
~~~~~~~~~~~~~~~~~~~~~~~
Frontends for the message extraction functionality.
:copyright: (c) 2013-2021 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import logging
import optparse
import os
import re
import shutil
import sys
import tempfile
from collections import OrderedDict
from configparser import RawConfigParser
from datetime import datetime
from io import StringIO
from locale import getpreferredencoding
from src.babelmsg.core import Locale, UnknownLocaleError
# Assumed import paths (mirroring upstream Babel's package layout) for the VERSION
# string and the localedata module referenced by CommandLineInterface below.
from src.babelmsg import localedata
from src.babelmsg import __version__ as VERSION
from .catalog import Catalog
from .extract import (
DEFAULT_KEYWORDS,
DEFAULT_MAPPING,
check_and_call_extract_file,
extract_from_dir,
)
from .mofile import write_mo
from .pofile import read_po, write_po
from distutils import log as distutils_log
from distutils.cmd import Command as _Command
from distutils.errors import DistutilsOptionError, DistutilsSetupError
def listify_value(arg, split=None):
"""
Make a list out of an argument.
Values from `distutils` argument parsing are always single strings;
values from `optparse` parsing may be lists of strings that may need
to be further split.
No matter the input, this function returns a flat list of whitespace-trimmed
strings, with `None` values filtered out.
>>> listify_value("foo bar")
['foo', 'bar']
>>> listify_value(["foo bar"])
['foo', 'bar']
>>> listify_value([["foo"], "bar"])
['foo', 'bar']
>>> listify_value([["foo"], ["bar", None, "foo"]])
['foo', 'bar', 'foo']
>>> listify_value("foo, bar, quux", ",")
['foo', 'bar', 'quux']
:param arg: A string or a list of strings
:param split: The argument to pass to `str.split()`.
:return:
"""
out = []
if not isinstance(arg, (list, tuple)):
arg = [arg]
for val in arg:
if val is None:
continue
if isinstance(val, (list, tuple)):
out.extend(listify_value(val, split=split))
continue
out.extend(s.strip() for s in str(val).split(split))
assert all(isinstance(val, str) for val in out)
return out
class Command(_Command):
# This class is a small shim between Distutils commands and
# optparse option parsing in the frontend command line.
#: Option name to be input as `args` on the script command line.
as_args = None
#: Options which allow multiple values.
#: This is used by the `optparse` transmogrification code.
multiple_value_options = ()
#: Options which are booleans.
#: This is used by the `optparse` transmogrification code.
# (This is actually used by distutils code too, but is never
# declared in the base class.)
boolean_options = ()
#: Option aliases, to retain standalone command compatibility.
#: Distutils does not support option aliases, but optparse does.
#: This maps the distutils argument name to an iterable of aliases
#: that are usable with optparse.
option_aliases = {}
#: Choices for options that needed to be restricted to specific
#: list of choices.
option_choices = {}
#: Log object. To allow replacement in the script command line runner.
log = distutils_log
def __init__(self, dist=None):
# A less strict version of distutils' `__init__`.
self.distribution = dist
self.initialize_options()
self._dry_run = None
self.verbose = False
self.force = None
self.help = 0
self.finalized = 0
class compile_catalog(Command):
"""Catalog compilation command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import compile_catalog
setup(
...
cmdclass = {'compile_catalog': compile_catalog}
)
.. versionadded:: 0.9
"""
description = "compile message catalogs to binary MO files"
user_options = [
(
"domain=",
"D",
"domains of PO files (space separated list, default 'messages')",
),
("directory=", "d", "path to base directory containing the catalogs"),
("input-file=", "i", "name of the input file"),
(
"output-file=",
"o",
"name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/<domain>.mo')",
),
("locale=", "l", "locale of the catalog to compile"),
("use-fuzzy", "f", "also include fuzzy translations"),
("statistics", None, "print statistics about translations"),
]
boolean_options = ["use-fuzzy", "statistics"]
def initialize_options(self):
self.domain = "messages"
self.directory = None
self.input_file = None
self.output_file = None
self.locale = None
self.use_fuzzy = False
self.statistics = False
def finalize_options(self):
self.domain = listify_value(self.domain)
if not self.input_file and not self.directory:
raise DistutilsOptionError(
"you must specify either the input file " "or the base directory"
)
if not self.output_file and not self.directory:
raise DistutilsOptionError(
"you must specify either the output file " "or the base directory"
)
def run(self):
n_errors = 0
for domain in self.domain:
for catalog, errors in self._run_domain(domain).items():
n_errors += len(errors)
if n_errors:
self.log.error("%d errors encountered." % n_errors)
return 1 if n_errors else 0
def _run_domain(self, domain):
po_files = []
mo_files = []
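        # Catalogs can come from an explicit --input-file, from a single --locale
        # under --directory, or from scanning every locale subdirectory for
        # <domain>.po files.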
if not self.input_file:
if self.locale:
po_files.append(
(
self.locale,
os.path.join(
self.directory, self.locale, "LC_MESSAGES", domain + ".po"
),
)
)
mo_files.append(
os.path.join(
self.directory, self.locale, "LC_MESSAGES", domain + ".mo"
)
)
else:
for locale in os.listdir(self.directory):
po_file = os.path.join(
self.directory, locale, "LC_MESSAGES", domain + ".po"
)
if os.path.exists(po_file):
po_files.append((locale, po_file))
mo_files.append(
os.path.join(
self.directory, locale, "LC_MESSAGES", domain + ".mo"
)
)
else:
po_files.append((self.locale, self.input_file))
if self.output_file:
mo_files.append(self.output_file)
else:
mo_files.append(
os.path.join(
self.directory, self.locale, "LC_MESSAGES", domain + ".mo"
)
)
if not po_files:
raise DistutilsOptionError("no message catalogs found")
catalogs_and_errors = {}
for idx, (locale, po_file) in enumerate(po_files):
mo_file = mo_files[idx]
with open(po_file, "rb") as infile:
catalog = read_po(infile, locale)
if self.statistics:
translated = 0
for message in list(catalog)[1:]:
if message.string:
translated += 1
percentage = 0
if len(catalog):
percentage = translated * 100 // len(catalog)
self.log.info(
"%d of %d messages (%d%%) translated in %s",
translated,
len(catalog),
percentage,
po_file,
)
if catalog.fuzzy and not self.use_fuzzy:
self.log.info("catalog %s is marked as fuzzy, skipping", po_file)
continue
catalogs_and_errors[catalog] = catalog_errors = list(catalog.check())
for message, errors in catalog_errors:
for error in errors:
self.log.error("error: %s:%d: %s", po_file, message.lineno, error)
self.log.info("compiling catalog %s to %s", po_file, mo_file)
with open(mo_file, "wb") as outfile:
write_mo(outfile, catalog, use_fuzzy=self.use_fuzzy)
return catalogs_and_errors
class extract_messages(Command):
"""Message extraction command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import extract_messages
setup(
...
cmdclass = {'extract_messages': extract_messages}
)
"""
description = "extract localizable strings from the project code"
user_options = [
("charset=", None, 'charset to use in the output file (default "utf-8")'),
(
"keywords=",
"k",
"space-separated list of keywords to look for in addition to the "
"defaults (may be repeated multiple times)",
),
("no-default-keywords", None, "do not include the default keywords"),
("mapping-file=", "F", "path to the mapping configuration file"),
(
"no-location",
None,
"do not include location comments with filename and line number",
),
(
"add-location=",
None,
'location lines format. If it is not given or "full", it generates '
'the lines with both file name and line number. If it is "file", '
'the line number part is omitted. If it is "never", it completely '
"suppresses the lines (same as --no-location).",
),
("omit-header", None, 'do not include msgid "" entry in header'),
("output-file=", "o", "name of the output file"),
("width=", "w", "set output line width (default 76)"),
(
"no-wrap",
None,
"do not break long message lines, longer than the output line width, "
"into several lines",
),
("sort-output", None, "generate sorted output (default False)"),
("sort-by-file", None, "sort output by file location (default False)"),
("msgid-bugs-address=", None, "set report address for msgid"),
("copyright-holder=", None, "set copyright holder in output"),
("project=", None, "set project name in output"),
("version=", None, "set project version in output"),
(
"add-comments=",
"c",
"place comment block with TAG (or those preceding keyword lines) in "
"output file. Separate multiple TAGs with commas(,)",
), # TODO: Support repetition of this argument
("strip-comments", "s", "strip the comment TAGs from the comments."),
(
"input-paths=",
None,
"files or directories that should be scanned for messages. Separate multiple "
"files or directories with commas(,)",
), # TODO: Support repetition of this argument
(
"input-dirs=",
None, # TODO (3.x): Remove me.
"alias for input-paths (does allow files as well as directories).",
),
]
boolean_options = [
"no-default-keywords",
"no-location",
"omit-header",
"no-wrap",
"sort-output",
"sort-by-file",
"strip-comments",
]
as_args = "input-paths"
multiple_value_options = ("add-comments", "keywords")
option_aliases = {
"keywords": ("--keyword",),
"mapping-file": ("--mapping",),
"output-file": ("--output",),
"strip-comments": ("--strip-comment-tags",),
}
option_choices = {
"add-location": (
"full",
"file",
"never",
),
}
def initialize_options(self):
self.charset = "utf-8"
self.keywords = None
self.no_default_keywords = False
self.mapping_file = None
self.no_location = False
self.add_location = None
self.omit_header = False
self.output_file = None
self.input_dirs = None
self.input_paths = None
self.width = None
self.no_wrap = False
self.sort_output = False
self.sort_by_file = False
self.msgid_bugs_address = None
self.copyright_holder = None
self.project = None
self.version = None
self.add_comments = None
self.strip_comments = False
self.include_lineno = True
def finalize_options(self):
if self.input_dirs:
if not self.input_paths:
self.input_paths = self.input_dirs
else:
raise DistutilsOptionError(
"input-dirs and input-paths are mutually exclusive"
)
if self.no_default_keywords:
keywords = {}
else:
keywords = DEFAULT_KEYWORDS.copy()
keywords.update(parse_keywords(listify_value(self.keywords)))
self.keywords = keywords
if not self.keywords:
raise DistutilsOptionError(
"you must specify new keywords if you " "disable the default ones"
)
if not self.output_file:
raise DistutilsOptionError("no output file specified")
if self.no_wrap and self.width:
raise DistutilsOptionError(
"'--no-wrap' and '--width' are mutually " "exclusive"
)
if not self.no_wrap and not self.width:
self.width = 76
elif self.width is not None:
self.width = int(self.width)
if self.sort_output and self.sort_by_file:
raise DistutilsOptionError(
"'--sort-output' and '--sort-by-file' " "are mutually exclusive"
)
if self.input_paths:
if isinstance(self.input_paths, str):
self.input_paths = re.split(r",\s*", self.input_paths)
elif self.distribution is not None:
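            # Fall back to the distribution's top-level packages, deduplicated
            # while preserving order via dict.fromkeys().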
self.input_paths = dict.fromkeys(
[k.split(".", 1)[0] for k in (self.distribution.packages or ())]
).keys()
else:
self.input_paths = []
if not self.input_paths:
raise DistutilsOptionError("no input files or directories specified")
for path in self.input_paths:
if not os.path.exists(path):
raise DistutilsOptionError("Input path: %s does not exist" % path)
self.add_comments = listify_value(self.add_comments or (), ",")
if self.distribution:
if not self.project:
self.project = self.distribution.get_name()
if not self.version:
self.version = self.distribution.get_version()
if self.add_location == "never":
self.no_location = True
elif self.add_location == "file":
self.include_lineno = False
def run(self):
mappings = self._get_mappings()
with open(self.output_file, "wb") as outfile:
catalog = Catalog(
project=self.project,
version=self.version,
msgid_bugs_address=self.msgid_bugs_address,
copyright_holder=self.copyright_holder,
charset=self.charset,
)
for path, method_map, options_map in mappings:
def callback(filename, method, options):
if method == "ignore":
return
# If we explicitly provide a full filepath, just use that.
# Otherwise, path will be the directory path and filename
# is the relative path from that dir to the file.
# So we can join those to get the full filepath.
if os.path.isfile(path):
filepath = path
else:
filepath = os.path.normpath(os.path.join(path, filename))
optstr = ""
if options:
optstr = " (%s)" % ", ".join(
['%s="%s"' % (k, v) for k, v in options.items()]
)
self.log.info("extracting messages from %s%s", filepath, optstr)
if os.path.isfile(path):
current_dir = os.getcwd()
extracted = check_and_call_extract_file(
path,
method_map,
options_map,
callback,
self.keywords,
self.add_comments,
self.strip_comments,
current_dir,
)
else:
extracted = extract_from_dir(
path,
method_map,
options_map,
keywords=self.keywords,
comment_tags=self.add_comments,
callback=callback,
strip_comment_tags=self.strip_comments,
)
for filename, lineno, message, comments, context in extracted:
if os.path.isfile(path):
filepath = filename # already normalized
else:
filepath = os.path.normpath(os.path.join(path, filename))
catalog.add(
message,
None,
[(filepath, lineno)],
auto_comments=comments,
context=context,
)
self.log.info("writing PO template file to %s", self.output_file)
write_po(
outfile,
catalog,
width=self.width,
no_location=self.no_location,
omit_header=self.omit_header,
sort_output=self.sort_output,
sort_by_file=self.sort_by_file,
include_lineno=self.include_lineno,
)
def _get_mappings(self):
mappings = []
if self.mapping_file:
with open(self.mapping_file) as fileobj:
method_map, options_map = parse_mapping(fileobj)
for path in self.input_paths:
mappings.append((path, method_map, options_map))
elif getattr(self.distribution, "message_extractors", None):
message_extractors = self.distribution.message_extractors
for path, mapping in message_extractors.items():
if isinstance(mapping, str):
method_map, options_map = parse_mapping(StringIO(mapping))
else:
method_map, options_map = [], {}
for pattern, method, options in mapping:
method_map.append((pattern, method))
options_map[pattern] = options or {}
mappings.append((path, method_map, options_map))
else:
for path in self.input_paths:
mappings.append((path, DEFAULT_MAPPING, {}))
return mappings
def check_message_extractors(dist, name, value):
"""Validate the ``message_extractors`` keyword argument to ``setup()``.
:param dist: the distutils/setuptools ``Distribution`` object
:param name: the name of the keyword argument (should always be
"message_extractors")
:param value: the value of the keyword argument
:raise `DistutilsSetupError`: if the value is not valid
"""
assert name == "message_extractors"
if not isinstance(value, dict):
raise DistutilsSetupError(
'the value of the "message_extractors" ' "parameter must be a dictionary"
)
class init_catalog(Command):
"""New catalog initialization command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import init_catalog
setup(
...
cmdclass = {'init_catalog': init_catalog}
)
"""
description = "create a new catalog based on a POT file"
user_options = [
("domain=", "D", "domain of PO file (default 'messages')"),
("input-file=", "i", "name of the input file"),
("output-dir=", "d", "path to output directory"),
(
"output-file=",
"o",
"name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')",
),
("locale=", "l", "locale for the new localized catalog"),
("width=", "w", "set output line width (default 76)"),
(
"no-wrap",
None,
"do not break long message lines, longer than the output line width, "
"into several lines",
),
]
boolean_options = ["no-wrap"]
def initialize_options(self):
self.output_dir = None
self.output_file = None
self.input_file = None
self.locale = None
self.domain = "messages"
self.no_wrap = False
self.width = None
def finalize_options(self):
if not self.input_file:
raise DistutilsOptionError("you must specify the input file")
if not self.locale:
raise DistutilsOptionError(
"you must provide a locale for the " "new catalog"
)
try:
self._locale = Locale.parse(self.locale)
except UnknownLocaleError as e:
raise DistutilsOptionError(e)
if not self.output_file and not self.output_dir:
raise DistutilsOptionError("you must specify the output directory")
if not self.output_file:
self.output_file = os.path.join(
self.output_dir, self.locale, "LC_MESSAGES", self.domain + ".po"
)
if not os.path.exists(os.path.dirname(self.output_file)):
os.makedirs(os.path.dirname(self.output_file))
if self.no_wrap and self.width:
raise DistutilsOptionError(
"'--no-wrap' and '--width' are mutually " "exclusive"
)
if not self.no_wrap and not self.width:
self.width = 76
elif self.width is not None:
self.width = int(self.width)
def run(self):
self.log.info(
"creating catalog %s based on %s", self.output_file, self.input_file
)
with open(self.input_file, "rb") as infile:
# Although reading from the catalog template, read_po must be fed
# the locale in order to correctly calculate plurals
catalog = read_po(infile, locale=self.locale)
catalog.locale = self._locale
catalog.revision_date = datetime.now()
catalog.fuzzy = False
with open(self.output_file, "wb") as outfile:
write_po(outfile, catalog, width=self.width)
class update_catalog(Command):
"""Catalog merging command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import update_catalog
setup(
...
cmdclass = {'update_catalog': update_catalog}
)
.. versionadded:: 0.9
"""
description = "update message catalogs from a POT file"
user_options = [
("domain=", "D", "domain of PO file (default 'messages')"),
("input-file=", "i", "name of the input file"),
("output-dir=", "d", "path to base directory containing the catalogs"),
(
"output-file=",
"o",
"name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')",
),
        ("omit-header", None, 'do not include msgid "" entry in header'),
("locale=", "l", "locale of the catalog to compile"),
("width=", "w", "set output line width (default 76)"),
(
"no-wrap",
None,
"do not break long message lines, longer than the output line width, "
"into several lines",
),
("ignore-obsolete=", None, "whether to omit obsolete messages from the output"),
(
"init-missing=",
None,
"if any output files are missing, initialize them first",
),
("no-fuzzy-matching", "N", "do not use fuzzy matching"),
("update-header-comment", None, "update target header comment"),
("previous", None, "keep previous msgids of translated messages"),
]
boolean_options = [
"omit-header",
"no-wrap",
"ignore-obsolete",
"init-missing",
"no-fuzzy-matching",
"previous",
"update-header-comment",
]
def initialize_options(self):
self.domain = "messages"
self.input_file = None
self.output_dir = None
self.output_file = None
self.omit_header = False
self.locale = None
self.width = None
self.no_wrap = False
self.ignore_obsolete = False
self.init_missing = False
self.no_fuzzy_matching = False
self.update_header_comment = False
self.previous = False
def finalize_options(self):
if not self.input_file:
raise DistutilsOptionError("you must specify the input file")
if not self.output_file and not self.output_dir:
raise DistutilsOptionError(
"you must specify the output file or " "directory"
)
if self.output_file and not self.locale:
raise DistutilsOptionError("you must specify the locale")
if self.init_missing:
if not self.locale:
raise DistutilsOptionError(
"you must specify the locale for " "the init-missing option to work"
)
try:
self._locale = Locale.parse(self.locale)
except UnknownLocaleError as e:
raise DistutilsOptionError(e)
else:
self._locale = None
if self.no_wrap and self.width:
raise DistutilsOptionError(
"'--no-wrap' and '--width' are mutually " "exclusive"
)
if not self.no_wrap and not self.width:
self.width = 76
elif self.width is not None:
self.width = int(self.width)
if self.no_fuzzy_matching and self.previous:
self.previous = False
def run(self):
po_files = []
if not self.output_file:
if self.locale:
po_files.append(
(
self.locale,
os.path.join(
self.output_dir,
self.locale,
"LC_MESSAGES",
self.domain + ".po",
),
)
)
else:
for locale in os.listdir(self.output_dir):
po_file = os.path.join(
self.output_dir, locale, "LC_MESSAGES", self.domain + ".po"
)
if os.path.exists(po_file):
po_files.append((locale, po_file))
else:
po_files.append((self.locale, self.output_file))
if not po_files:
raise DistutilsOptionError("no message catalogs found")
domain = self.domain
if not domain:
domain = os.path.splitext(os.path.basename(self.input_file))[0]
with open(self.input_file, "rb") as infile:
template = read_po(infile)
for locale, filename in po_files:
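            # With --init-missing, bootstrap a missing catalog from the template
            # first, so locales without an existing PO file can still be updated.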
if self.init_missing and not os.path.exists(filename):
self.log.info(
"creating catalog %s based on %s", filename, self.input_file
)
with open(self.input_file, "rb") as infile:
# Although reading from the catalog template, read_po must
# be fed the locale in order to correctly calculate plurals
catalog = read_po(infile, locale=self.locale)
catalog.locale = self._locale
catalog.revision_date = datetime.now()
catalog.fuzzy = False
with open(filename, "wb") as outfile:
write_po(outfile, catalog)
self.log.info("updating catalog %s based on %s", filename, self.input_file)
with open(filename, "rb") as infile:
catalog = read_po(infile, locale=locale, domain=domain)
catalog.update(
template,
self.no_fuzzy_matching,
update_header_comment=self.update_header_comment,
)
tmpname = os.path.join(
os.path.dirname(filename),
tempfile.gettempprefix() + os.path.basename(filename),
)
try:
with open(tmpname, "wb") as tmpfile:
write_po(
tmpfile,
catalog,
omit_header=self.omit_header,
ignore_obsolete=self.ignore_obsolete,
include_previous=self.previous,
width=self.width,
)
except:
os.remove(tmpname)
raise
try:
os.rename(tmpname, filename)
except OSError:
# We're probably on Windows, which doesn't support atomic
# renames, at least not through Python
# If the error is in fact due to a permissions problem, that
# same error is going to be raised from one of the following
# operations
os.remove(filename)
shutil.copy(tmpname, filename)
os.remove(tmpname)
class CommandLineInterface(object):
"""Command-line interface.
This class provides a simple command-line interface to the message
extraction and PO file generation functionality.
"""
usage = "%%prog %s [options] %s"
version = "%%prog %s" % VERSION
commands = {
"compile": "compile message catalogs to MO files",
"extract": "extract messages from source files and generate a POT file",
"init": "create new message catalogs from a POT file",
"update": "update existing message catalogs from a POT file",
}
command_classes = {
"compile": compile_catalog,
"extract": extract_messages,
"init": init_catalog,
"update": update_catalog,
}
log = None # Replaced on instance level
def run(self, argv=None):
"""Main entry point of the command-line interface.
:param argv: list of arguments passed on the command-line
"""
if argv is None:
argv = sys.argv
self.parser = optparse.OptionParser(
usage=self.usage % ("command", "[args]"), version=self.version
)
self.parser.disable_interspersed_args()
self.parser.print_help = self._help
self.parser.add_option(
"--list-locales",
dest="list_locales",
action="store_true",
help="print all known locales and exit",
)
self.parser.add_option(
"-v",
"--verbose",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
help="print as much as possible",
)
self.parser.add_option(
"-q",
"--quiet",
action="store_const",
dest="loglevel",
const=logging.ERROR,
help="print as little as possible",
)
self.parser.set_defaults(list_locales=False, loglevel=logging.INFO)
options, args = self.parser.parse_args(argv[1:])
self._configure_logging(options.loglevel)
if options.list_locales:
identifiers = localedata.locale_identifiers()
longest = max([len(identifier) for identifier in identifiers])
identifiers.sort()
format = u"%%-%ds %%s" % (longest + 1)
for identifier in identifiers:
locale = Locale.parse(identifier)
output = format % (identifier, locale.english_name)
print(
output.encode(
sys.stdout.encoding or getpreferredencoding() or "ascii",
"replace",
)
)
return 0
if not args:
self.parser.error(
"no valid command or option passed. "
"Try the -h/--help option for more information."
)
cmdname = args[0]
if cmdname not in self.commands:
self.parser.error('unknown command "%s"' % cmdname)
cmdinst = self._configure_command(cmdname, args[1:])
return cmdinst.run()
def _configure_logging(self, loglevel):
self.log = logging.getLogger("babel")
self.log.setLevel(loglevel)
        # Don't add a new handler for every instance initialization (#227); this
        # would cause duplicated output when the CommandLineInterface is used as a
        # normal Python class.
if self.log.handlers:
handler = self.log.handlers[0]
else:
handler = logging.StreamHandler()
self.log.addHandler(handler)
handler.setLevel(loglevel)
formatter = logging.Formatter("%(message)s")
handler.setFormatter(formatter)
def _help(self):
print(self.parser.format_help())
print("commands:")
longest = max([len(command) for command in self.commands])
format = " %%-%ds %%s" % max(8, longest + 1)
commands = sorted(self.commands.items())
for name, description in commands:
print(format % (name, description))
def _configure_command(self, cmdname, argv):
"""
:type cmdname: str
:type argv: list[str]
"""
cmdclass = self.command_classes[cmdname]
cmdinst = cmdclass()
if self.log:
cmdinst.log = self.log # Use our logger, not distutils'.
assert isinstance(cmdinst, Command)
cmdinst.initialize_options()
parser = optparse.OptionParser(
usage=self.usage % (cmdname, ""), description=self.commands[cmdname]
)
as_args = getattr(cmdclass, "as_args", ())
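        # Translate the distutils-style ``user_options`` tuples into optparse
        # options, honouring aliases, restricted choices, boolean flags and
        # multiple-value options declared on the command class.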
for long, short, help in cmdclass.user_options:
name = long.strip("=")
default = getattr(cmdinst, name.replace("-", "_"))
strs = ["--%s" % name]
if short:
strs.append("-%s" % short)
strs.extend(cmdclass.option_aliases.get(name, ()))
choices = cmdclass.option_choices.get(name, None)
if name == as_args:
parser.usage += "<%s>" % name
elif name in cmdclass.boolean_options:
parser.add_option(*strs, action="store_true", help=help)
elif name in cmdclass.multiple_value_options:
parser.add_option(*strs, action="append", help=help, choices=choices)
else:
parser.add_option(*strs, help=help, default=default, choices=choices)
options, args = parser.parse_args(argv)
if as_args:
setattr(options, as_args.replace("-", "_"), args)
for key, value in vars(options).items():
setattr(cmdinst, key, value)
try:
cmdinst.ensure_finalized()
except DistutilsOptionError as err:
parser.error(str(err))
return cmdinst
def main():
return CommandLineInterface().run(sys.argv)
def parse_mapping(fileobj, filename=None):
"""Parse an extraction method mapping from a file-like object.
>>> buf = StringIO('''
... [extractors]
... custom = mypackage.module:myfunc
...
... # Python source files
... [python: **.py]
...
... # Genshi templates
... [genshi: **/templates/**.html]
... include_attrs =
... [genshi: **/templates/**.txt]
... template_class = genshi.template:TextTemplate
... encoding = latin-1
...
... # Some custom extractor
... [custom: **/custom/*.*]
... ''')
>>> method_map, options_map = parse_mapping(buf)
>>> len(method_map)
4
>>> method_map[0]
('**.py', 'python')
>>> options_map['**.py']
{}
>>> method_map[1]
('**/templates/**.html', 'genshi')
>>> options_map['**/templates/**.html']['include_attrs']
''
>>> method_map[2]
('**/templates/**.txt', 'genshi')
>>> options_map['**/templates/**.txt']['template_class']
'genshi.template:TextTemplate'
>>> options_map['**/templates/**.txt']['encoding']
'latin-1'
>>> method_map[3]
('**/custom/*.*', 'mypackage.module:myfunc')
>>> options_map['**/custom/*.*']
{}
:param fileobj: a readable file-like object containing the configuration
text to parse
    :see: `extract_from_dir`
"""
extractors = {}
method_map = []
options_map = {}
parser = RawConfigParser()
parser._sections = OrderedDict(parser._sections) # We need ordered sections
parser.read_file(fileobj, filename)
for section in parser.sections():
if section == "extractors":
extractors = dict(parser.items(section))
else:
method, pattern = [part.strip() for part in section.split(":", 1)]
method_map.append((pattern, method))
options_map[pattern] = dict(parser.items(section))
if extractors:
for idx, (pattern, method) in enumerate(method_map):
if method in extractors:
method = extractors[method]
method_map[idx] = (pattern, method)
return method_map, options_map
def parse_keywords(strings=[]):
"""Parse keywords specifications from the given list of strings.
>>> kw = sorted(parse_keywords(['_', 'dgettext:2', 'dngettext:2,3', 'pgettext:1c,2']).items())
>>> for keyword, indices in kw:
... print((keyword, indices))
('_', None)
('dgettext', (2,))
('dngettext', (2, 3))
('pgettext', ((1, 'c'), 2))
"""
keywords = {}
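    # Each spec is either a bare function name or "name:i[,j...]"; an index with a
    # "c" suffix (e.g. "1c") marks that argument as the message context.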
for string in strings:
if ":" in string:
funcname, indices = string.split(":")
else:
funcname, indices = string, None
if funcname not in keywords:
if indices:
inds = []
for x in indices.split(","):
if x[-1] == "c":
inds.append((int(x[:-1]), "c"))
else:
inds.append(int(x))
indices = tuple(inds)
keywords[funcname] = indices
return keywords
if __name__ == "__main__":
main()
|
py | b41195ed17438fef0fd5dedafcce010d935ab2b2 | """Base classes for objects that can be displayed."""
__all__ = ["Mobject", "Group", "override_animate"]
import copy
import itertools as it
import operator as op
import random
import sys
import types
import warnings
from functools import reduce
from math import ceil
from pathlib import Path
from typing import (
TYPE_CHECKING,
Callable,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from colour import Color
from .. import config
from ..constants import *
from ..utils.color import (
BLACK,
WHITE,
YELLOW_C,
Colors,
color_gradient,
interpolate_color,
)
from ..utils.exceptions import MultiAnimationOverrideException
from ..utils.iterables import list_update, remove_list_redundancies
from ..utils.paths import straight_path
from ..utils.simple_functions import get_parameters
from ..utils.space_ops import (
angle_between_vectors,
normalize,
rotation_matrix,
rotation_matrix_transpose,
)
from .opengl_compatibility import ConvertToOpenGL
# TODO: Explain array_attrs
Updater = Union[Callable[["Mobject"], None], Callable[["Mobject", float], None]]
T = TypeVar("T", bound="Mobject")
if TYPE_CHECKING:
from ..animation.animation import Animation
class Mobject:
"""Mathematical Object: base class for objects that can be displayed on screen.
There is a compatibility layer that allows for
getting and setting generic attributes with ``get_*``
and ``set_*`` methods. See :meth:`set` for more details.
Attributes
----------
submobjects : List[:class:`Mobject`]
The contained objects.
points : :class:`numpy.ndarray`
The points of the objects.
.. seealso::
:class:`~.VMobject`
"""
animation_overrides = {}
@classmethod
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.animation_overrides: Dict[
Type["Animation"], Callable[["Mobject"], "Animation"]
] = {}
cls._add_intrinsic_animation_overrides()
def __init__(self, color=WHITE, name=None, dim=3, target=None, z_index=0):
self.color = Color(color) if color else None
self.name = self.__class__.__name__ if name is None else name
self.dim = dim
self.target = target
self.z_index = z_index
self.point_hash = None
self.submobjects = []
self.updaters = []
self.updating_suspended = False
self.reset_points()
self.generate_points()
self.init_colors()
@classmethod
def animation_override_for(
cls, animation_class: Type["Animation"]
) -> "Optional[Callable[[Mobject, ...], Animation]]":
"""Returns the function defining a specific animation override for this class.
Parameters
----------
animation_class
The animation class for which the override function should be returned.
Returns
-------
Optional[Callable[[Mobject, ...], Animation]]
The function returning the override animation or ``None`` if no such animation
override is defined.
"""
if animation_class in cls.animation_overrides:
return cls.animation_overrides[animation_class]
return None
@classmethod
def _add_intrinsic_animation_overrides(cls):
"""Initializes animation overrides marked with the :func:`~.override_animation`
decorator.
"""
for method_name in dir(cls):
# Ignore dunder methods
if method_name.startswith("__"):
continue
method = getattr(cls, method_name)
if hasattr(method, "_override_animation"):
animation_class = method._override_animation
cls.add_animation_override(animation_class, method)
@classmethod
def add_animation_override(
cls,
animation_class: Type["Animation"],
override_func: "Callable[[Mobject, ...], Animation]",
):
"""Add an animation override.
This does not apply to subclasses.
Parameters
----------
animation_class
The animation type to be overridden
override_func
            The function returning an animation replacing the default animation. It gets
            passed the parameters given to the animation constructor.
Raises
------
MultiAnimationOverrideException
If the overridden animation was already overridden.
"""
if animation_class not in cls.animation_overrides:
cls.animation_overrides[animation_class] = override_func
else:
raise MultiAnimationOverrideException(
f"The animation {animation_class.__name__} for "
f"{cls.__name__} is overridden by more than one method: "
f"{cls.animation_overrides[animation_class].__qualname__} and "
f"{override_func.__qualname__}."
)
@property
def animate(self):
"""Used to animate the application of a method.
.. warning::
Passing multiple animations for the same :class:`Mobject` in one
call to :meth:`~.Scene.play` is discouraged and will most likely
not work properly. Instead of writing an animation like
::
self.play(my_mobject.animate.shift(RIGHT), my_mobject.animate.rotate(PI))
make use of method chaining for ``animate``, meaning::
self.play(my_mobject.animate.shift(RIGHT).rotate(PI))
Keyword arguments that can be passed to :meth:`.Scene.play` can be passed
directly after accessing ``.animate``, like so::
self.play(my_mobject.animate(rate_func=linear).shift(RIGHT))
This is especially useful when animating simultaneous ``.animate`` calls that
you want to behave differently::
self.play(
mobject1.animate(run_time=2).rotate(PI),
mobject2.animate(rate_func=there_and_back).shift(RIGHT),
)
.. seealso::
:func:`override_animate`
Examples
--------
.. manim:: AnimateExample
class AnimateExample(Scene):
def construct(self):
s = Square()
self.play(Create(s))
self.play(s.animate.shift(RIGHT))
self.play(s.animate.scale(2))
self.play(s.animate.rotate(PI / 2))
self.play(Uncreate(s))
.. manim:: AnimateChainExample
class AnimateChainExample(Scene):
def construct(self):
s = Square()
self.play(Create(s))
self.play(s.animate.shift(RIGHT).scale(2).rotate(PI / 2))
self.play(Uncreate(s))
.. manim:: AnimateWithArgsExample
class AnimateWithArgsExample(Scene):
def construct(self):
s = Square()
c = Circle()
VGroup(s, c).arrange(RIGHT, buff=2)
self.add(s, c)
self.play(
s.animate(run_time=2).rotate(PI / 2),
c.animate(rate_func=there_and_back).shift(RIGHT),
)
.. warning::
``.animate``
will interpolate the :class:`~.Mobject` between its points prior to
``.animate`` and its points after applying ``.animate`` to it. This may
result in unexpected behavior when attempting to interpolate along paths,
or rotations.
If you want animations to consider the points between, consider using
:class:`~.ValueTracker` with updaters instead.
"""
return _AnimationBuilder(self)
def __deepcopy__(self, clone_from_id):
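        # ``clone_from_id`` is the deepcopy memo dict; registering the copy before
        # copying attributes lets cyclic references resolve to it instead of
        # recursing indefinitely.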
cls = self.__class__
result = cls.__new__(cls)
clone_from_id[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, clone_from_id))
result.original_id = str(id(self))
return result
def __repr__(self):
if config["renderer"] == "opengl":
return super().__repr__()
else:
return str(self.name)
def reset_points(self):
"""Sets :attr:`points` to be an empty array."""
self.points = np.zeros((0, self.dim))
def init_colors(self):
"""Initializes the colors.
Gets called upon creation. This is an empty method that can be implemented by
subclasses.
"""
pass
def generate_points(self):
"""Initializes :attr:`points` and therefore the shape.
Gets called upon creation. This is an empty method that can be implemented by
subclasses.
"""
pass
def add(self, *mobjects: "Mobject") -> "Mobject":
"""Add mobjects as submobjects.
The mobjects are added to :attr:`submobjects`.
Subclasses of mobject may implement ``+`` and ``+=`` dunder methods.
Parameters
----------
mobjects
The mobjects to add.
Returns
-------
:class:`Mobject`
``self``
Raises
------
:class:`ValueError`
When a mobject tries to add itself.
:class:`TypeError`
When trying to add an object that is not an instance of :class:`Mobject`.
Notes
-----
A mobject cannot contain itself, and it cannot contain a submobject
more than once. If the parent mobject is displayed, the newly-added
submobjects will also be displayed (i.e. they are automatically added
to the parent Scene).
See Also
--------
:meth:`remove`
:meth:`add_to_back`
Examples
--------
::
>>> outer = Mobject()
>>> inner = Mobject()
>>> outer = outer.add(inner)
Duplicates are not added again::
>>> outer = outer.add(inner)
>>> len(outer.submobjects)
1
Adding an object to itself raises an error::
>>> outer.add(outer)
Traceback (most recent call last):
...
ValueError: Mobject cannot contain self
"""
for m in mobjects:
if not isinstance(m, Mobject):
raise TypeError("All submobjects must be of type Mobject")
if m is self:
raise ValueError("Mobject cannot contain self")
self.submobjects = list_update(self.submobjects, mobjects)
return self
def __add__(self, mobject):
raise NotImplementedError
def __iadd__(self, mobject):
raise NotImplementedError
def add_to_back(self, *mobjects: "Mobject") -> "Mobject":
"""Add all passed mobjects to the back of the submobjects.
If :attr:`submobjects` already contains the given mobjects, they just get moved
to the back instead.
Parameters
----------
mobjects
The mobjects to add.
Returns
-------
:class:`Mobject`
``self``
.. note::
Technically, this is done by adding (or moving) the mobjects to
the head of :attr:`submobjects`. The head of this list is rendered
first, which places the corresponding mobjects behind the
subsequent list members.
Raises
------
:class:`ValueError`
When a mobject tries to add itself.
:class:`TypeError`
When trying to add an object that is not an instance of :class:`Mobject`.
Notes
-----
A mobject cannot contain itself, and it cannot contain a submobject
more than once. If the parent mobject is displayed, the newly-added
submobjects will also be displayed (i.e. they are automatically added
to the parent Scene).
See Also
--------
:meth:`remove`
:meth:`add`
"""
if self in mobjects:
raise ValueError("A mobject shouldn't contain itself")
for mobject in mobjects:
if not isinstance(mobject, Mobject):
raise TypeError("All submobjects must be of type Mobject")
self.remove(*mobjects)
# dict.fromkeys() removes duplicates while maintaining order
self.submobjects = list(dict.fromkeys(mobjects)) + self.submobjects
return self
def remove(self, *mobjects: "Mobject") -> "Mobject":
"""Remove :attr:`submobjects`.
The mobjects are removed from :attr:`submobjects`, if they exist.
Subclasses of mobject may implement ``-`` and ``-=`` dunder methods.
Parameters
----------
mobjects
The mobjects to remove.
Returns
-------
:class:`Mobject`
``self``
See Also
--------
:meth:`add`
"""
for mobject in mobjects:
if mobject in self.submobjects:
self.submobjects.remove(mobject)
return self
def __sub__(self, other):
raise NotImplementedError
def __isub__(self, other):
raise NotImplementedError
def set(self, **kwargs) -> "Mobject":
"""Sets attributes.
Mainly to be used along with :attr:`animate` to
animate setting attributes.
In addition to this method, there is a compatibility
layer that allows ``get_*`` and ``set_*`` methods to
get and set generic attributes. For instance::
>>> mob = Mobject()
>>> mob.set_foo(0)
Mobject
>>> mob.get_foo()
0
>>> mob.foo
0
This compatibility layer does not interfere with any
``get_*`` or ``set_*`` methods that are explicitly
defined.
.. warning::
This compatibility layer is for backwards compatibility
and is not guaranteed to stay around. Where applicable,
please prefer getting/setting attributes normally or with
the :meth:`set` method.
Parameters
----------
**kwargs
The attributes and corresponding values to set.
Returns
-------
:class:`Mobject`
``self``
Examples
--------
::
>>> mob = Mobject()
>>> mob.set(foo=0)
Mobject
>>> mob.foo
0
"""
for attr, value in kwargs.items():
setattr(self, attr, value)
return self
def __getattr__(self, attr):
# Add automatic compatibility layer
# between properties and get_* and set_*
# methods.
#
# In python 3.9+ we could change this
# logic to use str.remove_prefix instead.
if attr.startswith("get_"):
# Remove the "get_" prefix
to_get = attr[4:]
def getter(self):
warnings.warn(
"This method is not guaranteed to stay around. Please prefer "
"getting the attribute normally.",
DeprecationWarning,
stacklevel=2,
)
return getattr(self, to_get)
# Return a bound method
return types.MethodType(getter, self)
if attr.startswith("set_"):
# Remove the "set_" prefix
to_set = attr[4:]
def setter(self, value):
warnings.warn(
"This method is not guaranteed to stay around. Please prefer "
"setting the attribute normally or with Mobject.set().",
DeprecationWarning,
stacklevel=2,
)
setattr(self, to_set, value)
return self
# Return a bound method
return types.MethodType(setter, self)
# Unhandled attribute, therefore error
raise AttributeError(f"{type(self).__name__} object has no attribute '{attr}'")
@property
def width(self):
"""The width of the mobject.
Returns
-------
:class:`float`
Examples
--------
.. manim:: WidthExample
class WidthExample(Scene):
def construct(self):
decimal = DecimalNumber().to_edge(UP)
rect = Rectangle(color=BLUE)
rect_copy = rect.copy().set_stroke(GRAY, opacity=0.5)
decimal.add_updater(lambda d: d.set_value(rect.width))
self.add(rect_copy, rect, decimal)
self.play(rect.animate.set(width=7))
self.wait()
See also
--------
:meth:`length_over_dim`
"""
# Get the length across the X dimension
return self.length_over_dim(0)
@width.setter
def width(self, value):
self.scale_to_fit_width(value)
@property
def height(self):
"""The height of the mobject.
Returns
-------
:class:`float`
Examples
--------
.. manim:: HeightExample
class HeightExample(Scene):
def construct(self):
decimal = DecimalNumber().to_edge(UP)
rect = Rectangle(color=BLUE)
rect_copy = rect.copy().set_stroke(GRAY, opacity=0.5)
decimal.add_updater(lambda d: d.set_value(rect.height))
self.add(rect_copy, rect, decimal)
self.play(rect.animate.set(height=5))
self.wait()
See also
--------
:meth:`length_over_dim`
"""
# Get the length across the Y dimension
return self.length_over_dim(1)
@height.setter
def height(self, value):
self.scale_to_fit_height(value)
@property
def depth(self):
"""The depth of the mobject.
Returns
-------
:class:`float`
See also
--------
:meth:`length_over_dim`
"""
# Get the length across the Z dimension
return self.length_over_dim(2)
@depth.setter
def depth(self, value):
self.scale_to_fit_depth(value)
def get_array_attrs(self):
return ["points"]
def apply_over_attr_arrays(self, func):
for attr in self.get_array_attrs():
setattr(self, attr, func(getattr(self, attr)))
return self
# Displaying
def get_image(self, camera=None):
if camera is None:
from ..camera.camera import Camera
camera = Camera()
camera.capture_mobject(self)
return camera.get_image()
def show(self, camera=None):
self.get_image(camera=camera).show()
def save_image(self, name=None):
"""Saves an image of only this :class:`Mobject` at its position to a png
file."""
self.get_image().save(
Path(config.get_dir("video_dir")).joinpath((name or str(self)) + ".png")
)
def copy(self: T) -> T:
"""Create and return an identical copy of the :class:`Mobject` including all
:attr:`submobjects`.
Returns
-------
:class:`Mobject`
The copy.
Note
----
The clone is initially not visible in the Scene, even if the original was.
"""
return copy.deepcopy(self)
def generate_target(self, use_deepcopy=False):
self.target = None # Prevent unbounded linear recursion
if use_deepcopy:
self.target = copy.deepcopy(self)
else:
self.target = self.copy()
return self.target
# Updating
def update(self, dt: float = 0, recursive: bool = True) -> "Mobject":
"""Apply all updaters.
Does nothing if updating is suspended.
Parameters
----------
dt
The parameter ``dt`` to pass to the update functions. Usually this is the
time in seconds since the last call of ``update``.
recursive
Whether to recursively update all submobjects.
Returns
-------
:class:`Mobject`
``self``
See Also
--------
:meth:`add_updater`
:meth:`get_updaters`
"""
if self.updating_suspended:
return self
for updater in self.updaters:
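            # Updaters that declare a ``dt`` parameter are time-based and receive
            # the elapsed time; all others are called with the mobject alone.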
parameters = get_parameters(updater)
if "dt" in parameters:
updater(self, dt)
else:
updater(self)
if recursive:
for submob in self.submobjects:
submob.update(dt, recursive)
return self
def get_time_based_updaters(self) -> List[Updater]:
"""Return all updaters using the ``dt`` parameter.
The updaters use this parameter as the input for difference in time.
Returns
-------
List[:class:`Callable`]
The list of time based updaters.
See Also
--------
:meth:`get_updaters`
:meth:`has_time_based_updater`
"""
return [updater for updater in self.updaters if "dt" in get_parameters(updater)]
def has_time_based_updater(self) -> bool:
"""Test if ``self`` has a time based updater.
Returns
-------
class:`bool`
``True`` if at least one updater uses the ``dt`` parameter, ``False``
otherwise.
See Also
--------
:meth:`get_time_based_updaters`
"""
for updater in self.updaters:
if "dt" in get_parameters(updater):
return True
return False
def get_updaters(self) -> List[Updater]:
"""Return all updaters.
Returns
-------
List[:class:`Callable`]
The list of updaters.
See Also
--------
:meth:`add_updater`
:meth:`get_time_based_updaters`
"""
return self.updaters
def get_family_updaters(self):
return list(it.chain(*(sm.get_updaters() for sm in self.get_family())))
def add_updater(
self,
update_function: Updater,
index: Optional[int] = None,
call_updater: bool = False,
) -> "Mobject":
"""Add an update function to this mobject.
        Update functions, or updaters for short, are functions that are applied to the
Mobject in every frame.
Parameters
----------
update_function
The update function to be added.
Whenever :meth:`update` is called, this update function gets called using
``self`` as the first parameter.
The updater can have a second parameter ``dt``. If it uses this parameter,
it gets called using a second value ``dt``, usually representing the time
in seconds since the last call of :meth:`update`.
index
The index at which the new updater should be added in ``self.updaters``.
In case ``index`` is ``None`` the updater will be added at the end.
call_updater
Whether or not to call the updater initially. If ``True``, the updater will
be called using ``dt=0``.
Returns
-------
:class:`Mobject`
``self``
Examples
--------
.. manim:: NextToUpdater
class NextToUpdater(Scene):
def construct(self):
def dot_position(mobject):
mobject.set_value(dot.get_center()[0])
mobject.next_to(dot)
dot = Dot(RIGHT*3)
label = DecimalNumber()
label.add_updater(dot_position)
self.add(dot, label)
self.play(Rotating(dot, about_point=ORIGIN, angle=TAU, run_time=TAU, rate_func=linear))
.. manim:: DtUpdater
class DtUpdater(Scene):
def construct(self):
line = Square()
#Let the line rotate 90° per second
line.add_updater(lambda mobject, dt: mobject.rotate(dt*90*DEGREES))
self.add(line)
self.wait(2)
See also
--------
:meth:`get_updaters`
:meth:`remove_updater`
:class:`~.UpdateFromFunc`
"""
if index is None:
self.updaters.append(update_function)
else:
self.updaters.insert(index, update_function)
if call_updater:
update_function(self, 0)
return self
def remove_updater(self, update_function: Updater) -> "Mobject":
"""Remove an updater.
If the same updater is applied multiple times, every instance gets removed.
Parameters
----------
update_function
The update function to be removed.
Returns
-------
:class:`Mobject`
``self``
See also
--------
:meth:`clear_updaters`
:meth:`add_updater`
:meth:`get_updaters`
"""
while update_function in self.updaters:
self.updaters.remove(update_function)
return self
def clear_updaters(self, recursive: bool = True) -> "Mobject":
"""Remove every updater.
Parameters
----------
recursive
Whether to recursively call ``clear_updaters`` on all submobjects.
Returns
-------
:class:`Mobject`
``self``
See also
--------
:meth:`remove_updater`
:meth:`add_updater`
:meth:`get_updaters`
"""
self.updaters = []
if recursive:
for submob in self.submobjects:
submob.clear_updaters()
return self
def match_updaters(self, mobject: "Mobject") -> "Mobject":
"""Match the updaters of the given mobject.
Parameters
----------
mobject
The mobject whose updaters get matched.
Returns
-------
:class:`Mobject`
``self``
Note
----
All updaters from submobjects are removed, but only updaters of the given
        mobject are matched, not those of its submobjects.
See also
--------
:meth:`add_updater`
:meth:`clear_updaters`
"""
self.clear_updaters()
for updater in mobject.get_updaters():
self.add_updater(updater)
return self
def suspend_updating(self, recursive: bool = True) -> "Mobject":
"""Disable updating from updaters and animations.
Parameters
----------
recursive
Whether to recursively suspend updating on all submobjects.
Returns
-------
:class:`Mobject`
``self``
See also
--------
:meth:`resume_updating`
:meth:`add_updater`
"""
self.updating_suspended = True
if recursive:
for submob in self.submobjects:
submob.suspend_updating(recursive)
return self
def resume_updating(self, recursive: bool = True) -> "Mobject":
"""Enable updating from updaters and animations.
Parameters
----------
recursive
Whether to recursively enable updating on all submobjects.
Returns
-------
:class:`Mobject`
``self``
See also
--------
:meth:`suspend_updating`
:meth:`add_updater`
"""
self.updating_suspended = False
if recursive:
for submob in self.submobjects:
submob.resume_updating(recursive)
self.update(dt=0, recursive=recursive)
return self
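    # A minimal sketch (not from the original docs) of how the suspend/resume pair
    # above is typically used from within a Scene, assuming ``self`` is the Scene
    # and ``square`` carries a dt-based updater:
    #
    #     square = Square()
    #     square.add_updater(lambda mob, dt: mob.rotate(dt))
    #     self.add(square)
    #     self.wait(1)                  # updater advances the rotation
    #     square.suspend_updating()     # updater stays registered but is not called
    #     self.wait(1)                  # square holds still
    #     square.resume_updating()      # updaters run again, starting with dt=0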
# Transforming operations
def apply_to_family(self, func: Callable[["Mobject"], None]) -> "Mobject":
"""Apply a function to ``self`` and every submobject with points recursively.
Parameters
----------
func
The function to apply to each mobject. ``func`` gets passed the respective
(sub)mobject as parameter.
Returns
-------
:class:`Mobject`
``self``
See also
--------
:meth:`family_members_with_points`
"""
for mob in self.family_members_with_points():
func(mob)
def shift(self, *vectors: np.ndarray) -> "Mobject":
"""Shift by the given vectors.
Parameters
----------
vectors
Vectors to shift by. If multiple vectors are given, they are added
together.
Returns
-------
:class:`Mobject`
``self``
See also
--------
:meth:`move_to`
"""
total_vector = reduce(op.add, vectors)
for mob in self.family_members_with_points():
mob.points = mob.points.astype("float")
mob.points += total_vector
return self
def scale(self, scale_factor: float, **kwargs) -> "Mobject":
r"""Scale the size by a factor.
Default behavior is to scale about the center of the mobject.
Parameters
----------
scale_factor
The scaling factor :math:`\alpha`. If :math:`0 < |\alpha| < 1`, the mobject
will shrink, and for :math:`|\alpha| > 1` it will grow. Furthermore,
if :math:`\alpha < 0`, the mobject is also flipped.
kwargs
Additional keyword arguments passed to
:meth:`apply_points_function_about_point`.
Returns
-------
Mobject
The scaled mobject.
Examples
--------
.. manim:: MobjectScaleExample
:save_last_frame:
class MobjectScaleExample(Scene):
def construct(self):
f1 = Text("F")
f2 = Text("F").scale(2)
f3 = Text("F").scale(0.5)
f4 = Text("F").scale(-1)
vgroup = VGroup(f1, f2, f3, f4).arrange(6 * RIGHT)
self.add(vgroup)
See also
--------
:meth:`move_to`
"""
self.apply_points_function_about_point(
lambda points: scale_factor * points, **kwargs
)
return self
def rotate_about_origin(self, angle, axis=OUT, axes=[]):
"""Rotates the :class:`~.Mobject` about the ORIGIN, which is at [0,0,0]."""
return self.rotate(angle, axis, about_point=ORIGIN)
def rotate(
self,
angle,
axis=OUT,
about_point: Optional[Sequence[float]] = None,
**kwargs,
):
"""Rotates the :class:`~.Mobject` about a certain point."""
rot_matrix = rotation_matrix(angle, axis)
self.apply_points_function_about_point(
lambda points: np.dot(points, rot_matrix.T), about_point, **kwargs
)
return self
def flip(self, axis=UP, **kwargs):
"""Flips/Mirrors an mobject about its center.
Examples
--------
.. manim:: FlipExample
:save_last_frame:
class FlipExample(Scene):
def construct(self):
s= Line(LEFT, RIGHT+UP).shift(4*LEFT)
self.add(s)
s2= s.copy().flip()
self.add(s2)
"""
return self.rotate(TAU / 2, axis, **kwargs)
def stretch(self, factor, dim, **kwargs):
def func(points):
points[:, dim] *= factor
return points
self.apply_points_function_about_point(func, **kwargs)
return self
def apply_function(self, function, **kwargs):
# Default to applying matrix about the origin, not mobjects center
if len(kwargs) == 0:
kwargs["about_point"] = ORIGIN
self.apply_points_function_about_point(
lambda points: np.apply_along_axis(function, 1, points), **kwargs
)
return self
def apply_function_to_position(self, function):
self.move_to(function(self.get_center()))
return self
def apply_function_to_submobject_positions(self, function):
for submob in self.submobjects:
submob.apply_function_to_position(function)
return self
def apply_matrix(self, matrix, **kwargs):
# Default to applying matrix about the origin, not mobjects center
if ("about_point" not in kwargs) and ("about_edge" not in kwargs):
kwargs["about_point"] = ORIGIN
full_matrix = np.identity(self.dim)
matrix = np.array(matrix)
full_matrix[: matrix.shape[0], : matrix.shape[1]] = matrix
self.apply_points_function_about_point(
lambda points: np.dot(points, full_matrix.T), **kwargs
)
return self
def apply_complex_function(self, function, **kwargs):
"""Applies a complex function to a :class:`Mobject`.
The x and y coordinates correspond to the real and imaginary parts respectively.
Example
-------
.. manim:: ApplyFuncExample
class ApplyFuncExample(Scene):
def construct(self):
circ = Circle().scale(1.5)
circ_ref = circ.copy()
circ.apply_complex_function(
lambda x: np.exp(x*1j)
)
t = ValueTracker(0)
circ.add_updater(
lambda x: x.become(circ_ref.copy().apply_complex_function(
lambda x: np.exp(x+t.get_value()*1j)
)).set_color(BLUE)
)
self.add(circ_ref)
self.play(TransformFromCopy(circ_ref, circ))
self.play(t.animate.set_value(TAU), run_time=3)
"""
def R3_func(point):
x, y, z = point
xy_complex = function(complex(x, y))
return [xy_complex.real, xy_complex.imag, z]
return self.apply_function(R3_func)
def wag(self, direction=RIGHT, axis=DOWN, wag_factor=1.0):
for mob in self.family_members_with_points():
alphas = np.dot(mob.points, np.transpose(axis))
alphas -= min(alphas)
alphas /= max(alphas)
alphas = alphas ** wag_factor
mob.points += np.dot(
alphas.reshape((len(alphas), 1)),
np.array(direction).reshape((1, mob.dim)),
)
return self
def reverse_points(self):
for mob in self.family_members_with_points():
mob.apply_over_attr_arrays(lambda arr: np.array(list(reversed(arr))))
return self
def repeat(self, count: int):
"""This can make transition animations nicer"""
def repeat_array(array):
return reduce(lambda a1, a2: np.append(a1, a2, axis=0), [array] * count)
for mob in self.family_members_with_points():
mob.apply_over_attr_arrays(repeat_array)
return self
# In place operations.
# Note, much of these are now redundant with default behavior of
# above methods
def apply_points_function_about_point(
self, func, about_point=None, about_edge=None
):
if about_point is None:
if about_edge is None:
about_edge = ORIGIN
about_point = self.get_critical_point(about_edge)
for mob in self.family_members_with_points():
mob.points -= about_point
mob.points = func(mob.points)
mob.points += about_point
return self
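    # Illustrative sketch (not part of the documented API): the transform helpers
    # above all funnel through apply_points_function_about_point, so the keyword
    # forms are interchangeable. Assuming an existing mobject ``mob``:
    #
    #     mob.scale(2, about_edge=LEFT)
    #     # ...is equivalent to...
    #     mob.apply_points_function_about_point(
    #         lambda points: 2 * points,
    #         about_point=mob.get_critical_point(LEFT),
    #     )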
def rotate_in_place(self, angle, axis=OUT):
# redundant with default behavior of rotate now.
return self.rotate(angle, axis=axis)
def scale_in_place(self, scale_factor, **kwargs):
# Redundant with default behavior of scale now.
return self.scale(scale_factor, **kwargs)
def scale_about_point(self, scale_factor, point):
# Redundant with default behavior of scale now.
return self.scale(scale_factor, about_point=point)
def pose_at_angle(self, **kwargs):
self.rotate(TAU / 14, RIGHT + UP, **kwargs)
return self
# Positioning methods
def center(self):
self.shift(-self.get_center())
return self
def align_on_border(self, direction, buff=DEFAULT_MOBJECT_TO_EDGE_BUFFER):
"""Direction just needs to be a vector pointing towards side or
corner in the 2d plane.
"""
target_point = np.sign(direction) * (
config["frame_x_radius"],
config["frame_y_radius"],
0,
)
point_to_align = self.get_critical_point(direction)
shift_val = target_point - point_to_align - buff * np.array(direction)
shift_val = shift_val * abs(np.sign(direction))
self.shift(shift_val)
return self
def to_corner(self, corner=LEFT + DOWN, buff=DEFAULT_MOBJECT_TO_EDGE_BUFFER):
return self.align_on_border(corner, buff)
def to_edge(self, edge=LEFT, buff=DEFAULT_MOBJECT_TO_EDGE_BUFFER):
return self.align_on_border(edge, buff)
def next_to(
self,
mobject_or_point,
direction=RIGHT,
buff=DEFAULT_MOBJECT_TO_MOBJECT_BUFFER,
aligned_edge=ORIGIN,
submobject_to_align=None,
index_of_submobject_to_align=None,
coor_mask=np.array([1, 1, 1]),
):
"""Move this :class:`~.Mobject` next to another's :class:`~.Mobject` or coordinate.
Examples
--------
.. manim:: GeometricShapes
:save_last_frame:
class GeometricShapes(Scene):
def construct(self):
d = Dot()
c = Circle()
s = Square()
t = Triangle()
d.next_to(c, RIGHT)
s.next_to(c, LEFT)
t.next_to(c, DOWN)
self.add(d, c, s, t)
"""
if isinstance(mobject_or_point, Mobject):
mob = mobject_or_point
if index_of_submobject_to_align is not None:
target_aligner = mob[index_of_submobject_to_align]
else:
target_aligner = mob
target_point = target_aligner.get_critical_point(aligned_edge + direction)
else:
target_point = mobject_or_point
if submobject_to_align is not None:
aligner = submobject_to_align
elif index_of_submobject_to_align is not None:
aligner = self[index_of_submobject_to_align]
else:
aligner = self
point_to_align = aligner.get_critical_point(aligned_edge - direction)
self.shift((target_point - point_to_align + buff * direction) * coor_mask)
return self
def shift_onto_screen(self, **kwargs):
space_lengths = [config["frame_x_radius"], config["frame_y_radius"]]
for vect in UP, DOWN, LEFT, RIGHT:
dim = np.argmax(np.abs(vect))
buff = kwargs.get("buff", DEFAULT_MOBJECT_TO_EDGE_BUFFER)
max_val = space_lengths[dim] - buff
edge_center = self.get_edge_center(vect)
if np.dot(edge_center, vect) > max_val:
self.to_edge(vect, **kwargs)
return self
def is_off_screen(self):
if self.get_left()[0] > config["frame_x_radius"]:
return True
if self.get_right()[0] < -config["frame_x_radius"]:
return True
if self.get_bottom()[1] > config["frame_y_radius"]:
return True
if self.get_top()[1] < -config["frame_y_radius"]:
return True
return False
def stretch_about_point(self, factor, dim, point):
return self.stretch(factor, dim, about_point=point)
def stretch_in_place(self, factor, dim):
# Now redundant with stretch
return self.stretch(factor, dim)
def rescale_to_fit(self, length, dim, stretch=False, **kwargs):
old_length = self.length_over_dim(dim)
if old_length == 0:
return self
if stretch:
self.stretch(length / old_length, dim, **kwargs)
else:
self.scale(length / old_length, **kwargs)
return self
def scale_to_fit_width(self, width, **kwargs):
"""Scales the :class:`~.Mobject` to fit a width while keeping height/depth proportional.
Returns
-------
:class:`Mobject`
``self``
Examples
--------
::
>>> from manim import *
>>> sq = Square()
>>> sq.height
2.0
>>> sq.scale_to_fit_width(5)
Square
>>> sq.width
5.0
>>> sq.height
5.0
"""
return self.rescale_to_fit(width, 0, stretch=False, **kwargs)
def stretch_to_fit_width(self, width, **kwargs):
"""Stretches the :class:`~.Mobject` to fit a width, not keeping height/depth proportional.
Returns
-------
:class:`Mobject`
``self``
Examples
--------
::
>>> from manim import *
>>> sq = Square()
>>> sq.height
2.0
>>> sq.stretch_to_fit_width(5)
Square
>>> sq.width
5.0
>>> sq.height
2.0
"""
return self.rescale_to_fit(width, 0, stretch=True, **kwargs)
def scale_to_fit_height(self, height, **kwargs):
"""Scales the :class:`~.Mobject` to fit a height while keeping width/depth proportional.
Returns
-------
:class:`Mobject`
``self``
Examples
--------
::
>>> from manim import *
>>> sq = Square()
>>> sq.width
2.0
>>> sq.scale_to_fit_height(5)
Square
>>> sq.height
5.0
>>> sq.width
5.0
"""
return self.rescale_to_fit(height, 1, stretch=False, **kwargs)
def stretch_to_fit_height(self, height, **kwargs):
"""Stretches the :class:`~.Mobject` to fit a height, not keeping width/depth proportional.
Returns
-------
:class:`Mobject`
``self``
Examples
--------
::
>>> from manim import *
>>> sq = Square()
>>> sq.width
2.0
>>> sq.stretch_to_fit_height(5)
Square
>>> sq.height
5.0
>>> sq.width
2.0
"""
return self.rescale_to_fit(height, 1, stretch=True, **kwargs)
def scale_to_fit_depth(self, depth, **kwargs):
"""Scales the :class:`~.Mobject` to fit a depth while keeping width/height proportional."""
return self.rescale_to_fit(depth, 2, stretch=False, **kwargs)
def stretch_to_fit_depth(self, depth, **kwargs):
"""Stretches the :class:`~.Mobject` to fit a depth, not keeping width/height proportional."""
return self.rescale_to_fit(depth, 2, stretch=True, **kwargs)
def set_coord(self, value, dim, direction=ORIGIN):
curr = self.get_coord(dim, direction)
shift_vect = np.zeros(self.dim)
shift_vect[dim] = value - curr
self.shift(shift_vect)
return self
def set_x(self, x, direction=ORIGIN):
"""Set x value of the center of the :class:`~.Mobject` (``int`` or ``float``)"""
return self.set_coord(x, 0, direction)
def set_y(self, y, direction=ORIGIN):
"""Set y value of the center of the :class:`~.Mobject` (``int`` or ``float``)"""
return self.set_coord(y, 1, direction)
def set_z(self, z, direction=ORIGIN):
"""Set z value of the center of the :class:`~.Mobject` (``int`` or ``float``)"""
return self.set_coord(z, 2, direction)
def space_out_submobjects(self, factor=1.5, **kwargs):
self.scale(factor, **kwargs)
for submob in self.submobjects:
submob.scale(1.0 / factor)
return self
def move_to(
self, point_or_mobject, aligned_edge=ORIGIN, coor_mask=np.array([1, 1, 1])
):
"""Move center of the :class:`~.Mobject` to certain coordinate."""
if isinstance(point_or_mobject, Mobject):
target = point_or_mobject.get_critical_point(aligned_edge)
else:
target = point_or_mobject
point_to_align = self.get_critical_point(aligned_edge)
self.shift((target - point_to_align) * coor_mask)
return self
def replace(self, mobject, dim_to_match=0, stretch=False):
if not mobject.get_num_points() and not mobject.submobjects:
raise Warning("Attempting to replace mobject with no points")
if stretch:
self.stretch_to_fit_width(mobject.width)
self.stretch_to_fit_height(mobject.height)
else:
self.rescale_to_fit(
mobject.length_over_dim(dim_to_match), dim_to_match, stretch=False
)
self.shift(mobject.get_center() - self.get_center())
return self
def surround(
self, mobject: "Mobject", dim_to_match=0, stretch=False, buff=MED_SMALL_BUFF
):
self.replace(mobject, dim_to_match, stretch)
length = mobject.length_over_dim(dim_to_match)
self.scale_in_place((length + buff) / length)
return self
def put_start_and_end_on(self, start, end):
curr_start, curr_end = self.get_start_and_end()
curr_vect = curr_end - curr_start
if np.all(curr_vect == 0):
raise Exception("Cannot position endpoints of closed loop")
target_vect = np.array(end) - np.array(start)
axis = (
normalize(np.cross(curr_vect, target_vect))
if np.linalg.norm(np.cross(curr_vect, target_vect)) != 0
else OUT
)
self.scale(
np.linalg.norm(target_vect) / np.linalg.norm(curr_vect),
about_point=curr_start,
)
self.rotate(
angle_between_vectors(curr_vect, target_vect),
about_point=curr_start,
axis=axis,
)
self.shift(start - curr_start)
return self
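    # A short usage sketch for put_start_and_end_on (names are illustrative only):
    #
    #     p1, p2 = Dot(LEFT * 2), Dot(UP + RIGHT * 2)
    #     connector = Line(ORIGIN, RIGHT)
    #     connector.put_start_and_end_on(p1.get_center(), p2.get_center())
    #     # The line is scaled, rotated and shifted so its stroke now runs
    #     # from p1's center to p2's center.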
# Background rectangle
def add_background_rectangle(
self, color: Colors = BLACK, opacity: float = 0.75, **kwargs
):
"""Add a BackgroundRectangle as submobject.
The BackgroundRectangle is added behind other submobjects.
        This can be used to increase the mobject's visibility in front of a noisy background.
Parameters
----------
color
The color of the BackgroundRectangle
opacity
The opacity of the BackgroundRectangle
kwargs
Additional keyword arguments passed to the BackgroundRectangle constructor
Returns
-------
:class:`Mobject`
``self``
See Also
--------
:meth:`add_to_back`
:class:`~.BackgroundRectangle`
"""
# TODO, this does not behave well when the mobject has points,
# since it gets displayed on top
from ..mobject.shape_matchers import BackgroundRectangle
self.background_rectangle = BackgroundRectangle(
self, color=color, fill_opacity=opacity, **kwargs
)
self.add_to_back(self.background_rectangle)
return self
def add_background_rectangle_to_submobjects(self, **kwargs):
for submobject in self.submobjects:
submobject.add_background_rectangle(**kwargs)
return self
def add_background_rectangle_to_family_members_with_points(self, **kwargs):
for mob in self.family_members_with_points():
mob.add_background_rectangle(**kwargs)
return self
# Color functions
def set_color(self, color: Color = YELLOW_C, family: bool = True):
"""Condition is function which takes in one arguments, (x, y, z).
Here it just recurses to submobjects, but in subclasses this
should be further implemented based on the the inner workings
of color
"""
if family:
for submob in self.submobjects:
submob.set_color(color, family=family)
self.color = Color(color)
return self
def set_color_by_gradient(self, *colors):
self.set_submobject_colors_by_gradient(*colors)
return self
def set_colors_by_radial_gradient(
self, center=None, radius=1, inner_color=WHITE, outer_color=BLACK
):
self.set_submobject_colors_by_radial_gradient(
center, radius, inner_color, outer_color
)
return self
def set_submobject_colors_by_gradient(self, *colors):
if len(colors) == 0:
raise ValueError("Need at least one color")
elif len(colors) == 1:
return self.set_color(*colors)
mobs = self.family_members_with_points()
new_colors = color_gradient(colors, len(mobs))
for mob, color in zip(mobs, new_colors):
mob.set_color(color, family=False)
return self
def set_submobject_colors_by_radial_gradient(
self, center=None, radius=1, inner_color=WHITE, outer_color=BLACK
):
if center is None:
center = self.get_center()
for mob in self.family_members_with_points():
t = np.linalg.norm(mob.get_center() - center) / radius
t = min(t, 1)
mob_color = interpolate_color(inner_color, outer_color, t)
mob.set_color(mob_color, family=False)
return self
def to_original_color(self):
self.set_color(self.color)
return self
def fade_to(self, color, alpha, family=True):
if self.get_num_points() > 0:
new_color = interpolate_color(self.get_color(), color, alpha)
self.set_color(new_color, family=False)
if family:
for submob in self.submobjects:
submob.fade_to(color, alpha)
return self
def fade(self, darkness=0.5, family=True):
if family:
for submob in self.submobjects:
submob.fade(darkness, family)
return self
def get_color(self):
"""Returns the color of the :class:`~.Mobject`"""
return self.color
##
def save_state(self):
"""Save the current state (position, color & size). Can be restored with :meth:`~.Mobject.restore`."""
if hasattr(self, "saved_state"):
# Prevent exponential growth of data
self.saved_state = None
self.saved_state = self.copy()
return self
def restore(self):
"""Restores the state that was previously saved with :meth:`~.Mobject.save_state`."""
if not hasattr(self, "saved_state") or self.save_state is None:
raise Exception("Trying to restore without having saved")
self.become(self.saved_state)
return self
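    # A small sketch of the save/restore round trip (assuming a Circle mobject):
    #
    #     circle = Circle().save_state()
    #     circle.shift(2 * RIGHT).set_color(RED)
    #     circle.restore()   # back to the saved position, color and size
    #
    # The same snapshot is what animations such as :class:`~.Restore` interpolate to.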
##
def reduce_across_dimension(self, points_func, reduce_func, dim):
points = self.get_all_points()
if points is None or len(points) == 0:
# Note, this default means things like empty VGroups
# will appear to have a center at [0, 0, 0]
return 0
values = points_func(points[:, dim])
return reduce_func(values)
def nonempty_submobjects(self):
return [
submob
for submob in self.submobjects
if len(submob.submobjects) != 0 or len(submob.points) != 0
]
def get_merged_array(self, array_attr):
result = getattr(self, array_attr)
for submob in self.submobjects:
result = np.append(result, submob.get_merged_array(array_attr), axis=0)
return result
def get_all_points(self):
return self.get_merged_array("points")
# Getters
def get_points_defining_boundary(self):
return self.get_all_points()
def get_num_points(self):
return len(self.points)
def get_extremum_along_dim(self, points=None, dim=0, key=0):
if points is None:
points = self.get_points_defining_boundary()
values = points[:, dim]
if key < 0:
return np.min(values)
elif key == 0:
return (np.min(values) + np.max(values)) / 2
else:
return np.max(values)
def get_critical_point(self, direction):
"""Picture a box bounding the :class:`~.Mobject`. Such a box has
        9 'critical points': 4 corners, 4 edge centers, and the
        center. This returns one of them, along the given direction.
::
sample = Arc(start_angle=PI/7, angle = PI/5)
# These are all equivalent
max_y_1 = sample.get_top()[1]
max_y_2 = sample.get_critical_point(UP)[1]
max_y_3 = sample.get_extremum_along_dim(dim=1, key=1)
"""
result = np.zeros(self.dim)
all_points = self.get_points_defining_boundary()
if len(all_points) == 0:
return result
for dim in range(self.dim):
result[dim] = self.get_extremum_along_dim(
all_points, dim=dim, key=direction[dim]
)
return result
# Pseudonyms for more general get_critical_point method
def get_edge_center(self, direction) -> np.ndarray:
"""Get edge coordinates for certain direction."""
return self.get_critical_point(direction)
def get_corner(self, direction) -> np.ndarray:
"""Get corner coordinates for certain direction."""
return self.get_critical_point(direction)
def get_center(self) -> np.ndarray:
"""Get center coordinates"""
return self.get_critical_point(np.zeros(self.dim))
def get_center_of_mass(self):
return np.apply_along_axis(np.mean, 0, self.get_all_points())
def get_boundary_point(self, direction):
all_points = self.get_points_defining_boundary()
index = np.argmax(np.dot(all_points, np.array(direction).T))
return all_points[index]
def get_midpoint(self) -> np.ndarray:
"""Get coordinates of the middle of the path that forms the :class:`~.Mobject`.
Examples
--------
.. manim:: AngleMidPoint
:save_last_frame:
class AngleMidPoint(Scene):
def construct(self):
line1 = Line(ORIGIN, 2*RIGHT)
line2 = Line(ORIGIN, 2*RIGHT).rotate_about_origin(80*DEGREES)
a = Angle(line1, line2, radius=1.5, other_angle=False)
d = Dot(a.get_midpoint()).set_color(RED)
self.add(line1, line2, a, d)
self.wait()
"""
return self.point_from_proportion(0.5)
def get_top(self) -> np.ndarray:
"""Get top coordinates of a box bounding the :class:`~.Mobject`"""
return self.get_edge_center(UP)
def get_bottom(self) -> np.ndarray:
"""Get bottom coordinates of a box bounding the :class:`~.Mobject`"""
return self.get_edge_center(DOWN)
def get_right(self) -> np.ndarray:
"""Get right coordinates of a box bounding the :class:`~.Mobject`"""
return self.get_edge_center(RIGHT)
def get_left(self) -> np.ndarray:
"""Get left coordinates of a box bounding the :class:`~.Mobject`"""
return self.get_edge_center(LEFT)
def get_zenith(self) -> np.ndarray:
"""Get zenith coordinates of a box bounding a 3D :class:`~.Mobject`."""
return self.get_edge_center(OUT)
def get_nadir(self) -> np.ndarray:
"""Get nadir (opposite the zenith) coordinates of a box bounding a 3D :class:`~.Mobject`."""
return self.get_edge_center(IN)
def length_over_dim(self, dim):
"""Measure the length of an :class:`~.Mobject` in a certain direction."""
return self.reduce_across_dimension(
np.max, np.max, dim
) - self.reduce_across_dimension(np.min, np.min, dim)
def get_coord(self, dim, direction=ORIGIN):
"""Meant to generalize ``get_x``, ``get_y`` and ``get_z``"""
return self.get_extremum_along_dim(dim=dim, key=direction[dim])
def get_x(self, direction=ORIGIN) -> np.float64:
"""Returns x coordinate of the center of the :class:`~.Mobject` as ``float``"""
return self.get_coord(0, direction)
def get_y(self, direction=ORIGIN) -> np.float64:
"""Returns y coordinate of the center of the :class:`~.Mobject` as ``float``"""
return self.get_coord(1, direction)
def get_z(self, direction=ORIGIN) -> np.float64:
"""Returns z coordinate of the center of the :class:`~.Mobject` as ``float``"""
return self.get_coord(2, direction)
def get_start(self):
"""Returns the point, where the stroke that surrounds the :class:`~.Mobject` starts."""
self.throw_error_if_no_points()
return np.array(self.points[0])
def get_end(self):
"""Returns the point, where the stroke that surrounds the :class:`~.Mobject` ends."""
self.throw_error_if_no_points()
return np.array(self.points[-1])
def get_start_and_end(self):
"""Returns starting and ending point of a stroke as a ``tuple``."""
return self.get_start(), self.get_end()
def point_from_proportion(self, alpha):
raise NotImplementedError("Please override in a child class.")
def proportion_from_point(self, point):
raise NotImplementedError("Please override in a child class.")
def get_pieces(self, n_pieces):
template = self.copy()
template.submobjects = []
alphas = np.linspace(0, 1, n_pieces + 1)
return Group(
*(
template.copy().pointwise_become_partial(self, a1, a2)
for a1, a2 in zip(alphas[:-1], alphas[1:])
)
)
def get_z_index_reference_point(self):
# TODO, better place to define default z_index_group?
z_index_group = getattr(self, "z_index_group", self)
return z_index_group.get_center()
def has_points(self) -> bool:
"""Check if :class:`~.Mobject` contains points."""
return len(self.points) > 0
def has_no_points(self) -> bool:
"""Check if :class:`~.Mobject` *does not* contains points."""
return not self.has_points()
# Match other mobject properties
def match_color(self, mobject: "Mobject"):
"""Match the color with the color of another :class:`~.Mobject`."""
return self.set_color(mobject.get_color())
def match_dim_size(self, mobject: "Mobject", dim, **kwargs):
"""Match the specified dimension with the dimension of another :class:`~.Mobject`."""
return self.rescale_to_fit(mobject.length_over_dim(dim), dim, **kwargs)
def match_width(self, mobject: "Mobject", **kwargs):
"""Match the width with the width of another :class:`~.Mobject`."""
return self.match_dim_size(mobject, 0, **kwargs)
def match_height(self, mobject: "Mobject", **kwargs):
"""Match the height with the height of another :class:`~.Mobject`."""
return self.match_dim_size(mobject, 1, **kwargs)
def match_depth(self, mobject: "Mobject", **kwargs):
"""Match the depth with the depth of another :class:`~.Mobject`."""
return self.match_dim_size(mobject, 2, **kwargs)
def match_coord(self, mobject: "Mobject", dim, direction=ORIGIN):
"""Match the coordinates with the coordinates of another :class:`~.Mobject`."""
return self.set_coord(
mobject.get_coord(dim, direction),
dim=dim,
direction=direction,
)
def match_x(self, mobject: "Mobject", direction=ORIGIN):
"""Match x coord. to the x coord. of another :class:`~.Mobject`."""
return self.match_coord(mobject, 0, direction)
def match_y(self, mobject: "Mobject", direction=ORIGIN):
"""Match y coord. to the x coord. of another :class:`~.Mobject`."""
return self.match_coord(mobject, 1, direction)
def match_z(self, mobject: "Mobject", direction=ORIGIN):
"""Match z coord. to the x coord. of another :class:`~.Mobject`."""
return self.match_coord(mobject, 2, direction)
def align_to(
self,
mobject_or_point: Union["Mobject", np.ndarray, List],
direction=ORIGIN,
alignment_vect=UP,
):
"""Aligns mobject to another :class:`~.Mobject` in a certain direction.
Examples:
mob1.align_to(mob2, UP) moves mob1 vertically so that its
        top edge lines up with mob2's top edge.
mob1.align_to(mob2, alignment_vect = RIGHT) moves mob1
        horizontally so that its center is directly above/below
the center of mob2
"""
if isinstance(mobject_or_point, Mobject):
point = mobject_or_point.get_critical_point(direction)
else:
point = mobject_or_point
for dim in range(self.dim):
if direction[dim] != 0:
self.set_coord(point[dim], dim, direction)
return self
# Family matters
def __getitem__(self, value):
self_list = self.split()
if isinstance(value, slice):
GroupClass = self.get_group_class()
return GroupClass(*self_list.__getitem__(value))
return self_list.__getitem__(value)
def __iter__(self):
return iter(self.split())
def __len__(self):
return len(self.split())
def get_group_class(self):
return Group
def split(self):
result = [self] if len(self.points) > 0 else []
return result + self.submobjects
def get_family(self, recurse=True):
sub_families = list(map(Mobject.get_family, self.submobjects))
all_mobjects = [self] + list(it.chain(*sub_families))
return remove_list_redundancies(all_mobjects)
def family_members_with_points(self):
return [m for m in self.get_family() if m.get_num_points() > 0]
def arrange(
self,
direction: Sequence[float] = RIGHT,
buff=DEFAULT_MOBJECT_TO_MOBJECT_BUFFER,
center=True,
**kwargs,
):
"""Sorts :class:`~.Mobject` next to each other on screen.
Examples
--------
.. manim:: Example
:save_last_frame:
class Example(Scene):
def construct(self):
s1 = Square()
s2 = Square()
s3 = Square()
s4 = Square()
x = VGroup(s1, s2, s3, s4).set_x(0).arrange(buff=1.0)
self.add(x)
"""
for m1, m2 in zip(self.submobjects, self.submobjects[1:]):
m2.next_to(m1, direction, buff, **kwargs)
if center:
self.center()
return self
def arrange_in_grid(
self,
rows: Optional[int] = None,
cols: Optional[int] = None,
buff: Union[float, Tuple[float, float]] = MED_SMALL_BUFF,
cell_alignment: np.ndarray = ORIGIN,
row_alignments: Optional[str] = None, # "ucd"
col_alignments: Optional[str] = None, # "lcr"
row_heights: Optional[Iterable[Optional[float]]] = None,
col_widths: Optional[Iterable[Optional[float]]] = None,
flow_order: str = "rd",
**kwargs,
) -> "Mobject":
"""Arrange submobjects in a grid.
Parameters
----------
rows
The number of rows in the grid.
cols
The number of columns in the grid.
buff
The gap between grid cells. To specify a different buffer in the horizontal and
vertical directions, a tuple of two values can be given - ``(row, col)``.
cell_alignment
The way each submobject is aligned in its grid cell.
row_alignments
The vertical alignment for each row (top to bottom). Accepts the following characters: ``"u"`` -
up, ``"c"`` - center, ``"d"`` - down.
col_alignments
The horizontal alignment for each column (left to right). Accepts the following characters ``"l"`` - left,
``"c"`` - center, ``"r"`` - right.
row_heights
Defines a list of heights for certain rows (top to bottom). If the list contains
``None``, the corresponding row will fit its height automatically based
on the highest element in that row.
col_widths
Defines a list of widths for certain columns (left to right). If the list contains ``None``, the
corresponding column will fit its width automatically based on the widest element in that column.
flow_order
The order in which submobjects fill the grid. Can be one of the following values:
"rd", "dr", "ld", "dl", "ru", "ur", "lu", "ul". ("rd" -> fill rightwards then downwards)
Returns
-------
Mobject
The mobject.
Raises
------
ValueError
If ``rows`` and ``cols`` are too small to fit all submobjects.
ValueError
If :code:`cols`, :code:`col_alignments` and :code:`col_widths` or :code:`rows`,
:code:`row_alignments` and :code:`row_heights` have mismatching sizes.
Notes
-----
        If only one of ``cols`` and ``rows`` is set, the other one will implicitly be chosen big
enough to fit all submobjects. If neither is set, they will be chosen to be about the same,
tending towards ``cols`` > ``rows`` (simply because videos are wider than they are high).
If both ``cell_alignment`` and ``row_alignments`` / ``col_alignments`` are
defined, the latter has higher priority.
Examples
--------
.. manim:: ExampleBoxes
:save_last_frame:
class ExampleBoxes(Scene):
def construct(self):
boxes=VGroup(*[Square() for s in range(0,6)])
boxes.arrange_in_grid(rows=2, buff=0.1)
self.add(boxes)
.. manim:: ArrangeInGrid
:save_last_frame:
class ArrangeInGrid(Scene):
def construct(self):
#Add some numbered boxes:
np.random.seed(3)
boxes = VGroup(*[
Rectangle(WHITE, np.random.random()+.5, np.random.random()+.5).add(Text(str(i+1)).scale(0.5))
for i in range(22)
])
self.add(boxes)
boxes.arrange_in_grid(
buff=(0.25,0.5),
col_alignments="lccccr",
row_alignments="uccd",
col_widths=[2, *[None]*4, 2],
flow_order="dr"
)
"""
from .geometry import Line
mobs = self.submobjects.copy()
start_pos = self.get_center()
# get cols / rows values if given (implicitly)
def init_size(num, alignments, sizes):
if num is not None:
return num
if alignments is not None:
return len(alignments)
if sizes is not None:
return len(sizes)
cols = init_size(cols, col_alignments, col_widths)
rows = init_size(rows, row_alignments, row_heights)
# calculate rows cols
if rows is None and cols is None:
cols = ceil(np.sqrt(len(mobs)))
# make the grid as close to quadratic as possible.
            # choosing cols first can result in cols>rows.
            # This is favored over rows>cols since in general
            # the scene is wider than it is high.
if rows is None:
rows = ceil(len(mobs) / cols)
if cols is None:
cols = ceil(len(mobs) / rows)
if rows * cols < len(mobs):
raise ValueError("Too few rows and columns to fit all submobjetcs.")
# rows and cols are now finally valid.
if isinstance(buff, tuple):
buff_x = buff[0]
buff_y = buff[1]
else:
buff_x = buff_y = buff
# Initialize alignments correctly
def init_alignments(alignments, num, mapping, name, dir):
if alignments is None:
# Use cell_alignment as fallback
return [cell_alignment * dir] * num
if len(alignments) != num:
raise ValueError(f"{name}_alignments has a mismatching size.")
alignments = list(alignments)
for i in range(num):
alignments[i] = mapping[alignments[i]]
return alignments
row_alignments = init_alignments(
row_alignments, rows, {"u": UP, "c": ORIGIN, "d": DOWN}, "row", RIGHT
)
col_alignments = init_alignments(
col_alignments, cols, {"l": LEFT, "c": ORIGIN, "r": RIGHT}, "col", UP
)
# Now row_alignment[r] + col_alignment[c] is the alignment in cell [r][c]
mapper = {
"dr": lambda r, c: (rows - r - 1) + c * rows,
"dl": lambda r, c: (rows - r - 1) + (cols - c - 1) * rows,
"ur": lambda r, c: r + c * rows,
"ul": lambda r, c: r + (cols - c - 1) * rows,
"rd": lambda r, c: (rows - r - 1) * cols + c,
"ld": lambda r, c: (rows - r - 1) * cols + (cols - c - 1),
"ru": lambda r, c: r * cols + c,
"lu": lambda r, c: r * cols + (cols - c - 1),
}
if flow_order not in mapper:
raise ValueError(
                'flow_order must be one of the following values: "dr", "rd", "ld", "dl", "ru", "ur", "lu", "ul".'
)
flow_order = mapper[flow_order]
# Reverse row_alignments and row_heights. Necessary since the
# grid filling is handled bottom up for simplicity reasons.
def reverse(maybe_list):
if maybe_list is not None:
maybe_list = list(maybe_list)
maybe_list.reverse()
return maybe_list
row_alignments = reverse(row_alignments)
row_heights = reverse(row_heights)
placeholder = Mobject()
# Used to fill up the grid temporarily, doesn't get added to the scene.
# In this case a Mobject is better than None since it has width and height
# properties of 0.
mobs.extend([placeholder] * (rows * cols - len(mobs)))
grid = [[mobs[flow_order(r, c)] for c in range(cols)] for r in range(rows)]
        measured_heights = [
max(grid[r][c].height for c in range(cols)) for r in range(rows)
]
measured_widths = [
max(grid[r][c].width for r in range(rows)) for c in range(cols)
]
# Initialize row_heights / col_widths correctly using measurements as fallback
def init_sizes(sizes, num, measures, name):
if sizes is None:
sizes = [None] * num
if len(sizes) != num:
raise ValueError(f"{name} has a mismatching size.")
return [
sizes[i] if sizes[i] is not None else measures[i] for i in range(num)
]
        heights = init_sizes(row_heights, rows, measured_heights, "row_heights")
widths = init_sizes(col_widths, cols, measured_widths, "col_widths")
x, y = 0, 0
for r in range(rows):
x = 0
for c in range(cols):
if grid[r][c] is not placeholder:
alignment = row_alignments[r] + col_alignments[c]
line = Line(
x * RIGHT + y * UP,
(x + widths[c]) * RIGHT + (y + heights[r]) * UP,
)
# Use a mobject to avoid rewriting align inside
# box code that Mobject.move_to(Mobject) already
# includes.
grid[r][c].move_to(line, alignment)
x += widths[c] + buff_x
y += heights[r] + buff_y
self.move_to(start_pos)
return self
def sort(self, point_to_num_func=lambda p: p[0], submob_func=None):
"""Sorts the list of :attr:`submobjects` by a function defined by ``submob_func``."""
if submob_func is None:
def submob_func(m):
return point_to_num_func(m.get_center())
self.submobjects.sort(key=submob_func)
return self
def shuffle(self, recursive=False):
"""Shuffles the list of :attr:`submobjects`."""
if recursive:
for submob in self.submobjects:
submob.shuffle(recursive=True)
random.shuffle(self.submobjects)
def invert(self, recursive=False):
"""Inverts the list of :attr:`submobjects`.
Parameters
----------
recursive
If ``True``, all submobject lists of this mobject's family are inverted.
Examples
--------
        .. manim:: InvertSubmobjectsExample
            class InvertSubmobjectsExample(Scene):
def construct(self):
s = VGroup(*[Dot().shift(i*0.1*RIGHT) for i in range(-20,20)])
s2 = s.copy()
s2.invert()
s2.shift(DOWN)
self.play(Write(s), Write(s2))
"""
if recursive:
for submob in self.submobjects:
submob.invert(recursive=True)
list.reverse(self.submobjects)
# Just here to keep from breaking old scenes.
def arrange_submobjects(self, *args, **kwargs):
"""Arrange the position of :attr:`submobjects` with a small buffer.
Examples
--------
        .. manim:: ArrangeSubmobjectsExample
            :save_last_frame:
            class ArrangeSubmobjectsExample(Scene):
def construct(self):
s= VGroup(*[Dot().shift(i*0.1*RIGHT*np.random.uniform(-1,1)+UP*np.random.uniform(-1,1)) for i in range(0,15)])
s.shift(UP).set_color(BLUE)
s2= s.copy().set_color(RED)
s2.arrange_submobjects()
s2.shift(DOWN)
self.add(s,s2)
"""
return self.arrange(*args, **kwargs)
def sort_submobjects(self, *args, **kwargs):
"""Sort the :attr:`submobjects`"""
return self.sort(*args, **kwargs)
def shuffle_submobjects(self, *args, **kwargs):
"""Shuffles the order of :attr:`submobjects`
Examples
--------
.. manim:: ShuffleSubmobjectsExample
class ShuffleSubmobjectsExample(Scene):
def construct(self):
s= VGroup(*[Dot().shift(i*0.1*RIGHT) for i in range(-20,20)])
s2= s.copy()
s2.shuffle_submobjects()
s2.shift(DOWN)
self.play(Write(s), Write(s2))
"""
return self.shuffle(*args, **kwargs)
# Alignment
def align_data(self, mobject: "Mobject"):
self.null_point_align(mobject)
self.align_submobjects(mobject)
self.align_points(mobject)
# Recurse
for m1, m2 in zip(self.submobjects, mobject.submobjects):
m1.align_data(m2)
def get_point_mobject(self, center=None):
"""The simplest :class:`~.Mobject` to be transformed to or from self.
        Should be a point of the appropriate type.
"""
msg = f"get_point_mobject not implemented for {self.__class__.__name__}"
raise NotImplementedError(msg)
def align_points(self, mobject):
count1 = self.get_num_points()
count2 = mobject.get_num_points()
if count1 < count2:
self.align_points_with_larger(mobject)
elif count2 < count1:
mobject.align_points_with_larger(self)
return self
def align_points_with_larger(self, larger_mobject):
raise NotImplementedError("Please override in a child class.")
def align_submobjects(self, mobject):
mob1 = self
mob2 = mobject
n1 = len(mob1.submobjects)
n2 = len(mob2.submobjects)
mob1.add_n_more_submobjects(max(0, n2 - n1))
mob2.add_n_more_submobjects(max(0, n1 - n2))
return self
def null_point_align(self, mobject: "Mobject") -> "Mobject":
"""If a :class:`~.Mobject` with points is being aligned to
one without, treat both as groups, and push
the one with points into its own submobjects
list.
"""
for m1, m2 in (self, mobject), (mobject, self):
if m1.has_no_points() and m2.has_points():
m2.push_self_into_submobjects()
return self
def push_self_into_submobjects(self):
copy = self.copy()
copy.submobjects = []
self.reset_points()
self.add(copy)
return self
def add_n_more_submobjects(self, n):
if n == 0:
return
curr = len(self.submobjects)
if curr == 0:
# If empty, simply add n point mobjects
self.submobjects = [self.get_point_mobject() for k in range(n)]
return
target = curr + n
# TODO, factor this out to utils so as to reuse
# with VMobject.insert_n_curves
repeat_indices = (np.arange(target) * curr) // target
split_factors = [sum(repeat_indices == i) for i in range(curr)]
new_submobs = []
for submob, sf in zip(self.submobjects, split_factors):
new_submobs.append(submob)
for _ in range(1, sf):
new_submobs.append(submob.copy().fade(1))
self.submobjects = new_submobs
return self
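    # Worked example for the splitting logic above (values computed from the code,
    # not taken from the original docs): with curr = 3 existing submobjects and
    # n = 2 requested additions, target = 5,
    # repeat_indices = (np.arange(5) * 3) // 5 = [0, 0, 1, 1, 2] and
    # split_factors = [2, 2, 1], so the first two submobjects each receive one
    # faded copy while the third is left untouched.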
def repeat_submobject(self, submob):
return submob.copy()
def interpolate(self, mobject1, mobject2, alpha, path_func=straight_path):
"""Turns this :class:`~.Mobject` into an interpolation between ``mobject1``
and ``mobject2``.
Examples
--------
.. manim:: DotInterpolation
:save_last_frame:
class DotInterpolation(Scene):
def construct(self):
dotL = Dot(color=DARK_GREY)
dotL.shift(2 * RIGHT)
dotR = Dot(color=WHITE)
dotR.shift(2 * LEFT)
dotMiddle = VMobject().interpolate(dotL, dotR, alpha=0.3)
self.add(dotL, dotR, dotMiddle)
"""
self.points = path_func(mobject1.points, mobject2.points, alpha)
self.interpolate_color(mobject1, mobject2, alpha)
return self
def interpolate_color(self, mobject1, mobject2, alpha):
raise NotImplementedError("Please override in a child class.")
def become(self, mobject: "Mobject", copy_submobjects: bool = True):
"""Edit points, colors and submobjects to be identical
to another :class:`~.Mobject`
Examples
--------
.. manim:: BecomeScene
class BecomeScene(Scene):
def construct(self):
circ = Circle(fill_color=RED, fill_opacity=0.8)
square = Square(fill_color=BLUE, fill_opacity=0.2)
self.add(circ)
self.wait(0.5)
circ.become(square)
self.wait(0.5)
"""
self.align_data(mobject)
for sm1, sm2 in zip(self.get_family(), mobject.get_family()):
sm1.points = np.array(sm2.points)
sm1.interpolate_color(sm1, sm2, 1)
return self
def match_points(self, mobject: "Mobject", copy_submobjects: bool = True):
"""Edit points, positions, and submobjects to be identical
to another :class:`~.Mobject`, while keeping the style unchanged.
Examples
--------
.. manim:: MatchPointsScene
class MatchPointsScene(Scene):
def construct(self):
circ = Circle(fill_color=RED, fill_opacity=0.8)
square = Square(fill_color=BLUE, fill_opacity=0.2)
self.add(circ)
self.wait(0.5)
self.play(circ.animate.match_points(square))
self.wait(0.5)
"""
self.align_data(mobject)
for sm1, sm2 in zip(self.get_family(), mobject.get_family()):
sm1.points = np.array(sm2.points)
return self
# Errors
def throw_error_if_no_points(self):
if self.has_no_points():
caller_name = sys._getframe(1).f_code.co_name
raise Exception(
f"Cannot call Mobject.{caller_name} for a Mobject with no points"
)
# About z-index
def set_z_index(
self,
z_index_value: float,
family: bool = True,
) -> "VMobject":
"""Sets the :class:`~.Mobject`'s :attr:`z_index` to the value specified in `z_index_value`.
Parameters
----------
z_index_value
            The new value of :attr:`z_index` to be set.
family
If ``True``, the :attr:`z_index` value of all submobjects is also set.
Returns
-------
:class:`Mobject`
The Mobject itself, after :attr:`z_index` is set. For chaining purposes. (Returns `self`.)
Examples
--------
.. manim:: SetZIndex
:save_last_frame:
class SetZIndex(Scene):
def construct(self):
text = Text('z_index = 3', color = PURE_RED).shift(UP).set_z_index(3)
square = Square(2, fill_opacity=1).set_z_index(2)
tex = Tex(r'zIndex = 1', color = PURE_BLUE).shift(DOWN).set_z_index(1)
circle = Circle(radius = 1.7, color = GREEN, fill_opacity = 1) # z_index = 0
# Displaying order is now defined by z_index values
self.add(text)
self.add(square)
self.add(tex)
self.add(circle)
"""
if family:
for submob in self.submobjects:
submob.set_z_index(z_index_value, family=family)
self.z_index = z_index_value
return self
def set_z_index_by_z_coordinate(self):
"""Sets the :class:`~.Mobject`'s z coordinate to the value of :attr:`z_index`.
Returns
-------
:class:`Mobject`
The Mobject itself, after :attr:`z_index` is set. (Returns `self`.)
"""
z_coord = self.get_center()[-1]
self.set_z_index(z_coord)
return self
class Group(Mobject, metaclass=ConvertToOpenGL):
"""Groups together multiple :class:`Mobjects <.Mobject>`."""
def __init__(self, *mobjects, **kwargs):
super().__init__(**kwargs)
self.add(*mobjects)
class _AnimationBuilder:
def __init__(self, mobject):
self.mobject = mobject
self.mobject.generate_target()
self.overridden_animation = None
self.is_chaining = False
self.methods = []
# Whether animation args can be passed
self.cannot_pass_args = False
self.anim_args = {}
def __call__(self, **kwargs):
if self.cannot_pass_args:
raise ValueError(
"Animation arguments must be passed before accessing methods and can only be passed once"
)
self.anim_args = kwargs
self.cannot_pass_args = True
return self
def __getattr__(self, method_name):
method = getattr(self.mobject.target, method_name)
self.methods.append(method)
has_overridden_animation = hasattr(method, "_override_animate")
if (self.is_chaining and has_overridden_animation) or self.overridden_animation:
raise NotImplementedError(
"Method chaining is currently not supported for "
"overridden animations"
)
def update_target(*method_args, **method_kwargs):
if has_overridden_animation:
self.overridden_animation = method._override_animate(
self.mobject,
*method_args,
anim_args=self.anim_args,
**method_kwargs,
)
else:
method(*method_args, **method_kwargs)
return self
self.is_chaining = True
self.cannot_pass_args = True
return update_target
def build(self):
from ..animation.transform import _MethodAnimation
if self.overridden_animation:
anim = self.overridden_animation
else:
anim = _MethodAnimation(self.mobject, self.methods)
for attr, value in self.anim_args.items():
setattr(anim, attr, value)
return anim
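# Sketch of the builder flow above, assuming the usual ``Mobject.animate`` property
# constructs an _AnimationBuilder for its mobject:
#
#     square.animate(run_time=2).shift(UP).scale(0.5)
#
# __call__ records the animation kwargs, each attribute access records the method
# applied to the generated target, and build() wraps the recorded calls in a
# _MethodAnimation (unless an overridden animation short-circuits the chain).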
def override_animate(method):
r"""Decorator for overriding method animations.
This allows to specify a method (returning an :class:`~.Animation`)
which is called when the decorated method is used with the ``.animate`` syntax
for animating the application of a method.
.. seealso::
:attr:`Mobject.animate`
.. note::
Overridden methods cannot be combined with normal or other overridden
methods using method chaining with the ``.animate`` syntax.
Examples
--------
.. manim:: AnimationOverrideExample
class CircleWithContent(VGroup):
def __init__(self, content):
super().__init__()
self.circle = Circle()
self.content = content
self.add(self.circle, content)
content.move_to(self.circle.get_center())
def clear_content(self):
self.remove(self.content)
self.content = None
@override_animate(clear_content)
def _clear_content_animation(self, anim_args=None):
if anim_args is None:
anim_args = {}
anim = Uncreate(self.content, **anim_args)
self.clear_content()
return anim
class AnimationOverrideExample(Scene):
def construct(self):
t = Text("hello!")
my_mobject = CircleWithContent(t)
self.play(Create(my_mobject))
self.play(my_mobject.animate.clear_content())
self.wait()
"""
def decorator(animation_method):
method._override_animate = animation_method
return animation_method
return decorator
|
py | b411965a03e4d121edc6e90f717af7d710024935 | # Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import numpy as np
from matplotlib import pyplot as plt
def plot_with_wrapping(
x,
y,
ax=None,
low=0.0,
high=360.0,
linestyle="-",
marker=None,
color=None,
label=None,
rasterized=False,
):
"""Plot a line on an axis that deals with angle wrapping. Normally, using
    pyplot will blindly connect points that wrap around 360 degrees, e.g.,
going from 357 degrees to 2 degrees. This will create a strong vertical
line through the plot that connects the two points, while actually these
    two coordinates should be connected by going through 360/0 deg (i.e.,
going up from 357 deg to 360 deg and then reappearing at 0 deg, and
connecting to 2 deg). This function automates this process and ensures
angles are wrapped around 0/360 deg appropriately.
Args:
        x (iterable): NumPy array or list containing indices/time stamps of
the data.
        y (iterable): NumPy array containing direction/angle data that
requires 360 deg angle wrapping. These are typically wind directions
or nacelle headings.
ax (plt.Axis, optional): Axis object of the matplotlib.pyplot class.
The line will be plotted on this axis. If None specified, will create
a figure and axis, and plot the line in there. Defaults to None.
low (float, optional): Lower limit at which the angles should be
wrapped. When using degrees, this should be typically 0.0 deg for wind
directions and nacelle directions (i.e., compass directions). When using
vane signals, this is typically -180.0 deg. When using radians,
this should be the equivalent in radians (e.g., 0 or - np.pi).
Defaults to 0.0.
high (float, optional): Upper limit at which the angles should be
wrapped. When using degrees, this should be 360.0 deg for wind
directions and nacelle directions (i.e., compass directions).
When using radians, this should be the equivalent in radians.
Defaults to 360.0.
linestyle (str, optional): Linestyle for the plot. Defaults to "-".
marker (str, optional): Marker style for the plot. If None is
specified, will not use markers. Defaults to None.
color (str, optional): Color of the line and markers. Defaults to
"black".
label (string, optional): Label for the line and markers. If None is
specified, will not label the line. Defaults to None.
        rasterized (bool, optional): Rasterize the plot/line and thereby remove
its vectorized properties. This can help reduce the size of a .pdf or
.eps file when this figure is saved, at the cost of a potential
reduction in image quality.
Returns:
ax: Axis object of the matplotlib.pyplot class on which the line (and
optionally markers) are plotted.
"""
# Create figure, if not provided
if ax is None:
fig, ax = plt.subplots()
if color is None:
# Use matplotlib's internal color cycler
color = ax._get_lines.prop_cycler.__next__()['color']
if (low >= high):
raise UserWarning("'low' must be lower than 'high'.")
# Format inputs to numpy arrays
x = np.array(x, copy=True)
y = np.array(y, copy=True, dtype=float) - low # Normalize at 0
high_norm = high - low
y = np.remainder(y, high_norm)
# Initialize empty arrays
xw = np.array(x, copy=True)[0:0]
yw = np.array(y, copy=True)[0:0]
# Deal with wrapping
id_wrap_array = np.where(np.abs(np.diff(y)) > high_norm / 2.0)[0]
id_min = 0
for id_wrap in id_wrap_array:
# Step size in x direction
dx = x[id_wrap+1] - x[id_wrap]
# Wrap around 0 deg
if np.diff(y)[id_wrap] > high_norm / 2.0:
dy = y[id_wrap] - y[id_wrap + 1] + high_norm
xtp = x[id_wrap] + dx * (y[id_wrap]) / dy # transition point
xw = np.hstack([xw, x[id_min:id_wrap + 1], xtp - 0.001 * dx, xtp, xtp + 0.001 * dx])
yw = np.hstack([yw, y[id_min:id_wrap + 1], 0.0, np.nan, high_norm])
# Wrap around 360 deg
elif np.diff(y)[id_wrap] < - high_norm / 2.0:
dy = y[id_wrap+1] - y[id_wrap] + high_norm
xtp = x[id_wrap] + dx * (high_norm - y[id_wrap]) / dy # transition point
xw = np.hstack([xw, x[id_min:id_wrap + 1], xtp - 0.001 * dx, xtp, xtp + 0.001 * dx])
yw = np.hstack([yw, y[id_min:id_wrap + 1], high_norm, np.nan, 0.0])
id_min = id_wrap + 1
# Append remaining data
xw = np.hstack([xw, x[id_min::]])
yw = np.hstack([yw, y[id_min::]])
# Reintroduce offset from 'low'
yw = yw + low
y = y + low
# Now plot lines, without markers
if (marker is None):
# Plot without marker, but with label
ax.plot(xw, yw, linestyle=linestyle, color=color, label=label, rasterized=rasterized)
else:
# Plot lines, without markers
ax.plot(xw, yw, linestyle=linestyle, color=color, rasterized=rasterized)
# Now plot markers, only at non-transition points
ax.scatter(x, y, marker=marker, color=color, rasterized=rasterized)
# Now add a placeholder (empty) line with right marker for the legend
if label is not None:
ax.plot(
xw[0:0],
yw[0:0],
linestyle=linestyle,
marker=marker,
label=label,
color=color,
rasterized=rasterized
)
return ax
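# A minimal, self-contained demo (hypothetical helper, not part of the module API)
# showing the wrapping behaviour: plt.plot alone would draw a vertical jump where
# the signal crosses 360/0 deg, while plot_with_wrapping inserts a clean break.
def _demo_plot_with_wrapping():
    time = np.arange(0, 100)
    wind_dir = np.remainder(350.0 + 0.5 * time, 360.0)  # crosses the 360/0 deg boundary
    fig, ax = plt.subplots()
    plot_with_wrapping(time, wind_dir, ax=ax, marker=".", label="wind direction")
    ax.set_xlabel("Time (s)")
    ax.set_ylabel("Wind direction (deg)")
    ax.legend()
    return fig, ax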
def plot_floris_layout(fi, turbine_names=None, plot_terrain=True):
"""Plot the wind farm layout and turbine performance curves for the
floris object of interest. This visualization function includes some
useful checks such as checking which turbine curves are identical,
and then plot those accordingly. It also includes the plotting of
different hub heights through a background colormap.
Args:
fi (FlorisInterface): The FLORIS object
        turbine_names (iterable, optional): List of turbine names, with
each entry being a string. It is recommended that this is something
like one or two letters, and then a number to indicate the turbine.
For example, A01, A02, A03, ... If None is specified, will assume
turbine names T01, T02, T03, .... Defaults to None.
plot_terrain (bool, optional): Plot the terrain as a colormap.
Defaults to True.
Returns:
        fig (plt.Figure): Figure containing the farm layout and the turbine
            power and thrust coefficient curves.
        ax (list): List of the three axes: farm layout, power coefficient
            curves, and thrust coefficient curves.
"""
# Plot turbine configurations
fig = plt.figure(figsize=(16, 8))
if turbine_names is None:
nturbs = len(fi.layout_x)
turbine_names = ["T{:02d}".format(ti) for ti in range(nturbs)]
plt.subplot(1, 2, 1)
ax = [None, None, None]
ax[0] = plt.gca()
hub_heights = fi.floris.farm.hub_heights.flatten()
if plot_terrain:
cntr = ax[0].tricontourf(
fi.layout_x,
fi.layout_y,
hub_heights,
levels=14,
cmap="RdBu_r"
)
fig.colorbar(
cntr,
ax=ax[0],
label='Terrain-corrected hub height (m)',
ticks=np.linspace(
np.min(hub_heights) - 10.0,
np.max(hub_heights) + 10.0,
15,
)
)
turbine_types = (
[t["turbine_type"] for t in fi.floris.farm.turbine_definitions]
)
turbine_types = np.array(turbine_types, dtype="str")
for tt in np.unique(turbine_types):
ids = (turbine_types == tt)
ax[0].plot(fi.layout_x[ids], fi.layout_y[ids], "o", label=tt)
# Plot turbine names and hub heights
for ti in range(len(fi.layout_x)):
ax[0].text(
fi.layout_x[ti],
fi.layout_y[ti],
turbine_names[ti] + " ({:.1f} m)".format(hub_heights[ti])
)
ax[0].axis("equal")
ax[0].legend()
ax[0].grid(True)
ax[0].set_xlabel("x coordinate (m)")
ax[0].set_ylabel("y coordinate (m)")
ax[0].set_title("Farm layout")
# Plot turbine power and thrust curves
plt.subplot(2, 2, 2)
ax[1] = plt.gca()
plt.subplot(2, 2, 4)
ax[2] = plt.gca()
# Identify unique power-thrust curves and group turbines accordingly
for ti in range(len(fi.layout_x)):
pt = fi.floris.farm.turbine_definitions[ti]["power_thrust_table"]
if ti == 0:
unique_pt = [pt]
unique_turbines = [[ti]]
continue
# Check if power-thrust curve already exists somewhere
is_unique = True
for tii in range(len(unique_pt)):
if (unique_pt[tii] == pt):
unique_turbines[tii].append(ti)
is_unique = False
continue
# If not, append as new entry
if is_unique:
unique_pt.append(pt)
unique_turbines.append([ti])
for tii, pt in enumerate(unique_pt):
# Convert a very long string of turbine identifiers to ranges,
# e.g., from "A01, A02, A03, A04" to "A01-A04"
labels = [turbine_names[i] for i in unique_turbines[tii]]
prev_turb_in_list = np.zeros(len(labels), dtype=bool)
next_turb_in_list = np.zeros(len(labels), dtype=bool)
for ii, lb in enumerate(labels):
# Split initial string from sequence of texts
idx = 0
while lb[0:idx+1].isalpha():
idx += 1
# Now check various choices of numbers, i.e., A001, A01, A1
turb_prev_if_range = [
lb[0:idx] + "{:01d}".format(int(lb[idx::]) - 1),
lb[0:idx] + "{:02d}".format(int(lb[idx::]) - 1),
lb[0:idx] + "{:03d}".format(int(lb[idx::]) - 1)
]
turb_next_if_range = [
lb[0:idx] + "{:01d}".format(int(lb[idx::]) + 1),
lb[0:idx] + "{:02d}".format(int(lb[idx::]) + 1),
lb[0:idx] + "{:03d}".format(int(lb[idx::]) + 1)
]
prev_turb_in_list[ii] = np.any([t in labels for t in turb_prev_if_range])
next_turb_in_list[ii] = np.any([t in labels for t in turb_next_if_range])
# Remove label for turbines in the middle of ranges
for id in np.where(prev_turb_in_list & next_turb_in_list)[0]:
labels[id] = ""
# Append a dash to labels for turbines at the start of a range
for id in np.where(~prev_turb_in_list & next_turb_in_list)[0]:
labels[id] += "-"
# Append a comma to turbines at the end of a range
for id in np.where(~next_turb_in_list)[0]:
labels[id] += ","
# Now join all strings to a single label and remove last comma
label = "".join(labels)[0:-1]
# Plot power and thrust curves for groups of turbines
tn = fi.floris.farm.turbine_definitions[unique_turbines[tii][0]]["turbine_type"]
ax[1].plot(pt["wind_speed"], pt["power"], label=label + " ({:s})".format(tn))
ax[2].plot(pt["wind_speed"], pt["thrust"], label=label + " ({:s})".format(tn))
ax[1].set_xlabel("Wind speed (m/s)")
ax[2].set_xlabel("Wind speed (m/s)")
ax[1].set_ylabel("Power coefficient (-)")
ax[2].set_ylabel("Thrust coefficient (-)")
ax[1].grid(True)
ax[2].grid(True)
ax[1].legend()
ax[2].legend()
return fig, ax
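# Hedged usage sketch (hypothetical input file name; assumes a FLORIS v3
# FlorisInterface is available under the import path used by FLORIS v3):
def _demo_plot_floris_layout():
    from floris.tools import FlorisInterface  # assumed FLORIS v3 import path
    fi = FlorisInterface("inputs/example_farm.yaml")  # any valid FLORIS input file
    fig, ax = plot_floris_layout(fi, plot_terrain=False)
    plt.show()
    return fig, ax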
|
py | b4119709b4d58dcf56d30db87e288d2b54ee112e | """RQ Dashboard Flask Blueprint.
Uses the standard Flask configuration mechanism e.g. to set the connection
parameters to REDIS. To keep the documentation and defaults all in once place
the default settings must be loaded from ``choreo.dashboard.default_settings``
e.g. as done in ``cli.py``.
RQ Dashboard does not contain any built-in authentication mechanism because
1. it is the responsibility of the wider hosting app rather than a
particular blueprint, and
2. there are numerous ways of adding security orthogonally.
As a quick-and-dirty convenience, the command line invocation in ``cli.py``
provides the option to require HTTP Basic Auth in a few lines of code.
"""
from functools import wraps
from math import ceil
import arrow
from redis import Redis, from_url
from redis.sentinel import Sentinel
from choreo.multirq import (Queue, Worker, cancel_job, get_failed_queue, pop_connection,
push_connection, requeue_job)
from choreo.multirq.job import Job
from six import string_types
from flask import Blueprint, current_app, render_template, url_for
blueprint = Blueprint(
'choreo.dashboard',
__name__,
template_folder='templates',
static_folder='static',
)
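# A minimal, hypothetical host application for this blueprint. The config keys
# mirror those read in setup_rq_connection() below; the app name, Redis URL and
# url_prefix are illustrative only.
def _make_demo_app():
    from flask import Flask
    app = Flask("dashboard_host")
    app.config["REDIS_URL"] = "redis://localhost:6379/0"
    app.config["DEBUG"] = False
    app.register_blueprint(blueprint, url_prefix="/rq")
    return app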
@blueprint.before_app_first_request
def setup_rq_connection():
redis_url = current_app.config.get('REDIS_URL')
redis_sentinels = current_app.config.get('REDIS_SENTINELS')
if isinstance(redis_url, list):
current_app.redis_conn = from_url(redis_url[0])
elif isinstance(redis_url, string_types):
current_app.redis_conn = from_url(redis_url)
elif redis_sentinels:
redis_master = current_app.config.get('REDIS_MASTER_NAME')
password = current_app.config.get('REDIS_PASSWORD')
db = current_app.config.get('REDIS_DB')
sentinel_hosts = [tuple(sentinel.split(':', 1))
for sentinel in redis_sentinels.split(',')]
sentinel = Sentinel(sentinel_hosts, db=db, password=password)
current_app.redis_conn = sentinel.master_for(redis_master)
else:
current_app.redis_conn = Redis(
host=current_app.config.get('REDIS_HOST'),
port=current_app.config.get('REDIS_PORT'),
password=current_app.config.get('REDIS_PASSWORD'),
db=current_app.config.get('REDIS_DB')
)
@blueprint.before_request
def push_rq_connection():
push_connection(current_app.redis_conn)
@blueprint.teardown_request
def pop_rq_connection(exception=None):
pop_connection()
def jsonify(f):
@wraps(f)
def _wrapped(*args, **kwargs):
from flask import jsonify as flask_jsonify
try:
result_dict = f(*args, **kwargs)
except Exception as e:
result_dict = dict(status='error')
if current_app.config['DEBUG']:
result_dict['reason'] = str(e)
from traceback import format_exc
result_dict['exc_info'] = format_exc()
return flask_jsonify(**result_dict), {'Cache-Control': 'no-store'}
return _wrapped
def serialize_queues(queues):
return [
dict(
name=q.name,
count=q.count,
url=url_for('.overview', queue_name=q.name))
for q in queues
]
def serialize_date(dt):
if dt is None:
return None
return arrow.get(dt).to('UTC').datetime.isoformat()
def serialize_job(job):
return dict(
id=job.id,
created_at=serialize_date(job.created_at),
enqueued_at=serialize_date(job.enqueued_at),
ended_at=serialize_date(job.ended_at),
origin=job.origin,
result=job._result,
exc_info=str(job.exc_info) if job.exc_info else None,
description=job.description)
def remove_none_values(input_dict):
return dict(((k, v) for k, v in input_dict.items() if v is not None))
def pagination_window(total_items, cur_page, per_page=5, window_size=10):
all_pages = range(1, int(ceil(total_items / float(per_page))) + 1)
result = all_pages
if window_size >= 1:
temp = min(
len(all_pages) - window_size,
(cur_page - 1) - int(ceil(window_size / 2.0))
)
pages_window_start = max(0, temp)
pages_window_end = pages_window_start + window_size
result = all_pages[pages_window_start:pages_window_end]
return result
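# Worked example (hypothetical numbers): with total_items=200, cur_page=12,
# per_page=10 and window_size=10 there are 20 pages in total, and the slice
# computed above returns the window covering pages 7 through 16.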
@blueprint.route('/', defaults={'queue_name': None, 'page': '1'})
@blueprint.route('/<queue_name>', defaults={'page': '1'})
@blueprint.route('/<queue_name>/<page>')
def overview(queue_name, page):
if queue_name is None:
# Show the failed queue by default if it contains any jobs
failed = Queue('failed')
if not failed.is_empty():
queue = failed
else:
queue = Queue()
else:
queue = Queue(queue_name)
return render_template(
'choreo.dashboard/dashboard.html',
workers=Worker.all(),
queue=queue,
page=page,
queues=Queue.all(),
rq_url_prefix=url_for('.overview')
)
@blueprint.route('/job/<job_id>/cancel', methods=['POST'])
@jsonify
def cancel_job_view(job_id):
if current_app.config.get('DELETE_JOBS'):
Job.fetch(job_id).delete()
else:
cancel_job(job_id)
return dict(status='OK')
@blueprint.route('/job/<job_id>/requeue', methods=['POST'])
@jsonify
def requeue_job_view(job_id):
requeue_job(job_id)
return dict(status='OK')
@blueprint.route('/requeue-all', methods=['GET', 'POST'])
@jsonify
def requeue_all():
fq = get_failed_queue()
job_ids = fq.job_ids
count = len(job_ids)
for job_id in job_ids:
requeue_job(job_id)
return dict(status='OK', count=count)
@blueprint.route('/queue/<queue_name>/empty', methods=['POST'])
@jsonify
def empty_queue(queue_name):
q = Queue(queue_name)
q.empty()
return dict(status='OK')
@blueprint.route('/queue/<queue_name>/compact', methods=['POST'])
@jsonify
def compact_queue(queue_name):
q = Queue(queue_name)
q.compact()
return dict(status='OK')
@blueprint.route('/rq-instance/<instance_number>', methods=['POST'])
@jsonify
def change_rq_instance(instance_number):
redis_url = current_app.config.get('REDIS_URL')
if not isinstance(redis_url, list):
return dict(status='Single RQ. Not Permitted.')
if int(instance_number) >= len(redis_url):
raise LookupError('Index exceeds RQ list. Not Permitted.')
pop_connection()
current_app.redis_conn = from_url(redis_url[int(instance_number)])
push_rq_connection()
return dict(status='OK')
@blueprint.route('/rq-instances.json')
@jsonify
def list_instances():
return dict(rq_instances=current_app.config.get('REDIS_URL'))
@blueprint.route('/queues.json')
@jsonify
def list_queues():
queues = serialize_queues(sorted(Queue.all()))
return dict(queues=queues)
@blueprint.route('/jobs/<queue_name>/<page>.json')
@jsonify
def list_jobs(queue_name, page):
current_page = int(page)
queue = Queue(queue_name)
per_page = 10
total_items = queue.count
pages_numbers_in_window = pagination_window(
total_items, current_page, per_page)
pages_in_window = [
dict(number=p, url=url_for('.overview', queue_name=queue_name, page=p))
for p in pages_numbers_in_window
]
last_page = int(ceil(total_items / float(per_page)))
prev_page = None
if current_page > 1:
prev_page = dict(url=url_for(
'.overview', queue_name=queue_name, page=(current_page - 1)))
next_page = None
if current_page < last_page:
next_page = dict(url=url_for(
'.overview', queue_name=queue_name, page=(current_page + 1)))
first_page_link = dict(url=url_for('.overview', queue_name=queue_name, page=1))
last_page_link = dict(url=url_for('.overview', queue_name=queue_name, page=last_page))
pagination = remove_none_values(
dict(
current_page=current_page,
num_pages=last_page,
pages_in_window=pages_in_window,
next_page=next_page,
prev_page=prev_page,
first_page=first_page_link,
last_page=last_page_link,
)
)
offset = (current_page - 1) * per_page
jobs = [serialize_job(job) for job in queue.get_jobs(offset, per_page)]
return dict(name=queue.name, jobs=jobs, pagination=pagination)
def serialize_current_job(job):
if job is None:
return "idle"
return dict(
job_id=job.id,
description=job.description,
created_at=serialize_date(job.created_at)
)
@blueprint.route('/workers.json')
@jsonify
def list_workers():
def serialize_queue_names(worker):
return [q.name for q in worker.queues]
workers = [
dict(
name=worker.name,
queues=serialize_queue_names(worker),
state=str(worker.get_state()),
current_job=serialize_current_job(
worker.get_current_job()),
)
for worker in Worker.all()
]
return dict(workers=workers)
@blueprint.context_processor
def inject_interval():
interval = current_app.config.get('RQ_POLL_INTERVAL')
return dict(poll_interval=interval)
|
py | b41198a95aeb978459255e1e2aec45a0e93dbc04 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Created by techno at 3/05/19
#Feature: #Enter feature name here
# Enter feature description here
#Scenario: # Enter scenario name here
# Enter steps here
import csv
path = "Google_Stock_ Market_Data - google_stock_data.csv"
file = open(path, newline='')
reader = csv.reader(file)
header = next(reader)  # the first line is the header row
data = [row for row in reader]  # read the remaining rows of data
print (header)
print(data[0])
|
py | b41199abe9d904b9499e85b9cda02304308ec3a1 | def refresh():
getdata()
def helpp():
notification.notify(
title='HELP Menu for COVID Update App By Aman Ojha',
        message='This app was not really meant to need a Help menu. Whatever your issue is, mail it to [email protected]',
timeout=10)
statelist = [
"Andaman and Nicobar Islands",
"Andhra Pradesh",
"Arunachal Pradesh",
"Assam",
"Bihar",
"Chandigarh",
"Chhattisgarh",
"Dadra and Nagar Haveli and Daman and Diu",
"Delhi",
"Goa",
"Gujarat",
"Haryana",
"Himachal Pradesh",
"Jammu and Kashmir",
"Jharkhand",
"Karnataka",
"Kerala",
"Ladakh",
"Lakshadweep",
"Madhya Pradesh",
"Maharashtra",
"Manipur",
"Meghalaya",
"Mizoram",
"Nagaland",
"Odisha",
"Puducherry",
"Punjab",
"Rajasthan",
"Sikkim",
"Tamil Nadu",
"Telangana",
"Tripura",
"Uttar Pradesh",
"Uttarakhand",
"West Bengal",
]
text = ''
optionVal = ''
numberOfSubmit = 0
def getdata():
global text
data = requests.get("https://api.covid19india.org/data.json")
text = data.json()
def printdata():
global numberOfSubmit
global optionVal
    state = option.get()  # read the state currently selected in the dropdown
global text
for i in range(0, 38):
if state.lower() == text['statewise'][i]['state'].lower():
# if numberOfSubmit == 1:
# numberOfSubmit = 0
# label.pack_forget()
label = Label(
text=f"Active Cases = {text['statewise'][i]['active']}").pack(anchor=W)
label1 = Label(
text=f"Confirmed Cases = {text['statewise'][i]['confirmed']}").pack(anchor=W)
label2 = Label(
text=f"Deaths = {text['statewise'][i]['deaths']}").pack(anchor=W)
label3 = Label(
text=f"Migrated = {text['statewise'][i]['migratedother']}").pack(anchor=W)
label4 = Label(
text=f"Recovered = {text['statewise'][i]['recovered']}").pack(anchor=W)
label5 = Label(
text=f"Last Updated = {text['statewise'][i]['lastupdatedtime']}").pack(anchor=W)
numberOfSubmit = 1
def getstats():
global numberOfSubmit
getdata()
printdata()
def body():
    global optionVal, option
label = Label(
text='Welcome to the COVID 19 Tracker System Made with Love in Python By Aman Ojha')
label.pack(padx=0, pady=10)
stateName = Label(text='Enter Your State Name')
stateName.pack(anchor=NW, pady=5, padx=15)
option = StringVar(root)
option.set(statelist[33])
opt = OptionMenu(root, option, *statelist)
opt.config(width=15, font=('sanssarif', 8))
opt.pack()
button = Button(text='Submit', command=getstats)
button.pack(anchor=CENTER)
optionVal = option.get()
def main():
mymenu = Menu(root)
file = Menu(mymenu, tearoff=0)
file.add_command(label='Refresh', command=refresh)
file.add_command(label='Exit', command=exit)
mymenu.add_cascade(label='File', menu=file)
root.config(menu=mymenu)
helpmenu = Menu(mymenu, tearoff=0)
helpmenu.add_command(label='Help', command=helpp)
mymenu.add_cascade(label='Help', menu=helpmenu)
root.config(menu=mymenu)
body()
if __name__ == '__main__':
try:
from tkinter import *
import os
from plyer import notification
import datetime
import requests
except Exception as e:
os.system('pip install requests')
os.system('pip install plyer')
else:
root = Tk()
root.title('COVID update Software')
root.geometry("655x333")
root.minsize(655, 333)
root.maxsize(655, 333)
# root.wm_iconbitmap("icon.png")
main()
root.mainloop()
|
py | b4119a982623cab9485846ae237d481c1ac35d95 | #! /usr/bin/env python3
# -*- encoding: utf-8 -*-
# SPDX-License-Identifier: MIT
# Copyright (c) 2021 ETH Zurich, Luc Grosheintz-Laval
from wave_propagation import WeakWavePropagation, WeakWavePropagationReference
from wave_propagation import WavePropagationRates
if __name__ == "__main__":
sim = WavePropagationRates()
sim(WeakWavePropagation, WeakWavePropagationReference, "isentropic")
|
py | b4119abdf492f253caf5ed6385a8871cf3856e10 | from typing import List, Sequence, Dict, Any, Tuple, Optional, Set
from pathlib import Path
from collections import Counter
import sys
import srsly
from wasabi import Printer, MESSAGES, msg
import typer
from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides
from ._util import import_code, debug_cli
from ..training import Example
from ..training.initialize import get_sourced_components
from ..schemas import ConfigSchemaTraining
from ..pipeline._parser_internals import nonproj
from ..pipeline._parser_internals.nonproj import DELIMITER
from ..pipeline import Morphologizer
from ..morphology import Morphology
from ..language import Language
from ..util import registry, resolve_dot_names
from .. import util
# Minimum number of expected occurrences of NER label in data to train new label
NEW_LABEL_THRESHOLD = 50
# Minimum number of expected occurrences of dependency labels
DEP_LABEL_THRESHOLD = 20
# Minimum number of expected examples to train a new pipeline
BLANK_MODEL_MIN_THRESHOLD = 100
BLANK_MODEL_THRESHOLD = 2000
@debug_cli.command(
"data", context_settings={"allow_extra_args": True, "ignore_unknown_options": True}
)
@app.command(
"debug-data",
context_settings={"allow_extra_args": True, "ignore_unknown_options": True},
hidden=True, # hide this from main CLI help but still allow it to work with warning
)
def debug_data_cli(
# fmt: off
ctx: typer.Context, # This is only used to read additional arguments
config_path: Path = Arg(..., help="Path to config file", exists=True, allow_dash=True),
code_path: Optional[Path] = Opt(None, "--code-path", "--code", "-c", help="Path to Python file with additional code (registered functions) to be imported"),
ignore_warnings: bool = Opt(False, "--ignore-warnings", "-IW", help="Ignore warnings, only show stats and errors"),
verbose: bool = Opt(False, "--verbose", "-V", help="Print additional information and explanations"),
no_format: bool = Opt(False, "--no-format", "-NF", help="Don't pretty-print the results"),
# fmt: on
):
"""
Analyze, debug and validate your training and development data. Outputs
useful stats, and can help you find problems like invalid entity annotations,
cyclic dependencies, low data labels and more.
DOCS: https://spacy.io/api/cli#debug-data
"""
if ctx.command.name == "debug-data":
msg.warn(
"The debug-data command is now available via the 'debug data' "
"subcommand (without the hyphen). You can run python -m spacy debug "
"--help for an overview of the other available debugging commands."
)
overrides = parse_config_overrides(ctx.args)
import_code(code_path)
debug_data(
config_path,
config_overrides=overrides,
ignore_warnings=ignore_warnings,
verbose=verbose,
no_format=no_format,
silent=False,
)
def debug_data(
config_path: Path,
*,
config_overrides: Dict[str, Any] = {},
ignore_warnings: bool = False,
verbose: bool = False,
no_format: bool = True,
silent: bool = True,
):
msg = Printer(
no_print=silent, pretty=not no_format, ignore_warnings=ignore_warnings
)
# Make sure all files and paths exists if they are needed
with show_validation_error(config_path):
cfg = util.load_config(config_path, overrides=config_overrides)
nlp = util.load_model_from_config(cfg)
config = nlp.config.interpolate()
T = registry.resolve(config["training"], schema=ConfigSchemaTraining)
# Use original config here, not resolved version
sourced_components = get_sourced_components(cfg)
frozen_components = T["frozen_components"]
resume_components = [p for p in sourced_components if p not in frozen_components]
pipeline = nlp.pipe_names
factory_names = [nlp.get_pipe_meta(pipe).factory for pipe in nlp.pipe_names]
msg.divider("Data file validation")
# Create the gold corpus to be able to better analyze data
dot_names = [T["train_corpus"], T["dev_corpus"]]
train_corpus, dev_corpus = resolve_dot_names(config, dot_names)
train_dataset = list(train_corpus(nlp))
dev_dataset = list(dev_corpus(nlp))
msg.good("Corpus is loadable")
nlp.initialize(lambda: train_dataset)
msg.good("Pipeline can be initialized with data")
# Create all gold data here to avoid iterating over the train_dataset constantly
gold_train_data = _compile_gold(train_dataset, factory_names, nlp, make_proj=True)
gold_train_unpreprocessed_data = _compile_gold(
train_dataset, factory_names, nlp, make_proj=False
)
gold_dev_data = _compile_gold(dev_dataset, factory_names, nlp, make_proj=True)
train_texts = gold_train_data["texts"]
dev_texts = gold_dev_data["texts"]
frozen_components = T["frozen_components"]
msg.divider("Training stats")
msg.text(f"Language: {nlp.lang}")
msg.text(f"Training pipeline: {', '.join(pipeline)}")
if resume_components:
msg.text(f"Components from other pipelines: {', '.join(resume_components)}")
if frozen_components:
msg.text(f"Frozen components: {', '.join(frozen_components)}")
msg.text(f"{len(train_dataset)} training docs")
msg.text(f"{len(dev_dataset)} evaluation docs")
if not len(gold_dev_data):
msg.fail("No evaluation docs")
overlap = len(train_texts.intersection(dev_texts))
if overlap:
msg.warn(f"{overlap} training examples also in evaluation data")
else:
msg.good("No overlap between training and evaluation data")
# TODO: make this feedback more fine-grained and report on updated
# components vs. blank components
if not resume_components and len(train_dataset) < BLANK_MODEL_THRESHOLD:
text = f"Low number of examples to train a new pipeline ({len(train_dataset)})"
if len(train_dataset) < BLANK_MODEL_MIN_THRESHOLD:
msg.fail(text)
else:
msg.warn(text)
msg.text(
f"It's recommended to use at least {BLANK_MODEL_THRESHOLD} examples "
f"(minimum {BLANK_MODEL_MIN_THRESHOLD})",
show=verbose,
)
msg.divider("Vocab & Vectors")
n_words = gold_train_data["n_words"]
msg.info(
f"{n_words} total word(s) in the data ({len(gold_train_data['words'])} unique)"
)
if gold_train_data["n_misaligned_words"] > 0:
n_misaligned = gold_train_data["n_misaligned_words"]
msg.warn(f"{n_misaligned} misaligned tokens in the training data")
if gold_dev_data["n_misaligned_words"] > 0:
n_misaligned = gold_dev_data["n_misaligned_words"]
msg.warn(f"{n_misaligned} misaligned tokens in the dev data")
most_common_words = gold_train_data["words"].most_common(10)
msg.text(
f"10 most common words: {_format_labels(most_common_words, counts=True)}",
show=verbose,
)
if len(nlp.vocab.vectors):
msg.info(
f"{len(nlp.vocab.vectors)} vectors ({nlp.vocab.vectors.n_keys} "
f"unique keys, {nlp.vocab.vectors_length} dimensions)"
)
n_missing_vectors = sum(gold_train_data["words_missing_vectors"].values())
msg.warn(
"{} words in training data without vectors ({:.0f}%)".format(
n_missing_vectors,
100 * (n_missing_vectors / gold_train_data["n_words"]),
),
)
msg.text(
"10 most common words without vectors: {}".format(
_format_labels(
gold_train_data["words_missing_vectors"].most_common(10),
counts=True,
)
),
show=verbose,
)
else:
msg.info("No word vectors present in the package")
if "ner" in factory_names:
# Get all unique NER labels present in the data
labels = set(
label for label in gold_train_data["ner"] if label not in ("O", "-", None)
)
label_counts = gold_train_data["ner"]
model_labels = _get_labels_from_model(nlp, "ner")
has_low_data_warning = False
has_no_neg_warning = False
has_ws_ents_error = False
has_punct_ents_warning = False
msg.divider("Named Entity Recognition")
msg.info(f"{len(model_labels)} label(s)")
missing_values = label_counts["-"]
msg.text(f"{missing_values} missing value(s) (tokens with '-' label)")
for label in labels:
if len(label) == 0:
msg.fail("Empty label found in train data")
labels_with_counts = [
(label, count)
for label, count in label_counts.most_common()
if label != "-"
]
labels_with_counts = _format_labels(labels_with_counts, counts=True)
msg.text(f"Labels in train data: {_format_labels(labels)}", show=verbose)
missing_labels = model_labels - labels
if missing_labels:
msg.warn(
"Some model labels are not present in the train data. The "
"model performance may be degraded for these labels after "
f"training: {_format_labels(missing_labels)}."
)
if gold_train_data["ws_ents"]:
msg.fail(f"{gold_train_data['ws_ents']} invalid whitespace entity spans")
has_ws_ents_error = True
if gold_train_data["punct_ents"]:
msg.warn(f"{gold_train_data['punct_ents']} entity span(s) with punctuation")
has_punct_ents_warning = True
for label in labels:
if label_counts[label] <= NEW_LABEL_THRESHOLD:
msg.warn(
f"Low number of examples for label '{label}' ({label_counts[label]})"
)
has_low_data_warning = True
with msg.loading("Analyzing label distribution..."):
neg_docs = _get_examples_without_label(train_dataset, label)
if neg_docs == 0:
msg.warn(f"No examples for texts WITHOUT new label '{label}'")
has_no_neg_warning = True
if not has_low_data_warning:
msg.good("Good amount of examples for all labels")
if not has_no_neg_warning:
msg.good("Examples without occurrences available for all labels")
if not has_ws_ents_error:
msg.good("No entities consisting of or starting/ending with whitespace")
if not has_punct_ents_warning:
msg.good("No entities consisting of or starting/ending with punctuation")
if has_low_data_warning:
msg.text(
f"To train a new entity type, your data should include at "
f"least {NEW_LABEL_THRESHOLD} instances of the new label",
show=verbose,
)
if has_no_neg_warning:
msg.text(
"Training data should always include examples of entities "
"in context, as well as examples without a given entity "
"type.",
show=verbose,
)
if has_ws_ents_error:
msg.text(
"As of spaCy v2.1.0, entity spans consisting of or starting/ending "
"with whitespace characters are considered invalid."
)
if has_punct_ents_warning:
msg.text(
"Entity spans consisting of or starting/ending "
"with punctuation can not be trained with a noise level > 0."
)
if "textcat" in factory_names:
msg.divider("Text Classification (Exclusive Classes)")
labels = _get_labels_from_model(nlp, "textcat")
msg.info(f"Text Classification: {len(labels)} label(s)")
msg.text(f"Labels: {_format_labels(labels)}", show=verbose)
missing_labels = labels - set(gold_train_data["cats"])
if missing_labels:
msg.warn(
"Some model labels are not present in the train data. The "
"model performance may be degraded for these labels after "
f"training: {_format_labels(missing_labels)}."
)
if set(gold_train_data["cats"]) != set(gold_dev_data["cats"]):
msg.warn(
"Potential train/dev mismatch: the train and dev labels are "
"not the same. "
f"Train labels: {_format_labels(gold_train_data['cats'])}. "
f"Dev labels: {_format_labels(gold_dev_data['cats'])}."
)
if len(labels) < 2:
msg.fail(
"The model does not have enough labels. 'textcat' requires at "
"least two labels due to mutually-exclusive classes, e.g. "
"LABEL/NOT_LABEL or POSITIVE/NEGATIVE for a binary "
"classification task."
)
if (
gold_train_data["n_cats_bad_values"] > 0
or gold_dev_data["n_cats_bad_values"] > 0
):
msg.fail(
"Unsupported values for cats: the supported values are "
"1.0/True and 0.0/False."
)
if gold_train_data["n_cats_multilabel"] > 0:
# Note: you should never get here because you run into E895 on
# initialization first.
msg.fail(
"The train data contains instances without mutually-exclusive "
"classes. Use the component 'textcat_multilabel' instead of "
"'textcat'."
)
if gold_dev_data["n_cats_multilabel"] > 0:
msg.fail(
"The dev data contains instances without mutually-exclusive "
"classes. Use the component 'textcat_multilabel' instead of "
"'textcat'."
)
if "textcat_multilabel" in factory_names:
msg.divider("Text Classification (Multilabel)")
labels = _get_labels_from_model(nlp, "textcat_multilabel")
msg.info(f"Text Classification: {len(labels)} label(s)")
msg.text(f"Labels: {_format_labels(labels)}", show=verbose)
missing_labels = labels - set(gold_train_data["cats"])
if missing_labels:
msg.warn(
"Some model labels are not present in the train data. The "
"model performance may be degraded for these labels after "
f"training: {_format_labels(missing_labels)}."
)
if set(gold_train_data["cats"]) != set(gold_dev_data["cats"]):
msg.warn(
"Potential train/dev mismatch: the train and dev labels are "
"not the same. "
f"Train labels: {_format_labels(gold_train_data['cats'])}. "
f"Dev labels: {_format_labels(gold_dev_data['cats'])}."
)
if (
gold_train_data["n_cats_bad_values"] > 0
or gold_dev_data["n_cats_bad_values"] > 0
):
msg.fail(
"Unsupported values for cats: the supported values are "
"1.0/True and 0.0/False."
)
if gold_train_data["n_cats_multilabel"] > 0:
if gold_dev_data["n_cats_multilabel"] == 0:
msg.warn(
"Potential train/dev mismatch: the train data contains "
"instances without mutually-exclusive classes while the "
"dev data contains only instances with mutually-exclusive "
"classes."
)
else:
msg.warn(
"The train data contains only instances with "
"mutually-exclusive classes. You can potentially use the "
"component 'textcat' instead of 'textcat_multilabel'."
)
if gold_dev_data["n_cats_multilabel"] > 0:
msg.fail(
"Train/dev mismatch: the dev data contains instances "
"without mutually-exclusive classes while the train data "
"contains only instances with mutually-exclusive classes."
)
if "tagger" in factory_names:
msg.divider("Part-of-speech Tagging")
labels = [label for label in gold_train_data["tags"]]
model_labels = _get_labels_from_model(nlp, "tagger")
msg.info(f"{len(labels)} label(s) in train data")
missing_labels = model_labels - set(labels)
if missing_labels:
msg.warn(
"Some model labels are not present in the train data. The "
"model performance may be degraded for these labels after "
f"training: {_format_labels(missing_labels)}."
)
labels_with_counts = _format_labels(
gold_train_data["tags"].most_common(), counts=True
)
msg.text(labels_with_counts, show=verbose)
if "morphologizer" in factory_names:
msg.divider("Morphologizer (POS+Morph)")
labels = [label for label in gold_train_data["morphs"]]
model_labels = _get_labels_from_model(nlp, "morphologizer")
msg.info(f"{len(labels)} label(s) in train data")
missing_labels = model_labels - set(labels)
if missing_labels:
msg.warn(
"Some model labels are not present in the train data. The "
"model performance may be degraded for these labels after "
f"training: {_format_labels(missing_labels)}."
)
labels_with_counts = _format_labels(
gold_train_data["morphs"].most_common(), counts=True
)
msg.text(labels_with_counts, show=verbose)
if "parser" in factory_names:
has_low_data_warning = False
msg.divider("Dependency Parsing")
# profile sentence length
msg.info(
f"Found {gold_train_data['n_sents']} sentence(s) with an average "
f"length of {gold_train_data['n_words'] / gold_train_data['n_sents']:.1f} words."
)
# check for documents with multiple sentences
sents_per_doc = gold_train_data["n_sents"] / len(gold_train_data["texts"])
if sents_per_doc < 1.1:
msg.warn(
f"The training data contains {sents_per_doc:.2f} sentences per "
f"document. When there are very few documents containing more "
f"than one sentence, the parser will not learn how to segment "
f"longer texts into sentences."
)
# profile labels
labels_train = [label for label in gold_train_data["deps"]]
labels_train_unpreprocessed = [
label for label in gold_train_unpreprocessed_data["deps"]
]
labels_dev = [label for label in gold_dev_data["deps"]]
if gold_train_unpreprocessed_data["n_nonproj"] > 0:
n_nonproj = gold_train_unpreprocessed_data["n_nonproj"]
msg.info(f"Found {n_nonproj} nonprojective train sentence(s)")
if gold_dev_data["n_nonproj"] > 0:
n_nonproj = gold_dev_data["n_nonproj"]
msg.info(f"Found {n_nonproj} nonprojective dev sentence(s)")
msg.info(f"{len(labels_train_unpreprocessed)} label(s) in train data")
msg.info(f"{len(labels_train)} label(s) in projectivized train data")
labels_with_counts = _format_labels(
gold_train_unpreprocessed_data["deps"].most_common(), counts=True
)
msg.text(labels_with_counts, show=verbose)
# rare labels in train
for label in gold_train_unpreprocessed_data["deps"]:
if gold_train_unpreprocessed_data["deps"][label] <= DEP_LABEL_THRESHOLD:
msg.warn(
f"Low number of examples for label '{label}' "
f"({gold_train_unpreprocessed_data['deps'][label]})"
)
has_low_data_warning = True
# rare labels in projectivized train
rare_projectivized_labels = []
for label in gold_train_data["deps"]:
if (
gold_train_data["deps"][label] <= DEP_LABEL_THRESHOLD
and DELIMITER in label
):
rare_projectivized_labels.append(
f"{label}: {gold_train_data['deps'][label]}"
)
if len(rare_projectivized_labels) > 0:
msg.warn(
f"Low number of examples for {len(rare_projectivized_labels)} "
"label(s) in the projectivized dependency trees used for "
"training. You may want to projectivize labels such as punct "
"before training in order to improve parser performance."
)
msg.warn(
f"Projectivized labels with low numbers of examples: ",
", ".join(rare_projectivized_labels),
show=verbose,
)
has_low_data_warning = True
# labels only in train
if set(labels_train) - set(labels_dev):
msg.warn(
"The following labels were found only in the train data:",
", ".join(set(labels_train) - set(labels_dev)),
show=verbose,
)
# labels only in dev
if set(labels_dev) - set(labels_train):
msg.warn(
"The following labels were found only in the dev data:",
", ".join(set(labels_dev) - set(labels_train)),
show=verbose,
)
if has_low_data_warning:
msg.text(
f"To train a parser, your data should include at "
f"least {DEP_LABEL_THRESHOLD} instances of each label.",
show=verbose,
)
# multiple root labels
if len(gold_train_unpreprocessed_data["roots"]) > 1:
msg.warn(
f"Multiple root labels "
f"({', '.join(gold_train_unpreprocessed_data['roots'])}) "
f"found in training data. spaCy's parser uses a single root "
f"label ROOT so this distinction will not be available."
)
# these should not happen, but just in case
if gold_train_data["n_nonproj"] > 0:
msg.fail(
f"Found {gold_train_data['n_nonproj']} nonprojective "
f"projectivized train sentence(s)"
)
if gold_train_data["n_cycles"] > 0:
msg.fail(
f"Found {gold_train_data['n_cycles']} projectivized train sentence(s) with cycles"
)
msg.divider("Summary")
good_counts = msg.counts[MESSAGES.GOOD]
warn_counts = msg.counts[MESSAGES.WARN]
fail_counts = msg.counts[MESSAGES.FAIL]
if good_counts:
msg.good(f"{good_counts} {'check' if good_counts == 1 else 'checks'} passed")
if warn_counts:
msg.warn(f"{warn_counts} {'warning' if warn_counts == 1 else 'warnings'}")
if fail_counts:
msg.fail(f"{fail_counts} {'error' if fail_counts == 1 else 'errors'}")
sys.exit(1)
def _load_file(file_path: Path, msg: Printer) -> None:
file_name = file_path.parts[-1]
if file_path.suffix == ".json":
with msg.loading(f"Loading {file_name}..."):
data = srsly.read_json(file_path)
msg.good(f"Loaded {file_name}")
return data
elif file_path.suffix == ".jsonl":
with msg.loading(f"Loading {file_name}..."):
data = srsly.read_jsonl(file_path)
msg.good(f"Loaded {file_name}")
return data
msg.fail(
f"Can't load file extension {file_path.suffix}",
"Expected .json or .jsonl",
exits=1,
)
def _compile_gold(
examples: Sequence[Example],
factory_names: List[str],
nlp: Language,
make_proj: bool,
) -> Dict[str, Any]:
data = {
"ner": Counter(),
"cats": Counter(),
"tags": Counter(),
"morphs": Counter(),
"deps": Counter(),
"words": Counter(),
"roots": Counter(),
"ws_ents": 0,
"punct_ents": 0,
"n_words": 0,
"n_misaligned_words": 0,
"words_missing_vectors": Counter(),
"n_sents": 0,
"n_nonproj": 0,
"n_cycles": 0,
"n_cats_multilabel": 0,
"n_cats_bad_values": 0,
"texts": set(),
}
for eg in examples:
gold = eg.reference
doc = eg.predicted
valid_words = [x.text for x in gold]
data["words"].update(valid_words)
data["n_words"] += len(valid_words)
align = eg.alignment
for token in doc:
if token.orth_.isspace():
continue
if align.x2y.lengths[token.i] != 1:
data["n_misaligned_words"] += 1
data["texts"].add(doc.text)
if len(nlp.vocab.vectors):
for word in [t.text for t in doc]:
if nlp.vocab.strings[word] not in nlp.vocab.vectors:
data["words_missing_vectors"].update([word])
if "ner" in factory_names:
for i, label in enumerate(eg.get_aligned_ner()):
if label is None:
continue
if label.startswith(("B-", "U-", "L-")) and doc[i].is_space:
# "Illegal" whitespace entity
data["ws_ents"] += 1
if label.startswith(("B-", "U-", "L-")) and doc[i].text in [
".",
"'",
"!",
"?",
",",
]:
# punctuation entity: could be replaced by whitespace when training with noise,
# so add a warning to alert the user to this unexpected side effect.
data["punct_ents"] += 1
if label.startswith(("B-", "U-")):
combined_label = label.split("-")[1]
data["ner"][combined_label] += 1
elif label == "-":
data["ner"]["-"] += 1
if "textcat" in factory_names or "textcat_multilabel" in factory_names:
data["cats"].update(gold.cats)
if any(val not in (0, 1) for val in gold.cats.values()):
data["n_cats_bad_values"] += 1
if list(gold.cats.values()).count(1) != 1:
data["n_cats_multilabel"] += 1
if "tagger" in factory_names:
tags = eg.get_aligned("TAG", as_string=True)
data["tags"].update([x for x in tags if x is not None])
if "morphologizer" in factory_names:
pos_tags = eg.get_aligned("POS", as_string=True)
morphs = eg.get_aligned("MORPH", as_string=True)
for pos, morph in zip(pos_tags, morphs):
# POS may align (same value for multiple tokens) when morph
# doesn't, so if either is misaligned (None), treat the
# annotation as missing so that truths doesn't end up with an
# unknown morph+POS combination
if pos is None or morph is None:
pass
# If both are unset, the annotation is missing (empty morph
# converted from int is "_" rather than "")
elif pos == "" and morph == "":
pass
# Otherwise, generate the combined label
else:
label_dict = Morphology.feats_to_dict(morph)
if pos:
label_dict[Morphologizer.POS_FEAT] = pos
label = eg.reference.vocab.strings[
eg.reference.vocab.morphology.add(label_dict)
]
data["morphs"].update([label])
if "parser" in factory_names:
aligned_heads, aligned_deps = eg.get_aligned_parse(projectivize=make_proj)
data["deps"].update([x for x in aligned_deps if x is not None])
for i, (dep, head) in enumerate(zip(aligned_deps, aligned_heads)):
if head == i:
data["roots"].update([dep])
data["n_sents"] += 1
if nonproj.is_nonproj_tree(aligned_heads):
data["n_nonproj"] += 1
if nonproj.contains_cycle(aligned_heads):
data["n_cycles"] += 1
return data
def _format_labels(labels: List[Tuple[str, int]], counts: bool = False) -> str:
if counts:
return ", ".join([f"'{l}' ({c})" for l, c in labels])
return ", ".join([f"'{l}'" for l in labels])
def _get_examples_without_label(data: Sequence[Example], label: str) -> int:
count = 0
for eg in data:
labels = [
label.split("-")[1]
for label in eg.get_aligned_ner()
if label not in ("O", "-", None)
]
if label not in labels:
count += 1
return count
def _get_labels_from_model(nlp: Language, pipe_name: str) -> Set[str]:
if pipe_name not in nlp.pipe_names:
return set()
pipe = nlp.get_pipe(pipe_name)
return set(pipe.labels)
|
py | b4119ad73e9dd56f33c19316089598df6f238033 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
import os
from page_sets.login_helpers import linkedin_login
from telemetry import story
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry.util import wpr_modes
class SkiaDesktopPage(page_module.Page):
def __init__(self, url, page_set):
super(SkiaDesktopPage, self).__init__(
url=url,
name=url,
page_set=page_set,
shared_page_state_class=shared_page_state.SharedDesktopPageState)
self.archive_data_file = 'data/skia_linkedin_mobile.json'
def RunNavigateSteps(self, action_runner):
if self.wpr_mode != wpr_modes.WPR_REPLAY:
credentials_path=os.path.join(os.path.dirname(os.path.abspath(__file__)),
'data/credentials.json')
linkedin_login.LoginDesktopAccount(action_runner, 'linkedin',
credentials_path)
action_runner.Wait(15)
action_runner.Navigate(self.url)
action_runner.Wait(15)
class SkiaLinkedinDesktopPageSet(story.StorySet):
""" Pages designed to represent the median, not highly optimized web """
def __init__(self):
super(SkiaLinkedinDesktopPageSet, self).__init__(
archive_data_file='data/skia_linkedin_mobile.json')
urls_list = [
# go/skia-skps-3-2019
'https://www.linkedin.com/in/linustorvalds',
]
for url in urls_list:
self.AddStory(SkiaDesktopPage(url, self))
|
py | b4119b101b5bb93cf576ccd6704f388d1e6f3aae | import pytest
from urllib3.response import HTTPResponse
from urllib3.packages.six.moves import xrange
from urllib3.util.retry import Retry, RequestHistory
from urllib3.exceptions import (
ConnectTimeoutError,
MaxRetryError,
ReadTimeoutError,
ResponseError,
)
class TestRetry(object):
def test_string(self):
""" Retry string representation looks the way we expect """
retry = Retry()
assert str(retry) == 'Retry(total=10, connect=None, read=None, redirect=None, status=None)'
for _ in range(3):
retry = retry.increment(method='GET')
assert str(retry) == 'Retry(total=7, connect=None, read=None, redirect=None, status=None)'
def test_retry_both_specified(self):
"""Total can win if it's lower than the connect value"""
error = ConnectTimeoutError()
retry = Retry(connect=3, total=2)
retry = retry.increment(error=error)
retry = retry.increment(error=error)
with pytest.raises(MaxRetryError) as e:
retry.increment(error=error)
assert e.value.reason == error
def test_retry_higher_total_loses(self):
""" A lower connect timeout than the total is honored """
error = ConnectTimeoutError()
retry = Retry(connect=2, total=3)
retry = retry.increment(error=error)
retry = retry.increment(error=error)
with pytest.raises(MaxRetryError):
retry.increment(error=error)
def test_retry_higher_total_loses_vs_read(self):
""" A lower read timeout than the total is honored """
error = ReadTimeoutError(None, "/", "read timed out")
retry = Retry(read=2, total=3)
retry = retry.increment(method='GET', error=error)
retry = retry.increment(method='GET', error=error)
with pytest.raises(MaxRetryError):
retry.increment(method='GET', error=error)
def test_retry_total_none(self):
""" if Total is none, connect error should take precedence """
error = ConnectTimeoutError()
retry = Retry(connect=2, total=None)
retry = retry.increment(error=error)
retry = retry.increment(error=error)
with pytest.raises(MaxRetryError) as e:
retry.increment(error=error)
assert e.value.reason == error
error = ReadTimeoutError(None, "/", "read timed out")
retry = Retry(connect=2, total=None)
retry = retry.increment(method='GET', error=error)
retry = retry.increment(method='GET', error=error)
retry = retry.increment(method='GET', error=error)
assert not retry.is_exhausted()
def test_retry_default(self):
""" If no value is specified, should retry connects 3 times """
retry = Retry()
assert retry.total == 10
assert retry.connect is None
assert retry.read is None
assert retry.redirect is None
error = ConnectTimeoutError()
retry = Retry(connect=1)
retry = retry.increment(error=error)
with pytest.raises(MaxRetryError):
retry.increment(error=error)
retry = Retry(connect=1)
retry = retry.increment(error=error)
assert not retry.is_exhausted()
assert Retry(0).raise_on_redirect
assert not Retry(False).raise_on_redirect
def test_retry_read_zero(self):
""" No second chances on read timeouts, by default """
error = ReadTimeoutError(None, "/", "read timed out")
retry = Retry(read=0)
with pytest.raises(MaxRetryError) as e:
retry.increment(method='GET', error=error)
assert e.value.reason == error
def test_status_counter(self):
resp = HTTPResponse(status=400)
retry = Retry(status=2)
retry = retry.increment(response=resp)
retry = retry.increment(response=resp)
with pytest.raises(MaxRetryError) as e:
retry.increment(response=resp)
assert str(e.value.reason) == ResponseError.SPECIFIC_ERROR.format(status_code=400)
def test_backoff(self):
""" Backoff is computed correctly """
max_backoff = Retry.BACKOFF_MAX
retry = Retry(total=100, backoff_factor=0.2)
assert retry.get_backoff_time() == 0 # First request
retry = retry.increment(method='GET')
assert retry.get_backoff_time() == 0 # First retry
retry = retry.increment(method='GET')
assert retry.backoff_factor == 0.2
assert retry.total == 98
assert retry.get_backoff_time() == 0.4 # Start backoff
retry = retry.increment(method='GET')
assert retry.get_backoff_time() == 0.8
retry = retry.increment(method='GET')
assert retry.get_backoff_time() == 1.6
for _ in xrange(10):
retry = retry.increment(method='GET')
assert retry.get_backoff_time() == max_backoff
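        # The asserted values are consistent with an exponential schedule of
        # backoff_factor * 2 ** (n - 1) for n >= 2 consecutive retries (the first
        # retry backs off 0 seconds), capped at Retry.BACKOFF_MAX.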
def test_zero_backoff(self):
retry = Retry()
assert retry.get_backoff_time() == 0
retry = retry.increment(method='GET')
retry = retry.increment(method='GET')
assert retry.get_backoff_time() == 0
def test_backoff_reset_after_redirect(self):
retry = Retry(total=100, redirect=5, backoff_factor=0.2)
retry = retry.increment(method='GET')
retry = retry.increment(method='GET')
assert retry.get_backoff_time() == 0.4
redirect_response = HTTPResponse(status=302, headers={'location': 'test'})
retry = retry.increment(method='GET', response=redirect_response)
assert retry.get_backoff_time() == 0
retry = retry.increment(method='GET')
retry = retry.increment(method='GET')
assert retry.get_backoff_time() == 0.4
def test_sleep(self):
# sleep a very small amount of time so our code coverage is happy
retry = Retry(backoff_factor=0.0001)
retry = retry.increment(method='GET')
retry = retry.increment(method='GET')
retry.sleep()
def test_status_forcelist(self):
retry = Retry(status_forcelist=xrange(500, 600))
assert not retry.is_retry('GET', status_code=200)
assert not retry.is_retry('GET', status_code=400)
assert retry.is_retry('GET', status_code=500)
retry = Retry(total=1, status_forcelist=[418])
assert not retry.is_retry('GET', status_code=400)
assert retry.is_retry('GET', status_code=418)
# String status codes are not matched.
retry = Retry(total=1, status_forcelist=['418'])
assert not retry.is_retry('GET', status_code=418)
def test_method_whitelist_with_status_forcelist(self):
# Falsey method_whitelist means to retry on any method.
retry = Retry(status_forcelist=[500], method_whitelist=None)
assert retry.is_retry('GET', status_code=500)
assert retry.is_retry('POST', status_code=500)
# Criteria of method_whitelist and status_forcelist are ANDed.
retry = Retry(status_forcelist=[500], method_whitelist=['POST'])
assert not retry.is_retry('GET', status_code=500)
assert retry.is_retry('POST', status_code=500)
def test_exhausted(self):
assert not Retry(0).is_exhausted()
assert Retry(-1).is_exhausted()
assert Retry(1).increment(method='GET').total == 0
@pytest.mark.parametrize('total', [-1, 0])
def test_disabled(self, total):
with pytest.raises(MaxRetryError):
Retry(total).increment(method='GET')
def test_error_message(self):
retry = Retry(total=0)
with pytest.raises(MaxRetryError) as e:
retry = retry.increment(method='GET',
error=ReadTimeoutError(None, "/", "read timed out"))
assert 'Caused by redirect' not in str(e.value)
assert str(e.value.reason) == 'None: read timed out'
retry = Retry(total=1)
with pytest.raises(MaxRetryError) as e:
retry = retry.increment('POST', '/')
retry = retry.increment('POST', '/')
assert 'Caused by redirect' not in str(e.value)
assert isinstance(e.value.reason, ResponseError)
assert str(e.value.reason) == ResponseError.GENERIC_ERROR
retry = Retry(total=1)
response = HTTPResponse(status=500)
with pytest.raises(MaxRetryError) as e:
retry = retry.increment('POST', '/', response=response)
retry = retry.increment('POST', '/', response=response)
assert 'Caused by redirect' not in str(e.value)
msg = ResponseError.SPECIFIC_ERROR.format(status_code=500)
assert str(e.value.reason) == msg
retry = Retry(connect=1)
with pytest.raises(MaxRetryError) as e:
retry = retry.increment(error=ConnectTimeoutError('conntimeout'))
retry = retry.increment(error=ConnectTimeoutError('conntimeout'))
assert 'Caused by redirect' not in str(e.value)
assert str(e.value.reason) == 'conntimeout'
def test_history(self):
retry = Retry(total=10, method_whitelist=frozenset(['GET', 'POST']))
assert retry.history == tuple()
connection_error = ConnectTimeoutError('conntimeout')
retry = retry.increment('GET', '/test1', None, connection_error)
history = (RequestHistory('GET', '/test1', connection_error, None, None),)
assert retry.history == history
read_error = ReadTimeoutError(None, "/test2", "read timed out")
retry = retry.increment('POST', '/test2', None, read_error)
history = (RequestHistory('GET', '/test1', connection_error, None, None),
RequestHistory('POST', '/test2', read_error, None, None))
assert retry.history == history
response = HTTPResponse(status=500)
retry = retry.increment('GET', '/test3', response, None)
history = (RequestHistory('GET', '/test1', connection_error, None, None),
RequestHistory('POST', '/test2', read_error, None, None),
RequestHistory('GET', '/test3', None, 500, None))
assert retry.history == history
def test_retry_method_not_in_whitelist(self):
error = ReadTimeoutError(None, "/", "read timed out")
retry = Retry()
with pytest.raises(ReadTimeoutError):
retry.increment(method='POST', error=error)
def test_retry_default_remove_headers_on_redirect(self):
retry = Retry()
assert list(retry.remove_headers_on_redirect) == ['Authorization']
def test_retry_set_remove_headers_on_redirect(self):
retry = Retry(remove_headers_on_redirect=['X-API-Secret'])
assert list(retry.remove_headers_on_redirect) == ['X-API-Secret']
|
py | b4119b891ac9be844b63539288f584ebbcbe0c63 | get_ipython().getoutput("pip freeze > pandas_profiling_env.txt")
# get_ipython().run_line_magic("%capture", " silences console warnings, updates, etc")
get_ipython().run_line_magic("%capture", " ")
get_ipython().getoutput("pip install pandas-profiling==2.6")
get_ipython().getoutput("pip install visions")
import pandas as pd
from pandas_profiling import ProfileReport
file_path = '/Volumes/GoogleDrive/My Drive/508/Project_508/Data/vehicle_registration.parquet.gzip'
df = pd.read_parquet(file_path)
df.shape
ProfileReport(df).to_file("vehicle_registration_report.html")
df.head()
|
py | b4119c12dcfb3c0cccec2961e881dd4b0b326085 | # import classes and functions
from chainer.dataset.convert import concat_examples # NOQA
from chainer.dataset.convert import ConcatWithAsyncTransfer # NOQA
from chainer.dataset.convert import converter # NOQA
from chainer.dataset.convert import Converter # NOQA
from chainer.dataset.convert import to_device # NOQA
from chainer.dataset.dataset_mixin import DatasetMixin # NOQA
from chainer.dataset.download import cache_or_load_file # NOQA
from chainer.dataset.download import cached_download # NOQA
from chainer.dataset.download import get_dataset_directory # NOQA
from chainer.dataset.download import get_dataset_root # NOQA
from chainer.dataset.download import set_dataset_root # NOQA
from chainer.dataset.iterator import Iterator # NOQA
from chainer.dataset.tabular.tabular_dataset import TabularDataset # NOQA
|
py | b4119d792e1d71524d396cd2ddc761a9e185b071 | # -*- coding: utf-8 -*-
#
# Copyright 2017-2020 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Renku migrations."""
|
py | b4119f47b71709dfd466ff0a89a4f193cf458238 | from django.conf import settings
from django.core.management.base import BaseCommand
import pandas as pd
from categories.models import Pool
import os
import logging
import re
def vendor_logger():
return logging.getLogger('vendor')
def vehicle_info(vehicle):
field_map = {
'oasis': {
'field_types': ('core', 'zones')
},
'oasis_sb': {
'field_types': ('core', 'setasides', 'zones')
},
'hcats': {
'field_types': ('core', 'zones')
},
'hcats_sb': {
'field_types': ('core', 'setasides', 'zones')
},
'bmo': {
'field_types': ('core', 'zones')
},
'bmo_sb': {
'field_types': ('core', 'setasides', 'zones')
},
'pss': {
'field_types': ('core', 'setasides', 'zones')
}
}
return field_map[vehicle]
def vendor_field_type_core():
return [
'ContractorName',
'ContractNumber',
'ContractEnd',
'DUNS',
'POC1',
'Phone1',
'Email1',
'POC2',
'Phone2',
'Email2'
]
def vendor_field_type_setasides():
return [
'SB',
'8(a)',
'8(a)Date',
'HubZ',
'SDB',
'WO',
'VO',
'SDVOSB',
'VIP'
]
def vendor_field_type_zones():
return [
'Zone1',
'Zone2',
'Zone3',
'Zone4',
'Zone5',
'Zone6'
]
class Command(BaseCommand):
def check_pool(self, vehicle, pool, df):
variables = globals()
info = vehicle_info(vehicle)
logger = vendor_logger()
columns = list(df.columns)
vendor_count = 0
print(" > Data:")
for field_group in info['field_types']:
field_processor = "vendor_field_type_{}".format(field_group)
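            # e.g. the field_group "core" resolves to the module-level helper
            # vendor_field_type_core() defined above.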
missing = 0
print(" - {}:".format(field_group))
for column in variables[field_processor]():
if column not in columns:
print(" - Missing: {}".format(column))
missing += 1
if missing == 0:
print(" - No missing fields")
for index, record in df.iterrows():
vendor_count += 1
print(" > Vendors: {}".format(vendor_count))
    def check_vehicle(self, vehicle):
        logger = vendor_logger()
        vehicle_file = os.path.join(settings.BASE_DIR, 'data/pools/{}.xlsx'.format(vehicle))
wb = pd.ExcelFile(vehicle_file)
sheets = wb.sheet_names
print("\nVehicle [ {} ]".format(vehicle))
for name in sheets:
try:
pool = re.search(r'\(\s*([0-9a-zA-Z]+)\s*\)', name, re.IGNORECASE).group(1)
pool_data = Pool.objects.get(number=pool, vehicle__id__iexact=vehicle)
print("\n > Pool [ {} ]".format(pool))
self.check_pool(vehicle, pool, wb.parse(name))
except AttributeError as e:
pass # Not a pool sheet, skip...
except Pool.DoesNotExist as e:
logger.debug(" > Pool {} not found".format(pool))
raise(e)
except Pool.MultipleObjectsReturned as e:
logger.debug(" > More than one pool matched {}. Integrity error!".format(pool))
raise(e)
def handle(self, *args, **options):
for vehicle in settings.VEHICLES:
self.check_vehicle(vehicle)
|
py | b4119fe871b244bbba6f4d4dd5d15389d6bc4854 | # This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test subject extraction from certificate and SubjectInfo."""
import responses
import d1_gmn.app.middleware.session_cert
import d1_gmn.tests.gmn_test_case
import d1_test.test_files
class TestCert(d1_gmn.tests.gmn_test_case.GMNTestCase):
cert_simple_subject_info_pem = d1_test.test_files.load_cert(
"cert_with_simple_subject_info.pem"
)
@responses.activate
def test_1000(self):
"""Extract primary and equivalent subjects from certificate.
        This does not perform validation.
"""
primary_str, equivalent_set = d1_gmn.app.middleware.session_cert.get_authenticated_subjects(
self.cert_simple_subject_info_pem
)
assert primary_str == "CN=Roger Dahl A1779,O=Google,C=US,DC=cilogon,DC=org"
assert sorted(equivalent_set) == [
"CN=Roger Dahl A1779,O=Google,C=US,DC=cilogon,DC=org",
"authenticatedUser",
"public",
"verifiedUser",
]
|
py | b411a13aab37939c204688c5aa6d047a75f39d93 | import asyncio
import pycommons.logger
from maxwell.client import Client
logger = pycommons.logger.get_instance(__name__)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
client = Client(["localhost:8081", "localhost:8082"], loop=loop)
master = client.get_master()
endpoint = loop.run_until_complete(asyncio.ensure_future(master.resolve_frontend()))
logger.info("endpoint: %s", endpoint)
endpoint = loop.run_until_complete(asyncio.ensure_future(master.resolve_backend("topic_0")))
logger.info("endpoint: %s", endpoint)
loop.run_forever()
|
py | b411a148d43fff797811ab6c5e1a50a3acf349c4 | """Support for ZoneMinder switches."""
import logging
import voluptuous as vol
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.components.zoneminder import DOMAIN as ZONEMINDER_DOMAIN
from homeassistant.const import (CONF_COMMAND_ON, CONF_COMMAND_OFF)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['zoneminder']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COMMAND_ON): cv.string,
vol.Required(CONF_COMMAND_OFF): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the ZoneMinder switch platform."""
from zoneminder.monitor import MonitorState
on_state = MonitorState(config.get(CONF_COMMAND_ON))
off_state = MonitorState(config.get(CONF_COMMAND_OFF))
switches = []
for zm_client in hass.data[ZONEMINDER_DOMAIN].values():
monitors = zm_client.get_monitors()
if not monitors:
_LOGGER.warning("Could not fetch monitors from ZoneMinder")
return
for monitor in monitors:
switches.append(ZMSwitchMonitors(monitor, on_state, off_state))
add_entities(switches)
class ZMSwitchMonitors(SwitchDevice):
"""Representation of a ZoneMinder switch."""
icon = 'mdi:record-rec'
def __init__(self, monitor, on_state, off_state):
"""Initialize the switch."""
self._monitor = monitor
self._on_state = on_state
self._off_state = off_state
self._state = None
@property
def name(self):
"""Return the name of the switch."""
return '{} State'.format(self._monitor.name)
def update(self):
"""Update the switch value."""
self._state = self._monitor.function == self._on_state
@property
def is_on(self):
"""Return True if entity is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the entity on."""
self._monitor.function = self._on_state
def turn_off(self, **kwargs):
"""Turn the entity off."""
self._monitor.function = self._off_state
|
py | b411a168d948dc4fb1cf64d4a0deec815d20017c | import gym
import logging
import argparse
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import tensorflow.keras.layers as kl
import tensorflow.keras.losses as kls
import tensorflow.keras.optimizers as ko
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--batch_size', type=int, default=64)
parser.add_argument('-n', '--num_updates', type=int, default=250)
parser.add_argument('-lr', '--learning_rate', type=float, default=7e-3)
parser.add_argument('-r', '--render_test', action='store_true', default=False)
parser.add_argument('-p', '--plot_results', action='store_true', default=False)
class ProbabilityDistribution(tf.keras.Model):
def call(self, logits, **kwargs):
# Sample a random categorical action from the given logits.
return tf.squeeze(tf.random.categorical(logits, 1), axis=-1)
class Model(tf.keras.Model):
def __init__(self, num_actions):
super().__init__('mlp_policy')
# Note: no tf.get_variable(), just simple Keras API!
self.hidden1 = kl.Dense(128, activation='relu')
self.hidden2 = kl.Dense(128, activation='relu')
self.value = kl.Dense(1, name='value')
# Logits are unnormalized log probabilities.
self.logits = kl.Dense(num_actions, name='policy_logits')
self.dist = ProbabilityDistribution()
def call(self, inputs, **kwargs):
# Inputs is a numpy array, convert to a tensor.
x = tf.convert_to_tensor(inputs)
# Separate hidden layers from the same input tensor.
hidden_logs = self.hidden1(x)
hidden_vals = self.hidden2(x)
return self.logits(hidden_logs), self.value(hidden_vals)
def action_value(self, obs):
# Executes `call()` under the hood.
logits, value = self.predict_on_batch(obs)
action = self.dist.predict_on_batch(logits)
# Another way to sample actions:
# action = tf.random.categorical(logits, 1)
# Will become clearer later why we don't use it.
return np.squeeze(action, axis=-1), np.squeeze(value, axis=-1)
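        # Illustrative call (CartPole shapes assumed): for an observation of shape (4,),
        # model.action_value(obs[None, :]) returns a scalar action and a scalar value.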
class A2CAgent:
def __init__(self, model, lr=7e-3, gamma=0.99, value_c=0.5, entropy_c=1e-4):
# `gamma` is the discount factor; coefficients are used for the loss terms.
self.gamma = gamma
self.value_c = value_c
self.entropy_c = entropy_c
self.model = model
self.model.compile(
optimizer=ko.RMSprop(lr=lr),
# Define separate losses for policy logits and value estimate.
loss=[self._logits_loss, self._value_loss])
def train(self, env, batch_sz=64, updates=250):
# Storage helpers for a single batch of data.
actions = np.empty((batch_sz,), dtype=np.int32)
rewards, dones, values = np.empty((3, batch_sz))
observations = np.empty((batch_sz,) + env.observation_space.shape)
# Training loop: collect samples, send to optimizer, repeat updates times.
ep_rewards = [0.0]
next_obs = env.reset()
for update in range(updates):
for step in range(batch_sz):
observations[step] = next_obs.copy()
actions[step], values[step] = self.model.action_value(next_obs[None, :])
next_obs, rewards[step], dones[step], _ = env.step(actions[step])
ep_rewards[-1] += rewards[step]
if dones[step]:
ep_rewards.append(0.0)
next_obs = env.reset()
logging.info("Episode: %03d, Reward: %03d" % (len(ep_rewards) - 1, ep_rewards[-2]))
_, next_value = self.model.action_value(next_obs[None, :])
returns, advs = self._returns_advantages(rewards, dones, values, next_value)
# A trick to input actions and advantages through same API.
acts_and_advs = np.concatenate([actions[:, None], advs[:, None]], axis=-1)
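            # (acts_and_advs has shape (batch_sz, 2): column 0 holds the actions,
            # column 1 the advantages; _logits_loss splits them apart again.)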
# Performs a full training step on the collected batch.
# Note: no need to mess around with gradients, Keras API handles it.
losses = self.model.train_on_batch(observations, [acts_and_advs, returns])
logging.debug("[%d/%d] Losses: %s" % (update + 1, updates, losses))
return ep_rewards
def test(self, env, render=False):
obs, done, ep_reward = env.reset(), False, 0
while not done:
action, _ = self.model.action_value(obs[None, :])
obs, reward, done, _ = env.step(action)
ep_reward += reward
if render:
env.render()
return ep_reward
def _returns_advantages(self, rewards, dones, values, next_value):
# `next_value` is the bootstrap value estimate of the future state (critic).
returns = np.append(np.zeros_like(rewards), next_value, axis=-1)
# Returns are calculated as discounted sum of future rewards.
for t in reversed(range(rewards.shape[0])):
returns[t] = rewards[t] + self.gamma * returns[t + 1] * (1 - dones[t])
returns = returns[:-1]
# Advantages are equal to returns - baseline (value estimates in our case).
advantages = returns - values
return returns, advantages
def _value_loss(self, returns, value):
# Value loss is typically MSE between value estimates and returns.
return self.value_c * kls.mean_squared_error(returns, value)
def _logits_loss(self, actions_and_advantages, logits):
# A trick to input actions and advantages through the same API.
actions, advantages = tf.split(actions_and_advantages, 2, axis=-1)
# Sparse categorical CE loss obj that supports sample_weight arg on `call()`.
# `from_logits` argument ensures transformation into normalized probabilities.
weighted_sparse_ce = kls.SparseCategoricalCrossentropy(from_logits=True)
# Policy loss is defined by policy gradients, weighted by advantages.
# Note: we only calculate the loss on the actions we've actually taken.
actions = tf.cast(actions, tf.int32)
policy_loss = weighted_sparse_ce(actions, logits, sample_weight=advantages)
# Entropy loss can be calculated as cross-entropy over itself.
probs = tf.nn.softmax(logits)
entropy_loss = kls.categorical_crossentropy(probs, probs)
# We want to minimize policy and maximize entropy losses.
# Here signs are flipped because the optimizer minimizes.
return policy_loss - self.entropy_c * entropy_loss
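# --- Hedged illustration (not part of the original script) -------------------
# A NumPy-only sketch of the discounted-return bootstrap performed in
# A2CAgent._returns_advantages above; the rewards, dones and values are made up.
def _demo_returns_advantages(gamma=0.99):
    import numpy as np
    rewards = np.array([1.0, 1.0, 1.0])
    dones = np.array([0.0, 0.0, 1.0])   # the episode terminates on the last step
    values = np.array([0.5, 0.4, 0.3])  # critic estimates for each visited state
    next_value = 0.2                    # bootstrap value of the state after the batch
    returns = np.append(np.zeros_like(rewards), next_value)
    for t in reversed(range(rewards.shape[0])):
        returns[t] = rewards[t] + gamma * returns[t + 1] * (1 - dones[t])
    returns = returns[:-1]
    advantages = returns - values       # advantage = return minus the value baseline
    return returns, advantages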
if __name__ == '__main__':
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
env = gym.make('CartPole-v0')
model = Model(num_actions=env.action_space.n)
agent = A2CAgent(model, args.learning_rate)
rewards_history = agent.train(env, args.batch_size, args.num_updates)
print("Finished training. Testing...")
print("Total Episode Reward: %d out of 200" % agent.test(env, args.render_test))
if args.plot_results:
plt.style.use('seaborn')
plt.plot(np.arange(0, len(rewards_history), 10), rewards_history[::10])
plt.xlabel('Episode')
plt.ylabel('Total Reward')
plt.show()
|
py | b411a3073605de4eeec5cd3ac05801e46487c3e4 | # -*- coding: utf-8 -*-
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import json
import re
import time
import datetime
import os
import sys
import message
class HitCarder(object):
"""Hit carder class
Attributes:
username: (str) 浙大统一认证平台用户名(一般为学号)
password: (str) 浙大统一认证平台密码
login_url: (str) 登录url
base_url: (str) 打卡首页url
save_url: (str) 提交打卡url
sess: (requests.Session) 统一的session
"""
def __init__(self, username, password):
self.username = username
self.password = password
self.login_url = "https://zjuam.zju.edu.cn/cas/login?service=https%3A%2F%2Fhealthreport.zju.edu.cn%2Fa_zju%2Fapi%2Fsso%2Findex%3Fredirect%3Dhttps%253A%252F%252Fhealthreport.zju.edu.cn%252Fncov%252Fwap%252Fdefault%252Findex"
self.base_url = "https://healthreport.zju.edu.cn/ncov/wap/default/index"
self.save_url = "https://healthreport.zju.edu.cn/ncov/wap/default/save"
self.sess = requests.Session()
self.sess.keep_alive = False
retry = Retry(connect=3, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retry)
self.sess.mount('http://', adapter)
self.sess.mount('https://', adapter)
# ua = UserAgent()
# self.sess.headers['User-Agent'] = ua.chrome
self.sess.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'}
def login(self):
"""Login to ZJU platform"""
time.sleep(1)
res = self.sess.get(self.login_url)
execution = re.search(
'name="execution" value="(.*?)"', res.text).group(1)
time.sleep(1)
res = self.sess.get(
url='https://zjuam.zju.edu.cn/cas/v2/getPubKey').json()
n, e = res['modulus'], res['exponent']
encrypt_password = self._rsa_encrypt(self.password, e, n)
data = {
'username': self.username,
'password': encrypt_password,
'execution': execution,
'_eventId': 'submit'
}
time.sleep(1)
res = self.sess.post(url=self.login_url, data=data)
        # Check whether the login succeeded: on failure the response still
        # contains the SSO page title '统一身份认证' ("unified identity authentication").
        if '统一身份认证' in res.content.decode():
            # Message: "Login failed, please verify the account and password and retry."
            raise LoginError('登录失败,请核实账号密码重新登录')
return self.sess
def post(self):
"""Post the hit card info."""
time.sleep(1)
res = self.sess.post(self.save_url, data=self.info)
return json.loads(res.text)
def get_date(self):
"""Get current date."""
today = datetime.datetime.utcnow() + datetime.timedelta(hours=+8)
return "%4d%02d%02d" % (today.year, today.month, today.day)
def check_form(self):
"""Get hitcard form, compare with old form """
res = self.sess.get(self.base_url)
html = res.content.decode()
try:
new_form = re.findall(r'<ul>[\s\S]*?</ul>', html)[0]
except IndexError as _:
raise RegexMatchError('Relative info not found in html with regex')
with open("form.txt", "r", encoding="utf-8") as f:
if new_form == f.read():
return True
else:
f_n = open("form_new.txt", "w", encoding="utf-8")
f_n.write(new_form)
f_n.flush()
# with open("form.txt", "w", encoding="utf-8") as f:
# f.write(new_form)
return False
def get_info(self, html=None):
"""Get hit card info, which is the old info with updated new time."""
if not html:
time.sleep(1)
res = self.sess.get(self.base_url)
html = res.content.decode()
try:
old_infos = re.findall(r'oldInfo: ({[^\n]+})', html)
if len(old_infos) != 0:
old_info = json.loads(old_infos[0])
else:
raise RegexMatchError("未发现缓存信息,请先至少手动成功打卡一次再运行脚本")
def_info = json.loads(re.findall(r'def = ({[^\n]+})', html)[0])
magic_code = re.findall(
r'"([0-9a-z]{32})": "([0-9]{10})","([0-9a-z]{32})":"([0-9a-z]{32})"', html)[0]
magic_code_group = {
magic_code[0]: magic_code[1],
magic_code[2]: magic_code[3]
}
except IndexError as err:
raise RegexMatchError(
'Relative info not found in html with regex: ' + str(err))
except json.decoder.JSONDecodeError as err:
raise DecodeError('JSON decode error: ' + str(err))
new_info = def_info.copy()
new_info.update(magic_code_group)
# form change
new_info['szgjcs'] = ""
new_info['zgfx14rfhsj'] = ""
        new_info['geo_api_info'] = old_info['geo_api_info']  # geolocation
        new_info['address'] = old_info['address']
        new_info['area'] = old_info['area']
        new_info['city'] = old_info['city']
        new_info['ismoved'] = 0
        new_info['sfzx'] = old_info['sfzx']  # whether on campus
        new_info['sfymqjczrj'] = old_info['sfymqjczrj']  # whether recently entered the country
        new_info['sfqrxxss'] = 1  # confirm the information is truthful
self.info = new_info
# print(json.dumps(self.info))
return new_info
def _rsa_encrypt(self, password_str, e_str, M_str):
password_bytes = bytes(password_str, 'ascii')
password_int = int.from_bytes(password_bytes, 'big')
e_int = int(e_str, 16)
M_int = int(M_str, 16)
result_int = pow(password_int, e_int, M_int)
return hex(result_int)[2:].rjust(128, '0')
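# --- Hedged illustration (not part of the original script) -------------------
# _rsa_encrypt above is textbook RSA: the password is read as a big integer and
# raised to the server-supplied exponent modulo the server-supplied modulus.
# A toy sketch with a tiny made-up key (61 * 53 = 3233); real keys are far
# larger, and the message integer must stay below the modulus.
def _demo_textbook_rsa():
    n_hex, e_hex = hex(3233)[2:], hex(17)[2:]  # toy modulus and public exponent
    message = 42                               # message as an integer, < modulus
    cipher = pow(message, int(e_hex, 16), int(n_hex, 16))
    return hex(cipher)[2:]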
# Exceptions
class LoginError(Exception):
"""Login Exception"""
pass
class RegexMatchError(Exception):
"""Regex Matching Exception"""
pass
class DecodeError(Exception):
"""JSON Decode Exception"""
pass
def main(username, password):
"""Hit card process
Arguments:
username: (str) 浙大统一认证平台用户名(一般为学号)
password: (str) 浙大统一认证平台密码
"""
hit_carder = HitCarder(username, password)
print("[Time] %s" % datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S'))
print(datetime.datetime.utcnow() + datetime.timedelta(hours=+8))
print("打卡任务启动")
try:
hit_carder.login()
print('已登录到浙大统一身份认证平台')
except Exception as err:
return 1, '打卡登录失败:' + str(err)
try:
ret = hit_carder.check_form()
if not ret:
return 2, '打卡信息已改变,请手动打卡'
except Exception as err:
return 1, '获取信息失败,请手动打卡: ' + str(err)
try:
hit_carder.get_info()
except Exception as err:
return 1, '获取信息失败,请手动打卡: ' + str(err)
try:
res = hit_carder.post()
print(res)
        if str(res['e']) == '0':
            return 0, '打卡成功'  # "check-in succeeded"
        elif str(res['m']) == '今天已经填报了':  # server message: "already submitted today"
            return 0, '今天已经打卡'  # "already checked in today"
        else:
            return 1, '打卡失败'  # "check-in failed"
    except Exception:
        return 1, '打卡数据提交失败'  # "failed to submit check-in data"
if __name__ == "__main__":
username = os.environ['USERNAME']
password = os.environ['PASSWORD']
ret, msg = main(username, password)
print(ret, msg)
if ret == 1:
time.sleep(5)
ret, msg = main(username, password)
print(ret, msg)
dingtalk_token = os.environ.get('DINGTALK_TOKEN')
if dingtalk_token:
ret = message.dingtalk(msg, dingtalk_token)
print('send_dingtalk_message', ret)
serverchan_key = os.environ.get('SERVERCHAN_KEY')
if serverchan_key:
ret = message.serverchan(msg, '', serverchan_key)
print('send_serverChan_message', ret)
pushplus_token = os.environ.get('PUSHPLUS_TOKEN')
if pushplus_token:
print('pushplus服务已下线,建议使用钉钉')
exit(-1)
|
py | b411a3a8749da4c20d0ff70ebe547b3dcfc3c4b1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/7/2 15:36
# @Author : Ropon
# @File : ftp.py
import os
from api.utils.tools import execShell
class PanelFtp(object):
def __init__(self):
self._ftp_bin = "/usr/local/ftp/bin"
    def check(self):
        if not os.path.exists(self._ftp_bin):
            return "还没安装ftp服务,请先安装"  # "FTP service is not installed yet, please install it first"
        if int(execShell("ps aux|grep pure-ftpd|grep -v grep|wc -l")) == 0:
            return "还没启动ftp服务,请先启动"  # "FTP service is not running yet, please start it first"
def checkpathisexists(self, path):
if not os.path.exists(path):
execShell(f"mkdir -p {path}")
execShell(f"chown -R www.www {path}")
execShell(f"chmod -R 744 {path}")
def reloadftp(self):
execShell(f"{self._ftp_bin}/pure-pw mkdb /usr/local/ftp/etc/pureftpd.pdb")
def create_ftp(self, ftp_user=None, ftp_passwd=None, ftp_path=None):
self.checkpathisexists(ftp_path)
execShell(
f"{self._ftp_bin}/pure-pw useradd {ftp_user} -u www -d {ftp_path} -m <<EOF \n{ftp_passwd}\n{ftp_passwd}\nEOF")
self.reloadftp()
def delete_ftp(self, ftp_user=None, ftp_path=None):
execShell(f"{self._ftp_bin}/pure-pw userdel {ftp_user}")
if os.path.exists(ftp_path):
# pass
            # delete the FTP root directory
execShell(f"/usr/bin/rm -rf {ftp_path}")
self.reloadftp()
def update_ftp(self, status=None, ftp_user=None, ftp_passwd=None):
        # update the enabled/disabled status
if status is not None:
if status == 0:
execShell(f"{self._ftp_bin}/pure-pw usermod {ftp_user} -r 1")
else:
execShell(f"{self._ftp_bin}/pure-pw usermod {ftp_user} -r ''")
        # update the password
if ftp_passwd is not None:
execShell(f"{self._ftp_bin}/pure-pw passwd {ftp_user} <<EOF \n{ftp_passwd}\n{ftp_passwd}\nEOF")
self.reloadftp()
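# --- Hedged usage sketch (not part of the original module) -------------------
# How panel code might drive PanelFtp. The user, password and path below are
# made-up values, and these calls shell out to pure-ftpd's pure-pw tool via
# execShell, so they only make sense on a host laid out like the panel expects.
if __name__ == "__main__":
    ftp = PanelFtp()
    problem = ftp.check()  # returns an error string when ftp is missing or stopped
    if problem is None:
        ftp.create_ftp(ftp_user="demo", ftp_passwd="s3cret",
                       ftp_path="/usr/local/ftp/data/demo")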
|
py | b411a481959badcbc48c9ca712f3ca0550f0af29 | from typing import List
class Solution:
def possibleBipartition(self, N: int, dislikes: List[List[int]]) -> bool:
from collections import deque, defaultdict
graph = defaultdict(list)
for a, b in dislikes:
graph[a-1].append(b-1)
graph[b-1].append(a-1)
queue = deque()
color = {}
for i in range(N):
if not i in color:
color[i] = 0
queue.append(i)
while queue:
j = queue.popleft()
for nb in graph[j]:
if nb in color:
if color[nb] == color[j]:
return False
else:
color[nb] = 1 - color[j]
queue.append(nb)
return True |
py | b411a496b7c0636b902db7638853733c678ade99 | import random
import numpy as np
from datasets import shape
import model as modellib
from visualize import display_dataset, display_instances
from shape import InferenceConfig
from utils import log
if __name__ == '__main__':
tr_ds = shape.load_data(count=500)
val_ds = shape.load_data(count=50)
# Visualize dataset
# display_dataset(tr_ds, n=4)
# Test on a random image
image_id = random.choice(val_ds.image_ids)
inference_config = InferenceConfig()
original_image, image_meta, gt_class_id, gt_bbox, gt_mask = \
modellib.load_image_gt(val_ds, inference_config,
image_id, use_mini_mask=False)
log("original_image", original_image)
log("image_meta", image_meta)
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)
display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
tr_ds.class_names, figsize=(8, 8)) |
py | b411a6ec3086e91ca582246df71c3dd3de10826d | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytz
import json
import requests
from datetime import datetime
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from account.models import Account, Transaction, Game
from account.forms import PaymentForm
def json_bad_request(error):
out = {'success': 'false'}
out.update({'error': error})
return HttpResponseBadRequest(json.dumps(out))
def get_other_accounts(usr):
return [
{
'id': acct.id,
'name': acct.name,
'email': acct.email
} for acct in Account.objects.exclude(user=usr)]
def get_profile(acct):
return {
'id': acct.id,
'name': acct.name,
'phone': acct.phone,
'email': acct.email,
'balance': '{}'.format(acct.balance),
'regdate': acct.registered_on.strftime('%d %b %Y'),
}
@login_required
def profile(request):
try:
        # Note: get_object_or_404 raises Http404, which would bypass the
        # Account.DoesNotExist handler below, so fetch directly instead.
        acct = Account.objects.get(user=request.user)
except Account.DoesNotExist:
return json_bad_request('Invalid profile')
else:
profile = get_profile(acct)
accounts = get_other_accounts(request.user)
transactions = get_transactions(acct)
games = get_games()
return JsonResponse(
{
'profile': profile,
'accounts': accounts,
'transactions': transactions,
'games': games
})
def get_transactions(acct):
return [
{
'id': trans.id,
'kind': trans.get_kind_display(),
'amount': '{}'.format(trans.amount),
'balance': '{}'.format(trans.balance),
'comment': trans.comment,
'processed': trans.processed,
'when': trans.trans_date.strftime('%d %b %Y %H:%M %p')
} for trans in Transaction.objects.filter(account=acct).order_by('-id')
]
@csrf_exempt
@login_required
def pay(request):
#import pdb;pdb.set_trace()
try:
        # As in profile(): fetch directly so Account.DoesNotExist is catchable.
        sender = Account.objects.get(user=request.user)
except Account.DoesNotExist:
return json_bad_request('Invalid transaction')
else:
if request.method == 'POST':
form = PaymentForm(sender.balance, request.POST)
if form.is_valid():
amount = form.cleaned_data['amount']
recipient = form.cleaned_data['recipient']
sender_balance = sender.balance - amount
recipient_balance = recipient.balance + amount
Transaction.objects.create(
account=sender,
kind=Transaction.DEBIT,
amount=amount,
balance=sender_balance,
comment='Sent to {}'.format(recipient.name),
processed=True)
Transaction.objects.create(
account=recipient,
kind=Transaction.CREDIT,
amount=amount,
balance=recipient_balance,
comment='Received from {}'.format(sender.name),
processed=True)
sender.balance = sender_balance
sender.save()
recipient.balance = recipient_balance
recipient.save()
transactions = get_transactions(sender)
profile = get_profile(sender)
return JsonResponse(
{'profile': profile, 'transactions': transactions})
else: # Invalid form
pass
return json_bad_request('Invalid transaction body')
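# --- Hedged illustration (not part of the original views) --------------------
# pay() above records one transfer as two Transaction rows, a DEBIT for the
# sender and a CREDIT for the recipient, whose stored balances must stay
# consistent. A plain-Python sketch of that bookkeeping with made-up amounts:
def _demo_double_entry():
    from decimal import Decimal
    sender_balance, recipient_balance = Decimal('100.00'), Decimal('20.00')
    amount = Decimal('15.50')
    sender_balance -= amount      # DEBIT side of the transfer
    recipient_balance += amount   # CREDIT side of the transfer
    assert sender_balance + recipient_balance == Decimal('120.00')  # total preserved
    return sender_balance, recipient_balance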
@csrf_exempt
def save_game(request):
white = request.POST.get('white')
black = request.POST.get('black')
game = Game.objects.create(white=white, black=black)
games = get_games()
return JsonResponse({'game': unicode(game), 'games': games})
def get_games():
return [
{
'id': g.id,
'white': g.white,
'black': g.black,
'white_score': g.white_score,
'black_score': g.black_score,
'when': g.game_date.strftime('%d %b %Y %H:%M')
}
for g in Game.objects.all()]
def get_result(request, id):
#import pdb;pdb.set_trace()
game = Game.objects.get(pk=id)
url = 'https://lichess.org/api/games/vs/{}/{}'.format(
game.white, game.black)
res = requests.get(url)
data = json.loads(res.content)['currentPageResults']
#fltd = [i for i in data if i['players']['white']['userId'] == game.white
# and pytz.timezone('UTC').localize(
# datetime.fromtimestamp(i['createdAt']/1000),
# is_dst=None) >= game.game_date]
fltd = [i for i in data if i['players']['white']['userId'] == game.white]
if fltd:
last_game = fltd[0]
if last_game['status'] == 'draw':
result = {'white': 0.5, 'black': 0.5}
elif last_game['winner'] == 'white':
result = {'white': 1, 'black': 0}
else:
result = {'white': 0, 'black': 1}
else:
result = {'white': None, 'black': None}
return JsonResponse(result)
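# --- Hedged illustration (not part of the original views) --------------------
# get_result() above reduces a lichess game record to a score dict using its
# 'status' and 'winner' fields. The same mapping as a small standalone helper
# (the field names mirror the code above; the sample input is made up):
def _demo_score_from_game(last_game):
    if last_game['status'] == 'draw':
        return {'white': 0.5, 'black': 0.5}
    if last_game['winner'] == 'white':
        return {'white': 1, 'black': 0}
    return {'white': 0, 'black': 1}
# _demo_score_from_game({'status': 'mate', 'winner': 'black'}) -> {'white': 0, 'black': 1}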
|
py | b411a7150ff4f5a88384cc88380b68d2e8fcef39 | #
# Copyright Contributors to the OpenTimelineIO project
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
"""Algorithms for track objects."""
import copy
from .. import (
schema,
exceptions,
opentime,
)
def track_trimmed_to_range(in_track, trim_range):
"""Returns a new track that is a copy of the in_track, but with items
outside the trim_range removed and items on the ends trimmed to the
trim_range. Note that the track is never expanded, only shortened.
Please note that you could do nearly the same thing non-destructively by
just setting the Track's source_range but sometimes you want to really cut
away the stuff outside and that's what this function is meant for."""
new_track = copy.deepcopy(in_track)
track_map = new_track.range_of_all_children()
# iterate backwards so we can delete items
for c, child in reversed(list(enumerate(new_track))):
child_range = track_map[child]
if not trim_range.intersects(child_range):
# completely outside the trim range, so we discard it
del new_track[c]
elif trim_range.contains(child_range):
# completely contained, keep the whole thing
pass
else:
if isinstance(child, schema.Transition):
raise exceptions.CannotTrimTransitionsError(
"Cannot trim in the middle of a Transition."
)
# we need to clip the end(s)
child_source_range = child.trimmed_range()
# should we trim the start?
if trim_range.start_time > child_range.start_time:
trim_amount = trim_range.start_time - child_range.start_time
child_source_range = opentime.TimeRange(
start_time=child_source_range.start_time + trim_amount,
duration=child_source_range.duration - trim_amount
)
# should we trim the end?
trim_end = trim_range.end_time_exclusive()
child_end = child_range.end_time_exclusive()
if trim_end < child_end:
trim_amount = child_end - trim_end
child_source_range = opentime.TimeRange(
start_time=child_source_range.start_time,
duration=child_source_range.duration - trim_amount
)
# set the new child's trims
child.source_range = child_source_range
return new_track
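# --- Hedged usage sketch (not part of the original module) -------------------
# track_trimmed_to_range is exposed through opentimelineio.algorithms. A
# minimal example, assuming the opentimelineio package is installed; the clip
# name and the 24 fps timing below are made up for illustration.
def _demo_track_trimmed_to_range():
    import opentimelineio as otio
    clip = otio.schema.Clip(
        name="clipA",
        source_range=otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(0, 24),
            duration=otio.opentime.RationalTime(48, 24),  # 2 seconds at 24 fps
        ),
    )
    track = otio.schema.Track(name="demo", children=[clip])
    trim = otio.opentime.TimeRange(
        start_time=otio.opentime.RationalTime(12, 24),
        duration=otio.opentime.RationalTime(24, 24),
    )
    return otio.algorithms.track_trimmed_to_range(track, trim)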
def track_with_expanded_transitions(in_track):
"""Expands transitions such that neighboring clips are trimmed into
regions of overlap.
For example, if your track is:
Clip1, T, Clip2
will return:
Clip1', (Clip1_t, T, Clip2_t), Clip2'
Where Clip1' is the part of Clip1 not in the transition, Clip1_t is the
part inside the transition and so on. Please note that the items used in
a transition are encapsulated in `tuple`s
"""
result_track = []
seq_iter = iter(in_track)
prev_thing = None
thing = next(seq_iter, None)
next_thing = next(seq_iter, None)
while thing is not None:
if isinstance(thing, schema.Transition):
result_track.append(_expand_transition(thing, in_track))
else:
# not a transition, but might be trimmed by one before or after
# in the track
pre_transition = None
next_transition = None
if isinstance(prev_thing, schema.Transition):
pre_transition = prev_thing
if isinstance(next_thing, schema.Transition):
next_transition = next_thing
result_track.append(
_trim_from_transitions(
thing,
pre=pre_transition,
post=next_transition
)
)
# loop
prev_thing = thing
thing = next_thing
next_thing = next(seq_iter, None)
return result_track
def _expand_transition(target_transition, from_track):
""" Expand transitions into the portions of pre-and-post clips that
overlap with the transition.
"""
result = from_track.neighbors_of(
target_transition,
schema.NeighborGapPolicy.around_transitions
)
trx_duration = target_transition.in_offset + target_transition.out_offset
# make copies of the before and after, and modify their in/out points
pre = copy.deepcopy(result[0])
if isinstance(pre, schema.Transition):
raise exceptions.TransitionFollowingATransitionError(
"cannot put two transitions next to each other in a track: "
"{}, {}".format(
pre,
target_transition
)
)
if target_transition.in_offset is None:
raise RuntimeError(
"in_offset is None on: {}".format(target_transition)
)
if target_transition.out_offset is None:
raise RuntimeError(
"out_offset is None on: {}".format(target_transition)
)
pre.name = (pre.name or "") + "_transition_pre"
# ensure that pre.source_range is set, because it will get manipulated
tr = pre.trimmed_range()
pre.source_range = opentime.TimeRange(
start_time=(
tr.end_time_exclusive() - target_transition.in_offset
),
duration=trx_duration.rescaled_to(
tr.start_time
)
)
post = copy.deepcopy(result[1])
if isinstance(post, schema.Transition):
raise exceptions.TransitionFollowingATransitionError(
"cannot put two transitions next to each other in a track: "
"{}, {}".format(
target_transition,
post
)
)
post.name = (post.name or "") + "_transition_post"
# ensure that post.source_range is set, because it will get manipulated
tr = post.trimmed_range()
post.source_range = opentime.TimeRange(
start_time=(
tr.start_time - target_transition.in_offset
).rescaled_to(tr.start_time),
duration=trx_duration.rescaled_to(tr.start_time)
)
return pre, target_transition, post
def _trim_from_transitions(thing, pre=None, post=None):
""" Trim clips next to transitions. """
result = copy.deepcopy(thing)
# We might not have a source_range yet,
# We can trim to the computed trimmed_range to
# ensure we have something.
new_range = result.trimmed_range()
start_time = new_range.start_time
duration = new_range.duration
if pre:
start_time += pre.out_offset
duration -= pre.out_offset
if post:
duration -= post.in_offset
result.source_range = opentime.TimeRange(start_time, duration)
return result
|
py | b411a7cd8f8f8f8e22e48405d64961466b0a66e3 | from import_export import resources
from .models import DPP_Intervention,Haor
class DPP_Intervention_Resouces(resources.ModelResource):
class Meta:
model=DPP_Intervention |
py | b411a87f0e9bf3c82c4886395d82a00eae504c4a | import os
import re
import random
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
import pandas as pd
from datetime import date, timedelta
import warnings
warnings.filterwarnings('ignore')
from sklearn.ensemble import RandomForestClassifier
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
import streamlit as st
pairs = ['AUDCAD', 'AUDCHF', 'AUDJPY', 'AUDNZD', 'AUDUSD', 'CAD', 'CADCHF',
'CADJPY', 'CHF', 'CHFJPY', 'EURAUD', 'EURCAD', 'EURCHF', 'EURGBP',
'EURJPY', 'EURNZD', 'EURUSD', 'GBPAUD', 'GBPCAD', 'GBPCHF', 'GBPJPY',
'GBPNZD', 'GBPUSD', 'JPY', 'NZDCAD', 'NZDCHF', 'NZDJPY', 'NZDUSD']
def get_data(pair):
''' Retrieves (from a github repo) and prepares the data.
'''
url = f'https://raw.githubusercontent.com/African-Quant/WQU_MScFE_Capstone_Grp9/master/Datasets/{pair}%3DX.csv'
raw = pd.read_csv(url)
raw = pd.DataFrame(raw).drop(['Adj Close', 'Volume'], axis=1)
raw.iloc[:,0] = pd.to_datetime(raw.iloc[:,0])
raw.set_index('Date', inplace=True)
return raw
# ATR
def eATR(df1,n=14):
"""This calculates the exponential Average True Range of of a dataframe of the open,
high, low, and close data of an instrument"""
df = df1[['Open', 'High', 'Low', 'Close']].copy()
# True Range
df['TR'] = 0
for i in range(len(df)):
try:
df.iloc[i, 4] = max(df.iat[i,1] - df.iat[i,2],
abs(df.iat[i,1] - df.iat[i-1,3]),
abs(df.iat[i,2] - df.iat[i-1,3]))
except ValueError:
pass
# eATR
df['eATR'] = df['TR'].ewm(span=n, adjust=False).mean()
return df['eATR']
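# --- Hedged illustration (not part of the original script) -------------------
# eATR above smooths the True Range with pandas' exponential moving average.
# A tiny sketch of the same ewm() call pattern on made-up numbers:
def _demo_ewm_smoothing(n=14):
    import pandas as pd
    tr = pd.Series([1.0, 2.0, 1.5, 3.0, 2.5])   # toy True Range values
    return tr.ewm(span=n, adjust=False).mean()  # same call as in eATR()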
def ssl(df1):
"""This function adds the ssl indicator as features to a dataframe
"""
df = df1.copy()
df['smaHigh'] = df['High'].rolling(window=10).mean()
df['smaLow'] = df['Low'].rolling(window=10).mean()
df['hlv'] = 0
df['hlv'] = np.where(df['Close'] > df['smaHigh'],1,np.where(df['Close'] < df['smaLow'],-1,df['hlv'].shift(1)))
df['sslDown'] = np.where(df['hlv'] < 0, df['smaHigh'], df['smaLow'])
df['sslUp'] = np.where(df['hlv'] < 0, df['smaLow'], df['smaHigh'])
df['sslPosition'] = np.where(df['Close'] > df['sslUp'], 1,
np.where(df['Close'] < df['sslDown'], -1, 0))
return df[['sslDown', 'sslUp', 'sslPosition']]
# Waddah Attar
def WAE(df1):
"""This function creates adds the indicator Waddah Attar features to a dataframe
"""
df = df1.copy()
# EMA
long_ema = df.loc[:,'Close'].ewm(span=40, adjust=False).mean()
short_ema = df.loc[:,'Close'].ewm(span=20, adjust=False).mean()
# MACD
MACD = short_ema - long_ema
# bBands
sma20 = df.loc[:,'Close'].rolling(window=20).mean() # 20 SMA
stddev = df.loc[:,'Close'].rolling(window=20).std() # 20 STDdev
lower_band = sma20 - (2 * stddev)
upper_band = sma20 + (2 * stddev)
#Waddah Attar
t1 = (MACD - MACD.shift(1))* 150
#t2 = MACD.shift(2) - MACD.shift(3)
df['e1'] = upper_band - lower_band
df['e2'] = -1 *df['e1']
#e2 = upper_band.shift(1) - lower_band.shift(1)
df['trendUp'] = np.where(t1 > 0, t1, 0)
df['trendDown'] = np.where(t1 < 0, t1, 0)
df['waePosition'] = np.where(df['trendUp'] > 0, 1,
np.where(df['trendDown'] < 0, -1, 0))
return df[['e1','e2','trendUp', 'trendDown', 'waePosition']]
def lag_feat(data1):
"""This function adds lag returns as features to a dataframe
"""
data = data1.copy()
lags = 8
cols = []
for lag in range(1, lags + 1):
col = f'lag_{lag}'
data[col] = data['ret'].shift(lag)
cols.append(col)
return data[cols]
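# --- Hedged illustration (not part of the original script) -------------------
# lag_feat above builds features by shifting the return series. A minimal
# sketch of the same shift() pattern on a made-up series:
def _demo_lag_features(lags=3):
    import pandas as pd
    data = pd.DataFrame({'ret': [0.01, -0.02, 0.005, 0.03, -0.01]})
    for lag in range(1, lags + 1):
        data[f'lag_{lag}'] = data['ret'].shift(lag)  # value observed `lag` bars earlier
    return data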
def datepart_feat(df0, colname = 'Date'):
"""This function adds some common pandas date parts like 'year',
'month' etc as features to a dataframe
"""
df = df0.copy()
df.reset_index(inplace=True)
df1 = df.loc[:,colname]
nu_feats = ['Day', 'Dayofweek', 'Dayofyear']
targ_pre = re.sub('[Dd]ate$', '', colname)
for n in nu_feats:
df[targ_pre+n] = getattr(df1.dt,n.lower())
df[targ_pre+'week'] = df1.dt.isocalendar().week
df['week'] = np.int64(df['week'])
df[targ_pre+'Elapsed'] = df1.astype(np.int64) // 10**9
nu_feats.extend(['week', 'Elapsed'])
df.set_index(colname, inplace=True)
return df[nu_feats]
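# --- Hedged illustration (not part of the original script) -------------------
# datepart_feat above derives calendar features through the pandas .dt
# accessor. A small sketch with made-up dates (requires pandas >= 1.1 for
# Series.dt.isocalendar()):
def _demo_datepart():
    import pandas as pd
    dates = pd.Series(pd.to_datetime(['2021-01-04', '2021-01-05']))
    return pd.DataFrame({
        'Day': dates.dt.day,
        'Dayofweek': dates.dt.dayofweek,
        'Dayofyear': dates.dt.dayofyear,
        'week': dates.dt.isocalendar().week.astype('int64'),
    })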
def gen_feat(pair):
df0 = get_data(pair)
df0['ret'] = df0['Close'].pct_change()
df0['dir'] = np.sign(df0['ret'])
eATR_ = eATR(df0).shift(1)
wae = WAE(df0).shift(1)
ssl1 = ssl(df0).shift(1)
datepart = datepart_feat(df0)
lags = lag_feat(df0)
return pd.concat([df0, eATR_, wae, ssl1, datepart, lags], axis=1).dropna()
# random forest
def rfc(xs, y, n_estimators=40, max_samples=100,
max_features=0.5, min_samples_leaf=5, **kwargs):
return RandomForestClassifier(n_jobs=-1, n_estimators=n_estimators,
max_samples=max_samples, max_features=max_features,
min_samples_leaf=min_samples_leaf, oob_score=True).fit(xs, y)
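# --- Hedged usage sketch (not part of the original script) -------------------
# How rfc() above can be exercised on synthetic data; make_classification and
# the sizes below are arbitrary choices for illustration only.
def _demo_rfc():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=300, n_features=8, random_state=0)
    model = rfc(X, y, n_estimators=20, max_samples=100)
    return model.oob_score_  # out-of-bag accuracy estimate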
def rfc_deploy():
"""This function trains a Random Forest classifier and outputs the
out-of-sample performance from the validation and test sets
"""
df = pd.DataFrame()
for pair in pairs:
# retrieving the data and preparing the features
dataset = gen_feat(pair)
dataset.drop(['Open', 'High', 'Low', 'Close'], axis=1, inplace=True)
# selecting the features to train on
cols = list(dataset.columns)
feats = cols[2:]
#splitting into training, validation and test sets
df_train = dataset.iloc[:-100,:]
train = df_train.copy()
df_test = dataset.iloc[-100:,:]
test = df_test.copy()
train_f = train.iloc[:-100,:]
valid = train.iloc[-100:,:]
#training the algorithm
m = rfc(train_f[feats], train_f['dir'])
# test sets
test_pred = m.predict(test[feats])
test_proba = m.predict_proba(test[feats])
df1 = pd.DataFrame(test_pred,columns=['prediction'], index=test.index)
proba_short = []
proba_long = []
for x in range(len(test_proba)):
proba_short.append(test_proba[x][0])
proba_long.append(test_proba[x][-1])
proba = {'proba_short': proba_short,
'proba_long': proba_long}
df2 = pd.DataFrame(proba, index=test.index)
df1['probability'] = np.where(df1['prediction'] == 1, df2['proba_long'],
np.where(df1['prediction'] == -1, df2['proba_short'], 0))
df1['signal'] = np.where((df1['probability'] >= .7) & (df1['prediction'] == 1), 'Go Long',
np.where((df1['probability'] >= 0.7) & (df1['prediction'] == -1), 'Go Short', 'Stand Aside'))
df1.reset_index(inplace=True)
df1['pair'] = pair
df1.set_index('pair', inplace=True)
entry_sig = df1[['probability', 'signal']].iloc[-1:]
# Merge
df = pd.concat([df, entry_sig], axis=0)
#output
return df
# Light GBM
def lgb(xs, y, learning_rate=0.15, boosting_type='gbdt',
objective='binary', n_estimators=50,
metric=['auc', 'binary_logloss'],
num_leaves=100, max_depth= 1,
**kwargs):
    # Note: the keyword arguments above are not forwarded, so the classifier
    # is fit with LightGBM's defaults.
    return LGBMClassifier().fit(xs, y)
def lgb_deploy():
"""This function trains a Light Gradient Boosting Method and outputs the
out-of-sample performance from the validation and test sets
"""
df = pd.DataFrame()
for pair in pairs:
# retrieving the data and preparing the features
dataset = gen_feat(pair)
dataset.drop(['Open', 'High', 'Low', 'Close'], axis=1, inplace=True)
# selecting the features to train on
cols = list(dataset.columns)
feats = cols[2:]
#splitting into training, validation and test sets
df_train = dataset.iloc[:-1000,:]
train = df_train.copy()
df_test = dataset.iloc[-1000:,:]
test = df_test.copy()
train_f = train.iloc[:-1000,:]
valid = train.iloc[-1000:,:]
#training the algorithm
m = lgb(train_f[feats], train_f['dir']);
# test sets
test_pred = m.predict(test[feats])
test_proba = m.predict_proba(test[feats])
df1 = pd.DataFrame(test_pred,columns=['prediction'], index=test.index)
proba_short = []
proba_long = []
for x in range(len(test_proba)):
proba_short.append(test_proba[x][0])
proba_long.append(test_proba[x][-1])
proba = {'proba_short': proba_short,
'proba_long': proba_long}
df2 = pd.DataFrame(proba, index=test.index)
df1['probability'] = np.where(df1['prediction'] == 1, df2['proba_long'],
np.where(df1['prediction'] == -1, df2['proba_short'], 0))
df1['signal'] = np.where((df1['probability'] >= .7) & (df1['prediction'] == 1), 'Go Long',
np.where((df1['probability'] >= 0.7) & (df1['prediction'] == -1), 'Go Short', 'Stand Aside'))
df1.reset_index(inplace=True)
df1['pair'] = pair
df1.set_index('pair', inplace=True)
entry_sig = df1[['probability', 'signal']].iloc[-1:]
# Merge
df = pd.concat([df, entry_sig], axis=0)
#output
return df
# eXtreme Gradient Boosting
def xgb(xs, y):
return XGBClassifier().fit(xs, y)
def xgb_deploy():
"""This function trains a eXtreme Gradient Boosting Method and outputs the
out-of-sample performance from the validation and test sets
"""
df = pd.DataFrame()
for pair in pairs:
# retrieving the data and preparing the features
dataset = gen_feat(pair)
dataset.drop(['Open', 'High', 'Low', 'Close'], axis=1, inplace=True)
# selecting the features to train on
cols = list(dataset.columns)
feats = cols[2:]
#splitting into training, validation and test sets
df_train = dataset.iloc[:-1000,:]
train = df_train.copy()
df_test = dataset.iloc[-1000:,:]
test = df_test.copy()
train_f = train.iloc[:-1000,:]
valid = train.iloc[-1000:,:]
#training the algorithm
m = xgb(train_f[feats], train_f['dir']);
# test sets
test_pred = m.predict(test[feats])
test_proba = m.predict_proba(test[feats])
df1 = pd.DataFrame(test_pred,columns=['prediction'], index=test.index)
proba_short = []
proba_long = []
for x in range(len(test_proba)):
proba_short.append(test_proba[x][0])
proba_long.append(test_proba[x][-1])
proba = {'proba_short': proba_short,
'proba_long': proba_long}
df2 = pd.DataFrame(proba, index=test.index)
df1['probability'] = np.where(df1['prediction'] == 1, df2['proba_long'],
np.where(df1['prediction'] == -1, df2['proba_short'], 0))
df1['signal'] = np.where((df1['probability'] >= .7) & (df1['prediction'] == 1), 'Go Long',
np.where((df1['probability'] >= 0.7) & (df1['prediction'] == -1), 'Go Short', 'Stand Aside'))
df1.reset_index(inplace=True)
df1['pair'] = pair
df1.set_index('pair', inplace=True)
entry_sig = df1[['probability', 'signal']].iloc[-1:]
# Merge
df = pd.concat([df, entry_sig], axis=0)
#output
return df
df = pd.concat([xgb_deploy(), lgb_deploy()], axis=1)
st.subheader(f"Today's Signals: {date.today()}")
st.write(df) |