filename (string, lengths 13 to 19) | text (string, lengths 134 to 1.04M) |
---|---|
the-stack_0_4539 | from .output import Output
import gevent
import gevent.monkey
gevent.monkey.patch_socket()
import urllib2
import json
class Elasticsearch(Output):
"""Outputs to an elasticsearch index.
:param string host: elasticsearch host
:param integer port: elasticsearch port
:param string index: (required) elasticsearch index. This can be formatted by fields in the event.
:param string type: (required) elasticsearch type. This can be formatted by fields in the event.
Example configuration for kibana::
Mutate(rename={'@timestamp': 'timestamp', '@message': 'message'})
Elasticsearch(index='logstash-{@timestamp:%Y.%m.%d}', type='event')
"""
RETRIES = 10
def __init__(self, index, type, host='localhost', port=9200):
super(Elasticsearch, self).__init__()
self.host = host
self.port = port
self.index = index
self.type = type
def process(self, event):
data = event.to_json()
index = event.format(self.index)
itype = event.format(self.type)
if not index:
raise ValueError("index is empty")
if not itype:
raise ValueError("type is empty")
url = 'http://%s:%s/%s/%s/' % (self.host, self.port, index, itype)
success = False
delay = 1.0
for retry in xrange(self.RETRIES):
try:
res = urllib2.urlopen(url, data=data)
# 200 response indicates all is well
success = True
result = json.load(res)
break
except urllib2.HTTPError as ex:
if ex.getcode() == 400:
# Bad Request - do not retry
self.logger.error("Bad request: %s, not retrying" % (ex,))
break
else:
delay *= 2.0
self.logger.warn('Unable to index: %s, retrying in %ds' % (ex, delay))
gevent.sleep(delay)
except urllib2.URLError as ex:
delay *= 2.0
self.logger.warn('Unable to index: %s, retrying in %ds' % (ex, delay))
gevent.sleep(delay)
if success:
self.logger.debug('Indexed to elasticsearch: index:%s type:%s id:%s' % (index, itype, result['_id']))
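# Editor-added usage sketch (illustrative; the surrounding pipeline and the
# event object are assumptions, not defined in this module):
#
# out = Elasticsearch(index='logstash-{@timestamp:%Y.%m.%d}', type='event')
# out.process(event)  # event must provide to_json() and format(); failed posts
#                     # are retried up to RETRIES times with exponential backoff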
|
the-stack_0_4540 | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Rhombus Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import json
from test_framework.test_rhombus import RhombusTestFramework, isclose, connect_nodes_bi
class ExtKeyTest(RhombusTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [ ['-debug','-reservebalance=10000000'] for i in range(self.num_nodes)]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 2)
self.sync_all()
def run_test(self):
node = self.nodes[0]
node1 = self.nodes[1]
ro = node.extkeyimportmaster('abandon baby cabbage dad eager fabric gadget habit ice kangaroo lab absorb')
assert(ro['account_id'] == 'aaaZf2qnNr5T7PWRmqgmusuu5ACnBcX2ev')
assert(node.getwalletinfo()['total_balance'] == 100000)
# Start staking
node.walletsettings('stakelimit', {'height':1})
node.reservebalance(False)
assert(self.wait_for_height(node, 1))
# stop staking
node.reservebalance(True, 10000000)
node1.reservebalance(True, 10000000)
ro = node1.extkeyimportmaster('drip fog service village program equip minute dentist series hawk crop sphere olympic lazy garbage segment fox library good alley steak jazz force inmate')
assert(ro['account_id'] == 'ahL1QdHhzNCtZWJzv36ScfPipJP1cUzAD8')
extAddrTo = node1.getnewextaddress('test label')
assert(extAddrTo == 'pparszNYZ1cpWxnNieFqHCV2rtXmG74a4WAXHHhXaRATzzU6kMixjy1rXDM1UM4LVgkXRpLNM1rQNvkgLf7kUeMXiyaBMK8aSR3td4b4cX4epnHF')
ro = node1.filteraddresses()
assert(len(ro) == 1)
assert(ro[0]['label'] == 'test label')
ro = node1.getaddressinfo(extAddrTo)
assert(ro['ismine'] == True)
assert(ro['isextkey'] == True)
ro = node1.dumpprivkey(extAddrTo)
assert(ro == 'xparFnnG7xJkEekTjWGumcEY1BKgryY4txW5Ce56KQPBJG7u3cNsUHxGgjVwHGEaxUGDAjT4SXv7fkWkp4TFaFHjaoZVh8Zricnwz3DjAxtqtmi')
txnHash = node.sendtoaddress(extAddrTo, 10)
ro = node.getmempoolentry(txnHash)
assert(ro['height'] == 1)
# start staking
node.walletsettings('stakelimit', {'height':2})
node.reservebalance(False)
assert(self.wait_for_height(node, 2))
# stop staking
ro = node.reservebalance(True, 10000000)
ro = node1.listtransactions()
assert(len(ro) == 1)
assert(ro[0]['address'] == 'pkGv5xgviEAEjwpRPeEt8c9cvraw2umKYo')
assert(ro[0]['amount'] == 10)
ro = node1.getwalletinfo()
assert(ro['total_balance'] == 10)
block2_hash = node.getblockhash(2)
ro = node.getblock(block2_hash)
assert(txnHash in ro['tx'])
txnHash2 = node.sendtoaddress(extAddrTo, 20, '', '', False, 'narration test')
assert(self.wait_for_mempool(node1, txnHash2))
ro = node1.listtransactions()
assert(len(ro) == 2)
assert(ro[1]['address'] == 'pbo5e7tsLJBdUcCWteTTkGBxjW8Xy12o1V')
assert(ro[1]['amount'] == 20)
assert('narration test' in ro[1].values())
ro = node.listtransactions()
assert('narration test' in ro[-1].values())
extAddrTo0 = node.getnewextaddress()
txnHashes = []
for k in range(24):
v = round(0.01 * float(k+1), 5)
node1.syncwithvalidationinterfacequeue()
txnHash = node1.sendtoaddress(extAddrTo0, v, '', '', False)
txnHashes.append(txnHash)
for txnHash in txnHashes:
assert(self.wait_for_mempool(node, txnHash))
ro = node.listtransactions('*', 24)
assert(len(ro) == 24)
assert[isclose(ro[0]['amount'], 0.01)]
assert[isclose(ro[23]['amount'], 0.24)]
assert[ro[23]['address'] == 'pm23xKs3gy6AhZZ7JZe61Rn1m8VB83P49d']
# start staking
node.walletsettings('stakelimit', {'height':3})
node.reservebalance(False)
assert(self.wait_for_height(node, 3))
block3_hash = node.getblockhash(3)
ro = node.getblock(block3_hash)
for txnHash in txnHashes:
assert(txnHash in ro['tx'])
# Test bech32 encoding
ek_b32 = 'tpep1q3ehtcetqqqqqqesj04mypkmhnly5rktqmcpmjuq09lyevcsjxrgra6x8trd52vp2vpsk6kf86v3npg6x66ymrn5yrqnclxtqrlfdlw3j4f0309dhxct8kc68paxt'
assert(node.getnewextaddress('lbl_b32', '', True) == ek_b32)
assert(ek_b32 in json.dumps(node.filteraddresses()))
if __name__ == '__main__':
ExtKeyTest().main()
|
the-stack_0_4541 | #!/usr/bin/env python
"""
lgc/main.py
Note to program performers:
- parallel_pr_nibble produces the same results as ligra's `apps/localAlg/ACL-Sync-Local-Opt.C`
- ista produces the same results as LocalGraphClustering's `ista_dinput_dense` method
"""
import os
import sys
import argparse
import numpy as np
from time import time
from tqdm import tqdm
from scipy import sparse
from scipy.io import mmread
from scipy.stats import spearmanr
# --
# Parallel PR-Nibble
def parallel_pr_nibble(seeds, degrees, num_nodes, adj_indices, adj_indptr, alpha, epsilon):
out = []
for seed in tqdm(seeds):
p = np.zeros(num_nodes)
r = np.zeros(num_nodes)
r[seed] = 1
frontier = np.array([seed])
while True:
if len(frontier) == 0:
break
r_prime = r.copy()
for node_idx in frontier:
p[node_idx] += (2 * alpha) / (1 + alpha) * r[node_idx]
r_prime[node_idx] = 0
for src_idx in frontier:
neighbors = adj_indices[adj_indptr[src_idx]:adj_indptr[src_idx + 1]]
for dst_idx in neighbors:
update = ((1 - alpha) / (1 + alpha)) * r[src_idx] / degrees[src_idx]
r_prime[dst_idx] += update
r = r_prime
frontier = np.where((r >= degrees * epsilon) & (degrees > 0))[0]
out.append(p)
return np.column_stack(out)
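# Editor-added illustrative sketch (not part of the original script, kept
# commented out so behaviour is unchanged): parallel_pr_nibble on a tiny
# synthetic triangle graph instead of data/jhu.mtx.
#
# tiny_adj = sparse.csr_matrix(np.array([[0, 1, 1],
#                                        [1, 0, 1],
#                                        [1, 1, 0]]))
# tiny_degrees = np.asarray(tiny_adj.sum(axis=-1)).squeeze().astype(int)
# tiny_scores = parallel_pr_nibble(
#     seeds=[0],
#     degrees=tiny_degrees,
#     num_nodes=tiny_adj.shape[0],
#     adj_indices=tiny_adj.indices,
#     adj_indptr=tiny_adj.indptr,
#     alpha=0.15,
#     epsilon=1e-6,
# )
# tiny_scores.shape == (3, 1): one column of PPR scores per seed.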
# --
# Run
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--num-seeds', type=int, default=50)
parser.add_argument('--alpha', type=float, default=0.15)
parser.add_argument('--pnib-epsilon', type=float, default=1e-6)
args = parser.parse_args()
# !! In order to check accuracy, you _must_ use these parameters !!
assert args.num_seeds == 50
assert args.alpha == 0.15
assert args.pnib_epsilon == 1e-6
return args
args = parse_args()
adj = mmread('data/jhu.mtx').tocsr()
degrees = np.asarray(adj.sum(axis=-1)).squeeze().astype(int)
num_nodes = adj.shape[0]
adj_indices = adj.indices
adj_indptr = adj.indptr
pnib_seeds = np.array(range(args.num_seeds))
alpha = args.alpha
pnib_epsilon = args.pnib_epsilon
t = time()
pnib_scores = parallel_pr_nibble(pnib_seeds, degrees, num_nodes, adj_indices, adj_indptr, alpha=alpha, epsilon=pnib_epsilon)
t2 = time()
assert pnib_scores.shape[0] == adj.shape[0]
assert pnib_scores.shape[1] == len(pnib_seeds)
pnib_elapsed = t2 - t
print("[Nibble Elapsed Time]: ", (t2 - t))
os.makedirs('results', exist_ok=True)
np.savetxt('results/pnib_score.txt', pnib_scores)
open('results/pnib_elapsed', 'w').write(str(pnib_elapsed))
|
the-stack_0_4543 | # Ex. 095 +=
# jogador = {'Nome': 'Joelson', 'Gols': [2, 1, 0, 0, 3], 'Total': 6}
jogador = {}
jogadores, gols = [], []
posGol = 0
Blue, Normal, Color, Red = "\033[34m", "\033[m", "\033[36m", "\033[31m"
while True:
jogador['Nome'] = str(input("Nome do jogador: ")).capitalize()
partidas = int(input(f"Quantas partidas {jogador['Nome']} jogou? "))
for i in range(1, partidas + 1):
gol = int(input(f" Quantos gols na {i}ª partida? "))
gols.append(gol)
jogador['Gols'] = gols[:] # Copy of 'gols'
jogador['Total'] = sum(gols) # sum() adds up any iterable; in this case, a list
jogadores.append(jogador.copy()) # Append a copy of the current player to the players list
gols.clear() # Clear the goals list
resp = str(input("Deseja continuar [s/n]? ")).lower()
if "s" in resp:
print()
else:
break
print("-" * 50)
# Overall scoreboard
print(f"{Blue}Nº Nome Gols Total{Normal}")
# jogadores = [{'Nome': 'Joel', 'Gols': [0, 0, 3], 'Total': 3}, {'Nome': 'Tody', 'Gols': [2, 2, 0], 'Total': 4}]
for i in range(len(jogadores)):
print(f"{i:<2} {jogadores[i]['Nome']:<13} {str(jogadores[i]['Gols']):<15} {jogadores[i]['Total']:<5}")
print("-" * 50)
while True:
quem = int(input("Deseja ver o placar de quem [-1 para sair]? "))
if quem == -1: # Exit
break
elif quem >= len(jogadores) or quem < 0: # Invalid index
print(f"{Red}Jogador não existe, tente novamente!{Normal}")
else: # Valid
print(f"{Color} -- Levantamento do jogador {jogadores[quem]['Nome']} --{Normal}")
for pos, numGols in enumerate(jogadores[quem]['Gols']): # Iterate over the player's goals
print(f" => Na partida {pos}, fez {numGols} gol(s).")
print("-" * 50)
|
the-stack_0_4544 | import time
import logging
import pytest
import operator
import numpy as np
from copy import copy
from ophyd.epics_motor import EpicsMotor
from ophyd.pseudopos import PseudoPositioner, PseudoSingle
from ophyd import Component as C
from ophyd.utils import ExceptionBundle
logger = logging.getLogger(__name__)
def setUpModule():
logging.getLogger("ophyd.pseudopos").setLevel(logging.DEBUG)
def tearDownModule():
logger.debug("Cleaning up")
logging.getLogger("ophyd.pseudopos").setLevel(logging.INFO)
motor_recs = [
"XF:31IDA-OP{Tbl-Ax:X1}Mtr",
"XF:31IDA-OP{Tbl-Ax:X2}Mtr",
"XF:31IDA-OP{Tbl-Ax:X3}Mtr",
"XF:31IDA-OP{Tbl-Ax:X4}Mtr",
"XF:31IDA-OP{Tbl-Ax:X5}Mtr",
"XF:31IDA-OP{Tbl-Ax:X6}Mtr",
]
class Pseudo3x3(PseudoPositioner):
pseudo1 = C(PseudoSingle, "", limits=(-10, 10), egu="a")
pseudo2 = C(PseudoSingle, "", limits=(-10, 10), egu="b")
pseudo3 = C(PseudoSingle, "", limits=None, egu="c")
real1 = C(EpicsMotor, motor_recs[0])
real2 = C(EpicsMotor, motor_recs[1])
real3 = C(EpicsMotor, motor_recs[2])
def forward(self, pseudo_pos):
pseudo_pos = self.PseudoPosition(*pseudo_pos)
# logger.debug('forward %s', pseudo_pos)
return self.RealPosition(
real1=-pseudo_pos.pseudo1,
real2=-pseudo_pos.pseudo2,
real3=-pseudo_pos.pseudo3,
)
def inverse(self, real_pos):
real_pos = self.RealPosition(*real_pos)
# logger.debug('inverse %s', real_pos)
return self.PseudoPosition(
pseudo1=real_pos.real1, pseudo2=real_pos.real2, pseudo3=real_pos.real3
)
class Pseudo1x3(PseudoPositioner):
pseudo1 = C(PseudoSingle, limits=(-10, 10))
real1 = C(EpicsMotor, motor_recs[0])
real2 = C(EpicsMotor, motor_recs[1])
real3 = C(EpicsMotor, motor_recs[2])
def forward(self, pseudo_pos):
pseudo_pos = self.PseudoPosition(*pseudo_pos)
# logger.debug('forward %s', pseudo_pos)
return self.RealPosition(
real1=-pseudo_pos.pseudo1,
real2=-pseudo_pos.pseudo1,
real3=-pseudo_pos.pseudo1,
)
def inverse(self, real_pos):
real_pos = self.RealPosition(*real_pos)
# logger.debug('inverse %s', real_pos)
return self.PseudoPosition(pseudo1=-real_pos.real1)
class FaultyStopperEpicsMotor(EpicsMotor):
def stop(self, *, success=False):
raise RuntimeError("Expected exception")
class FaultyPseudo1x3(Pseudo1x3):
real1 = C(FaultyStopperEpicsMotor, motor_recs[0])
def test_onlypseudo():
# can't instantiate it on its own
with pytest.raises(TypeError):
PseudoPositioner("prefix")
def test_position_wrapper():
pseudo = Pseudo3x3("", name="mypseudo", concurrent=False)
test_pos = pseudo.PseudoPosition(pseudo1=1, pseudo2=2, pseudo3=3)
extra_kw = dict(a=3, b=4, c=6)
# positional arguments
assert pseudo.to_pseudo_tuple(1, 2, 3, **extra_kw) == (test_pos, extra_kw)
# sequence
assert pseudo.to_pseudo_tuple((1, 2, 3), **extra_kw) == (test_pos, extra_kw)
# correct type
assert pseudo.to_pseudo_tuple(test_pos, **extra_kw) == (test_pos, extra_kw)
# kwargs
assert pseudo.to_pseudo_tuple(pseudo1=1, pseudo2=2, pseudo3=3, **extra_kw) == (
test_pos,
extra_kw,
)
# too many positional arguments
with pytest.raises(ValueError):
pseudo.to_pseudo_tuple(1, 2, 3, 4)
# valid kwargs, but passing in args too
with pytest.raises(ValueError):
pseudo.to_pseudo_tuple(1, pseudo1=1, pseudo2=2, pseudo3=3)
@pytest.mark.motorsim
def test_multi_sequential():
pseudo = Pseudo3x3("", name="mypseudo", concurrent=False)
pseudo.wait_for_connection()
assert pseudo.egu == "a, b, c"
pos2 = pseudo.PseudoPosition(pseudo1=0, pseudo2=0, pseudo3=0)
pseudo.set(pos2, wait=True)
time.sleep(1.0)
pos1 = pseudo.PseudoPosition(pseudo1=0.1, pseudo2=0.2, pseudo3=0.3)
pseudo.set(pos1, wait=True)
pseudo.real1.set(0, wait=True)
pseudo.real2.set(0, wait=True)
pseudo.real3.set(0, wait=True)
pseudo.pseudo1.stop()
pseudo.real3.set(0, wait=True)
@pytest.mark.motorsim
def test_faulty_stopper():
pseudo = FaultyPseudo1x3("", name="mypseudo", concurrent=False)
pseudo.wait_for_connection()
with pytest.raises(ExceptionBundle):
# smoke-testing for coverage
pseudo.pseudo1.stop()
def test_limits():
pseudo = Pseudo3x3("", name="mypseudo", concurrent=True)
assert pseudo.limits == ((-10, 10), (-10, 10), (0, 0))
assert pseudo.low_limit == (-10, -10, 0)
assert pseudo.high_limit == (10, 10, 0)
@pytest.mark.motorsim
def test_read_describe():
pseudo = Pseudo3x3("", name="mypseudo", concurrent=True)
pseudo.wait_for_connection()
desc_dict = pseudo.describe()
desc_keys = [
"source",
"upper_ctrl_limit",
"lower_ctrl_limit",
"shape",
"dtype",
"units",
]
for key in desc_keys:
assert key in desc_dict["mypseudo_pseudo3"]
read_dict = pseudo.read()
read_keys = ["value", "timestamp"]
for key in read_keys:
assert key in read_dict["mypseudo_pseudo3"]
assert pseudo.read().keys() == pseudo.describe().keys()
@pytest.mark.motorsim
def test_multi_concurrent():
def done(**kwargs):
logger.debug("** Finished moving (%s)", kwargs)
pseudo = Pseudo3x3(
"", name="mypseudo", concurrent=True, settle_time=0.1, timeout=25.0
)
assert pseudo.sequential is False
assert pseudo.concurrent is True
assert pseudo.settle_time == 0.1
assert pseudo.timeout == 25.0
pseudo.wait_for_connection()
assert pseudo.connected
assert tuple(pseudo.pseudo_positioners) == (
pseudo.pseudo1,
pseudo.pseudo2,
pseudo.pseudo3,
)
assert tuple(pseudo.real_positioners) == (pseudo.real1, pseudo.real2, pseudo.real3)
logger.info("Move to (.2, .2, .2), which is (-.2, -.2, -.2) for real " "motors")
pseudo.set(pseudo.PseudoPosition(0.2, 0.2, 0.2), wait=True)
logger.info("Position is: %s (moving=%s)", pseudo.position, pseudo.moving)
pseudo.check_value((2, 2, 2))
pseudo.check_value(pseudo.PseudoPosition(2, 2, 2))
try:
pseudo.check_value((2, 2, 2, 3))
except ValueError as ex:
logger.info("Check value failed, as expected (%s)", ex)
real1 = pseudo.real1
pseudo1 = pseudo.pseudo1
try:
pseudo.check_value((real1.high_limit + 1, 2, 2))
except ValueError as ex:
logger.info("Check value failed, as expected (%s)", ex)
ret = pseudo.set((2, 2, 2), wait=False, moved_cb=done)
assert ret.settle_time == 0.1
count = 0
while not ret.done:
logger.info("Pos=%s %s (err=%s)", pseudo.position, ret, ret.error)
count += 1
if count > 1000:
raise Exception
time.sleep(0.1)
logger.info("Single pseudo axis: %s", pseudo1)
pseudo1.set(0, wait=True, timeout=5)
assert pseudo1.target == 0
pseudo1.sync()
assert pseudo1.target == pseudo1.position
# coverage
pseudo1._started_moving
try:
pseudo1.check_value(real1.high_limit + 1)
except ValueError as ex:
logger.info("Check value for single failed, as expected (%s)", ex)
logger.info("Move pseudo1 to 0, position=%s", pseudo.position)
logger.info("pseudo1 = %s", pseudo1.position)
def single_sub(**kwargs):
# logger.info('Single sub: %s', kwargs)
pass
pseudo1.subscribe(single_sub, pseudo1.SUB_READBACK)
ret = pseudo1.set(1, wait=False)
assert pseudo.timeout == ret.timeout
count = 0
while not ret.done:
logger.info(
"pseudo1.pos=%s Pos=%s %s (err=%s)",
pseudo1.position,
pseudo.position,
ret,
ret.error,
)
count += 1
if count > 20:
raise Exception
time.sleep(0.1)
logger.info(
"pseudo1.pos=%s Pos=%s %s (err=%s)",
pseudo1.position,
pseudo.position,
ret,
ret.error,
)
copy(pseudo)
pseudo.read()
pseudo.describe()
pseudo.read_configuration()
pseudo.describe_configuration()
repr(pseudo)
str(pseudo)
pseudo.pseudo1.read()
pseudo.pseudo1.describe()
pseudo.pseudo1.read_configuration()
pseudo.pseudo1.describe_configuration()
@pytest.mark.motorsim
def test_single_pseudo():
logger.info("------- Sequential, single pseudo positioner")
pos = Pseudo1x3("", name="mypseudo", concurrent=False)
pos.wait_for_connection()
reals = pos._real
logger.info("Move to .2, which is (-.2, -.2, -.2) for real motors")
pos.set((0.2,), wait=True)
logger.info("Position is: %s (moving=%s)", pos.position, pos.moving)
logger.info("Real positions: %s", [real.position for real in reals])
logger.info("Move to -.2, which is (.2, .2, .2) for real motors")
pos.set((-0.2,), wait=True)
logger.info("Position is: %s (moving=%s)", pos.position, pos.moving)
logger.info("Real positions: %s", [real.position for real in reals])
copy(pos)
pos.read()
pos.describe()
repr(pos)
str(pos)
@pytest.mark.parametrize(
"inpargs,inpkwargs,expected_position,expected_kwargs",
[
((1, 2, 3), {}, (1, 2, 3), {}),
((1, 2), {}, (1, 2, -3), {}),
((1,), {}, (1, -2, -3), {}),
(((1, 2, 3),), {}, (1, 2, 3), {}),
(([1, 2],), {}, (1, 2, -3), {}),
(((1,),), {}, (1, -2, -3), {}),
((), {"pseudo1": 1, "pseudo2": 2, "pseudo3": 3}, (1, 2, 3), {}),
((), {"pseudo1": 1, "pseudo2": 2}, (1, 2, -3), {}),
((), {"pseudo1": 1}, (1, -2, -3), {}),
((), {"pseudo1": 1, "wait": True}, (1, -2, -3), {"wait": True}),
(({"pseudo1": 1, "pseudo2": 2, "pseudo3": 3},), {}, (1, 2, 3), {}),
(({"pseudo1": 1, "pseudo2": 2},), {}, (1, 2, -3), {}),
(({"pseudo1": 1},), {}, (1, -2, -3), {}),
(
({"pseudo1": 1, "wait": True},),
{"timeout": None},
(1, -2, -3),
{"wait": True, "timeout": None},
),
((1, 2, 3), {"timeout": 1}, (1, 2, 3), {"timeout": 1}),
(((1, 2, 3),), {"timeout": 1}, (1, 2, 3), {"timeout": 1}),
],
)
def test_pseudo_position_input_3x3(
hw, inpargs, inpkwargs, expected_position, expected_kwargs
):
pseudo3x3 = hw.pseudo3x3
pseudo3x3.real1.set(1)
pseudo3x3.real2.set(2)
pseudo3x3.real3.set(3)
out, extra_kwargs = pseudo3x3.to_pseudo_tuple(*inpargs, **inpkwargs)
assert out == pseudo3x3.PseudoPosition(*expected_position)
assert extra_kwargs == expected_kwargs
pseudo3x3.set(*inpargs, **inpkwargs)
assert pseudo3x3.position == pseudo3x3.PseudoPosition(*expected_position)
@pytest.mark.parametrize(
"inpargs,inpkwargs",
[
((1, 2, 3, 5), {}),
((1, 2, 3), {"pseudo1": 1}),
((1, 2, 3), {"pseudo2": 1}),
((1,), {"pseudo2": 1, "pseudo3": 1}),
((1, 2), {"pseudo3": 1}),
],
)
def test_pseudo_position_fail_3x3(hw, inpargs, inpkwargs):
pseudo3x3 = hw.pseudo3x3
with pytest.raises(ValueError):
pseudo3x3.to_pseudo_tuple(*inpargs, **inpkwargs)
@pytest.mark.parametrize(
"inpargs,inpkwargs,expected_position,expected_kwargs",
[
((1, 2, 3), {}, (1, 2, 3), {}),
((1, 2), {}, (1, 2, 3), {}),
((1,), {}, (1, 2, 3), {}),
(((1, 2, 3),), {}, (1, 2, 3), {}),
(([1, 2],), {}, (1, 2, 3), {}),
(((1,),), {}, (1, 2, 3), {}),
((), {"real1": 1, "real2": 2, "real3": 3}, (1, 2, 3), {}),
((), {"real1": 1, "real2": 2}, (1, 2, 3), {}),
((), {"real1": 1}, (1, 2, 3), {}),
((), {"real1": 1, "foo": "bar"}, (1, 2, 3), {"foo": "bar"}),
(({"real1": 1, "real2": 2, "real3": 3},), {}, (1, 2, 3), {}),
(({"real1": 1, "real2": 2},), {}, (1, 2, 3), {}),
(({"real1": 1},), {}, (1, 2, 3), {}),
(
({"real1": 1, "foo": "bar"},),
{"baz": "buz"},
(1, 2, 3),
{"foo": "bar", "baz": "buz"},
),
((1, 2, 3), {"foo": "bar"}, (1, 2, 3), {"foo": "bar"}),
],
)
def test_real_position_input_3x3(
hw, inpargs, inpkwargs, expected_position, expected_kwargs
):
pseudo3x3 = hw.pseudo3x3
pseudo3x3.real1.set(1)
pseudo3x3.real2.set(2)
pseudo3x3.real3.set(3)
out, extra_kwargs = pseudo3x3.to_real_tuple(*inpargs, **inpkwargs)
assert out == pseudo3x3.RealPosition(*expected_position)
assert extra_kwargs == expected_kwargs
@pytest.mark.parametrize(
"inpargs,inpkwargs",
[
((1, 2, 3, 5), {}),
((1, 2, 3), {"real1": 1}),
((1, 2, 3), {"real2": 1}),
((1,), {"real2": 1, "real3": 1}),
((1, 2), {"real3": 1}),
(({"real3": 1, "foo": "bar"},), {"foo": "bizz"}),
((), {}),
],
)
def test_real_position_fail_3x3(hw, inpargs, inpkwargs):
pseudo3x3 = hw.pseudo3x3
with pytest.raises(ValueError):
pseudo3x3.to_real_tuple(*inpargs, **inpkwargs)
def test_single_pseudo_with_sim(hw):
logger.info("------- Sequential, single pseudo positioner")
pos = hw.pseudo1x3
reals = pos._real
logger.info("Move to .2, which is (-.2, -.2, -.2) for real motors")
pos.set((0.2,), wait=True)
logger.info("Position is: %s (moving=%s)", pos.position, pos.moving)
logger.info("Real positions: %s", [real.position for real in reals])
logger.info("Move to -.2, which is (.2, .2, .2) for real motors")
pos.set((-0.2,), wait=True)
logger.info("Position is: %s (moving=%s)", pos.position, pos.moving)
logger.info("Real positions: %s", [real.position for real in reals])
copy(pos)
pos.read()
pos.describe()
repr(pos)
str(pos)
@pytest.mark.parametrize("typ", ("to_real_tuple", "to_pseudo_tuple"))
@pytest.mark.parametrize("op", (operator.sub, operator.add))
@pytest.mark.parametrize(
"a,b",
[((0, 0, 0), (1, 1, 1)), ((1, 0, 1), (1, 1, 1)), ((9, 0, 0.3), (0.3, 0.1, 0.5))],
)
def test_pseudo_math(hw, a, b, op, typ):
pos = hw.pseudo3x3
a, _ = getattr(pos, typ)(a)
b, _ = getattr(pos, typ)(b)
# TODO switch to np asserts
expected = op(np.asarray(a), np.asarray(b))
assert (np.asarray(op(a, b)) == expected).all()
assert (np.asarray(op(a, tuple(b))) == expected).all()
assert (np.asarray(op(a, list(b))) == expected).all()
assert (np.asarray(op(a, b._asdict())) == expected).all()
assert (np.asarray(op(a, {})) == a).all()
assert abs(op(a, b)) == np.sqrt(np.sum(expected ** 2))
def test_pseudo_hints(hw):
pos = hw.pseudo3x3
for j in (1, 2, 3):
p = getattr(pos, "pseudo{}".format(j))
assert p.hints["fields"] == [p.readback.name]
p.readback.name = "aardvark{}".format(j)
assert p.hints["fields"] == [p.readback.name]
expected_fields = [
getattr(pos, "pseudo{}".format(j)).readback.name for j in (1, 2, 3)
]
assert pos.hints["fields"] == expected_fields
|
the-stack_0_4547 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) 2019-2021 Xenios SEZC
# https://www.veriblock.org
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs."""
import io
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
disconnect_nodes,
)
from test_framework.messages import CTransaction, COIN
from test_framework.pop_const import POW_PAYOUT
class TxnMallTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
parser.add_argument("--segwit", dest="segwit", default=False, action="store_true",
help="Test behaviour with SegWit txn (which should fail")
def setup_network(self):
# Start with split network:
super(TxnMallTest, self).setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
if self.options.segwit:
output_type = "p2sh-segwit"
else:
output_type = "legacy"
# All nodes should start with 1250 vBTC:
starting_balance = (POW_PAYOUT*25)
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress() # bug workaround, coins generated assigned to first getnewaddress!
self.nodes[0].settxfee(.001)
node0_address1 = self.nodes[0].getnewaddress(address_type=output_type)
node0_txid1 = self.nodes[0].sendtoaddress(node0_address1, ((POW_PAYOUT*25)-31))
node0_tx1 = self.nodes[0].gettransaction(node0_txid1)
node0_address2 = self.nodes[0].getnewaddress(address_type=output_type)
node0_txid2 = self.nodes[0].sendtoaddress(node0_address2, (POW_PAYOUT-29))
node0_tx2 = self.nodes[0].gettransaction(node0_txid2)
assert_equal(self.nodes[0].getbalance(),
starting_balance + node0_tx1["fee"] + node0_tx2["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress()
# Send tx1, and another transaction tx2 that won't be cloned
txid1 = self.nodes[0].sendtoaddress(node1_address, 40)
txid2 = self.nodes[0].sendtoaddress(node1_address, 20)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1, 1)
clone_inputs = [{"txid": rawtx1["vin"][0]["txid"], "vout": rawtx1["vin"][0]["vout"], "sequence": rawtx1["vin"][0]["sequence"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][1]["value"]}
clone_locktime = rawtx1["locktime"]
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
# createrawtransaction randomizes the order of its outputs, so swap them if necessary.
clone_tx = CTransaction()
clone_tx.deserialize(io.BytesIO(bytes.fromhex(clone_raw)))
if (rawtx1["vout"][0]["value"] == 40 and clone_tx.vout[0].nValue != 40*COIN or rawtx1["vout"][0]["value"] != 40 and clone_tx.vout[0].nValue == 40*COIN):
(clone_tx.vout[0], clone_tx.vout[1]) = (clone_tx.vout[1], clone_tx.vout[0])
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransactionwithwallet(clone_tx.serialize().hex(), None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
# Have node0 mine a block, if requested:
if (self.options.mine_block):
self.nodes[0].generate(1)
self.sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50 vBTC for another
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + node0_tx1["fee"] + node0_tx2["fee"]
if self.options.mine_block:
expected += POW_PAYOUT
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(node0_tx1["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
if self.options.segwit:
assert_equal(txid1, txid1_clone)
return
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(node0_tx2["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
self.sync_blocks()
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, plus two more
# matured block subsidies (POW_PAYOUT * 2), less a possible orphaned matured subsidy
expected += (POW_PAYOUT * 2)
if (self.options.mine_block):
expected -= POW_PAYOUT
assert_equal(self.nodes[0].getbalance(), expected)
if __name__ == '__main__':
TxnMallTest().main()
|
the-stack_0_4548 | # Input:
# 1
# 1
# 1
# 2
if __name__ == '__main__':
# Take the input in correct format
x = int(input())
y = int(input())
z = int(input())
n = int(input())
# Using for loop
# final_list =[]
# Iterate from x to z append the values into one list
# for i in range(x + 1):
# for j in range(y + 1):
# for k in range(z + 1):
# Only add the sublists whose sum is not equal to n
# if sum([i, j, k]) != n:
# final_list.append([i, j, k])
# Defining Final list
final_list = []
# Create grid with the given values
[[[final_list.append([i, j, k]) for k in range(z + 1)]
for j in range(y + 1)] for i in range(x + 1)]
# Only add the sublists whose sum is not equal to n
last_list = [
final_list[i] for i in range(len(final_list))
if sum(final_list[i]) != n
]
# Print the modified list
print(last_list)
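# Worked example (editor-added): for the sample input above (x=1, y=1, z=1,
# n=2) the grid holds the 8 coordinates with components in {0, 1}; the three
# whose components sum to 2 are dropped, so the printed list is:
# [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 1, 1]]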
|
the-stack_0_4549 | # *****************************************************************
# Copyright 2013 MIT Lincoln Laboratory
# Project: SPAR
# Authors: ATLH
# Description: Tests for keyword_query_generator
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 6 August 2012 ATLH Original version
# *****************************************************************
from __future__ import division
import os
import sys
this_dir = os.path.dirname(os.path.abspath(__file__))
base_dir = os.path.join(this_dir, '..', '..', '..')
sys.path.append(base_dir)
import unittest
import time
import keyword_query_generator as kqg
import spar_python.common.spar_random as spar_random
import spar_python.common.distributions.text_generator as text_generator
import StringIO as s
import spar_python.query_generation.query_schema as qs
import spar_python.data_generation.spar_variables as sv
class KeywordQueryGeneratorTest(unittest.TestCase):
def setUp(self):
self.seed = int(time.time())
self.seed_msg = "Random seed used for this test: %s" % self.seed
self.longMessage = True
spar_random.seed(self.seed)
#set up intitialization values
sub_cat = 'word'
f = s.StringIO('''Buck had accepted the rope with quiet dignity. To be sure, it
unwonted performance: but he had learned to trust in men he knew, and to
give them credit for a wisdom that outreached his own. But when the ends
of the ropes were placed in the strangers hands, he growled menacingly.
He had merely intimated his displeasure, in his pride believing that to
intimate was to command. But to his surprise the rope tightened around
his neck, shutting off his breath. In quick rage he sprang at the man,
who met him halfway, grappled him close by the throat, and with a deft
twist threw him over on his back. Then the rope tightened mercilessly,
while Buck struggled in a fury, his tongue lolling out of his mouth and
his great chest. Never in all his life had he been so
vilely treated, and never in all his life had he been so angry. But his
strength ebbed, his eyes glazed, and he knew nothing when the train was
flagged and the two men threw him into the baggage car.''')
self._kw_dist = text_generator.TextGenerator((f,))
fields = [sv.VARS.NOTES3]
dists = [self._kw_dist]
other_fields = ['no_queries', 'rss','keyword_len','type']
other_cols = [[3, 60, 4, 'word'], [3, 60, 5, 'word'],
[3, 75, 4, 'stem'], [3, 60, 5, 'stem']]
self.generator = kqg.KeywordQueryGenerator('P3',sub_cat, ["LL"],dists, fields, 1000,
100, other_fields, other_cols)
@unittest.skip("Sporadically fails, not sure why")
def testGenerateQuery(self):
"""
Tests the keyword query generator against a 'db' to make sure it is
generating the right queries
"""
#generate a 'db' to test against
notes = [self._kw_dist.generate(125) for _ in xrange(1000)]
#generate queries
query_batches = self.generator.produce_query_batches()
queries = []
for query_batch in query_batches:
queries += query_batch.produce_queries()
#check to see right number of queries generated
self.assertGreaterEqual(len(queries), 6, self.seed_msg)
#check queries against 'db' to make sure they match within a factor
#of two
word = 0
stem = 0
working_queries = 0
non_working_queries = []
for q in queries:
if q[qs.QRY_TYPE] == 'word':
x = lambda generated_text: \
generated_text.contains_upper(q[qs.QRY_SEARCHFOR])
word +=1
elif q[qs.QRY_TYPE] == 'stem':
x = lambda generated_text: \
generated_text.contains_stem(q[qs.QRY_SEARCHFOR])
stem +=1
count_match = len([note for note in notes if x(note)])
msg = 'Query %d was: \n' \
'sub_cat: %s\n'\
'field: %s\n'\
'type: %s\n'\
'rss: %d\n'\
'value: %s\n' % (q[qs.QRY_QID], q[qs.QRY_SUBCAT],
q[qs.QRY_FIELD], q[qs.QRY_TYPE],
q[qs.QRY_RSS], q[qs.QRY_SEARCHFOR])
if count_match <= q[qs.QRY_URSS]*4 and count_match >= q[qs.QRY_LRSS]/4:
working_queries+=1
else:
non_working_queries.append(msg)
fail_msg = ''
for msg in non_working_queries[:3]:
fail_msg += msg
self.assertGreaterEqual(working_queries, 6, fail_msg)
#check to see each field had the correct number of queries
#ideally this number would be greater than 6 (the requested amount)
#but because the distribution used for unit testing is so small
#there is a greater margin of error at this scale
self.assertGreaterEqual(word, 3, self.seed_msg)
self.assertGreaterEqual(stem, 3, self.seed_msg)
|
the-stack_0_4550 | """Provides data related to paths."""
import sys
from pathlib import PurePosixPath, PureWindowsPath
from typing import Any, Final
from mimesis.data import (
FOLDERS,
PLATFORMS,
PROGRAMMING_LANGS,
PROJECT_NAMES,
USERNAMES,
)
from mimesis.providers.base import BaseProvider
__all__ = ["Path"]
class Path(BaseProvider):
"""Class that provides methods and property for generate paths."""
def __init__(
self,
platform: str = sys.platform,
*args: Any,
**kwargs: Any,
) -> None:
"""Initialize attributes.
Supported platforms: 'linux', 'darwin', 'win32', 'win64'.
:param platform: Required platform type.
"""
super().__init__(*args, **kwargs)
self.platform = platform
self._pathlib_home = PureWindowsPath() if platform.startswith("win") else PurePosixPath()
self._pathlib_home /= PLATFORMS[platform]["home"]
class Meta:
"""Class for metadata."""
name: Final[str] = "path"
def root(self) -> str:
"""Generate a root dir path.
:return: Root dir.
:Example:
/
"""
return str(self._pathlib_home.parent)
def home(self) -> str:
"""Generate a home path.
:return: Home path.
:Example:
/home
"""
return str(self._pathlib_home)
def user(self) -> str:
"""Generate a random user.
:return: Path to user.
:Example:
/home/oretha
"""
user = self.random.choice(USERNAMES)
user = user.capitalize() if self.platform.startswith("win") else user.lower()
return str(self._pathlib_home / user)
def users_folder(self) -> str:
"""Generate a random path to user's folders.
:return: Path.
:Example:
/home/taneka/Pictures
"""
user = self.user()
folder = self.random.choice(FOLDERS)
return str(self._pathlib_home / user / folder)
def dev_dir(self) -> str:
"""Generate a random path to development directory.
:return: Path.
:Example:
/home/sherrell/Development/Python
"""
user = self.user()
folder = self.random.choice(["Development", "Dev"])
stack = self.random.choice(PROGRAMMING_LANGS)
return str(self._pathlib_home / user / folder / stack)
def project_dir(self) -> str:
"""Generate a random path to project directory.
:return: Path to project.
:Example:
/home/sherika/Development/Falcon/mercenary
"""
dev_dir = self.dev_dir()
project = self.random.choice(PROJECT_NAMES)
return str(self._pathlib_home / dev_dir / project)
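# Editor-added usage sketch (illustrative; the random user/folder parts vary,
# the fixed parts follow the docstring examples above):
#
# path = Path(platform="linux")
# path.root()          # '/'
# path.home()          # '/home'
# path.users_folder()  # e.g. '/home/taneka/Pictures'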
|
the-stack_0_4553 | from wurst.geo import geomatcher
from rmnd_lca import DATA_DIR
REGION_MAPPING_FILEPATH = (DATA_DIR / "regionmappingH12.csv")
class Geomap():
"""
Map ecoinvent locations to REMIND regions and vice-versa.
"""
def __init__(self):
self.geo = self.get_REMIND_geomatcher()
def get_REMIND_geomatcher(self):
"""
Load a geomatcher object from the `constructive_geometries`library and add definitions.
It is used to find correspondences between REMIND and ecoinvent region names.
:return: geomatcher object
:rtype: wurst.geo.geomatcher
"""
with open(REGION_MAPPING_FILEPATH) as f:
f.readline()
csv_list = [[val.strip() for val in r.split(";")] for r in f.readlines()]
l = [(x[1], x[2]) for x in csv_list]
# List of countries not found
countries_not_found = ["CC", "CX", "GG", "JE", "BL"]
rmnd_to_iso = {}
iso_to_rmnd = {}
# Build a dictionary that maps region names (used by REMIND) to ISO country codes
# And a reverse dictionary that maps ISO country codes to region names
for ISO, region in l:
if ISO not in countries_not_found:
try:
rmnd_to_iso[region].append(ISO)
except KeyError:
rmnd_to_iso[region] = [ISO]
iso_to_rmnd[ISO] = region
geo = geomatcher
geo.add_definitions(rmnd_to_iso, "REMIND")
return geo
def remind_to_ecoinvent_location(self, location):
"""
Find the corresponding ecoinvent region given a REMIND region.
:param location: name of a REMIND region
:type location: str
:return: name of an ecoinvent region
:rtype: str
"""
if location != "World":
location = ("REMIND", location)
ecoinvent_locations = []
try:
for r in self.geo.intersects(location):
if not isinstance(r, tuple):
ecoinvent_locations.append(r)
return ecoinvent_locations
except KeyError as e:
print("Can't find location {} using the geomatcher.".format(location))
else:
return ["GLO"]
def ecoinvent_to_remind_location(self, location):
"""
Return a REMIND region name for a 2-digit ISO country code given.
Set rules in case two REMIND regions are within the ecoinvent region.
:param location: 2-digit ISO country code
:type location: str
:return: REMIND region name
:rtype: str
"""
mapping = {"GLO": "World", "RoW": "CAZ", "IAI Area, Russia & RER w/o EU27 & EFTA": "REF"}
if location in mapping:
return mapping[location]
remind_location = [
r[1]
for r in self.geo.within(location)
if r[0] == "REMIND" and r[1] != "World"
]
mapping = {
("AFR", "MEA"): "AFR",
("AFR", "SSA"): "AFR",
("EUR", "NEU"): "EUR",
("EUR", "REF"): "EUR",
("OAS", "CHA"): "OAS",
("OAS", "EUR"): "OAS",
("OAS", "IND"): "OAS",
("OAS", "JPN"): "OAS",
("OAS", "MEA"): "OAS",
("OAS", "REF"): "OAS",
("USA", "CAZ"): "USA",
}
# If we have more than one REMIND region
if len(remind_location) > 1:
# TODO: find a more elegant way to do that
for key, value in mapping.items():
# We need to find the most specific REMIND region
if len(set(remind_location).intersection(set(key))) == 2:
remind_location.remove(value)
return remind_location[0]
elif len(remind_location) == 0:
print("no location for {}".format(location))
else:
return remind_location[0]
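# Editor-added usage sketch (illustrative; the results shown follow directly
# from the hard-coded mappings above):
#
# geo = Geomap()
# geo.ecoinvent_to_remind_location("GLO")    # -> "World"
# geo.ecoinvent_to_remind_location("RoW")    # -> "CAZ"
# geo.remind_to_ecoinvent_location("World")  # -> ["GLO"]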
|
the-stack_0_4559 |
import os
import re
import numpy as np
import argparse
import sys
from natsort import natsorted
from plyfile import PlyData, PlyElement
import pandas as pd
'''
script to evaluate a model
execution example:
- python3 evaluate_instances.py --path_run /home/uib/Desktop/test_evaluate_instances/ --path_cls /home/uib/Desktop/test_evaluate_instances/classes.txt --iou_thr 0.5 --test_name test --ref 0
'''
def get_iou(inst1,inst2):
inst1 = inst1[:, 0:3].tolist()
inst2 = inst2[:, 0:3].tolist()
intersection = 0
for i in inst1:
if i in inst2:
intersection += 1
union = len(inst1) + len(inst2) - intersection
iou = intersection/union
return iou
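# Worked example (editor-added): two instances sharing 2 of their points.
# a = np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0]])
# b = np.array([[1, 0, 0], [2, 0, 0], [3, 0, 0], [4, 0, 0]])
# get_iou(a, b) -> 2 / (3 + 4 - 2) = 0.4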
def read_ply(filename):
""" read XYZ point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z, r, g, b, c, i] for x,y,z,r,g,b,c,i in pc])
return pc_array
def get_info_classes(cls_path):
classes = []
colors = []
for line in open(cls_path):
data = line.split()
classes.append(data[0])
colors.append([int(data[1]), int(data[2]), int(data[3])])
labels = {cls: i for i, cls in enumerate(classes)}
label2color = {classes.index(cls): colors[classes.index(cls)] for cls in classes}
return classes, labels, label2color
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--path_runs', help='path to the run folder.')
parser.add_argument('--path_cls', help='path to the class file.')
parser.add_argument('--iou_thr', default=0.5, help='min iou.')
parser.add_argument('--test_name', help='name of the test')
parser.add_argument('--ref', default=1, help='name of the test')
parsed_args = parser.parse_args(sys.argv[1:])
path_runs = parsed_args.path_runs
path_cls = parsed_args.path_cls # get class txt path
iou_thr = float(parsed_args.iou_thr)
test_name = parsed_args.test_name
ref = int(parsed_args.ref)
for run in os.listdir(path_runs):
print("evaluating run: " + run)
path_run = os.path.join(path_runs,run)
path_infer = os.path.join(path_run, 'dump_' + test_name)
classes, labels, label2color = get_info_classes(path_cls)
files = natsorted(os.listdir(path_infer))
cases = [s for s in files if s.endswith(".obj")]
names = natsorted(set([re.split("[.\_]+", string)[0] for string in cases]))
tp = np.zeros((len(classes),), dtype=int)
fp = np.zeros((len(classes),), dtype=int)
n_gt = np.zeros((len(classes),), dtype=int)
n_pred = np.zeros((len(classes),), dtype=int)
iou_max_sum = np.zeros((len(classes),), dtype=float)
for name in names:
print("evaluating case: " + name)
path_gt = os.path.join(path_infer, name + "_gt_inst.ply")
path_pred = os.path.join(path_infer, name + "_pred_inst.ply")
if ref==1:
path_pred = os.path.join(path_infer, name + "_pred_inst_ref.ply")
gt = read_ply(path_gt)
pred = read_ply(path_pred)
if (gt.shape[0]>2) and (pred.shape[0]>2): # IN CASE GT OR PRED ARE "EMPTY" - LOOK AT GET_INSTANCES OUTPUT WHEN NO INSTANCES ARE FOUND (THEY SAVE "NULL" AS A TWO-ROW NUMPY ARRAY)
gt_list = list()
instances_gt = set(gt[..., 7])
instances_pred = set(pred[..., 7])
for i in instances_gt:
inst = gt[np.where(gt[..., 7] == float(i))]
gt_list.append(inst)
n_gt[int(inst[0, 6])] += 1
pred_list = list()
for j in instances_pred:
inst = pred[np.where(pred[..., 7] == float(j))]
pred_list.append(inst)
n_pred[int(inst[0, 6])] += 1
for i, pred_inst in enumerate(pred_list):
iou_list = list()
for j, gt_inst in enumerate(gt_list):
if pred_inst[0, 6] == gt_inst[0, 6]:
iou = get_iou(pred_inst,gt_inst)
else:
iou = 0
iou_list.append(iou)
iou_max = max(iou_list)
iou_max_sum[int(pred_inst[0, 6])]+= iou_max
if iou_max >= iou_thr:
tp[int(pred_inst[0, 6])] += 1
else:
fp[int(pred_inst[0, 6])] += 1
fn = n_gt - tp
iou_max_mean = iou_max_sum / n_pred
# TODO: change this to output one spreadsheet row per class, with its name
recall = tp/(tp+fn)
precision = tp/(tp+fp)
f1 = (2*recall*precision)/(recall+precision)
filepath = os.path.join(path_run, "evaluation_instance_" + test_name + ".xlsx")
if ref==1:
filepath = os.path.join(path_run, "evaluation_instance_ref_" + test_name + ".xlsx")
header = ['Recall', 'Precision', 'F1', 'mean_IoU']
csv = ({header[0]: recall, header[1]: precision, header[2]: f1, header[3]: iou_max_mean})
df = pd.DataFrame.from_records(csv, index=classes)
df.to_excel(filepath)
if __name__ == "__main__":
main()
|
the-stack_0_4560 | #
# Copyright 2013 eNovance <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import base
import wsme
from ceilometer.api.controllers.v2 import base as v2_base
class TestWsmeCustomType(base.BaseTestCase):
def test_advenum_default(self):
class dummybase(wsme.types.Base):
ae = v2_base.AdvEnum("name", str, "one", "other", default="other")
obj = dummybase()
self.assertEqual("other", obj.ae)
obj = dummybase(ae="one")
self.assertEqual("one", obj.ae)
self.assertRaises(wsme.exc.InvalidInput, dummybase, ae="not exists")
|
the-stack_0_4563 | import asyncio
from csv import DictReader
from io import BytesIO
import json
import zipfile
from gtfs_util.util import TextZipFile
from gtfs_util import constants
from gtfs_util.static.models import (
agency,
service,
service_update,
route,
point,
stop_time,
stop,
transfer,
trip,
)
FILENAME_MODEL_MAPPING = {
constants.AGENCY_FILENAME: agency.Agency,
constants.SERVICE_FILENAME: service.Service,
constants.SERVICE_UPDATE_FILENAME: service_update.ServiceUpdate,
constants.ROUTE_FILENAME: route.Route,
constants.POINT_FILENAME: point.Point,
constants.STOP_TIME_FILENAME: stop_time.StopTime,
constants.STOP_FILENAME: stop.Stop,
constants.TRANSFER_FILENAME: transfer.Transfer,
constants.TRIP_FILENAME: trip.Trip,
}
async def _read_async(data, file=True, mask=set()):
if file:
with zipfile.ZipFile(data, 'r') as f:
infos = f.infolist()
raw_data = {
i.filename: DictReader(f.read(i.filename).decode().split('\r\n'))
for i in infos if i not in mask
}
else:
with BytesIO(data) as buffer:
with zipfile.ZipFile(buffer, 'r') as f:
infos = f.infolist()
raw_data = {
i.filename: DictReader(f.read(i.filename).decode().split('\r\n'))
for i in infos if i not in mask
}
return raw_data
async def load_async(*args, model=False, file=True, mask=set()):
ops = (
_read_async(arg, file=file, mask=mask)
for arg in args
)
feeds = await asyncio.gather(*ops)
return _parse(feeds, model=model)
async def load_aiter(*args, model=False, file=True, chunk_size=1, mask=set()):
for arg in args:
with TextZipFile(arg, 'r') as z:
infos = z.infolist()
for info in infos:
name = info.filename
if name in mask:
continue
with z.open(name, 'r') as f:
reader = DictReader(f)
static_model = FILENAME_MODEL_MAPPING[name]
reader.fieldnames = normalize_names(static_model, reader.fieldnames)
buffer = []
for line in reader:
normalized_line = normalize_data(static_model, line)
if model:
data = static_model(**normalized_line)
else:
data = (normalized_line, arg, name)
if chunk_size > 1:
buffer.append(data)
if len(buffer) == chunk_size:
yield buffer
buffer = []
else:
yield data
if chunk_size > 1:
yield buffer
def _read(data, file=True, mask=set()):
if file:
with zipfile.ZipFile(data, 'r') as f:
infos = f.infolist()
raw_data = {
i.filename: DictReader(f.read(i.filename).decode().split('\r\n'))
for i in infos if i not in mask
}
else:
with BytesIO(data) as buffer:
with zipfile.ZipFile(buffer, 'r') as f:
infos = f.infolist()
raw_data = {
i.filename: DictReader(f.read(i.filename).decode().split('\r\n'))
for i in infos if i not in mask
}
return raw_data
def load(*args, model=False, file=True, mask=set()):
feeds = (
_read(arg, file=file, mask=mask)
for arg in args
)
return _parse(feeds, model=model)
def load_iter(*args, model=False, file=True, chunk_size=1, mask=set()):
if not file:
args = [BytesIO(arg) for arg in args]
for arg in args:
with TextZipFile(arg, 'r') as z:
infos = z.infolist()
for info in infos:
name = info.filename
if name in mask:
continue
with z.open(name, 'r') as f:
reader = DictReader(f)
static_model = FILENAME_MODEL_MAPPING[name]
reader.fieldnames = normalize_names(static_model, reader.fieldnames)
buffer = []
for line in reader:
normalized_line = normalize_data(static_model, line)
if model:
data = (static_model(**normalized_line), arg, name)
else:
data = (normalized_line, arg, name)
if chunk_size > 1:
buffer.append(data)
if len(buffer) == chunk_size:
yield buffer
buffer = []
else:
yield data
if chunk_size > 1:
yield buffer
if not file:
arg.close()
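# Editor-added usage sketch (the feed filename is an assumption):
#
# for record, source, name in load_iter("google_transit.zip", model=False):
#     # record is a dict keyed by the model's normalized field names;
#     # source is the zip argument and name the member file it came from.
#     ...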
def _parse(feeds, model=False):
data = {
'agency.txt': [],
'stops.txt': [],
'routes.txt': [],
'trips.txt': [],
'stop_times.txt': [],
'calendar.txt': [],
'calendar_dates.txt': [],
'fare_attributes.txt': [],
'fare_rules.txt': [],
'shapes.txt': [],
'frequencies.txt': [],
'transfers.txt': [],
'feed_info.txt': [],
}
for feed in feeds:
for file, reader in feed.items():
static_model = FILENAME_MODEL_MAPPING[file]
reader.fieldnames = normalize_names(static_model, reader.fieldnames)
if model:
data[file] += [static_model(**normalize_data(static_model, x)) for x in reader]
else:
data[file] += [normalize_data(static_model, x) for x in reader]
return data
def normalize_names(model, raw_data):
transforms = model.NAME_MAPPING
return [
transforms.get(name, None) or name.replace(model.PREFIX, '')
for name in raw_data
]
def normalize_data(model, raw_data):
transforms = model.DATA_MAPPING
return {
k: v if not transforms.get(k) else transforms[k](v)
for k, v in raw_data.items()
}
|
the-stack_0_4565 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Amazon Software License (the "License"). You may not #
# use this file except in compliance with the License. A copy of the #
# License is located at #
# #
# http://aws.amazon.com/asl/ #
# #
# or in the "license" file accompanying this file. This file is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, #
# express or implied. See the License for the specific language governing #
# permissions and limitations under the License. #
##############################################################################
from botocore.exceptions import ClientError
import boto3
import os
import logging
import base64
import json
import time
REKOGNITION_FACE_SIMILARITY_THRESHOLD = int(os.environ['RekognitionFaceSimilarityThreshold'])
COLLECTION_ID = os.environ['RekognitionCollectionName']
DYNAMODB_TABLE_NAME = os.environ['DynamoDBTableName']
LOG_LEVEL = os.environ['LogLevel']
SEND_ANONYMOUS_DATA = os.environ['SendAnonymousData']
dynamodb = boto3.client('dynamodb')
rekognition = boto3.client('rekognition')
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)
def generate_response(result, name, similarity):
return {
'statusCode': 200,
'headers': {"Content-Type": "application/json"},
'body': json.dumps({'result': result, 'name': name, 'similarity' : similarity})
}
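# Editor-added example (illustrative values): generate_response('OK', 'Alice', 99.2)
# returns a 200 response whose body decodes to
# {"result": "OK", "name": "Alice", "similarity": 99.2}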
def update_item(face_id, similarity):
ts = int(time.time())
dynamodb.update_item(
TableName=DYNAMODB_TABLE_NAME,
Key={'RekognitionId': {'S': face_id}},
UpdateExpression="SET GatePassed = :ts",
ExpressionAttributeValues={':ts':{'S': str(ts)}}
)
def lambda_handler(event, context):
# logger.info(event)
binary_image = base64.b64decode(event['body'])
try:
try:
response = rekognition.search_faces_by_image(
CollectionId=COLLECTION_ID,
Image={'Bytes': binary_image},
FaceMatchThreshold=REKOGNITION_FACE_SIMILARITY_THRESHOLD,
MaxFaces=1
)
except ClientError as err:
code = err.response['Error']['Code']
if code in ['ProvisionedThroughputExceededException', 'ThrottlingException']:
logger.exception(err)
elif code in ['InvalidParameterException']:
logger.info('No face in Rekognition')
else:
logger.exception(err)
return generate_response('INVALID', '', 0)
face_matches = response['FaceMatches']
if len(face_matches) > 0:
face_match = face_matches[0]
similarity = face_match['Similarity']
face = face_match['Face']
face_id = face['FaceId']
try:
response = dynamodb.get_item(
TableName=DYNAMODB_TABLE_NAME,
Key={'RekognitionId': {'S': face_id}}
)
name = response['Item']['Name']['S']
update_item(face_id, similarity)
except Exception as err:
logger.exception(err)
return generate_response('INVALID', '', 0)
logger.info('Above Rekognition Threshold. Similarity: {}'.format(similarity))
return generate_response('OK', name, similarity)
else:
logger.info('Similar Faces Not Found')
return generate_response('NO_MATCH', '', 0)
except Exception as err:
logger.exception(err)
return generate_response('INVALID', '', 0)
|
the-stack_0_4566 | # Copyright 2015 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`testcommon` --- Common test classes/utilities
===================================================
"""
# Stdlib
from unittest.mock import MagicMock
# External
import nose.tools as ntools
# SCION
from lib.errors import SCIONBaseError
class SCIONTestError(SCIONBaseError):
pass
def create_mock(attrs=None, class_=None):
if attrs is None:
attrs = []
if class_:
attrs.append("__class__")
m = MagicMock(spec_set=attrs)
if class_:
m.__class__ = class_
for attr in attrs:
value = MagicMock(spec_set=[])
if attr == "__class__" and class_:
value = class_
setattr(m, attr, value)
return m
def create_mock_full(kv=None, class_=None, return_value=None, side_effect=None):
"""
'kv' is a dict
"attr": val - directly sets attr to val.
"attr()": val - sets the return value of attr() to val.
"attr()...": val - sets the side_effects of attr() to val.
"""
def base(name):
return name.rstrip("().")
if not kv:
kv = {}
attrs = []
for k in kv:
attrs.append(base(k))
m = create_mock(attrs, class_=class_)
if return_value is not None:
m.return_value = return_value
if side_effect is not None:
m.side_effect = side_effect
for k, v in kv.items():
a = base(k)
if k.endswith("()..."):
f = getattr(m, a)
setattr(f, "side_effect", v)
elif k.endswith("()"):
f = getattr(m, a)
setattr(f, "return_value", v)
else:
setattr(m, a, v)
return m
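# Editor-added example of the three key conventions (values are illustrative):
#
# m = create_mock_full({"id": 5,
#                       "addr()": "127.0.0.1",
#                       "read()...": [1, 2, ValueError]})
# m.id      -> 5
# m.addr()  -> "127.0.0.1"
# m.read()  -> 1 on the first call, 2 on the second, then raises ValueError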
def assert_these_calls(mock, calls, any_order=False):
mock.assert_has_calls(calls, any_order=any_order)
ntools.eq_(len(mock.mock_calls), len(calls))
def assert_these_call_lists(mock, call_lists, any_order=False):
calls = []
for x in call_lists:
calls.extend(x.call_list())
assert_these_calls(mock, calls, any_order=any_order)
|
the-stack_0_4569 | # Complete list of functional components used by this extension.
#
# Components can be disabled with build options matching `goost_*_enabled=no`.
# A branch of components can be disabled as well, like: `goost_core_enabled=no`.
#
# NOTE: Components may not necessarily have structural meaning.
#
components = [
"core/image",
"core/math",
"scene/physics",
"editor",
]
def get_components():
comp = set()
for n in components:
parts = n.split("/")
comp.update(parts)
comp_list = list(comp)
comp_list.sort()
return comp_list
def get_child_components(parent):
comp_list = []
for n in components:
parts = n.split("/")
if not parent in parts:
continue
parts.reverse()
for p in parts:
if p == parent:
break
comp_list.append(p)
return comp_list
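# For example (editor-added), with the component list above:
#   get_components()              -> ["core", "editor", "image", "math", "physics", "scene"]
#   get_child_components("core")  -> ["image", "math"]
#   get_child_components("scene") -> ["physics"]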
#
# Complete list of all classes currently implemented in the extension,
# excluding any classes provided from within `modules/` directory.
#
# This is used by config.py::get_doc_classes(), and potentially allow to disable
# each of the class in the future.
#
class GoostClass:
def __init__(self, name, deps=None):
self.name = name
self.deps = list(deps) if deps else []
def add_depencency(self, goost_class):
self.deps.append(goost_class)
classes = [
"GoostEngine",
"GoostGeometry2D",
"GoostImage",
"GradientTexture2D",
"ImageBlender",
"ImageIndexed",
"InvokeState",
"LightTexture",
"LinkedList",
"ListNode",
"PolyBoolean2D",
"PolyBooleanParameters2D",
"PolyDecomp2D",
"PolyDecompParameters2D",
"PolyOffset2D",
"PolyOffsetParameters2D",
"PolyNode2D",
"PolyCircle2D",
"PolyRectangle2D",
"PolyShape2D",
"PolyCollisionShape2D",
"Random",
"Random2D",
"ShapeCast2D",
"VariantMap",
"VariantResource",
"VisualShape2D",
]
# Convert to dictionary, because we need to instantiate `GoostClass` nodes.
_classes = {}
for c in classes:
_classes[c] = GoostClass(c)
classes = _classes
# Define dependencies.
classes["GoostEngine"].add_depencency(classes["InvokeState"])
classes["GoostGeometry2D"].add_depencency(classes["PolyBoolean2D"])
classes["GoostGeometry2D"].add_depencency(classes["PolyDecomp2D"])
classes["GoostGeometry2D"].add_depencency(classes["PolyOffset2D"])
classes["LightTexture"].add_depencency(classes["GradientTexture2D"])
classes["LinkedList"].add_depencency(classes["ListNode"])
classes["PolyBoolean2D"].add_depencency(classes["PolyBooleanParameters2D"])
classes["PolyBoolean2D"].add_depencency(classes["PolyNode2D"])
classes["PolyDecomp2D"].add_depencency(classes["PolyDecompParameters2D"])
classes["PolyOffset2D"].add_depencency(classes["PolyOffsetParameters2D"])
classes["PolyCircle2D"].add_depencency(classes["PolyNode2D"])
classes["PolyRectangle2D"].add_depencency(classes["PolyNode2D"])
classes["PolyShape2D"].add_depencency(classes["PolyNode2D"])
classes["PolyCollisionShape2D"].add_depencency(classes["PolyNode2D"])
classes["Random2D"].add_depencency(classes["Random"])
def resolve_dependency(goost_class):
resolved = set()
def resolve(c, r_resolved):
for n in c.deps:
resolve(n, r_resolved)
r_resolved.add(c)
resolve(goost_class, resolved)
resolved_list = []
for c in resolved:
resolved_list.append(c.name)
return resolved_list
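# Illustrative sketch: with the dependencies declared above, resolving a class
# returns it together with everything it transitively needs (unordered, since a
# set is used internally):
#
#   >>> sorted(resolve_dependency(classes["PolyBoolean2D"]))
#   ['PolyBoolean2D', 'PolyBooleanParameters2D', 'PolyNode2D']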
classes_enabled = []
for c in classes:
classes_enabled.append(c)
classes_disabled = []
try:
import custom
try:
classes_disabled = custom.goost_classes_disabled
for c in classes_disabled:
if not c in classes:
raise NameError("Goost: Requested to disable non-existing class.")
classes_enabled.remove(c)
except AttributeError:
pass
except ImportError:
pass
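# Illustrative sketch (assumed optional file): dropping a `custom.py` next to
# this script is how individual classes are excluded, for example:
#
#   # custom.py
#   goost_classes_disabled = ["ImageBlender", "ImageIndexed"]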
|
the-stack_0_4570 | from typing import Optional, Dict
from cmd.package.HBShedPackageHandler import HBShedPackageHandler
from cmd.package.modules.Module import Module
from cmd.package.modules.ModulesHandler import ModulesHandler
class ApplyModulePeerDependencies:
def __init__(self, package: HBShedPackageHandler,
dependencies: Optional[Dict[str, str]]) -> None:
self.__package: HBShedPackageHandler = package
self.__dependencies: Optional[Dict[str, str]] = dependencies
def __apply(self):
self.__package.set_peer_dependencies(self.__dependencies)
self.__package.write()
def __apply_modules_peer_dependencies(self):
if self.__package.config().has_modules():
modules: ModulesHandler = ModulesHandler(self.__package)
module: Module
for module in modules.modules:
ApplyModulePeerDependencies(
package=module.package,
dependencies=self.__dependencies
).process()
def process(self):
self.__apply()
self.__apply_modules_peer_dependencies()
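    # Illustrative sketch (hypothetical handler and versions): the command is
    # expected to be driven roughly like this, pushing the same peerDependencies
    # into every nested module's package.json:
    #
    #   root = HBShedPackageHandler(...)  # however the root package is loaded
    #   ApplyModulePeerDependencies(
    #       package=root,
    #       dependencies={"react": "^17.0.2"},  # hypothetical peer dependencies
    #   ).process()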
|
the-stack_0_4573 | # Copyright (c) 2016 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import six
from oslo_log import log as logging
from aim.aim_lib.db import model
from aim.api import resource
from aim import exceptions
from aim import utils as aim_utils
LOG = logging.getLogger(__name__)
class VrfNotVisibleFromExternalNetwork(exceptions.AimException):
message = "%(vrf)s is not visible from %(ext_net)s."
class L3OutsideVrfChangeDisallowed(exceptions.AimException):
message = ("Cannot change VRF referenced by no-NAT L3Out %(l3out)s from "
"%(old_vrf)s to %(vrf)s.")
@six.add_metaclass(abc.ABCMeta)
class NatStrategy(object):
"""Interface for NAT behavior strategies.
Defines interface for configuring L3Outside in AIM to support
various kinds of NAT-ing.
All methods expect AIM resources as input parameters.
Example usage:
1. Decide a NAT-strategy to use
mgr = AimManager()
ctx = AimContext()
ns = DistributedNatStrategy(mgr) # or NoNatEdgeStrategy(mgr),
# or EdgeNatStrategy(mgr)
2. Create L3Outside and one or more ExternalNetworks. Subnets
may be created in the L3Outside
l3out = L3Outside(tenant_name='t1', name='out')
ext_net1 = ExternalNetwork(tenant_name='t1', l3out_name='out',
name='inet1')
ext_net2 = ExternalNetwork(tenant_name='t1', l3out_name='out',
name='inet2')
ns.create_l3outside(ctx, l3out)
ns.create_subnet(ctx, l3out, '40.40.40.1/24')
ns.create_external_network(ctx, ext_net1)
ns.create_external_network(ctx, ext_net2)
3. Allow traffic for certain IP-addresses through the external
networks; by default no traffic is allowed.
ns.update_external_cidrs(ctx, ext_net1, ['0.0.0.0/0'])
ns.update_external_cidrs(ctx, ext_net2, ['200.200.0.0/16',
'300.0.0.0/8'])
4. To provide external-connectivity to a VRF, connect the VRF to
ExternalNetwork with appropriate contracts.
ext_net1.provided_contract_names = ['http', 'icmp']
ext_net1.consumed_contract_names = ['arp']
vrf = VRF(...)
ns.connect_vrf(ctx, ext_net1, vrf)
5. Call connect_vrf() again to update the contracts
ext_net1.provided_contract_names = ['http', 'https']
ext_net1.consumed_contract_names = ['ping']
ns.connect_vrf(ctx, ext_net1, vrf)
6. Disallow external-connectivity to VRF
ns.disconnect_vrf(ctx, ext_net1, vrf)
7. Delete ExternalNetwork, subnet and L3Outside
ns.delete_external_network(ctx, ext_net1)
ns.delete_external_network(ctx, ext_net2)
ns.delete_subnet(ctx, l3out, '40.40.40.1/24')
ns.delete_l3outside(ctx, l3out)
"""
@abc.abstractmethod
def create_l3outside(self, ctx, l3outside,
vmm_domains=None, phys_domains=None):
"""Create L3Outside object if needed.
:param ctx: AIM context
:param l3outside: L3Outside AIM resource
:return: L3Outside resource
"""
@abc.abstractmethod
def delete_l3outside(self, ctx, l3outside):
"""Delete L3Outside object.
:param ctx: AIM context
:param l3outside: L3Outside AIM resource
:return:
"""
@abc.abstractmethod
def get_l3outside_resources(self, ctx, l3outside):
"""Get AIM resources that are created for an L3Outside object.
:param ctx: AIM context
:param l3outside: L3Outside AIM resource
:return: List of AIm resources
"""
@abc.abstractmethod
def create_subnet(self, ctx, l3outside, gw_ip_mask):
"""Create Subnet in L3Outside.
:param ctx: AIM context
:param l3outside: L3Outside AIM resource
:param gw_ip_mask: Gateway+CIDR of subnet to create
:return:
"""
@abc.abstractmethod
def delete_subnet(self, ctx, l3outside, gw_ip_mask):
"""Delete Subnet in L3Outside.
:param ctx: AIM context
:param l3outside: L3Outside AIM resource
:param gw_ip_mask: Gateway+CIDR of subnet to delete
:return:
"""
@abc.abstractmethod
def get_subnet(self, ctx, l3outside, gw_ip_mask):
"""Get Subnet in L3Outside with specified Gateway+CIDR.
:param ctx: AIM context
:param l3outside: L3Outside AIM resource
:param gw_ip_mask: Gateway+CIDR of subnet to fetch
:return: AIM Subnet if one is found
"""
@abc.abstractmethod
def create_external_network(self, ctx, external_network):
"""Create ExternalNetwork object if needed.
:param ctx: AIM context
:param external_network: ExternalNetwork AIM resource
:return: ExternalNetwork resource
"""
@abc.abstractmethod
def delete_external_network(self, ctx, external_network):
"""Delete ExternalNetwork object.
:param ctx: AIM context
:param external_network: ExternalNetwork AIM resource
:return:
"""
@abc.abstractmethod
def update_external_cidrs(self, ctx, external_network, external_cidrs):
"""Set the IP addresses for which external traffic is allowed.
:param ctx: AIM context
:param external_network: ExternalNetwork AIM resource
:param external_cidrs: List of CIDRs to allow
:return:
"""
@abc.abstractmethod
def connect_vrf(self, ctx, external_network, vrf):
"""Allow external connectivity to VRF.
Create or update NAT machinery to allow external
connectivity from a given VRF to an ExternalNetwork (L3Outside)
enforcing the policies specified in ExternalNetwork.
:param ctx: AIM context
:param external_network: AIM ExternalNetwork
:param vrf: AIM VRF
:return:
"""
@abc.abstractmethod
def disconnect_vrf(self, ctx, external_network, vrf):
"""Remove external connectivity for VRF.
Tear down connectivity between VRF and ExternalNetwork (L3Outside).
:param ctx: AIM context
:param external_network: AIM ExternalNetwork
:param vrf: AIM VRF
"""
@abc.abstractmethod
def read_vrfs(self, ctx, external_network):
"""Read external connectivity VRFs.
:param ctx: AIM context
:param external_network: AIM ExternalNetwork
"""
@abc.abstractmethod
def set_bd_l3out(self, ctx, bridge_domain, l3outside):
"""Add the l3out to the BD's associated l3out list if needed.
Right now only NoNat needs to do this.
:param ctx: AIM context
:param bridge_domain: BridgeDomain AIM resource
:param l3outside: L3Outside AIM resource
"""
@abc.abstractmethod
def unset_bd_l3out(self, ctx, bridge_domain, l3outside):
"""Remove the l3out from the BD's associated l3out list if needed.
Right now only NoNat needs to do this.
:param ctx: AIM context
:param bridge_domain: BridgeDomain AIM resource
:param l3outside: L3Outside AIM resource
"""
class NatStrategyMixin(NatStrategy):
"""Implements common functionality between different NAT strategies."""
def __init__(self, mgr):
self.mgr = mgr
self.db = model.CloneL3OutManager()
def create_l3outside(self, ctx, l3outside,
vmm_domains=None, phys_domains=None):
return self._create_l3out(ctx, l3outside,
vmm_domains=vmm_domains,
phys_domains=phys_domains)
def delete_l3outside(self, ctx, l3outside):
self._delete_l3out(ctx, l3outside)
def get_l3outside_resources(self, ctx, l3outside):
res = []
l3out = self.mgr.get(ctx, l3outside)
if l3out:
res.append(l3out)
for obj in self._get_nat_objects(ctx, l3out):
obj_db = self.mgr.get(ctx, obj)
if obj_db:
res.append(obj_db)
ext_vrf = self._vrf_by_name(ctx, l3out.vrf_name, l3out.tenant_name)
if ext_vrf:
res.append(ext_vrf)
return res
def create_external_network(self, ctx, external_network):
return self._create_ext_net(ctx, external_network)
def delete_external_network(self, ctx, external_network):
self._delete_ext_net(ctx, external_network)
def create_subnet(self, ctx, l3outside, gw_ip_mask):
l3outside = self.mgr.get(ctx, l3outside)
if l3outside:
nat_bd = self._get_nat_bd(ctx, l3outside)
sub = resource.Subnet(tenant_name=nat_bd.tenant_name,
bd_name=nat_bd.name,
gw_ip_mask=gw_ip_mask)
if not self.mgr.get(ctx, sub):
self.mgr.create(ctx, sub)
def delete_subnet(self, ctx, l3outside, gw_ip_mask):
l3outside = self.mgr.get(ctx, l3outside)
if l3outside:
nat_bd = self._get_nat_bd(ctx, l3outside)
sub = resource.Subnet(tenant_name=nat_bd.tenant_name,
bd_name=nat_bd.name,
gw_ip_mask=gw_ip_mask)
self.mgr.delete(ctx, sub)
def get_subnet(self, ctx, l3outside, gw_ip_mask):
l3outside = self.mgr.get(ctx, l3outside)
if l3outside:
nat_bd = self._get_nat_bd(ctx, l3outside)
sub = resource.Subnet(tenant_name=nat_bd.tenant_name,
bd_name=nat_bd.name,
gw_ip_mask=gw_ip_mask)
return self.mgr.get(ctx, sub)
def update_external_cidrs(self, ctx, external_network, external_cidrs):
ext_net_db = self.mgr.get(ctx, external_network)
if ext_net_db:
self._manage_external_subnets(ctx, ext_net_db, external_cidrs)
# This is only needed for NoNat
def set_bd_l3out(self, ctx, bridge_domain, l3outside):
pass
# This is only needed for NoNat
def unset_bd_l3out(self, ctx, bridge_domain, l3outside):
pass
def _create_l3out(self, ctx, l3out, vmm_domains=None, phys_domains=None):
"""Create NAT EPG etc. in addition to creating L3Out."""
with ctx.store.begin(subtransactions=True):
tenant = resource.Tenant(name=l3out.tenant_name)
if not self.mgr.get(ctx, tenant):
self.mgr.create(ctx, tenant)
l3out_db = self.mgr.get(ctx, l3out)
if not l3out_db:
ext_vrf = self._get_nat_vrf(ctx, l3out)
if not self.mgr.get(ctx, ext_vrf):
self.mgr.create(ctx, ext_vrf)
l3out_db = copy.copy(l3out)
l3out_db.vrf_name = ext_vrf.name
l3out_db = self.mgr.create(ctx, l3out_db)
self._create_nat_epg(ctx, l3out_db,
vmm_domains=vmm_domains,
phys_domains=phys_domains)
return l3out_db
def _delete_l3out(self, ctx, l3out, delete_epg=True):
"""Delete NAT EPG etc. in addition to deleting L3Out."""
with ctx.store.begin(subtransactions=True):
l3out_db = self.mgr.get(ctx, l3out)
if l3out_db:
for en in self.mgr.find(ctx, resource.ExternalNetwork,
tenant_name=l3out.tenant_name,
l3out_name=l3out.name):
self.delete_external_network(ctx, en)
if not l3out_db.monitored:
self.mgr.delete(ctx, l3out)
if delete_epg:
self._delete_nat_epg(ctx, l3out_db)
# delete NAT VRF if any
self.mgr.delete(ctx, self._get_nat_vrf(ctx, l3out_db))
def _create_ext_net(self, ctx, ext_net):
with ctx.store.begin(subtransactions=True):
ext_net_db = self.mgr.get(ctx, ext_net)
if not ext_net_db:
ext_net_db = self.mgr.create(ctx, ext_net)
l3out = self.mgr.get(ctx,
self._ext_net_to_l3out(ext_net))
contract = self._get_nat_contract(ctx, l3out)
ext_net_db = self._update_contract(ctx, ext_net_db, contract,
is_remove=False)
return ext_net_db
def _delete_ext_net(self, ctx, ext_net):
with ctx.store.begin(subtransactions=True):
ext_net_db = self.mgr.get(ctx, ext_net)
if ext_net_db:
self._manage_external_subnets(ctx, ext_net_db, [])
if not ext_net_db.monitored:
self.mgr.delete(ctx, ext_net)
else:
l3out = self.mgr.get(
ctx, self._ext_net_to_l3out(ext_net))
contract = self._get_nat_contract(ctx, l3out)
self._update_contract(ctx, ext_net_db, contract,
is_remove=True)
def _manage_external_subnets(self, ctx, ext_net, new_cidrs):
new_cidrs = new_cidrs[:] if new_cidrs else []
ext_sub_attr = dict(tenant_name=ext_net.tenant_name,
l3out_name=ext_net.l3out_name,
external_network_name=ext_net.name)
old_ext_subs = self.mgr.find(ctx, resource.ExternalSubnet,
**ext_sub_attr)
with ctx.store.begin(subtransactions=True):
for sub in old_ext_subs:
if sub.cidr in new_cidrs:
new_cidrs.remove(sub.cidr)
else:
self.mgr.delete(ctx, sub)
for c in new_cidrs:
self.mgr.create(ctx, resource.ExternalSubnet(cidr=c,
**ext_sub_attr))
def _ext_net_to_l3out(self, ext_net):
return resource.L3Outside(tenant_name=ext_net.tenant_name,
name=ext_net.l3out_name)
def _display_name(self, res):
return (getattr(res, 'display_name', None) or res.name)
def _get_nat_ap_epg(self, ctx, l3out):
d_name = self._display_name(l3out)
ap_name = getattr(self, 'app_profile_name', None) or l3out.name
ap_name = self._scope_name_if_common(l3out.tenant_name, ap_name)
ap_display_name = aim_utils.sanitize_display_name(ap_name or d_name)
ap = resource.ApplicationProfile(
tenant_name=l3out.tenant_name,
name=ap_name,
display_name=ap_display_name)
epg = resource.EndpointGroup(
tenant_name=ap.tenant_name,
app_profile_name=ap.name,
name='EXT-%s' % l3out.name,
display_name=aim_utils.sanitize_display_name('EXT-%s' % d_name))
return (ap, epg)
def _get_nat_contract(self, ctx, l3out):
d_name = self._display_name(l3out)
contract_name = self._scope_name_if_common(l3out.tenant_name,
'EXT-%s' % l3out.name)
return resource.Contract(
tenant_name=l3out.tenant_name,
name=contract_name,
display_name=self._scope_name_if_common(
l3out.tenant_name,
aim_utils.sanitize_display_name('EXT-%s' % d_name)))
def _get_nat_bd(self, ctx, l3out):
d_name = self._display_name(l3out)
bd_name = self._scope_name_if_common(l3out.tenant_name,
'EXT-%s' % l3out.name)
return resource.BridgeDomain(
tenant_name=l3out.tenant_name,
name=bd_name,
display_name=self._scope_name_if_common(
l3out.tenant_name,
aim_utils.sanitize_display_name('EXT-%s' % d_name)),
limit_ip_learn_to_subnets=True,
l3out_names=[l3out.name])
def _get_nat_vrf(self, ctx, l3out):
d_name = self._display_name(l3out)
vrf_name = self._scope_name_if_common(l3out.tenant_name,
'EXT-%s' % l3out.name)
return resource.VRF(
tenant_name=l3out.tenant_name,
name=vrf_name,
display_name=self._scope_name_if_common(
l3out.tenant_name,
aim_utils.sanitize_display_name('EXT-%s' % d_name)))
def _get_nat_objects(self, ctx, l3out):
sani = aim_utils.sanitize_display_name
scope = self._scope_name_if_common
d_name = self._display_name(l3out)
filter_name = scope(l3out.tenant_name, 'EXT-%s' % l3out.name)
fltr = resource.Filter(
tenant_name=l3out.tenant_name,
name=filter_name,
display_name=sani(scope(l3out.tenant_name, 'EXT-%s' % d_name)))
entry = resource.FilterEntry(
tenant_name=fltr.tenant_name,
filter_name=fltr.name,
name='Any',
display_name='Any')
contract = self._get_nat_contract(ctx, l3out)
subject = resource.ContractSubject(
tenant_name=contract.tenant_name,
contract_name=contract.name,
name='Allow', display_name='Allow')
subject_filter = resource.ContractSubjFilter(
tenant_name=contract.tenant_name,
contract_name=contract.name,
contract_subject_name='Allow',
filter_name=fltr.name)
bd = self._get_nat_bd(ctx, l3out)
bd.vrf_name = l3out.vrf_name
ap, epg = self._get_nat_ap_epg(ctx, l3out)
vm_doms = getattr(
self, 'vmm_domains',
[{'type': d.type, 'name': d.name} for d in
self.mgr.find(ctx, resource.VMMDomain)])
phy_doms = getattr(
self, 'physical_domains',
[{'name': d.name} for d in
self.mgr.find(ctx, resource.PhysicalDomain)])
epg.bd_name = bd.name
epg.provided_contract_names = [contract.name]
epg.consumed_contract_names = [contract.name]
epg.vmm_domains = vm_doms
epg.physical_domains = phy_doms
return [fltr, entry, contract, subject, subject_filter, bd, ap, epg]
def _select_domains(self, objs, vmm_domains=None, phys_domains=None):
for obj in objs:
if isinstance(obj, resource.EndpointGroup):
if vmm_domains is not None:
obj.vmm_domains = vmm_domains
if phys_domains is not None:
obj.physical_domains = phys_domains
def _create_nat_epg(self, ctx, l3out, vmm_domains=None, phys_domains=None):
objs = self._get_nat_objects(ctx, l3out)
self._select_domains(objs, vmm_domains=vmm_domains,
phys_domains=phys_domains)
with ctx.store.begin(subtransactions=True):
for r in objs:
if not self.mgr.get(ctx, r):
self.mgr.create(ctx, r)
def _delete_nat_epg(self, ctx, l3out):
with ctx.store.begin(subtransactions=True):
nat_bd = self._get_nat_bd(ctx, l3out)
for sub in self.mgr.find(ctx, resource.Subnet,
tenant_name=nat_bd.tenant_name,
bd_name=nat_bd.name):
self.mgr.delete(ctx, sub)
for r in reversed(self._get_nat_objects(ctx, l3out)):
if isinstance(r, resource.ApplicationProfile):
epgs = self.mgr.find(ctx, resource.EndpointGroup,
tenant_name=r.tenant_name,
app_profile_name=r.name)
if epgs:
continue
self.mgr.delete(ctx, r)
def _update_contract(self, ctx, ext_net, contract, is_remove):
if is_remove:
prov = [c for c in ext_net.provided_contract_names
if c != contract.name]
cons = [c for c in ext_net.consumed_contract_names
if c != contract.name]
else:
prov = [contract.name]
prov.extend(ext_net.provided_contract_names)
cons = [contract.name]
cons.extend(ext_net.consumed_contract_names)
ext_net = self.mgr.update(ctx, ext_net,
provided_contract_names=prov,
consumed_contract_names=cons)
return ext_net
def _is_visible(self, target_tenant, from_tenant):
return (target_tenant == from_tenant or target_tenant == 'common')
def _vrf_by_name(self, ctx, vrf_name, tenant_name_hint):
vrfs = self.mgr.find(ctx, resource.VRF,
tenant_name=tenant_name_hint,
name=vrf_name)
if vrfs:
return vrfs[0]
vrfs = self.mgr.find(ctx, resource.VRF, tenant_name='common',
name=vrf_name)
if vrfs:
return vrfs[0]
def _scope_name_if_common(self, tenant_name, name):
if tenant_name == 'common':
scope = getattr(self, 'common_scope', None)
scope = scope + '_' if scope else ''
return aim_utils.sanitize_display_name(scope + name)
return name
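    # Illustrative sketch: assuming the concrete strategy sets
    # common_scope = 'ostack', names are only prefixed for the common tenant:
    #
    #   _scope_name_if_common('common', 'EXT-out1')   -> 'ostack_EXT-out1'
    #   _scope_name_if_common('tenant1', 'EXT-out1')  -> 'EXT-out1'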
class NoNatStrategy(NatStrategyMixin):
"""No NAT Strategy.
Provides direct external connectivity without any network
address translation.
"""
def __init__(self, mgr):
super(NoNatStrategy, self).__init__(mgr)
def delete_external_network(self, ctx, external_network):
"""Clean-up any connected VRFs before deleting the external network."""
with ctx.store.begin(subtransactions=True):
ext_net = self.mgr.get(ctx, external_network)
if not ext_net:
return
l3out = self.mgr.get(ctx,
self._ext_net_to_l3out(external_network))
vrf = self._vrf_by_name(ctx, l3out.vrf_name, l3out.tenant_name)
if vrf:
self._disconnect_vrf_from_l3out(ctx, l3out, vrf)
self._delete_ext_net(ctx, ext_net)
def connect_vrf(self, ctx, external_network, vrf):
"""Allow external connectivity to VRF.
Make external_network provide/consume specified contracts.
Locate BDs referring to the VRF, and include L3Outside
in their l3out_names.
"""
with ctx.store.begin(subtransactions=True):
if not self._is_visible(vrf.tenant_name,
external_network.tenant_name):
raise VrfNotVisibleFromExternalNetwork(
vrf=vrf, ext_net=external_network)
ext_net = self.mgr.get(ctx, external_network)
if not ext_net:
return
l3out = self.mgr.get(ctx,
self._ext_net_to_l3out(external_network))
old_vrf = self._vrf_by_name(ctx, l3out.vrf_name,
l3out.tenant_name)
if not old_vrf or old_vrf.identity != vrf.identity:
LOG.error('connect_vrf: cannot change VRF connected to '
'no-NAT L3Outside %s',
l3out)
raise L3OutsideVrfChangeDisallowed(l3out=l3out,
old_vrf=old_vrf, vrf=vrf)
nat_bd = self._get_nat_bd(ctx, l3out)
self._set_bd_l3out(ctx, l3out, vrf, exclude_bd=nat_bd)
contract = self._get_nat_contract(ctx, l3out)
prov = list(set(external_network.provided_contract_names +
[contract.name]))
cons = list(set(external_network.consumed_contract_names +
[contract.name]))
self.mgr.update(ctx, external_network,
provided_contract_names=prov,
consumed_contract_names=cons)
def disconnect_vrf(self, ctx, external_network, vrf):
"""Remove external connectivity for VRF.
Remove contracts provided/consumed by external_network.
Locate BDs referring to the VRF, and exclude L3Outside
from their l3out_names.
"""
with ctx.store.begin(subtransactions=True):
ext_net = self.mgr.get(ctx, external_network)
if not ext_net:
return
l3out = self.mgr.get(ctx,
self._ext_net_to_l3out(external_network))
old_vrf = self._vrf_by_name(ctx, l3out.vrf_name,
l3out.tenant_name)
if old_vrf and old_vrf.identity != vrf.identity:
LOG.info('disconnect_vrf: %s is not connected to %s',
ext_net, vrf)
return
self._disconnect_vrf_from_l3out(ctx, l3out, vrf)
contract = self._get_nat_contract(ctx, l3out)
self.mgr.update(ctx, external_network,
provided_contract_names=[contract.name],
consumed_contract_names=[contract.name])
def read_vrfs(self, ctx, external_network):
l3out = self.mgr.get(ctx,
self._ext_net_to_l3out(external_network))
vrf = self._vrf_by_name(ctx, l3out.vrf_name, l3out.tenant_name)
return [vrf] if vrf else []
def set_bd_l3out(self, ctx, bridge_domain, l3outside):
bridge_domain = self.mgr.get(ctx, bridge_domain)
if bridge_domain and l3outside.name not in bridge_domain.l3out_names:
self.mgr.update(
ctx, bridge_domain,
l3out_names=bridge_domain.l3out_names + [l3outside.name])
def unset_bd_l3out(self, ctx, bridge_domain, l3outside):
bridge_domain = self.mgr.get(ctx, bridge_domain)
if bridge_domain and l3outside.name in bridge_domain.l3out_names:
bridge_domain.l3out_names.remove(l3outside.name)
self.mgr.update(ctx, bridge_domain,
l3out_names=bridge_domain.l3out_names)
def _get_bds_in_vrf_for_l3out(self, ctx, vrf, l3out):
if vrf.tenant_name == 'common' and l3out.tenant_name == 'common':
# BDs in all tenants are candidates - locate all BDs whose
# vrf_name matches vrf.name, and exclude those that have a
# local VRF aliasing the given VRF.
all_bds = self.mgr.find(ctx, resource.BridgeDomain,
vrf_name=vrf.name)
bd_tenants = set([b.tenant_name for b in all_bds])
bd_tenants = [t for t in bd_tenants
if not self.mgr.get(
ctx, resource.VRF(tenant_name=t, name=vrf.name))]
return [b for b in all_bds if b.tenant_name in bd_tenants]
elif (vrf.tenant_name == 'common' or
vrf.tenant_name == l3out.tenant_name):
# VRF and L3out are visible only to BDs in l3out's tenant
return self.mgr.find(ctx, resource.BridgeDomain,
tenant_name=l3out.tenant_name,
vrf_name=vrf.name)
# Other combinations of L3Out and VRF are not valid
# configurations and can be excluded:
# 1. L3out in common, VRF not in common: VRF is not
# visible to L3out
# 2. L3Out and VRF are in different non-common tenants:
# VRF is not visible to L3out
return []
def _set_bd_l3out(self, ctx, l3outside, vrf, exclude_bd=None):
# update all the BDs
for bd in self._get_bds_in_vrf_for_l3out(ctx, vrf, l3outside):
if exclude_bd and exclude_bd.identity == bd.identity:
continue
# Add L3Out to existing list
if l3outside.name not in bd.l3out_names:
self.mgr.update(ctx, bd,
l3out_names=bd.l3out_names + [l3outside.name])
def _unset_bd_l3out(self, ctx, l3outside, vrf, exclude_bd=None):
# update all the BDs
for bd in self._get_bds_in_vrf_for_l3out(ctx, vrf, l3outside):
if exclude_bd and exclude_bd.identity == bd.identity:
continue
# Remove L3Out from existing list
if l3outside.name in bd.l3out_names:
bd.l3out_names.remove(l3outside.name)
self.mgr.update(ctx, bd, l3out_names=bd.l3out_names)
def _disconnect_vrf_from_l3out(self, ctx, l3outside, vrf):
nat_bd = self._get_nat_bd(ctx, l3outside)
self._unset_bd_l3out(ctx, l3outside, vrf, exclude_bd=nat_bd)
class DistributedNatStrategy(NatStrategyMixin):
"""Distributed NAT Strategy.
Provides external connectivity with network address
translation (DNAT/SNAT) where the translation is distributed
amongst nodes in the fabric.
"""
def delete_external_network(self, ctx, external_network):
"""Delete external-network from main and cloned L3Outs.
"""
with ctx.store.begin(subtransactions=True):
# Delete specified external-network from all cloned L3Outs.
# Delete external-network from main L3Out.
l3out = self.mgr.get(ctx,
self._ext_net_to_l3out(external_network))
ext_net_db = self.mgr.get(ctx, external_network)
if l3out and ext_net_db:
clone_l3outs = self._find_l3out_clones(ctx, l3out)
for clone in clone_l3outs:
clone_ext_net = resource.ExternalNetwork(
tenant_name=clone.tenant_name,
l3out_name=clone.name,
name=ext_net_db.name)
self._delete_ext_net(ctx, clone_ext_net)
self._delete_unused_l3out(ctx, clone)
self._delete_ext_net(ctx, ext_net_db)
def update_external_cidrs(self, ctx, external_network, external_cidrs):
"""Update external CIDRs in main and cloned ExternalNetworks."""
l3out = self.mgr.get(ctx, self._ext_net_to_l3out(external_network))
ext_net_db = self.mgr.get(ctx, external_network)
if l3out and ext_net_db:
clone_l3outs = self._find_l3out_clones(ctx, l3out)
with ctx.store.begin(subtransactions=True):
for clone in clone_l3outs:
clone_ext_net = resource.ExternalNetwork(
tenant_name=clone.tenant_name,
l3out_name=clone.name,
name=external_network.name)
self._manage_external_subnets(ctx, clone_ext_net,
external_cidrs)
self._manage_external_subnets(ctx, ext_net_db,
external_cidrs)
def connect_vrf(self, ctx, external_network, vrf):
"""Allow external connectivity to VRF.
Create shadow L3Outside for L3Outside-VRF combination
in VRF's tenant, if required.
Create ExternalNetwork and ExternalSubnet(s) in the shadow
L3Out, if required.
Set vrf_name of shadow L3Outside to VRF.
"""
with ctx.store.begin(subtransactions=True):
return self._create_shadow(ctx, external_network, vrf)
def disconnect_vrf(self, ctx, external_network, vrf):
"""Remove external connectivity for VRF.
Delete ExternalNetwork and contained ExternalSubnet
in the shadow L3Outside. Remove shadow L3Outside if
there are no more ExternalNetworks in the shadow
L3Outside.
"""
with ctx.store.begin(subtransactions=True):
self._delete_shadow(ctx, external_network, vrf)
def read_vrfs(self, ctx, external_network):
l3out = self.mgr.get(ctx,
self._ext_net_to_l3out(external_network))
result = []
for c in self.db.get_clones(ctx, l3out):
l3c = self.mgr.get(ctx, resource.L3Outside(tenant_name=c[0],
name=c[1]))
if l3c:
vrf = self.mgr.get(
ctx, resource.VRF(tenant_name=l3c.tenant_name,
name=l3c.vrf_name))
if vrf:
result.append(vrf)
return result
def _generate_l3out_name(self, l3outside, vrf):
# Generate a name based on its relationship with VRF
name = '%s-%s' % (l3outside.name, vrf.name)
display_name = aim_utils.sanitize_display_name(
'%s-%s' % (self._display_name(l3outside),
self._display_name(vrf)))
return (name, display_name)
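    # Illustrative sketch: cloning L3Out 'out' for VRF 'vrf1' simply joins the
    # two names, e.g. roughly ('out-vrf1', 'out-vrf1') for
    # L3Outside(tenant_name='t1', name='out') and VRF(tenant_name='t2', name='vrf1').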
def _make_l3out_clone(self, ctx, l3out, vrf):
new_tenant = vrf.tenant_name
new_name, new_display_name = self._generate_l3out_name(l3out, vrf)
clone_l3out = resource.L3Outside(
tenant_name=new_tenant,
name=new_name,
display_name=new_display_name,
vrf_name=vrf.name)
return clone_l3out
def _create_shadow(self, ctx, ext_net, vrf, with_nat_epg=True):
"""Clone ExternalNetwork as a shadow."""
ext_net_db = self.mgr.get(ctx, ext_net)
if not ext_net_db:
return
l3out = self.mgr.get(ctx, self._ext_net_to_l3out(ext_net_db))
clone_l3out = self._make_l3out_clone(ctx, l3out, vrf)
clone_ext_net = resource.ExternalNetwork(
tenant_name=clone_l3out.tenant_name,
l3out_name=clone_l3out.name,
display_name=ext_net_db.display_name,
**{k: getattr(ext_net, k)
for k in ['name',
'provided_contract_names',
'consumed_contract_names']})
if with_nat_epg:
_, nat_epg = self._get_nat_ap_epg(ctx, l3out)
clone_ext_net.nat_epg_dn = nat_epg.dn
with ctx.store.begin(subtransactions=True):
self.mgr.create(ctx, clone_l3out, overwrite=True)
self.mgr.create(ctx, clone_ext_net, overwrite=True)
cidrs = self.mgr.find(ctx, resource.ExternalSubnet,
tenant_name=ext_net_db.tenant_name,
l3out_name=ext_net_db.l3out_name,
external_network_name=ext_net_db.name)
cidrs = [c.cidr for c in cidrs]
self._manage_external_subnets(ctx, clone_ext_net, cidrs)
# Set this item as a clone
if not self.db.get(ctx, clone_l3out):
self.db.set(ctx, l3out, clone_l3out)
return clone_ext_net
def _delete_shadow(self, ctx, ext_net, vrf):
l3out = self.mgr.get(ctx, self._ext_net_to_l3out(ext_net))
clone_l3out = resource.L3Outside(
tenant_name=vrf.tenant_name,
name=self._generate_l3out_name(l3out, vrf)[0])
clone_ext_net = resource.ExternalNetwork(
tenant_name=clone_l3out.tenant_name,
l3out_name=clone_l3out.name,
name=ext_net.name)
with ctx.store.begin(subtransactions=True):
self._delete_ext_net(ctx, clone_ext_net)
self._delete_unused_l3out(ctx, clone_l3out)
def _find_l3out_clones(self, ctx, l3outside):
clone_keys = self.db.get_clones(ctx, l3outside)
return [resource.L3Outside(tenant_name=x[0], name=x[1])
for x in clone_keys]
def _delete_unused_l3out(self, ctx, l3out):
ens = self.mgr.find(ctx, resource.ExternalNetwork,
tenant_name=l3out.tenant_name,
l3out_name=l3out.name)
if not ens:
self._delete_l3out(ctx, l3out, delete_epg=False)
class EdgeNatStrategy(DistributedNatStrategy):
"""Edge NAT Strategy.
Provides external connectivity with network address
translation (DNAT/SNAT) where the translation is centralized
in a node at the edge of the fabric.
"""
def connect_vrf(self, ctx, external_network, vrf, external_cidrs=None):
"""Allow external connectivity to VRF.
Create shadow L3Outside for L3Outside-VRF combination
in VRF's tenant, if required.
Create ExternalNetwork and ExternalSubnet in the shadow
L3Out, if required.
Set vrf_name of shadow L3Outside to VRF.
"""
with ctx.store.begin(subtransactions=True):
return self._create_shadow(ctx, external_network, vrf,
with_nat_epg=False)
def _make_l3out_clone(self, ctx, l3out, vrf):
clone_l3out = super(EdgeNatStrategy, self)._make_l3out_clone(
ctx, l3out, vrf)
# TODO(amitbose) modify the clone_l3out node-profile etc
return clone_l3out
|
the-stack_0_4575 | #!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import click
from polyaxon import settings
from polyaxon.api import POLYAXON_CLOUD_HOST
from polyaxon.logger import clean_outputs
from polyaxon.utils.formatting import Printer
from polyaxon.utils.http_utils import clean_host
def get_dashboard_url(
base: str = "ui", subpath: str = "", use_cloud: bool = False
) -> str:
host = POLYAXON_CLOUD_HOST if use_cloud else clean_host(settings.CLIENT_CONFIG.host)
dashboard_url = "{}/{}/".format(host, base)
if subpath:
return "{}{}/".format(dashboard_url, subpath.rstrip("/"))
return dashboard_url
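# Illustrative sketch (hypothetical host): assuming clean_host() returns the
# configured host unchanged, e.g. "https://polyaxon.example.com":
#
#   >>> get_dashboard_url()
#   'https://polyaxon.example.com/ui/'
#   >>> get_dashboard_url(subpath="orgs/acme/projects")
#   'https://polyaxon.example.com/ui/orgs/acme/projects/'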
def get_dashboard(dashboard_url: str, url_only: bool, yes: bool):
if url_only:
Printer.print_header("The dashboard is available at: {}".format(dashboard_url))
sys.exit(0)
if yes or click.confirm(
"Dashboard page will now open in your browser. Continue?",
default=True,
):
click.launch(dashboard_url)
@click.command()
@click.option(
"--yes",
"-y",
is_flag=True,
default=False,
help="Automatic yes to prompts. "
'Assume "yes" as answer to all prompts and run non-interactively.',
)
@click.option(
"--url", is_flag=True, default=False, help="Print the url of the dashboard."
)
@clean_outputs
def dashboard(yes, url):
"""Open dashboard in browser."""
get_dashboard(dashboard_url=get_dashboard_url(), url_only=url, yes=yes)
|
the-stack_0_4576 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 20 22:04:17 2019
@author: Jiupeng
"""
#Modified from Jiupeng Hu
#from __future__ import print_function
from collections import defaultdict
import os
import sys
import datetime
def decdeg2dms(dd):
deg, mnt = divmod(dd*60.0, 60)
return int(deg), mnt
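# Illustrative sketch: decimal degrees are split into whole degrees and decimal
# minutes, e.g. decdeg2dms(34.0522) returns roughly (34, 3.132).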
def gen_sta_hypo(stationin):
    output = 'station.dat'
    # Use context managers so both files are flushed and closed properly.
    with open(output, 'w') as g, open(stationin, 'r') as fr:
        for line in fr.readlines():
            line = line.strip().split()
            latD, latM = decdeg2dms(float(line[1]))
            lonD, lonM = decdeg2dms(float(line[0]))
            for channel in ['001', '002', '003']:
                g.write('{:<5s} {:2s} {:3s} {:3d} {:7.4f} {:3d} {:7.4f}E{:4d}\n'.format(line[3], line[2], channel, latD, latM, lonD, lonM, int(float(line[5])*1000)))
                #g.write('{:<5s} {:2s} {:3s} {:3d} {:7.4f} {:3d} {:7.4f}E{:4d}\n'.format(line[3], line[2], channel, latD, latM, lonD, lonM, 0))
class Event(object):
def __init__(self,line):
eventParts = line.split()
# assert len(eventParts) == 10
self.no = eventParts[0]
self.year = eventParts[1]
self.month = eventParts[2]
self.day = eventParts[3]
self.stime = eventParts[4] # time string
self.dtime = eventParts[5] # time delta to 00:00
self.std = eventParts[6]
self.lat = eventParts[7]
self.lon = eventParts[8]
self.depth = eventParts[9]
self.mag = eventParts[10]
self.stations = set([])
def setSta(self, sta):
self.sta = sta
def setPicks(self, stationPicks):
self.stationPicks = stationPicks
def __repr__(self):
return ' '.join([self.year, self.month, self.day, self.stime, self.lat+self.lon])
class Pick(object):
def __init__(self, line):
phaseParts = line.split()
self.net = phaseParts[0]
self.staN = phaseParts[1]
self.sta = '.'.join([self.net, self.staN])
self.phase = phaseParts[2]
self.dtime = phaseParts[3] # time to 00:00
self.ttime = phaseParts[4] # travel time
self.pamp = phaseParts[5] # P phase amplitude
self.error = phaseParts[6] # travel time errors from taup_time
def __str__(self):
return self.net+self.sta+self.dtime
def __repr__(self):
return ' '.join([self.net, self.sta,self.phase,self.ttime, self.dtime, self.pamp, self.error])
def isEqLine(line):
if line[19] in ['P','S']:
return False
else:
return True
class SeismicReport(object):
def __init__(self, eventsFile):
self.events = []
self.readEventsFile(eventsFile)
def readEventsFile(self, eventsFile):
eventNo = 0
stationPicks = []
with open(eventsFile, 'r') as f:
line = f.readline()
            # Process the first line separately
while line:
# if line[0].isspace(): # Event line start with spaces
if isEqLine(line):
pickNo = 0
eventNo += 1
eventTemp = Event(line)
line = f.readline()
break
else:
line = f.readline()
while line:
# if line[0].isspace():
if isEqLine(line):
if stationPicks:
eventTemp.setPicks(stationPicks)
self.events.append(eventTemp)
pickNo = 0
eventNo += 1
stationPicks = []
eventTemp = Event(line)
# elif line.startswith(stationPrompt):
# elif line[0].isalpha():
elif not isEqLine(line):
pickNo += 1
pickTemp = Pick(line)
eventTemp.stations.add(pickTemp.sta)
stationPicks.append(pickTemp)
else:
print('Error!')
line = f.readline()
eventTemp.setPicks(stationPicks)
self.events.append(eventTemp)
def makeCatlog(self, phases=['P', 'Pg']):
for event in self.events:
for pick in event.stationPicks:
if pick.phase.strip() in phases:
self.show([pick.net, pick.sta, pick.phase, event.year, event.month, event.day,
pick.dtime, event.lon, event.lat, event.depth])
print("Eq: ",event.no)
def makeHypoPhase(self):
# Event format from "Summary header format Y2000"
eventFormat="{:4s}{:2s}{:2s}{:02d}{:02d}{:02d}{:02d}{:02d} {:>2d}{:02d}{:>3d}E{:>2d}{:02d}{:>3d}{:02d} {:1s}{:>1d}{:02d}"
for event in self.events:
# if len(event.stations) < 15:
# continue
hour, minute, second = event.stime.split(':')
mag1,mag2 = event.mag.split('.')
mag1 = int(mag1)
mag2 = int(mag2[0:2])
if float(event.mag) < 0:
mag1 = 0
mag2 = 0
sec1 = second[0:2]
sec2 = second[3:5]
lat1, lat2, lat3 = self.processLatLon(event.lat)
lon1, lon2, lon3 = self.processLatLon(event.lon)
dep1, dep2 = self.processDep(event.depth)
# print("0123456789012345678901234567890123456789012345678901234567890")
print(eventFormat.format(event.year,event.month,event.day,int(hour),int(minute),int(sec1),int(sec2),lat1,lat2,lat3,lon1,lon2,lon3,dep1,dep2,'L',mag1,mag2))
otimeStr = event.year+event.month+event.day+" "+event.stime
otime = datetime.datetime.strptime(otimeStr, '%Y%m%d %H:%M:%S.%f')
baseTime = otime - datetime.timedelta(seconds=otime.second, microseconds=otime.microsecond)
tmpPicks = event.stationPicks.copy()
for sta in event.stations:
p_flag = False
p_label = ' '
s_flag = False
s_lable = ' '
sta_code = "001"
tRes = ' '
pWeight = ' '
pSec = ''
sSec =''
sec1 = ''
sec2 = ''
for pick in tmpPicks[::-1]:
if pick.sta == sta:
if pick.phase == 'P':
p_travel_time = pick.ttime
p_flag = True
if pick.phase == 'S':
s_travel_time = pick.ttime
s_flag = True
if p_flag:
ptime = otime + datetime.timedelta(seconds=float(p_travel_time))
pDelta = ptime - baseTime
p_label = 'P'
# if(int(pDelta.seconds) > 100):
# print('Error')
pSec = f'{pDelta.seconds:0>2}'
sec1 = f'{int(pDelta.microseconds/10000):0>2}'
if s_flag:
stime = otime + datetime.timedelta(seconds=float(s_travel_time))
sDelta = stime - baseTime
s_lable = 'S'
# if(int(sDelta.seconds) > 100):
# print('Error')
sSec = f'{sDelta.seconds:0>2}'
sec2 = f'{int(sDelta.microseconds/10000):0>2}'
phaseFormat="{:<5s}{:2s} {:3s} {:1s} {:4d}{:02d}{:02d}{:02d}{:02d}{:>3s}{:2s}{:4s}{:3s}{:>3s}{:2s} {:1s}"
print(phaseFormat.format(
sta.split('.')[1], sta.split('.')[0], sta_code, p_label,
baseTime.year, baseTime.month,baseTime.day,baseTime.hour,baseTime.minute,
pSec, sec1, tRes, pWeight, sSec, sec2, s_lable
))
print('')
# print("0123456789012345678901234567890123456789012345678901234567890")
def processLatLon(self, value:float):
value1, tmp = divmod(float(value)*60.0, 60)
try:
value2 = str(tmp).split('.')[0]
except:
value2 = 0
try:
value3 = str(tmp).split('.')[1][0:2]
if len(value3) == 1:
value3 = int(value3)*10
except:
value3 = 0
return int(value1), int(value2), int(value3)
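    # Illustrative sketch: "34.0522" is split into whole degrees, whole minutes
    # and hundredths of a minute, i.e. roughly (34, 3, 13).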
def processDep(self, depth):
try:
dep1 = str(depth).split('.')[0]
except:
dep1 = 0
try:
dep2 = str(depth).split('.')[1][0:2]
if len(dep2) == 1:
dep2 = int(dep2)*10
except:
dep2 = 0
return int(dep1), int(dep2)
def show(self, list):
outStr = ''
for item in list:
outStr = outStr + item + ' '
print(outStr)
if __name__ == '__main__':
# test = SeismicReport('phase_sel_one')
if len(sys.argv) != 3:
print('mk_input.py phasefile stationfile')
sys.exit()
gen_sta_hypo(sys.argv[2])
test = SeismicReport(sys.argv[1])
#test.makeCatlog()
test.makeHypoPhase()
|
the-stack_0_4578 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This script fixes links that contain common spelling mistakes.
This is only possible on wikis that have a template for these misspellings.
Command line options:
-always:XY instead of asking the user what to do, always perform the same
action. For example, XY can be "r0", "u" or "2". Be careful with
this option, and check the changes made by the bot. Note that
some choices for XY don't make sense and will result in a loop,
e.g. "l" or "m".
-start:XY goes through all misspellings in the category on your wiki
that is defined (to the bot) as the category containing
misspelling pages, starting at XY. If the -start argument is not
given, it starts at the beginning.
-main only check pages in the main namespace, not in the talk,
wikipedia, user, etc. namespaces.
"""
# (C) Daniel Herding, 2007
# (C) Pywikibot team, 2007-2016
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: 1a3aea17f846ffaea3ed95e354a0578136a3d103 $'
#
import pywikibot
from pywikibot import i18n, pagegenerators
from pywikibot.tools import PY2
from scripts.solve_disambiguation import DisambiguationRobot
if not PY2:
basestring = (str, )
HELP_MSG = """\n
mispelling.py does not support site {site}.
Help Pywikibot team to provide support for your wiki by submitting
a bug to:
https://phabricator.wikimedia.org/maniphest/task/create/?projects=pywikibot-core
with category containing misspelling pages or a template for
these misspellings.\n"""
class MisspellingRobot(DisambiguationRobot):
"""Spelling bot."""
misspellingTemplate = {
'de': ('Falschschreibung', 'Obsolete Schreibung'),
}
# Optional: if there is a category, one can use the -start
# parameter.
misspellingCategory = {
'da': u'Omdirigeringer af fejlstavninger', # only contains date redirects at the moment
'de': ('Kategorie:Wikipedia:Falschschreibung',
'Kategorie:Wikipedia:Obsolete Schreibung'),
'en': u'Redirects from misspellings',
'hu': u'Átirányítások hibás névről',
'nl': u'Categorie:Wikipedia:Redirect voor spelfout',
}
def __init__(self, always, firstPageTitle, main_only):
"""Constructor."""
super(MisspellingRobot, self).__init__(
always, [], True, False, None, False, main_only)
self.generator = self.createPageGenerator(firstPageTitle)
def createPageGenerator(self, firstPageTitle):
"""
Generator to retrieve misspelling pages or misspelling redirects.
@rtype: generator
"""
mylang = self.site.code
if mylang in self.misspellingCategory:
categories = self.misspellingCategory[mylang]
if isinstance(categories, basestring):
categories = (categories, )
generators = (
pagegenerators.CategorizedPageGenerator(
pywikibot.Category(self.site, misspellingCategoryTitle),
recurse=True, start=firstPageTitle)
for misspellingCategoryTitle in categories)
elif mylang in self.misspellingTemplate:
templates = self.misspellingTemplate[mylang]
if isinstance(templates, basestring):
templates = (templates, )
generators = (
pagegenerators.ReferringPageGenerator(
pywikibot.Page(self.site, misspellingTemplateName, ns=10),
onlyTemplateInclusion=True)
for misspellingTemplateName in templates)
if firstPageTitle:
pywikibot.output(
u'-start parameter unsupported on this wiki because there '
u'is no category for misspellings.')
else:
pywikibot.output(HELP_MSG.format(site=self.site))
empty_gen = (i for i in [])
return empty_gen
generator = pagegenerators.CombinedPageGenerator(generators)
preloadingGen = pagegenerators.PreloadingGenerator(generator)
return preloadingGen
def findAlternatives(self, disambPage):
"""
Append link target to a list of alternative links.
Overrides the DisambiguationRobot method.
@return: True if alternate link was appended
@rtype: bool or None
"""
if disambPage.isRedirectPage():
self.alternatives.append(disambPage.getRedirectTarget().title())
return True
if self.misspellingTemplate.get(disambPage.site.code) is not None:
for template, params in disambPage.templatesWithParams():
if (template.title(withNamespace=False) ==
self.misspellingTemplate[disambPage.site.code]):
                    # The correct spelling is in the last parameter.
correctSpelling = params[-1]
# On de.wikipedia, there are some cases where the
                    # misspelling is ambiguous, see for example:
# https://de.wikipedia.org/wiki/Buthan
for match in self.linkR.finditer(correctSpelling):
self.alternatives.append(match.group('title'))
if not self.alternatives:
# There were no links in the parameter, so there is
# only one correct spelling.
self.alternatives.append(correctSpelling)
return True
def setSummaryMessage(self, disambPage, *args, **kwargs):
"""
Setup the summary message.
Overrides the DisambiguationRobot method.
"""
# TODO: setSummaryMessage() in solve_disambiguation now has parameters
# new_targets and unlink. Make use of these here.
self.comment = i18n.twtranslate(self.site, 'misspelling-fixing',
{'page': disambPage.title()})
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
# the option that's always selected when the bot wonders what to do with
# a link. If it's None, the user is prompted (default behaviour).
always = None
main_only = False
firstPageTitle = None
for arg in pywikibot.handle_args(args):
arg, sep, value = arg.partition(':')
if arg == '-always':
always = value
elif arg == '-start':
firstPageTitle = value or pywikibot.input(
'At which page do you want to start?')
elif arg == '-main':
main_only = True
bot = MisspellingRobot(always, firstPageTitle, main_only)
bot.run()
if __name__ == "__main__":
main()
|
the-stack_0_4580 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import logging
from typing import Dict, Iterable, List
from .annotated_function_generator import (
AnnotatedFunctionGenerator,
FunctionVisitor,
)
from .decorator_parser import DecoratorParser
from .generator_specifications import DecoratorAnnotationSpecification
from .model import FunctionDefinitionModel
LOG: logging.Logger = logging.getLogger(__name__)
class FreeFunctionWithDecoratorVisitor(FunctionVisitor):
def __init__(
self, target_decorators: List[DecoratorAnnotationSpecification]
) -> None:
super().__init__()
self.decorator_parsers: Dict[
DecoratorAnnotationSpecification, DecoratorParser
] = {
target_decorator: DecoratorParser(target_decorator.decorator)
for target_decorator in target_decorators
}
def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
for decorator_specification, parser in self.decorator_parsers.items():
if parser.function_matches_target_decorators(node):
self.found_functions[decorator_specification].append(node)
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
for decorator_specification, parser in self.decorator_parsers.items():
if parser.function_matches_target_decorators(node):
self.found_functions[decorator_specification].append(node)
def visit_ClassDef(self, node: ast.ClassDef) -> None:
# We only want free functions, so we should stop traversing the
# tree once we see a class definition
pass
class AnnotatedFreeFunctionWithDecoratorGenerator(AnnotatedFunctionGenerator):
def _annotate_functions(self, path: str) -> Iterable[FunctionDefinitionModel]:
visitor = FreeFunctionWithDecoratorVisitor(self.annotation_specifications)
return self._annotate_functions_with_visitor(path, visitor)
|
the-stack_0_4581 | # /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Utilities for parsing and applying quantsim configurations from json config file """
from abc import ABC, abstractmethod
from typing import Dict, List
from aimet_common.defs import QuantizationDataType, QuantDtypeBwInfo
from aimet_common.connected_graph.operation import Op
from aimet_common.graph_pattern_matcher import PatternType
from aimet_common.quantsim_config.json_config_importer import JsonConfigImporter, ConfigDictKeys, DefaultsType, \
ParamType, OpTypeType, SupergroupType, ConfigType
from aimet_common.utils import AimetLogger
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Quant)
# --------------------------------------------------------------------------------------------------------------------
# Overriding AIMET QuantSim data type and bit-width using supported_kernels specified in target driven config file.
# --------------------------------------------------------------------------------------------------------------------
# supported_kernels can be specified at default as well as op level in a given target specific config file
# Example rule in the target specific config file is as below:
# "supported_kernels": [
# {
# "activation": {
# "bitwidth": 16,
# "dtype": "int"
# },
# "param": {
# "bitwidth": 16,
# "dtype": "int"
# }
# },
# {
# "activation": {
# "bitwidth": 16,
# "dtype": "float"
# },
# "param": {
# "bitwidth": 16,
# "dtype": "float"
# }
# }
# ]
# supported_kernels includes data type and bit-width options for activation and param quantization
# applied together as a pair. In above rule act and param can be set to [int16, int16] OR [FP16, FP16]
# supported_kernels can be used to enforce target driven data type and bit-width during AIMET Quantsim
# by setting: ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = True
#
# AIMET Quantsim is created with specific defaults for data type/ bit-width using:
# default_data_type default_output_bw and default_param_bw arguments as below :
# sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
# config_file='./data/quantsim_config.json',
# dummy_input=torch.rand(1, 3, 32, 32), in_place=True,
# default_data_type=QuantizationDataType.int,
# default_output_bw=8, default_param_bw=8)
# Rules for override :
# (i) If a given QuantSim default data type and bit-width is found at either the default or op-level
# supported_kernels list : override shall NOT be applied.
# (ii) AIMET supports overrides ONLY when a lower precision kernel is unavailable.
# For example :
# a) QuantSim default set to int 8, op level supported_kernels only has FP 16 available --> override supported
# b) QuantSim default set to int 8, op level supported_kernels only has int 4 available --> override NOT supported
#
# --------------------------------------------------------------------------------------------------------------------
# Flag to enforce target configs for data type and bit-width for params and activation.
ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False
DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX = 0
class SupergroupConfigCallback(ABC):
""" Class acting as a callback for when supergroups are found """
def __init__(self):
pass
@abstractmethod
def __call__(self, _, op_list: List[Op]):
""" Callback logic """
class OnnxConnectedGraphTypeMapper:
"""
Class maintaining dictionaries for two way mapping from onnx types to connected graph types
"""
def __init__(self, type_pairs: List[List[List[str]]]):
self._onnx_to_conn_graph_dict = {}
self._conn_graph_to_onnx_dict = {}
for onnx_types, conn_graph_types in type_pairs:
for onnx_type in onnx_types:
self._onnx_to_conn_graph_dict[onnx_type] = conn_graph_types
for conn_graph_type in conn_graph_types:
self._conn_graph_to_onnx_dict[conn_graph_type] = onnx_types
def get_conn_graph_type_from_onnx_type(self, onnx_type: str):
"""
Return connected graph type corresponding to onnx type
:param onnx_type: Onnx type to find corresponding connected graph type
:return: Connected graph type corresponding to onnx_type
"""
return self._onnx_to_conn_graph_dict.get(onnx_type)
def get_onnx_type_from_conn_graph_type(self, conn_graph_type: str):
"""
Return onnx type corresponding to connected graph type
:param conn_graph_type: Connected graph type to find corresponding onnx type
:return: Onnx type corresponding to conn_graph_type
"""
return self._conn_graph_to_onnx_dict.get(conn_graph_type)
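    # Illustrative sketch (hypothetical type names): each entry of type_pairs
    # pairs a list of onnx op types with a list of connected graph op types, and
    # the mapper then answers lookups in both directions:
    #
    #   >>> mapper = OnnxConnectedGraphTypeMapper([[['Conv'], ['Conv2d', 'ConvTranspose2d']]])
    #   >>> mapper.get_conn_graph_type_from_onnx_type('Conv')
    #   ['Conv2d', 'ConvTranspose2d']
    #   >>> mapper.get_onnx_type_from_conn_graph_type('Conv2d')
    #   ['Conv']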
class QuantSimConfigurator(ABC):
""" Class for parsing and applying quantsim configurations from json config file """
def __init__(self, config_file: str):
self._quantsim_configs = JsonConfigImporter.import_json_config_file(config_file)
def _set_quantsim_configs(self):
"""
Apply quantsim configurations to the given model
"""
self._set_default_configs(self._quantsim_configs[ConfigDictKeys.DEFAULTS])
self._set_param_configs(self._quantsim_configs[ConfigDictKeys.PARAMS])
self._set_op_type_configs(self._quantsim_configs[ConfigDictKeys.OP_TYPE])
self._set_supergroup_configs(self._quantsim_configs[ConfigDictKeys.SUPERGROUPS])
self._set_model_input_configs(self._quantsim_configs[ConfigDictKeys.MODEL_INPUT])
self._set_model_output_configs(self._quantsim_configs[ConfigDictKeys.MODEL_OUTPUT])
def check_correctness_of_dtype_bw_rules(self, quantsim_dtype_bw_info: QuantDtypeBwInfo):
"""
Validates correctness of data type and bitdiwth rules specified using config file supported_kernels option.
:param quantsim_dtype_bw_info: data type (int or float) as QuantizationDataType and act/param bit-width info.
:return:
"""
# validation rules:
# AIMET supports overrides ONLY when a lower precision kernel is unavailable.
# for example :
# 1) (default) int 8, but only FP16 kernel is available for a given op type --> override supported
        # 2) (default) int 8, but only int 4 kernel is available for a given op type --> override not supported
default_config = self._quantsim_configs[ConfigDictKeys.DEFAULTS]
default_valid = False
op_level_valid = False
# user has provided default supported kernel options
if ConfigDictKeys.SUPPORTED_KERNELS in default_config:
default_supported_kernels = default_config[ConfigDictKeys.SUPPORTED_KERNELS]
# quantsim dtype/bw found in default supported kernels
if current_config_in_supported_kernels(quantsim_dtype_bw_info, default_supported_kernels) and \
is_current_config_same_as_override_option(quantsim_dtype_bw_info, default_supported_kernels):
default_valid = True
# default level override is not required
logger.info("Quantsim config found in default supported kernels, "
"skipping default level dtype and bitwidth override")
else:
# override is required, first validate the override option
# if valid, update default dtype, bw to be used to validate op level overrides.
if is_override_dtype_bw_valid(get_override_from_supported_kernels(default_supported_kernels),
quantsim_dtype_bw_info):
default_valid = True
quantsim_dtype_bw_info = get_override_from_supported_kernels(default_supported_kernels)
else:
logger.error(' Default supported_kernels override check failed, one way to rectify is to include \n'
' default quantsim data type and bit-width {act_bw = %s, param_bw = %s, data_type = %s} \n '
' in supported_kernels list under default section of target specific config file \n',
quantsim_dtype_bw_info.act_bw, quantsim_dtype_bw_info.param_bw, quantsim_dtype_bw_info.data_type)
raise NotImplementedError
else:
# user has not provided default supported_kernels, log quantsim defaults treated as default target kernel support
default_valid = True
logger.info(' Default supported_kernels not specified in given target specific config file. \n'
' Using default quantsim data type and bit-width {act_bw = %s, param_bw = %s, data_type = %s} \n '
' as default target support\n',
quantsim_dtype_bw_info.act_bw, quantsim_dtype_bw_info.param_bw, quantsim_dtype_bw_info.data_type)
# in either case, validate op level override options
if self._quantsim_configs[ConfigDictKeys.OP_TYPE]:
op_level_valid = validate_all_op_level_dtype_bw_overrides(self._quantsim_configs[ConfigDictKeys.OP_TYPE],
quantsim_dtype_bw_info)
return default_valid and op_level_valid
@abstractmethod
def _set_default_configs(self, default_configs: DefaultsType):
"""
Set default configurations for op and param quantizers in model (first level of specificity in configuration
file)
:param default_configs: Default configurations for quantizers
"""
@abstractmethod
def _set_param_configs(self, param_configs: ParamType):
"""
Set configurations for all params of specific types (second level of specificity in configuration file)
:param param_configs: Dictionary containing configurations for parameters of certain types
"""
@abstractmethod
def _set_op_type_configs(self, op_configs: OpTypeType):
"""
Set configurations for all ops of specific types (third level of specificity in configuration file)
:param op_configs: Dictionary containing configurations for ops of certain types
"""
@classmethod
def _build_supergroup_patterns(cls, supergroup_config: SupergroupType, callback: SupergroupConfigCallback,
onnx_conn_graph_type_mapper: OnnxConnectedGraphTypeMapper) \
-> List[PatternType]:
"""
Create a list holding pattern types corresponding to sequences specified in the supergroup config
:param supergroup_config: Quantsim wrapper configurations for supergroup ops
:return: List of PatternTypes holding supergroup ops and callback for when the supergroup is found
"""
op_list = supergroup_config[ConfigDictKeys.OP_LIST]
list_of_permutations = _build_list_of_permutations(op_list, onnx_conn_graph_type_mapper)
list_of_patterns = []
for permutation in list_of_permutations:
list_of_patterns.append(PatternType(pattern=permutation, action=callback))
return list_of_patterns
@abstractmethod
def _set_supergroup_configs(self, supergroups_configs: List[SupergroupType]):
"""
Set supergroup specific configurations (fourth level of specificity in configuration file)
:param supergroups_configs: Configurations for supergroups
"""
@abstractmethod
def _set_model_input_configs(self, model_input_configs: ConfigType):
"""
Set model input specific configurations (fifth level of specificity in configuration file)
:param model_input_configs: Configuration for model inputs
"""
@abstractmethod
def _set_model_output_configs(self, model_output_configs: ConfigType):
"""
Set model output specific configurations (sixth level of specificity in configuration file)
:param model_output_configs: Configuration for model outputs
"""
def _build_list_of_permutations(op_list: List[str], onnx_conn_graph_type_mapper: OnnxConnectedGraphTypeMapper) \
-> List[List[str]]:
"""
Given a list of onnx op types, where each onnx op type could potentially map to multiple connected graph types,
create a list of all permutations of lists of connected graph types that would satisfy the same ordering as the
original onnx op type list.
For example, for an onnx op type "o1" that maps to two connected graph types "c1_1" and
"c1_2", and an onnx op type "o2" that maps to two connected graph types "c2_1" and "c2_2", all permutations of
["o1", "o2"] would lead to ["c1_1", "c2_1"], ["c1_1", "c2_2"], ["c1_2", "c2_1"], and ["c1_2", "c2_2"].
:param op_list: List of onnx op types
:param onnx_conn_graph_type_mapper: Class that provides utilities for mapping onnx op types to connected graph types
:return: List of permutations of connected graph op types satisfying the ordering specified by op_list onnx types
"""
# base case, return list of lists of connected graph ops corresponding to the only op in the list
if len(op_list) == 1:
permutations_of_op_list = []
conn_graph_types_of_current_op = onnx_conn_graph_type_mapper.get_conn_graph_type_from_onnx_type(op_list[0])
for op in conn_graph_types_of_current_op:
permutations_of_op_list.append([op])
return permutations_of_op_list
permutations_of_op_list = []
permutations_of_succeeding_ops = _build_list_of_permutations(op_list[1:], onnx_conn_graph_type_mapper)
conn_graph_types_of_current_op = onnx_conn_graph_type_mapper.get_conn_graph_type_from_onnx_type(op_list[0])
for op in conn_graph_types_of_current_op:
for permutation in permutations_of_succeeding_ops:
new_permutation = [op] + permutation
permutations_of_op_list.append(new_permutation)
return permutations_of_op_list
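# Illustrative example of the recursion above (hypothetical op type names): if onnx type
# "Conv" maps to connected graph types ["Conv2d", "DepthwiseConv2d"] and "Relu" maps to
# ["Relu"], then _build_list_of_permutations(["Conv", "Relu"], mapper) returns
# [["Conv2d", "Relu"], ["DepthwiseConv2d", "Relu"]].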
def get_setting_type(setting_name: str) -> str:
"""
Return a string corresponding to the type of setting that is specified by setting_name.
:param setting_name: Name of the setting to change
:return: String corresponding to the type of setting that is specified by setting_name.
"""
if setting_name in [ConfigDictKeys.IS_INPUT_QUANTIZED, ConfigDictKeys.IS_OUTPUT_QUANTIZED]:
return ConfigDictKeys.IS_QUANTIZED
if setting_name == ConfigDictKeys.IS_SYMMETRIC:
return ConfigDictKeys.IS_SYMMETRIC
logger.error('Unrecognized quantizer setter name %s', setting_name)
raise AssertionError
def get_all_ops_in_neighborhood(op: Op, direction: str, neighborhood=None):
"""
Given an op and a direction, populate neighborhood dictionary with all ops adjacent to that op, and which direction
they are adjacent in. If a neighboring op has other connections in the same direction as the op we began with, ops
connecting to the neighboring op in those other connections will also be part of the same neighborhood.
:param op: Op to find neighboring ops from
:param direction: Direction to search for neighboring ops (will be 'input' or 'output')
:param neighborhood: Dictionary mapping neighboring ops to the direction which they connect to op.
"""
if neighborhood is None:
neighborhood = {}
neighborhood[op] = direction
if direction == 'input' and op.inputs:
input_products = [inp for inp in op.inputs if inp.is_inter_module()]
input_ops = [inp.producer for inp in input_products]
for input_op in input_ops:
if input_op not in neighborhood:
neighborhood[input_op] = 'output'
if input_op.type == 'Split':
# Neighborhood ops include input of split, as well as all other consumers of split
get_all_ops_in_neighborhood(input_op, 'input', neighborhood)
get_all_ops_in_neighborhood(input_op, 'output', neighborhood)
elif op.output:
output_ops = [consumer for consumer in op.output.consumers]
for output_op in output_ops:
if output_op not in neighborhood:
neighborhood[output_op] = 'input'
if output_op.type == 'Split':
# Neighborhood ops include all consumers of split
get_all_ops_in_neighborhood(output_op, 'output', neighborhood)
return neighborhood
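# Illustrative example (hypothetical graph): for conv1 -> relu1 -> conv2, calling
# get_all_ops_in_neighborhood(relu1, 'output') returns {relu1: 'output', conv2: 'input'},
# while the 'Split' branches above additionally recurse through a split op so that its
# producer and all of its consumers land in the same neighborhood.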
def current_config_in_supported_kernels(current_dtype_bw: QuantDtypeBwInfo, supported_kernels: List) -> bool:
"""
    Checks if the given (act, param) bw/dtype config is present in the supported kernels provided.
:param current_dtype_bw : current data type and bitwidths for act and param as QuantDtypeBwInfo.
:param supported_kernels: supported kernels (Default level in config file).
:return: True, if current config is part of the supported Kernels, False otherwise.
"""
for supported_kernel_config in supported_kernels:
# retrieve one set of act/param kernel config support
act_config = supported_kernel_config[ConfigDictKeys.ACTIVATION]
param_config = supported_kernel_config[ConfigDictKeys.PARAM]
# we need to compare combination of act/param with default user provided config.
# Because a given kernel support is valid only as a combination.
if act_config[ConfigDictKeys.DTYPE] == current_dtype_bw.data_type and \
act_config[ConfigDictKeys.BITWIDTH] == current_dtype_bw.act_bw and \
param_config[ConfigDictKeys.DTYPE] == current_dtype_bw.data_type and \
param_config[ConfigDictKeys.BITWIDTH] == current_dtype_bw.param_bw:
return True
return False
def is_current_config_same_as_override_option(current_dtype_bw: QuantDtypeBwInfo, supported_kernels: List) -> bool:
"""
    Checks if the given (act, param) bw/dtype config is the same as the supported kernel provided
    as an option at DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX.
:param current_dtype_bw : current data type and bitwidths for act and param as QuantDtypeBwInfo.
:param supported_kernels: supported kernels (Default level in config file).
    :return: True, if current config is the supported kernel at index DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX, False otherwise.
"""
override_dtype_bw = get_override_from_supported_kernels(supported_kernels)
# we need to compare combination of act/param with default user provided config.
# Because a given kernel support is valid only as a combination.
    if override_dtype_bw.data_type == current_dtype_bw.data_type and \
            override_dtype_bw.act_bw == current_dtype_bw.act_bw and \
            override_dtype_bw.param_bw == current_dtype_bw.param_bw:
return True
return False
def get_override_from_supported_kernels(supported_kernels: Dict) -> QuantDtypeBwInfo:
"""
extracts the first option from list of supported kernels configured as QuantDtypeBwInfo.
:param supported_kernels: Dictionary of supported kernels at default level.
    :return: QuantDtypeBwInfo built from the first (override) option in supported_kernels.
"""
assert supported_kernels
config_file_default_act_bw_dtype_config = supported_kernels[DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX][ConfigDictKeys.ACTIVATION]
config_file_default_param_bw_dtype_config = supported_kernels[DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX][ConfigDictKeys.PARAM]
override_data_type = config_file_default_act_bw_dtype_config[ConfigDictKeys.DTYPE]
override_act_bw = config_file_default_act_bw_dtype_config[ConfigDictKeys.BITWIDTH]
override_param_bw = config_file_default_param_bw_dtype_config[ConfigDictKeys.BITWIDTH]
return QuantDtypeBwInfo(override_data_type, override_act_bw, override_param_bw)
def is_override_dtype_bw_valid(override_dtype_bw_info: QuantDtypeBwInfo, quantsim_dtype_bw_info: QuantDtypeBwInfo) -> bool:
"""
check if override dtype bw is valid given quantsim default dtype and bw.
:param override_dtype_bw_info: override data type, bitwidth info as QuantDtypeBwInfo.
:param quantsim_dtype_bw_info: quantsim default data type, bitwidth info as QuantDtypeBwInfo.
:return: bool, True if override option is valid, False otherwise.
"""
# Rule : When an Op does NOT have lower precision kernel support, supported_kernels based override can be applied =>
# quantsim default dtype/bw should be lower precision compared to override.
# case (i) if both are int or both are float dtype, compare bitwidths.
# ex : {quantsim default = int16, override = int8} or {quantsim default = int8, override = int4} are not supported
# case (ii) if quantsim default is float => override is not float, then it fails to satisfy criteria because:
# quantsim defaults are higher precision compared to overrides . (ex : quantsim default = Fp16 > override = int)
if (quantsim_dtype_bw_info.data_type == override_dtype_bw_info.data_type and
(quantsim_dtype_bw_info.act_bw > override_dtype_bw_info.act_bw or
quantsim_dtype_bw_info.param_bw > override_dtype_bw_info.param_bw)) or \
quantsim_dtype_bw_info.data_type == QuantizationDataType.float:
        logger.error(' Target specific op level override only with a higher precision kernel is supported \n,'
' (please check both quantsim defaults and default supported_kernels in config file specified at override index {%s}) \n'
' quantsim is configured with {act_bw = %s, param_bw = %s, data_type = %s} and \n'
' supported_kernels override configured as {act_bw = %s, param_bw = %s, data_type = %s} \n',
DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX,
quantsim_dtype_bw_info.act_bw, quantsim_dtype_bw_info.param_bw, quantsim_dtype_bw_info.data_type,
override_dtype_bw_info.act_bw, override_dtype_bw_info.param_bw, override_dtype_bw_info.data_type)
return False
return True
def validate_all_op_level_dtype_bw_overrides(op_configs: OpTypeType, default_dtype_bw: QuantDtypeBwInfo):
"""
    Checks whether the op level supported_kernels overrides are valid (across all op types).
:param op_configs: Op level config information (Level 3 spec in target config file).
:param default_dtype_bw: default values configured for quantsim data_type/ bitwidths.
:return: bool, indicating valid or not.
"""
for op_name, op_config in op_configs.items():
if ConfigDictKeys.SUPPORTED_KERNELS in op_config:
op_level_supported_kernels = op_config[ConfigDictKeys.SUPPORTED_KERNELS]
# if current quantsim config or default level supported kernel is in op level supported kernels
# no override required at op level.
if current_config_in_supported_kernels(default_dtype_bw,
op_level_supported_kernels):
logger.info(" Default option found in op level supported kernels list, skip "
"op level override needed for op {%s} \n", op_name)
else:
# If there are multiple options - we always override with DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX
# in supported_kernels, check if the override option dtype and bitwidth is valid.
# option specified at DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX of default supported_kernels
# will be applied during override.
override_dtype_bw_info = get_override_from_supported_kernels(op_level_supported_kernels)
if not is_override_dtype_bw_valid(override_dtype_bw_info, default_dtype_bw):
logger.info(' Op level supported_kernels override check failed for op {%s} \n'
' Op level override only with higher precision kernel is supported \n'
' (please check both quantsim defaults and default supported_kernels in config file specified at override index {%s})\n'
' One way to rectify this is to specify lower precision data type and bit-width as defaults '
' \n ex : {act_bw = %s, param_bw = %s, data_type = %s} and'
' use op level supported_kernels override \n'
' for this op to indicate higher precision kernel that is supported on given target \n'
' ex: { act_bw = %s, param_bw = %s , data_type = %s} \n',
op_name,
DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX,
override_dtype_bw_info.act_bw, override_dtype_bw_info.param_bw, override_dtype_bw_info.data_type,
default_dtype_bw.act_bw, default_dtype_bw.param_bw, default_dtype_bw.data_type)
raise NotImplementedError
return True
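# Illustrative sketch (added, not part of the original source): the checks above read a
# "supported_kernels" list from the config file that is shaped roughly as below, where
# key names follow the ConfigDictKeys constants and the entry at
# DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX is the one applied as the override.
#
#   "supported_kernels": [
#       {
#           "activation": {"bitwidth": 16, "dtype": "int"},
#           "param": {"bitwidth": 16, "dtype": "int"}
#       }
#   ]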
|
the-stack_0_4583 | #!/usr/bin/python3
import sys
import copy
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.statespace.sarimax import SARIMAX,SARIMAXResults,SARIMAXParams
from statsmodels.tsa.statespace.mlemodel import MLEResults
from predictor.utility import msg2log
from clustgelDL.auxcfg import D_LOGS,listLogSet, closeLogs,log2All,exec_time,logList
from offlinepred.api import logMatrix, plotPredictDF
def readDataset(csv_file:str="", endogen:list=None, title:str="Time Series",f:object=None)->pd.DataFrame:
my_csv=Path(csv_file)
if my_csv.is_file():
df1=pd.read_csv(csv_file)
if endogen is not None and len(endogen)>0:
plot = df1.plot(y=endogen,figsize=(14,8),legend=True,title=title)
title1=title.replace(' ','_')
file_png=Path(D_LOGS['plot'] / Path(title1)).with_suffix('.png')
fig=plot.get_figure()
fig.savefig(str(file_png))
else:
df1=None
return df1
def checkStationarity(df:pd.DataFrame=None, data_col_name:str=None,title:str="Time Series",f:object=None):
if df is None or data_col_name is None:
return
series=df[data_col_name].values
# ADF Test
result = adfuller(series, autolag='AIC')
msg2log(None, f'ADF Statistic: {result[0]}',f)
    msg2log(None, f'n_lags: {result[2]}',f)
msg2log(None,f'p-value: {result[1]}',f)
for key, value in result[4].items():
        msg2log(None,'Critical Values:',f)
msg2log(None,f' {key}, {value}',f)
plt.rcParams.update({'figure.figsize':(9,7), 'figure.dpi':120})
# Original Series
fig, axes = plt.subplots(3, 3, sharex=True)
axes[0, 0].plot(df[data_col_name]); axes[0, 0].set_title('Original Series')
plot_acf(df[data_col_name], ax=axes[0, 1])
# 1st Differencing
axes[1, 0].plot(df[data_col_name].diff()); axes[1, 0].set_title('1st Order Differencing')
plot_acf(df[data_col_name].diff().dropna(), ax=axes[1, 1])
# 2nd Differencing
axes[2, 0].plot(df[data_col_name].diff().diff()); axes[2, 0].set_title('2nd Order Differencing')
plot_acf(df[data_col_name].diff().diff().dropna(), ax=axes[2, 1])
title1 = "{}_{}".format(title.replace(' ', '_'),data_col_name)
file_png = Path(D_LOGS['plot'] / Path(title1)).with_suffix('.png')
plt.savefig(file_png)
plt.close("all")
return
def arima_order(df:pd.DataFrame=None, data_col_name:str=None,training_size:int=512,title:str="Time Series",
max_order:tuple=(2,2,2), max_seasonal_order:tuple=(1,0,1,6),
f:object=None)->((int,int,int),(int,int,int,int)):
n = len(df[data_col_name])
start_index=0
if n>training_size:
start_index=n-training_size
(P,D,Q)=max_order
(SP,SD,SQ,S)=max_seasonal_order
opt_order=(0,0,0)
opt_aic=1e+12
for p in range(P+1):
for d in range(D+1):
for q in range(Q+1):
order=(p,d,q)
seasonal_order = (0, 0, 0, 0)
errmsg=""
try:
model = SARIMAX(df[data_col_name][start_index:], order=order, seasonal_order=seasonal_order)
model_fit = model.fit(disp=0)
except:
errmsg = f""" SARIMA optimal order searching
({p},{d},{q})X(0,0,0)
Oops!! Unexpected error...
Error : {sys.exc_info()[0]}
"""
finally:
if len(errmsg) > 0:
msg2log(None, errmsg, D_LOGS['except'])
break
if model_fit.aic<opt_aic:
opt_aic=model_fit.aic
opt_order=(p,d,q)
msg2log(None,"ARIMA({},{},{}): AIC={}".format(p,d,q,model_fit.aic))
opt_seasonal_order = (0, 0, 0, S)
opt_seasonal_aic = 1e+12
opt_sarima_aic = opt_aic+1.0
if S>0:
opt_seasonal_order = (0, 0, 0,S)
opt_seasonal_aic = 1e+12
for sp in range(SP+1):
for sd in range(SD+1):
for sq in range(SQ+1):
seasonal_order=(sp,sd,sq,S)
order=(0,0,0)
errmsg=""
try:
model = SARIMAX(df[data_col_name][start_index:], order=order, seasonal_order=seasonal_order)
model_fit = model.fit(disp=0)
except:
errmsg = f""" SARIMA optimal order searching
(0,0,0)X({sp},{sd},{sq}):{S}
Oops!! Unexpected error...
Error : {sys.exc_info()[0]}
"""
finally:
if len(errmsg)>0:
msg2log(None,errmsg,D_LOGS['except'])
break
if model_fit.aic < opt_seasonal_aic:
opt_seasonal_aic = model_fit.aic
opt_seasonal_order = (sp, sd, sq,S)
msg2log(None, "ARIMA(0,0,0)x({},{},{},{}): AIC={}".format(sp, sd, sq, S, model_fit.aic))
seasonal_order = (0, 0, 0, 0)
opt_sarima_aic=1e+12
model = SARIMAX(df[data_col_name][start_index:], order=opt_order, seasonal_order=opt_seasonal_order)
model_fit = model.fit(disp=0)
opt_sarima_aic=model_fit.aic
message=f"""SARIMA models comparison
SARIMA({opt_order})x(0,0,0,0) : AIC={opt_aic}
SARIMA(0,0,0)x({opt_seasonal_order}) : AIC={opt_seasonal_aic}
SARIMA({opt_order})x({opt_seasonal_order}) : AIC={opt_sarima_aic}
"""
msg2log(None,message,f)
if opt_aic<opt_seasonal_aic and opt_aic<opt_sarima_aic:
order=opt_order
seasonal_order=(0,0,0,0)
elif opt_seasonal_aic<opt_aic and opt_seasonal_aic<opt_sarima_aic:
        order=(0,0,0)
seasonal_order=opt_seasonal_order
elif opt_sarima_aic<opt_aic and opt_sarima_aic < opt_seasonal_aic:
order=opt_order
seasonal_order=opt_seasonal_order
return order,seasonal_order
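# Usage sketch (illustrative; the column name "Power" and the frame are hypothetical, and
# arima_run() below additionally expects the default "Date Time" timestamp column):
#   order, seasonal_order = arima_order(df, data_col_name="Power", training_size=512,
#                                       max_order=(2, 2, 2), max_seasonal_order=(1, 0, 1, 6))
#   arima_run(df, data_col_name="Power", order=order, seasonal_order=seasonal_order)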
def arima_run(df:pd.DataFrame=None, data_col_name:str=None,dt_col_name:str="Date Time",chunk_offset:int=0,
chunk_size:int=8192, in_sample_start:int=0,in_sample_size:int=512, forecast_period:int=4,
title:str="Time Series", order:tuple=(1,0,0),seasonal_order:tuple=(0,0,0,0), f:object=None):
n=len(df[data_col_name])
if chunk_size+chunk_offset>n:
chunk_size=n-chunk_offset
cho=chunk_offset
tcho=df[dt_col_name][chunk_offset]
chs=chunk_size
tchs=df[dt_col_name][chunk_offset+chunk_size-1]
if in_sample_start + in_sample_size>n:
in_sample_size=n-in_sample_start
iss=in_sample_start
tiss=df[dt_col_name][iss]
isl=in_sample_start+in_sample_size-1
tisl=df[dt_col_name][isl]
message=f"""{data_col_name}
TS length: {n}
The chunk of TS for ARIMA estimation:
start offset : {cho} timestamp: {tcho}
                     chunk size  : {chs} last timestamp: {tchs}
In-sample predict from index: {iss}, timestamp {tiss}
till index: {isl}, last timestamp {tisl}
ARIMA order: p = {order[0]} d = {order[1]} q = {order[2]}
SARIMA order: P = {seasonal_order[0]} D = {seasonal_order[1]} Q ={seasonal_order[2]}
Seasonal Period = {seasonal_order[3]}
"""
msg2log(None,message,f)
msg2log(None,message,D_LOGS['main'])
log2All()
model = SARIMAX(df[data_col_name][chunk_offset:], order=order, seasonal_order=seasonal_order)
model_fit = model.fit(disp=0)
msg2log(None,model_fit.summary(),D_LOGS['predict'])
msg2log(None, model_fit.param_names, D_LOGS['predict'])
y_pred_series = model_fit.predict(start=in_sample_start, end=in_sample_start + in_sample_size-1)
y_pred=np.array(y_pred_series)
y=np.array(df[data_col_name][in_sample_start:in_sample_start+in_sample_size])
err = np.round(np.subtract(y, y_pred),decimals=4)
X,predict_dict=predict_bundle(y = y, y_pred = y_pred, err = err, start_index=in_sample_start,f = f)
title = "{:^60s}\n{:^10s} {:^10s} {:^10s} {:^10s} {:^10s} {:^10s}".format(data_col_name, "NN","Index","Obs Value",
"Predict","Error","Abs.Err")
logMatrix(X, title= title, wideformat = '10.4f', f=D_LOGS['predict'])
plotPredict(predict_dict=predict_dict, data_col_name=data_col_name, title= "in_sample_predict", f=D_LOGS['predict'])
forecast_arr_series=model_fit.forecast(steps=forecast_period)
forecast_arr=np.array(forecast_arr_series)
X1,forecast_dict = predict_bundle(y=forecast_arr, y_pred=None, err=None, start_index=n, f=f)
title = "{:^60s}\n{:^14s} {:^14s} {:^14s} ".format(data_col_name, "NN", "Index", "Forecast")
logMatrix(X1, title=title, wideformat='14.8f', f=D_LOGS['predict'])
plotPredict(predict_dict=forecast_dict, data_col_name=data_col_name, title="forecasting", f=D_LOGS['predict'])
return
def predict_bundle(y:np.array=None, y_pred:np.array=None, err:np.array=None,start_index:int=0,f:object=None)->(np.array,
dict):
predict_dict={}
(n,) = y.shape
z = np.array([i for i in range(start_index, start_index + n)])
predict_dict["ind"] = copy.copy(z)
if y_pred is None:
predict_dict["forecast"]=y
else:
predict_dict["observation"]=y
predict_dict["in_sample_predict"]=y_pred
y1 = y.reshape((n, 1))
err_abs = None
pred_err_abs = None
y2=None
if err is not None:
abserr=np.round(np.absolute(err),decimals=4)
predict_dict["error"]=copy.copy(err)
predict_dict["abserror"] = copy.copy(abserr)
err=err.reshape((n,1))
abserr=abserr.reshape((n,1))
err_abs=np.append(err,abserr,axis=1)
if y_pred is not None and err_abs is not None:
y2=y_pred.reshape((n,1))
pred_err_abs = np.append(y2, err_abs, axis=1)
elif y_pred is not None and err_abs is None:
pred_err_abs = y_pred.reshape((n,1))
if pred_err_abs is not None:
y_pred_err_abs = np.append(y1, pred_err_abs, axis=1)
else:
y_pred_err_abs=y1
z = np.array([i for i in range(start_index,start_index+n)])
predict_dict["ind"] = copy.copy(z)
z = z.reshape((n, 1))
X = np.append(z, y_pred_err_abs, axis=1)
return X, predict_dict
def plotPredict(predict_dict:dict=None, data_col_name:str="",title:str="in_sample_predict",f:object=None):
df = pd.DataFrame(predict_dict)
sFolder = Path(D_LOGS['plot'] / Path(data_col_name) / Path(title))
sFolder.mkdir(parents=True, exist_ok=True)
title1 = "{}_{}".format(title, data_col_name)
test_predictions_file = Path(sFolder / Path(title1)).with_suffix('.csv')
df.to_csv(test_predictions_file, index=False)
msg = "{} test sequence predict by {} ARIMA model saved in \n{}\n".format(data_col_name, title,
test_predictions_file)
msg2log(None, msg, D_LOGS['predict'])
plotPredictDF(test_predictions_file, data_col_name, title=title1)
return
|
the-stack_0_4586 | import datetime
from django.test import TestCase
from django.utils import timezone
from django.urls import reverse
from .models import Question
# Create your tests here.
def create_question(question_text, days):
"""
Create a question with the given `question_text` and published the
given number of `days` offset to now (negative for questions published
in the past, positive for questions that have yet to be published).
"""
time = timezone.now() + datetime.timedelta(days=days)
query = Question.objects.create(question_text=question_text, pub_date=time)
return query
# ============== VIEWS ==============
class QuestionIndexViewTests(TestCase):
url = reverse('polls:index')
def test_no_questions(self):
"""
If no questions exist, an appropriate message is displayed.
"""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_past_question(self):
"""
Questions with a pub_date in the past are displayed on the
index page.
"""
create_question(question_text="Past question.", days=-30)
response = self.client.get(self.url)
self.assertQuerysetEqual(
response.context['latest_question_list'], [
'<Question: Past question.>']
)
def test_future_question(self):
"""
Questions with a pub_date in the future aren't displayed on
the index page.
"""
create_question(question_text="Future question.", days=30)
response = self.client.get(self.url)
self.assertContains(response, "No polls are available")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_future_question_and_past_question(self):
"""
Even if both past and future questions exist, only past questions
are displayed.
"""
create_question(question_text="Future question.", days=30)
create_question(question_text="Past question.", days=-30)
response = self.client.get(self.url)
self.assertQuerysetEqual(
response.context['latest_question_list'], [
'<Question: Past question.>']
)
def test_two_past_questions(self):
"""
The questions index page may display multiple questions.
"""
create_question(question_text="Past question 1.", days=-30)
create_question(question_text="Past question 2.", days=-5)
response = self.client.get(self.url)
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question 2.>', '<Question: Past question 1.>']
)
class QuestionDetailViewTests(TestCase):
def test_future_question(self):
"""
The detail view of a question with a pub_date in the future
returns a 404 not found.
"""
future_question = create_question(
question_text='Future question.', days=5)
url = reverse('polls:detail', args=(future_question.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_past_question(self):
"""
The detail view of a question with a pub_date in the past
displays the question's text.
"""
past_question = create_question(
question_text='Past Question.', days=-5)
url = reverse('polls:detail', args=(past_question.id,))
response = self.client.get(url)
self.assertContains(response, past_question.question_text)
# ============== MODELS ==============
class QuestionModelTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() returns False for questions whose pub_date
is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() returns False for questions whose pub_date
is older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=1, seconds=1)
old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() returns True for questions whose pub_date
is within the last day.
"""
time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
recent_question = Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(), True)
|
the-stack_0_4587 | # Source: https://gist.githubusercontent.com/rogerallen/1583593/raw/0fffdee6149ab1d993dffa51b1fa9aa466704e18/us_state_abbrev.py
# United States of America Python Dictionary to translate States,
# Districts & Territories to Two-Letter codes and vice versa.
#
# https://gist.github.com/rogerallen/1583593
#
# Dedicated to the public domain. To the extent possible under law,
# Roger Allen has waived all copyright and related or neighboring
# rights to this code.
US_STATE_ABBREV = {
"Alabama": "AL",
"Alaska": "AK",
"American Samoa": "AS",
"Arizona": "AZ",
"Arkansas": "AR",
"California": "CA",
"Colorado": "CO",
"Connecticut": "CT",
"Delaware": "DE",
"District of Columbia": "DC",
"Florida": "FL",
"Georgia": "GA",
"Guam": "GU",
"Hawaii": "HI",
"Idaho": "ID",
"Illinois": "IL",
"Indiana": "IN",
"Iowa": "IA",
"Kansas": "KS",
"Kentucky": "KY",
"Louisiana": "LA",
"Maine": "ME",
"Maryland": "MD",
"Massachusetts": "MA",
"Michigan": "MI",
"Minnesota": "MN",
"Mississippi": "MS",
"Missouri": "MO",
"Montana": "MT",
"Nebraska": "NE",
"Nevada": "NV",
"New Hampshire": "NH",
"New Jersey": "NJ",
"New Mexico": "NM",
"New York": "NY",
"North Carolina": "NC",
"North Dakota": "ND",
"Northern Mariana Islands": "MP",
"Ohio": "OH",
"Oklahoma": "OK",
"Oregon": "OR",
"Pennsylvania": "PA",
"Puerto Rico": "PR",
"Rhode Island": "RI",
"South Carolina": "SC",
"South Dakota": "SD",
"Tennessee": "TN",
"Texas": "TX",
"Utah": "UT",
"Vermont": "VT",
"Virgin Islands": "VI",
"Virginia": "VA",
"Washington": "WA",
"West Virginia": "WV",
"Wisconsin": "WI",
"Wyoming": "WY",
}
STATES_50 = {
"Alabama": "AL",
"Alaska": "AK",
"Arizona": "AZ",
"Arkansas": "AR",
"California": "CA",
"Colorado": "CO",
"Connecticut": "CT",
"Delaware": "DE",
"District of Columbia": "DC",
"Florida": "FL",
"Georgia": "GA",
"Hawaii": "HI",
"Idaho": "ID",
"Illinois": "IL",
"Indiana": "IN",
"Iowa": "IA",
"Kansas": "KS",
"Kentucky": "KY",
"Louisiana": "LA",
"Maine": "ME",
"Maryland": "MD",
"Massachusetts": "MA",
"Michigan": "MI",
"Minnesota": "MN",
"Mississippi": "MS",
"Missouri": "MO",
"Montana": "MT",
"Nebraska": "NE",
"Nevada": "NV",
"New Hampshire": "NH",
"New Jersey": "NJ",
"New Mexico": "NM",
"New York": "NY",
"North Carolina": "NC",
"North Dakota": "ND",
"Ohio": "OH",
"Oklahoma": "OK",
"Oregon": "OR",
"Pennsylvania": "PA",
"Rhode Island": "RI",
"South Carolina": "SC",
"South Dakota": "SD",
"Tennessee": "TN",
"Texas": "TX",
"Utah": "UT",
"Vermont": "VT",
"Virginia": "VA",
"Washington": "WA",
"West Virginia": "WV",
"Wisconsin": "WI",
"Wyoming": "WY",
}
us_fips = {
"Alabama": "01",
"Alaska": "02",
"Arizona": "04",
"Arkansas": "05",
"California": "06",
"Colorado": "08",
"Connecticut": "09",
"Delaware": "10",
"District of Columbia": "11",
"Florida": "12",
"Georgia": "13",
"Hawaii": "15",
"Idaho": "16",
"Illinois": "17",
"Indiana": "18",
"Iowa": "19",
"Kansas": "20",
"Kentucky": "21",
"Louisiana": "22",
"Maine": "23",
"Maryland": "24",
"Massachusetts": "25",
"Michigan": "26",
"Minnesota": "27",
"Mississippi": "28",
"Missouri": "29",
"Montana": "30",
"Nebraska": "31",
"Nevada": "32",
"New Hampshire": "33",
"New Jersey": "34",
"New Mexico": "35",
"New York": "36",
"North Carolina": "37",
"North Dakota": "38",
"Ohio": "39",
"Oklahoma": "40",
"Oregon": "41",
"Pennsylvania": "42",
"Rhode Island": "44",
"South Carolina": "45",
"South Dakota": "46",
"Tennessee": "47",
"Texas": "48",
"Utah": "49",
"Vermont": "50",
"Virginia": "51",
"Washington": "53",
"West Virginia": "54",
"Wisconsin": "55",
"Wyoming": "56",
"American Samoa": "60",
"Guam": "66",
"Northern Mariana Islands": "69",
"Puerto Rico": "72",
"Virgin Islands": "78",
}
ABBREV_US_FIPS = {US_STATE_ABBREV[state]: fips for state, fips in us_fips.items()}
# thank you to @kinghelix and @trevormarburger for this idea
abbrev_us_state = dict(map(reversed, US_STATE_ABBREV.items()))
# Simple test examples
if __name__ == "__main__":
    print("Wisconsin --> WI?", US_STATE_ABBREV["Wisconsin"] == "WI")
    print("WI --> Wisconsin?", abbrev_us_state["WI"] == "Wisconsin")
print(
"Number of entries (50 states, DC, 5 Territories) == 56? ", 56 == len(US_STATE_ABBREV),
)
|
the-stack_0_4589 | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
from .version import __version__, short_version
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
digit_version.append(int(patch_version[0]) - 1)
digit_version.append(int(patch_version[1]))
return digit_version
mmcv_minimum_version = '1.3.15'
mmcv_maximum_version = '1.5.0'
mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version <= digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
__all__ = ['__version__', 'short_version']
|
the-stack_0_4591 | """JSON input interface."""
import gzip
import json
from pathlib import Path
from typing import Optional, TextIO, Union
from ..music import Music
def load_json(
path: Union[str, Path, TextIO], compressed: Optional[bool] = None
) -> Music:
"""Load a JSON file into a Music object.
Parameters
----------
path : str, Path or TextIO
Path to the file or the file to load.
compressed : bool, optional
Whether the file is a compressed JSON file (`.json.gz`). Has no
effect when `path` is a file object. Defaults to infer from the
extension (`.gz`).
Returns
-------
:class:`muspy.Music`
Loaded Music object.
Notes
-----
When a path is given, assume UTF-8 encoding and gzip compression if
`compressed=True`.
"""
if isinstance(path, (str, Path)):
if compressed is None:
if str(path).lower().endswith(".gz"):
compressed = True
else:
compressed = False
if compressed:
with gzip.open(path, "rt", encoding="utf-8") as f:
return Music.from_dict(json.load(f))
with open(path, encoding="utf-8") as f:
return Music.from_dict(json.load(f))
return Music.from_dict(json.load(path))
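# Usage sketch (illustrative; "song.json.gz" is a hypothetical path):
#   music = load_json("song.json.gz")                  # compression inferred from ".gz"
#   music = load_json("song.json", compressed=False)   # plain JSON, stated explicitly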
|
the-stack_0_4592 | """Draw the histogram of the pose distributions
Run it like this:
`python3 -m experimental.distribution.py`
Do not forget to set the dataset file path.
"""
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from dataset import get_parsed_dataset
from experimental.pose_estimator import PoseEstimator
if __name__ == "__main__":
ds = get_parsed_dataset("data/helen.record", 1, False)
# Counters
n_faces = 0
pitches = []
yaws = []
rolls = []
for image, marks in ds:
# image = (image.numpy()[0]*255).astype(np.uint8)
height, width = image.shape[1:3]
pose_estimator = PoseEstimator(img_size=(height, width))
marks = np.reshape(marks, (-1, 2))*width
pose = pose_estimator.solve_pose_by_68_points(marks)
# Solve the pitch, yaw and roll angels.
r_mat, _ = cv2.Rodrigues(pose[0])
p_mat = np.hstack((r_mat, np.array([[0], [0], [0]])))
_, _, _, _, _, _, u_angle = cv2.decomposeProjectionMatrix(p_mat)
pitch, yaw, roll = u_angle.flatten()
# I do not know why the roll axis seems flipted 180 degree. Manually by pass
# this issue.
if roll > 0:
roll = 180-roll
elif roll < 0:
roll = -(180 + roll)
pitches.append(pitch)
yaws.append(yaw)
rolls.append(roll)
n_faces += 1
# print("pitch: {:.2f}, yaw: {:.2f}, roll: {:.2f}".format(
# pitch, yaw, roll))
# for mark in marks:
# cv2.circle(image, tuple(mark), 1, (0, 255, 0), 1)
# cv2.imshow("image", image)
# if cv2.waitKey() == 27:
# break
fig, ax = plt.subplots(3, 1)
ax[0].hist(pitches, 40, (-60, 60), density=True)
ax[1].hist(yaws, 40, (-60, 60), density=True)
ax[2].hist(rolls, 40, (-60, 60), density=True)
plt.show()
print(n_faces)
|
the-stack_0_4594 | # -*- coding: utf-8 -*-
# Options for the forms
ufs = ['SP', 'AC', 'AL', 'AP', 'AM', 'BA', 'CE', 'DF', 'ES', 'GO', 'MA',
'MT', 'MS', 'MG', 'PR', 'PB', 'PA', 'PE', 'PI', 'RJ', 'RN', 'RS',
'RO', 'RR', 'SC', 'SE', 'TO']
escolaridade = ['Nenhuma', '1º grau', '2º grau', 'Superior']
cor = ['Branca', 'Negra', 'Parda', 'Indígena', 'Asiática']
estadocivil = ['Casada', 'Solteira (sem união estável)',
'Solteira (com união estável)', 'Outra']
sexo = ['Feminino', 'Masculino']
tipo = ['Particular', 'Convênio']
db.define_table('pacientes',
Field('nome', label='Nome', requires=IS_NOT_EMPTY()),
Field('sexo', label='Sexo', default='Feminino', requires=IS_IN_SET(sexo)),
Field('cpf', label='CPF'),
Field('profissao', label='Profissão'),
Field('nascimento', label='Data de nascimento', type='date',
requires=[IS_NOT_EMPTY(), IS_DATE(format='%d/%m/%Y')]),
Field('telefone', label='Telefone', requires=IS_NOT_EMPTY()),
Field('escolaridade', label='Escolaridade',
requires=IS_EMPTY_OR(IS_IN_SET(escolaridade))),
Field('estadocivil', label='Estado civil',
requires=IS_EMPTY_OR(IS_IN_SET(estadocivil))),
Field('cor', label='Cor',
requires=IS_EMPTY_OR(IS_IN_SET(cor))),
Field('image', 'upload', label='Foto'),
Field('endereco', label='Endereço'),
Field('cidade', label='Cidade'),
Field('uf', label='UF', default='SP',
requires=IS_EMPTY_OR(IS_IN_SET(ufs))),
Field('cep', label='CEP'),
Field('observacoes', label='Observações', type='text'),
Field('tipo', label='Tipo de atendimento', requires=IS_IN_SET(tipo)),
format='%(nome)s'
)
db.pacientes.cpf.represent = lambda field, x: field if field else 'Não informado'
def BuscaPaciente(id):
if not id:
raise HTTP(404, 'ID paciente não encontrado')
try:
paciente = db(db.pacientes.id == id).select().first()
except ValueError:
raise HTTP(404, 'Argumento PACIENTE inválido')
if not paciente:
raise HTTP(404, 'Paciente não encontrado')
if paciente.nascimento:
paciente.nascimento = paciente.nascimento.strftime('%d/%m/%Y')
NI = 'Não informado'
    # Replace empty fields with the default text. Assigning to the loop variable would
    # not change the record, so iterate over the field names and update the row directly.
    campos = ['endereco', 'cidade', 'telefone', 'escolaridade', 'observacoes',
              'cpf', 'uf', 'estadocivil', 'cor', 'cep']
    for campo in campos:
        if not paciente[campo]:
            paciente[campo] = NI
return paciente
def BuscaTodosPacientes():
pacientes = db(db.pacientes).select()
return pacientes
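# Usage sketch (illustrative web2py controller code; the controller name is hypothetical):
#   def ver_paciente():
#       paciente = BuscaPaciente(request.args(0))
#       return dict(paciente=paciente)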
|
the-stack_0_4595 | """Utility functions for transducer models."""
import os
import numpy as np
import torch
from espnet_pytorch_library.nets_utils import pad_list
def prepare_loss_inputs(ys_pad, hlens, blank_id=0, ignore_id=-1):
"""Prepare tensors for transducer loss computation.
Args:
ys_pad (torch.Tensor): batch of padded target sequences (B, Lmax)
hlens (torch.Tensor): batch of hidden sequence lengthts (B)
or batch of masks (B, 1, Tmax)
blank_id (int): index of blank label
ignore_id (int): index of initial padding
Returns:
ys_in_pad (torch.Tensor): batch of padded target sequences + blank (B, Lmax + 1)
target (torch.Tensor): batch of padded target sequences (B, Lmax)
pred_len (torch.Tensor): batch of hidden sequence lengths (B)
target_len (torch.Tensor): batch of output sequence lengths (B)
"""
device = ys_pad.device
ys = [y[y != ignore_id] for y in ys_pad]
blank = ys[0].new([blank_id])
ys_in_pad = pad_list([torch.cat([blank, y], dim=0) for y in ys], blank_id)
ys_out_pad = pad_list([torch.cat([y, blank], dim=0) for y in ys], ignore_id)
target = pad_list(ys, blank_id).type(torch.int32).to(device)
target_len = torch.IntTensor([y.size(0) for y in ys]).to(device)
if torch.is_tensor(hlens):
if hlens.dim() > 1:
hs = [h[h != 0] for h in hlens]
hlens = list(map(int, [h.size(0) for h in hs]))
else:
hlens = list(map(int, hlens))
pred_len = torch.IntTensor(hlens).to(device)
return ys_in_pad, ys_out_pad, target, pred_len, target_len
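# Shape sketch for prepare_loss_inputs (hypothetical tensors, blank_id=0, ignore_id=-1):
#   ys_pad = torch.tensor([[1, 2, 3], [4, 5, -1]]); hlens = torch.tensor([6, 4])
#   ys_in_pad, ys_out_pad, target, pred_len, target_len = prepare_loss_inputs(ys_pad, hlens)
#   # ys_in_pad -> [[0, 1, 2, 3], [0, 4, 5, 0]]   (blank prepended, padded with blank_id)
#   # target -> [[1, 2, 3], [4, 5, 0]], target_len -> [3, 2], pred_len -> [6, 4]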
def valid_aux_task_layer_list(aux_layer_ids, enc_num_layers):
"""Check whether input list of auxiliary layer ids is valid.
Return the valid list sorted with duplicated removed.
Args:
aux_layer_ids (list): Auxiliary layers ids
enc_num_layers (int): Number of encoder layers
Returns:
valid (list): Validated list of layers for auxiliary task
"""
if (
not isinstance(aux_layer_ids, list)
or not aux_layer_ids
or not all(isinstance(layer, int) for layer in aux_layer_ids)
):
raise ValueError("--aux-task-layer-list argument takes a list of layer ids.")
sorted_list = sorted(aux_layer_ids, key=int, reverse=False)
valid = list(filter(lambda x: 0 <= x < enc_num_layers, sorted_list))
if sorted_list != valid:
raise ValueError(
"Provided list of layer ids for auxiliary task is incorrect. "
"IDs should be between [0, %d]" % (enc_num_layers - 1)
)
return valid
def is_prefix(x, pref):
"""Check prefix.
Args:
x (list): token id sequence
pref (list): token id sequence
Returns:
(boolean): whether pref is a prefix of x.
"""
if len(pref) >= len(x):
return False
for i in range(len(pref)):
if pref[i] != x[i]:
return False
return True
def substract(x, subset):
"""Remove elements of subset if corresponding token id sequence exist in x.
Args:
x (list): set of hypotheses
subset (list): subset of hypotheses
Returns:
final (list): new set
"""
final = []
for x_ in x:
if any(x_.yseq == sub.yseq for sub in subset):
continue
final.append(x_)
return final
def select_lm_state(lm_states, idx, lm_layers, is_wordlm):
"""Get LM state from batch for given id.
Args:
lm_states (list or dict): batch of LM states
idx (int): index to extract state from batch state
lm_layers (int): number of LM layers
is_wordlm (bool): whether provided LM is a word-LM
Returns:
idx_state (dict): LM state for given id
"""
if is_wordlm:
idx_state = lm_states[idx]
else:
idx_state = {}
idx_state["c"] = [lm_states["c"][layer][idx] for layer in range(lm_layers)]
idx_state["h"] = [lm_states["h"][layer][idx] for layer in range(lm_layers)]
return idx_state
def create_lm_batch_state(lm_states_list, lm_layers, is_wordlm):
"""Create batch of LM states.
Args:
lm_states (list or dict): list of individual LM states
lm_layers (int): number of LM layers
is_wordlm (bool): whether provided LM is a word-LM
Returns:
batch_states (list): batch of LM states
"""
if is_wordlm:
batch_states = lm_states_list
else:
batch_states = {}
batch_states["c"] = [
torch.stack([state["c"][layer] for state in lm_states_list])
for layer in range(lm_layers)
]
batch_states["h"] = [
torch.stack([state["h"][layer] for state in lm_states_list])
for layer in range(lm_layers)
]
return batch_states
def init_lm_state(lm_model):
"""Initialize LM state.
Args:
lm_model (torch.nn.Module): LM module
Returns:
lm_state (dict): initial LM state
"""
lm_layers = len(lm_model.rnn)
lm_units_typ = lm_model.typ
lm_units = lm_model.n_units
p = next(lm_model.parameters())
h = [
torch.zeros(lm_units).to(device=p.device, dtype=p.dtype)
for _ in range(lm_layers)
]
lm_state = {"h": h}
if lm_units_typ == "lstm":
lm_state["c"] = [
torch.zeros(lm_units).to(device=p.device, dtype=p.dtype)
for _ in range(lm_layers)
]
return lm_state
def recombine_hyps(hyps):
"""Recombine hypotheses with equivalent output sequence.
Args:
hyps (list): list of hypotheses
Returns:
final (list): list of recombined hypotheses
"""
final = []
for hyp in hyps:
seq_final = [f.yseq for f in final if f.yseq]
if hyp.yseq in seq_final:
seq_pos = seq_final.index(hyp.yseq)
final[seq_pos].score = np.logaddexp(final[seq_pos].score, hyp.score)
else:
final.append(hyp)
    return final
def pad_sequence(seqlist, pad_token):
"""Left pad list of token id sequences.
Args:
seqlist (list): list of token id sequences
pad_token (int): padding token id
Returns:
final (list): list of padded token id sequences
"""
maxlen = max(len(x) for x in seqlist)
final = [([pad_token] * (maxlen - len(x))) + x for x in seqlist]
return final
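# Quick example of the left padding above (illustrative):
#   pad_sequence([[1, 2, 3], [4]], pad_token=0) -> [[1, 2, 3], [0, 0, 4]]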
def check_state(state, max_len, pad_token):
"""Check state and left pad or trim if necessary.
Args:
state (list): list of of L decoder states (in_len, dec_dim)
max_len (int): maximum length authorized
pad_token (int): padding token id
Returns:
final (list): list of L padded decoder states (1, max_len, dec_dim)
"""
if state is None or max_len < 1 or state[0].size(1) == max_len:
return state
curr_len = state[0].size(1)
if curr_len > max_len:
trim_val = int(state[0].size(1) - max_len)
for i, s in enumerate(state):
state[i] = s[:, trim_val:, :]
else:
layers = len(state)
ddim = state[0].size(2)
final_dims = (1, max_len, ddim)
final = [state[0].data.new(*final_dims).fill_(pad_token) for _ in range(layers)]
for i, s in enumerate(state):
final[i][:, (max_len - s.size(1)) : max_len, :] = s
return final
return state
def check_batch_state(state, max_len, pad_token):
"""Check batch of states and left pad or trim if necessary.
Args:
state (list): list of of L decoder states (B, ?, dec_dim)
max_len (int): maximum length authorized
pad_token (int): padding token id
Returns:
final (list): list of L decoder states (B, pred_len, dec_dim)
"""
final_dims = (len(state), max_len, state[0].size(1))
final = state[0].data.new(*final_dims).fill_(pad_token)
for i, s in enumerate(state):
curr_len = s.size(0)
if curr_len < max_len:
final[i, (max_len - curr_len) : max_len, :] = s
else:
final[i, :, :] = s[(curr_len - max_len) :, :]
return final
def custom_torch_load(model_path, model, training=True):
"""Load transducer model modules and parameters with training-only ones removed.
Args:
model_path (str): Model path
model (torch.nn.Module): The model with pretrained modules
"""
if "snapshot" in os.path.basename(model_path):
model_state_dict = torch.load(
model_path, map_location=lambda storage, loc: storage
)["model"]
else:
model_state_dict = torch.load(
model_path, map_location=lambda storage, loc: storage
)
if not training:
model_state_dict = {
k: v for k, v in model_state_dict.items() if not k.startswith("aux")
}
model.load_state_dict(model_state_dict)
del model_state_dict
|
the-stack_0_4597 | import os
import re
import sys
import textwrap
from typing import Dict
from typing import Generator
import pytest
from _pytest.compat import TYPE_CHECKING
from _pytest.monkeypatch import MonkeyPatch
if TYPE_CHECKING:
from typing import Type
@pytest.fixture
def mp() -> Generator[MonkeyPatch, None, None]:
cwd = os.getcwd()
sys_path = list(sys.path)
yield MonkeyPatch()
sys.path[:] = sys_path
os.chdir(cwd)
def test_setattr() -> None:
class A:
x = 1
monkeypatch = MonkeyPatch()
pytest.raises(AttributeError, monkeypatch.setattr, A, "notexists", 2)
monkeypatch.setattr(A, "y", 2, raising=False)
assert A.y == 2 # type: ignore
monkeypatch.undo()
assert not hasattr(A, "y")
monkeypatch = MonkeyPatch()
monkeypatch.setattr(A, "x", 2)
assert A.x == 2
monkeypatch.setattr(A, "x", 3)
assert A.x == 3
monkeypatch.undo()
assert A.x == 1
A.x = 5
monkeypatch.undo() # double-undo makes no modification
assert A.x == 5
class TestSetattrWithImportPath:
def test_string_expression(self, monkeypatch):
monkeypatch.setattr("os.path.abspath", lambda x: "hello2")
assert os.path.abspath("123") == "hello2"
def test_string_expression_class(self, monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr("_pytest.config.Config", 42)
import _pytest
assert _pytest.config.Config == 42 # type: ignore
def test_unicode_string(self, monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr("_pytest.config.Config", 42)
import _pytest
assert _pytest.config.Config == 42 # type: ignore
monkeypatch.delattr("_pytest.config.Config")
def test_wrong_target(self, monkeypatch):
pytest.raises(TypeError, lambda: monkeypatch.setattr(None, None))
def test_unknown_import(self, monkeypatch):
pytest.raises(ImportError, lambda: monkeypatch.setattr("unkn123.classx", None))
def test_unknown_attr(self, monkeypatch):
pytest.raises(
AttributeError, lambda: monkeypatch.setattr("os.path.qweqwe", None)
)
def test_unknown_attr_non_raising(self, monkeypatch: MonkeyPatch) -> None:
# https://github.com/pytest-dev/pytest/issues/746
monkeypatch.setattr("os.path.qweqwe", 42, raising=False)
assert os.path.qweqwe == 42 # type: ignore
def test_delattr(self, monkeypatch):
monkeypatch.delattr("os.path.abspath")
assert not hasattr(os.path, "abspath")
monkeypatch.undo()
assert os.path.abspath
def test_delattr():
class A:
x = 1
monkeypatch = MonkeyPatch()
monkeypatch.delattr(A, "x")
assert not hasattr(A, "x")
monkeypatch.undo()
assert A.x == 1
monkeypatch = MonkeyPatch()
monkeypatch.delattr(A, "x")
pytest.raises(AttributeError, monkeypatch.delattr, A, "y")
monkeypatch.delattr(A, "y", raising=False)
monkeypatch.setattr(A, "x", 5, raising=False)
assert A.x == 5
monkeypatch.undo()
assert A.x == 1
def test_setitem():
d = {"x": 1}
monkeypatch = MonkeyPatch()
monkeypatch.setitem(d, "x", 2)
monkeypatch.setitem(d, "y", 1700)
monkeypatch.setitem(d, "y", 1700)
assert d["x"] == 2
assert d["y"] == 1700
monkeypatch.setitem(d, "x", 3)
assert d["x"] == 3
monkeypatch.undo()
assert d["x"] == 1
assert "y" not in d
d["x"] = 5
monkeypatch.undo()
assert d["x"] == 5
def test_setitem_deleted_meanwhile() -> None:
d = {} # type: Dict[str, object]
monkeypatch = MonkeyPatch()
monkeypatch.setitem(d, "x", 2)
del d["x"]
monkeypatch.undo()
assert not d
@pytest.mark.parametrize("before", [True, False])
def test_setenv_deleted_meanwhile(before):
key = "qwpeoip123"
if before:
os.environ[key] = "world"
monkeypatch = MonkeyPatch()
monkeypatch.setenv(key, "hello")
del os.environ[key]
monkeypatch.undo()
if before:
assert os.environ[key] == "world"
del os.environ[key]
else:
assert key not in os.environ
def test_delitem() -> None:
d = {"x": 1} # type: Dict[str, object]
monkeypatch = MonkeyPatch()
monkeypatch.delitem(d, "x")
assert "x" not in d
monkeypatch.delitem(d, "y", raising=False)
pytest.raises(KeyError, monkeypatch.delitem, d, "y")
assert not d
monkeypatch.setitem(d, "y", 1700)
assert d["y"] == 1700
d["hello"] = "world"
monkeypatch.setitem(d, "x", 1500)
assert d["x"] == 1500
monkeypatch.undo()
assert d == {"hello": "world", "x": 1}
def test_setenv():
monkeypatch = MonkeyPatch()
with pytest.warns(pytest.PytestWarning):
monkeypatch.setenv("XYZ123", 2)
import os
assert os.environ["XYZ123"] == "2"
monkeypatch.undo()
assert "XYZ123" not in os.environ
def test_delenv():
name = "xyz1234"
assert name not in os.environ
monkeypatch = MonkeyPatch()
pytest.raises(KeyError, monkeypatch.delenv, name, raising=True)
monkeypatch.delenv(name, raising=False)
monkeypatch.undo()
os.environ[name] = "1"
try:
monkeypatch = MonkeyPatch()
monkeypatch.delenv(name)
assert name not in os.environ
monkeypatch.setenv(name, "3")
assert os.environ[name] == "3"
monkeypatch.undo()
assert os.environ[name] == "1"
finally:
if name in os.environ:
del os.environ[name]
class TestEnvironWarnings:
"""
os.environ keys and values should be native strings, otherwise it will cause problems with other modules (notably
subprocess). On Python 2 os.environ accepts anything without complaining, while Python 3 does the right thing
and raises an error.
"""
VAR_NAME = "PYTEST_INTERNAL_MY_VAR"
def test_setenv_non_str_warning(self, monkeypatch):
value = 2
msg = (
"Value of environment variable PYTEST_INTERNAL_MY_VAR type should be str, "
"but got 2 (type: int); converted to str implicitly"
)
with pytest.warns(pytest.PytestWarning, match=re.escape(msg)):
monkeypatch.setenv(str(self.VAR_NAME), value)
def test_setenv_prepend():
import os
monkeypatch = MonkeyPatch()
with pytest.warns(pytest.PytestWarning):
monkeypatch.setenv("XYZ123", 2, prepend="-")
assert os.environ["XYZ123"] == "2"
with pytest.warns(pytest.PytestWarning):
monkeypatch.setenv("XYZ123", 3, prepend="-")
assert os.environ["XYZ123"] == "3-2"
monkeypatch.undo()
assert "XYZ123" not in os.environ
def test_monkeypatch_plugin(testdir):
reprec = testdir.inline_runsource(
"""
def test_method(monkeypatch):
assert monkeypatch.__class__.__name__ == "MonkeyPatch"
"""
)
res = reprec.countoutcomes()
assert tuple(res) == (1, 0, 0), res
def test_syspath_prepend(mp: MonkeyPatch):
old = list(sys.path)
mp.syspath_prepend("world")
mp.syspath_prepend("hello")
assert sys.path[0] == "hello"
assert sys.path[1] == "world"
mp.undo()
assert sys.path == old
mp.undo()
assert sys.path == old
def test_syspath_prepend_double_undo(mp: MonkeyPatch):
old_syspath = sys.path[:]
try:
mp.syspath_prepend("hello world")
mp.undo()
sys.path.append("more hello world")
mp.undo()
assert sys.path[-1] == "more hello world"
finally:
sys.path[:] = old_syspath
def test_chdir_with_path_local(mp: MonkeyPatch, tmpdir):
mp.chdir(tmpdir)
assert os.getcwd() == tmpdir.strpath
def test_chdir_with_str(mp: MonkeyPatch, tmpdir):
mp.chdir(tmpdir.strpath)
assert os.getcwd() == tmpdir.strpath
def test_chdir_undo(mp: MonkeyPatch, tmpdir):
cwd = os.getcwd()
mp.chdir(tmpdir)
mp.undo()
assert os.getcwd() == cwd
def test_chdir_double_undo(mp: MonkeyPatch, tmpdir):
mp.chdir(tmpdir.strpath)
mp.undo()
tmpdir.chdir()
mp.undo()
assert os.getcwd() == tmpdir.strpath
def test_issue185_time_breaks(testdir):
testdir.makepyfile(
"""
import time
def test_m(monkeypatch):
def f():
raise Exception
monkeypatch.setattr(time, "time", f)
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
"""
*1 passed*
"""
)
def test_importerror(testdir):
p = testdir.mkpydir("package")
p.join("a.py").write(
textwrap.dedent(
"""\
import doesnotexist
x = 1
"""
)
)
testdir.tmpdir.join("test_importerror.py").write(
textwrap.dedent(
"""\
def test_importerror(monkeypatch):
monkeypatch.setattr('package.a.x', 2)
"""
)
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
"""
*import error in package.a: No module named 'doesnotexist'*
"""
)
class Sample:
@staticmethod
def hello() -> bool:
return True
class SampleInherit(Sample):
pass
@pytest.mark.parametrize(
"Sample", [Sample, SampleInherit], ids=["new", "new-inherit"],
)
def test_issue156_undo_staticmethod(Sample: "Type[Sample]") -> None:
monkeypatch = MonkeyPatch()
monkeypatch.setattr(Sample, "hello", None)
assert Sample.hello is None
monkeypatch.undo()
assert Sample.hello()
def test_undo_class_descriptors_delattr():
class SampleParent:
@classmethod
def hello(_cls):
pass
@staticmethod
def world():
pass
class SampleChild(SampleParent):
pass
monkeypatch = MonkeyPatch()
original_hello = SampleChild.hello
original_world = SampleChild.world
monkeypatch.delattr(SampleParent, "hello")
monkeypatch.delattr(SampleParent, "world")
assert getattr(SampleParent, "hello", None) is None
assert getattr(SampleParent, "world", None) is None
monkeypatch.undo()
assert original_hello == SampleChild.hello
assert original_world == SampleChild.world
def test_issue1338_name_resolving():
pytest.importorskip("requests")
monkeypatch = MonkeyPatch()
try:
monkeypatch.delattr("requests.sessions.Session.request")
finally:
monkeypatch.undo()
def test_context():
monkeypatch = MonkeyPatch()
import functools
import inspect
with monkeypatch.context() as m:
m.setattr(functools, "partial", 3)
assert not inspect.isclass(functools.partial)
assert inspect.isclass(functools.partial)
def test_syspath_prepend_with_namespace_packages(testdir, monkeypatch):
for dirname in "hello", "world":
d = testdir.mkdir(dirname)
ns = d.mkdir("ns_pkg")
ns.join("__init__.py").write(
"__import__('pkg_resources').declare_namespace(__name__)"
)
lib = ns.mkdir(dirname)
lib.join("__init__.py").write("def check(): return %r" % dirname)
monkeypatch.syspath_prepend("hello")
import ns_pkg.hello
assert ns_pkg.hello.check() == "hello"
with pytest.raises(ImportError):
import ns_pkg.world
# Prepending should call fixup_namespace_packages.
monkeypatch.syspath_prepend("world")
import ns_pkg.world
assert ns_pkg.world.check() == "world"
# Should invalidate caches via importlib.invalidate_caches.
tmpdir = testdir.tmpdir
modules_tmpdir = tmpdir.mkdir("modules_tmpdir")
monkeypatch.syspath_prepend(str(modules_tmpdir))
modules_tmpdir.join("main_app.py").write("app = True")
from main_app import app # noqa: F401
|
the-stack_0_4598 | import os
def screen_clear():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
screen_clear()
print("Starting....\n")
import time
import cv2
import face_recognition
import numpy
print("Dependencies imported Successfully")
print("Initializing Camera")
cap = cv2.VideoCapture(0)
print("Camera Initialized Successfully")
time.sleep(2)
knownFaces = []
knownNames = []
while True:
screen_clear()
print ("\n\n\t\t\tSMART SURVEILLANCE SYSTEM")
n = input("\n\n[Press 1] To Start SURVEILLANCE\n[Press 2] To Register new Face\n[Press 99] To exit\n\n\nEnter Your choice : ")
if n == '1':
while True:
            success, img = cap.read()
            # face_recognition expects RGB images, while OpenCV captures BGR frames.
            rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            locs = face_recognition.face_locations(rgb)
            encoding = face_recognition.face_encodings(rgb, locs)
for face,loc in zip(encoding,locs):
result = face_recognition.compare_faces(knownFaces, face, 0.5)
match = "UNKNOWN"
if True in result:
match = knownNames[result.index(True)]
cv2.rectangle(img, (loc[3],loc[0]), (loc[1],loc[2]), (0,255,0), 2)
cv2.rectangle(img, (loc[3],loc[2]-25), (loc[1],loc[2]), (0,255,0),cv2.FILLED)
cv2.putText(img, match, (loc[3]+6, loc[2]-6), cv2.FONT_HERSHEY_COMPLEX, 0.65, (255,255,255), 1)
cv2.imshow("Searching",img)
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyWindow("Searching")
break;
elif n == '2':
name = input("Enter Your Name : ")
while True:
success, img = cap.read()
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
loc = face_recognition.face_locations(img)
if len(loc) == 1:
cv2.rectangle(img, (loc[0][3],loc[0][0]), (loc[0][1],loc[0][2]), (255,255,255), 1)
elif len(loc) < 1:
print("NO face Detected")
else:
print("Many faces Detected")
for i in loc:
cv2.rectangle(img, (i[3],i[0]), (i[1],i[2]), (255,0,255), 2)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
cv2.imshow("VideoCapture",img)
if cv2.waitKey(1) & 0xFF == ord('q') and len(loc) == 1:
                # img is BGR again at this point; convert to RGB so the stored encoding
                # matches the RGB encodings computed during surveillance.
                encode = face_recognition.face_encodings(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), loc)[0]
knownFaces.append(encode)
knownNames.append(name)
cv2.destroyWindow("VideoCapture")
break;
print(loc)
print(knownFaces)
print(knownNames)
elif n == '99':
screen_clear()
        print("\n\n\n\t\t\t[Exiting...] Thank You\n\n\n\n\n\n\n\n\n ")
exit()
else:
screen_clear()
print("\n\n\n\t\t\tWrong Choice")
time.sleep(2)
|
the-stack_0_4599 | from django.shortcuts import render
# Create your views here.
from datetime import datetime
from info.forms import CovidDataForm
from info.models import CovidData
from django.views.generic import ListView,CreateView
from django import forms
#REST
from django.http import HttpResponse
from django.shortcuts import redirect, get_object_or_404
from rest_framework import status, viewsets
from rest_framework.views import APIView
from . serializers import CovidDataSerializer
class HomeListViewNew(ListView):
queryset = CovidData.objects.all()
template_name = "info/home_new.html"
class CovidDataCreateForm(forms.ModelForm):
class Meta:
model= CovidData
fields = '__all__'
#fields = ['country_region', 'province_state', 'fips', 'active_cases' ]
class CovidDataCreateViewV2(CreateView):
model = CovidData
form_class = CovidDataCreateForm
template_name = "info/covid_create.html"
def about(request):
return render(request, "info/about.html")
def contact(request):
return render(request, "info/contact.html")
# Add this code elsewhere in the file:
def logdata(request):
form = CovidDataForm(request.POST or None)
if request.method == "POST":
if form.is_valid():
message = form.save(commit=False)
message.save()
return redirect("home-new")
else:
return render(request, "info/enter_data.html", {"form": form})
class CovidDataViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows CovidData records to be viewed or edited.
"""
queryset = CovidData.objects.all()
serializer_class = CovidDataSerializer
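# Illustrative sketch (not from this file): the viewset above would typically be exposed
# in urls.py through a DRF router, e.g.
#   router = routers.DefaultRouter()
#   router.register(r'coviddata', CovidDataViewSet)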
|
the-stack_0_4600 | import pandas as pd
import pickle
print('\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n')
print('Starting data labelling...')
with open('cache/data_labeler_vars.pickle', 'rb') as f:
lastIndex, datajobs = pickle.load(f)
print('Starting from index: ' + str(lastIndex))
for i in range(lastIndex, datajobs.shape[0]):
print('\n\n\n\n\n\n\n')
print('Current Index: ' + str(i))
print("JOB TITLE: " + datajobs.loc[i, "job_title"])
print("JOB DESCRIPTION:")
print(*datajobs.loc[i, "job_description"].split('.'), sep='\n')
print('Entry level job? (Y/n) \n Type Q to quit.')
while True:
response = input()
if response == 'Y':
#Save response as 1
datajobs.loc[i, "entry_level_q"] = 1
print('Response saved as yes.')
break
elif response == 'n':
#Save response as 0
datajobs.loc[i, "entry_level_q"] = 0
print('Response saved as no.')
break
elif response == 'Q':
print('Quitting...')
break
else:
print('Input not recognized. Please enter either Y for yes, n for no, or Q to quit.')
if response == 'Q':
break
lastIndex += 1
#Save updated lastIndex
with open('cache/data_labeler_vars.pickle', 'wb') as f:
pickle.dump((lastIndex, datajobs), f)
|
the-stack_0_4602 | import sublime
import sublime_plugin
import os
from collections import OrderedDict, namedtuple
from hyperhelp.common import log, hh_syntax
from hyperhelp.core import help_index_list, lookup_help_topic
###----------------------------------------------------------------------------
# A representation of what is going to be linted.
LintTarget = namedtuple("LintTarget", [
"target_type", "pkg_info", "files"
])
# Linters produce an array of these tuples to indicate problems found in files.
# type can be one of "info", "warning" or "error".
LintResult = namedtuple("LintResult", [
"type", "file", "line", "column", "message"
])
###----------------------------------------------------------------------------
class LinterBase():
"""
The base class for all lint operations in the help linter.
"""
def __init__(self, pkg_info):
self.pkg_info = pkg_info
self.issues = list()
self.index_file = os.path.relpath(
pkg_info.index_file,
"Packages/%s/" % (self.pkg_info.doc_root))
def lint(self, view, file_name):
"""
This is invoked with a view that contains raw help text from the help
file, which is contained in the help index given in the constructor.
This will be invoked for each file to be linted.
"""
pass
def add(self, view, m_type, file, point, msg, *args):
"""
Add a result to the internal result list. point is the location that is
the focus of the error. If view is None, the point is ignored and the
issue is added at line 1, column 1.
"""
pos = view.rowcol(point) if view is not None else (0, 0)
msg = msg % args
self.issues.append(LintResult(m_type, file, pos[0] + 1, pos[1]+1, msg))
def add_index(self, m_type, msg, *args):
"""
Add a result that is focused on the help index. As there is no way to
know the proper location except by hand parsing the index, no view is
needed and the position of the issue is always row 1, column 1.
"""
return self.add(None, m_type, self.index_file, 0, msg, *args)
def results(self):
"""
This is invoked after all calls to the lint() method have finished to
collect the final results of the lint operation.
This should return a list of LintResult tuples that indicate the issues
that have been found or an empty list if there are no issues.
The default is to return the results instance variable.
"""
return self.issues
###----------------------------------------------------------------------------
def can_lint_view(view):
"""
Determine if the provided view can be the source of a lint. To be valid
the view must represent a hyperhelp data file that has a path rooted in the
Packages folder inside of a package whose help index is known.
"""
if (view is not None and view.file_name() is not None and
view.file_name().startswith(sublime.packages_path()) and
view.match_selector(0, "text.hyperhelp")):
name = os.path.relpath(view.file_name(), sublime.packages_path())
pkg_name = name[:name.index(os.sep)]
return pkg_name in help_index_list()
return False
def find_lint_target(view):
"""
Examine a given view and return a LintTarget that describes what is being
linted. None is returned if the view is not a valid lint target.
"""
if not can_lint_view(view):
return None
name = view.file_name()
parts = os.path.relpath(name, sublime.packages_path()).split(os.sep)
pkg_name = parts[0]
target = parts[-1]
pkg_info = help_index_list().get(pkg_name)
if view.match_selector(0, "text.hyperhelp.help"):
return LintTarget("single", pkg_info, [target])
return LintTarget("package", pkg_info, list(pkg_info.help_files))
def get_linters(target):
"""
Given a LintTarget, return back an array of all of the linters that should
be run for that target.
Some targets may only be run on the package as a whole while others may be
allowed on a file by file basis. The returned linters may also be affected
by user settings.
"""
linters = []
linters.append(MissingLinkAnchorLinter(target.pkg_info))
if target.target_type == "package":
linters.append(MissingHelpSourceLinter(target.pkg_info))
return linters
def get_lint_file(filename):
"""
Return a view that contains the contents of the provided file name.
If the file is not already loaded, it is loaded into a hidden view and that
is returned instead.
Can return None if the file is not open and cannot be loaded.
"""
for window in sublime.windows():
view = window.find_open_file(filename)
if view is not None:
return view
content = None
try:
with open(filename, 'r') as file:
content = file.read()
except:
pass
if content:
view = sublime.active_window().create_output_panel("_hha_tmp", True)
view.run_command("select_all")
view.run_command("left_delete")
view.run_command("append", {"characters": content})
view.assign_syntax(hh_syntax("HyperHelp.sublime-syntax"))
return view
return None
def format_lint(pkg_info, issues, window=None):
"""
Takes a list of LintResult issues for a package and returns back output
suitable for passing to display_lint().
If a window is provided, display_lint() is called prior to returning in
order to display the output first.
"""
files = OrderedDict()
for issue in issues:
if issue.file not in files:
files[issue.file] = []
files[issue.file].append(issue)
output = ["Linting in help package: %s\n" % pkg_info.package]
warn = 0
err = 0
for file in files:
output.append("%s:" % file)
for issue in files[file]:
issue_pos = "%d:%d" % (issue.line, issue.column)
output.append(" %-7s @ %-7s %s" % (
issue.type, issue_pos, issue.message))
if issue.type == "warning":
warn += 1
elif issue.type == "error":
err += 1
output.append("")
output.append("%d warning%s, %d error%s" % (
warn,
"" if warn == 1 else "s",
err,
"" if err == 1 else "s"))
if window:
display_lint(window, pkg_info, output)
return output
def display_lint(window, pkg_info, output):
"""
Display the lint output provided into the given window. The output is
assumed to have been generated from the provided package, which is used to
know where the help files are located.
"""
view = window.create_output_panel("HyperHelpAuthor Lint", False)
basedir = os.path.join(sublime.packages_path(), pkg_info.doc_root)
if not isinstance(output, str):
output = "\n".join(output)
view.assign_syntax(hh_syntax("HyperHelpLinter.sublime-syntax"))
settings = view.settings()
settings.set("result_base_dir", basedir)
settings.set("result_file_regex", r"^([^:]+):$")
settings.set("result_line_regex", r"^.*?@ (\d+):(\d+)\s+(.*)$")
view.set_read_only(False)
view.run_command("append", {"characters": output})
view.set_read_only(True)
window.run_command("show_panel", {"panel": "output.HyperHelpAuthor Lint"})
###----------------------------------------------------------------------------
class MissingLinkAnchorLinter(LinterBase):
"""
Lint one or more help files to find all links that are currently broken
because their targets are not known.
"""
def lint(self, view, file_name):
topics = self.pkg_info.help_topics
regions = view.find_by_selector("meta.link, meta.anchor")
for pos in regions:
link = view.substr(pos)
if lookup_help_topic(self.pkg_info, link) is not None:
continue
stub = "link references unknown anchor '%s'"
if view.match_selector(pos.begin(), "meta.anchor"):
stub = "anchor '%s' is not in the help index"
self.add(view, "warning", file_name, pos.begin(),
stub % link.replace("\t", " "))
class MissingHelpSourceLinter(LinterBase):
"""
Lint the help index to determine if the list of help files listed in the
index matches the list of help files that exist for the package.
"""
def __init__(self, pkg_info):
super().__init__(pkg_info)
root = "Packages/%s/" % (self.pkg_info.doc_root)
d_files = {file[len(root):] for file in sublime.find_resources("*.txt")
if file.startswith(root)}
i_files = {key for key in self.pkg_info.help_files.keys()}
for file in d_files - i_files:
self.add_index(
"warning",
"Help file '%s' is in Packages/%s/ but missing from the index",
file, self.pkg_info.doc_root)
for file in i_files - d_files:
self.add_index(
"error",
"Help file '%s' is in the index but not in Packages/%s/",
file, self.pkg_info.doc_root)
###----------------------------------------------------------------------------
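# ---------------------------------------------------------------------------
# Usage sketch (hypothetical, for illustration only -- not part of the original
# module). It shows how the pieces above are typically chained together from a
# Sublime Text command; `view` and `window` are assumed to come from the editor
# runtime, and `path_for(...)` stands in for whatever resolves a help file name
# to an absolute path on disk.
#
#   target = find_lint_target(view)
#   if target is not None:
#       issues = []
#       for linter in get_linters(target):
#           for file in target.files:
#               lint_view = get_lint_file(path_for(target.pkg_info, file))
#               if lint_view is not None:
#                   linter.lint(lint_view, file)
#           issues.extend(linter.results())
#       format_lint(target.pkg_info, issues, window=window)
# ---------------------------------------------------------------------------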
|
the-stack_0_4603 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create two boxes and interpolate between them
#
pts = vtk.vtkPoints()
pts.InsertNextPoint(-1,-1,-1)
pts.InsertNextPoint(1,-1,-1)
pts.InsertNextPoint(1,1,-1)
pts.InsertNextPoint(-1,1,-1)
pts.InsertNextPoint(-1,-1,1)
pts.InsertNextPoint(1,-1,1)
pts.InsertNextPoint(1,1,1)
pts.InsertNextPoint(-1,1,1)
faces = vtk.vtkCellArray()
faces.InsertNextCell(4)
faces.InsertCellPoint(0)
faces.InsertCellPoint(3)
faces.InsertCellPoint(2)
faces.InsertCellPoint(1)
faces.InsertNextCell(4)
faces.InsertCellPoint(4)
faces.InsertCellPoint(5)
faces.InsertCellPoint(6)
faces.InsertCellPoint(7)
faces.InsertNextCell(4)
faces.InsertCellPoint(0)
faces.InsertCellPoint(1)
faces.InsertCellPoint(5)
faces.InsertCellPoint(4)
faces.InsertNextCell(4)
faces.InsertCellPoint(1)
faces.InsertCellPoint(2)
faces.InsertCellPoint(6)
faces.InsertCellPoint(5)
faces.InsertNextCell(4)
faces.InsertCellPoint(2)
faces.InsertCellPoint(3)
faces.InsertCellPoint(7)
faces.InsertCellPoint(6)
faces.InsertNextCell(4)
faces.InsertCellPoint(3)
faces.InsertCellPoint(0)
faces.InsertCellPoint(4)
faces.InsertCellPoint(7)
faceColors = vtk.vtkUnsignedCharArray()
faceColors.SetNumberOfComponents(3)
faceColors.SetNumberOfTuples(6)  # one RGB tuple per cube face
faceColors.InsertComponent(0,0,255)
faceColors.InsertComponent(0,1,0)
faceColors.InsertComponent(0,2,0)
faceColors.InsertComponent(1,0,0)
faceColors.InsertComponent(1,1,255)
faceColors.InsertComponent(1,2,0)
faceColors.InsertComponent(2,0,255)
faceColors.InsertComponent(2,1,255)
faceColors.InsertComponent(2,2,0)
faceColors.InsertComponent(3,0,0)
faceColors.InsertComponent(3,1,0)
faceColors.InsertComponent(3,2,255)
faceColors.InsertComponent(4,0,255)
faceColors.InsertComponent(4,1,0)
faceColors.InsertComponent(4,2,255)
faceColors.InsertComponent(5,0,0)
faceColors.InsertComponent(5,1,255)
faceColors.InsertComponent(5,2,255)
cube = vtk.vtkPolyData()
cube.SetPoints(pts)
cube.SetPolys(faces)
cube.GetCellData().SetScalars(faceColors)
t1 = vtk.vtkTransform()
t1.Translate(1,2,3)
t1.RotateX(15)
t1.Scale(4,2,1)
tpdf1 = vtk.vtkTransformPolyDataFilter()
tpdf1.SetInputData(cube)
tpdf1.SetTransform(t1)
cube1Mapper = vtk.vtkPolyDataMapper()
cube1Mapper.SetInputConnection(tpdf1.GetOutputPort())
cube1 = vtk.vtkActor()
cube1.SetMapper(cube1Mapper)
t2 = vtk.vtkTransform()
t2.Translate(5,10,15)
t2.RotateX(22.5)
t2.RotateY(15)
t2.RotateZ(85)
t2.Scale(1,2,4)
tpdf2 = vtk.vtkTransformPolyDataFilter()
tpdf2.SetInputData(cube)
tpdf2.SetTransform(t2)
cube2Mapper = vtk.vtkPolyDataMapper()
cube2Mapper.SetInputConnection(tpdf2.GetOutputPort())
cube2 = vtk.vtkActor()
cube2.SetMapper(cube2Mapper)
t3 = vtk.vtkTransform()
t3.Translate(5,-10,15)
t3.RotateX(13)
t3.RotateY(72)
t3.RotateZ(-15)
t3.Scale(2,4,1)
tpdf3 = vtk.vtkTransformPolyDataFilter()
tpdf3.SetInputData(cube)
tpdf3.SetTransform(t3)
cube3Mapper = vtk.vtkPolyDataMapper()
cube3Mapper.SetInputConnection(tpdf3.GetOutputPort())
cube3 = vtk.vtkActor()
cube3.SetMapper(cube3Mapper)
t4 = vtk.vtkTransform()
t4.Translate(10,-5,5)
t4.RotateX(66)
t4.RotateY(19)
t4.RotateZ(24)
t4.Scale(2,.5,1)
tpdf4 = vtk.vtkTransformPolyDataFilter()
tpdf4.SetInputData(cube)
tpdf4.SetTransform(t4)
cube4Mapper = vtk.vtkPolyDataMapper()
cube4Mapper.SetInputConnection(tpdf4.GetOutputPort())
cube4 = vtk.vtkActor()
cube4.SetMapper(cube4Mapper)
# Interpolate the transformation
cubeMapper = vtk.vtkPolyDataMapper()
cubeMapper.SetInputData(cube)
cubeActor = vtk.vtkActor()
cubeActor.SetMapper(cubeMapper)
# Interpolate some transformations, test along the way
interpolator = vtk.vtkTransformInterpolator()
#interpolator SetInterpolationTypeToLinear
interpolator.SetInterpolationTypeToSpline()
interpolator.AddTransform(0.0,cube1)
interpolator.AddTransform(8.0,cube2)
interpolator.AddTransform(18.2,cube3)
interpolator.AddTransform(24.4,cube4)
interpolator.Initialize()
#puts [interpolator GetNumberOfTransforms]
interpolator.AddTransform(0.0,t1)
interpolator.AddTransform(8.0,t2)
interpolator.AddTransform(18.2,t3)
interpolator.AddTransform(24.4,t4)
#puts [interpolator GetNumberOfTransforms]
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(cube1)
ren1.AddActor(cube2)
ren1.AddActor(cube3)
ren1.AddActor(cube4)
ren1.AddActor(cubeActor)
ren1.SetBackground(0,0,0)
renWin.SetSize(300,300)
ren1.SetBackground(0.1,0.2,0.4)
# render the image
#
camera = vtk.vtkCamera()
camera.SetClippingRange(31.2977,81.697)
camera.SetFocalPoint(3.0991,-2.00445,9.78648)
camera.SetPosition(-44.8481,-25.871,10.0645)
camera.SetViewAngle(30)
camera.SetViewUp(-0.0356378,0.0599728,-0.997564)
ren1.SetActiveCamera(camera)
renWin.Render()
# prevent the tk window from showing up then start the event loop
xform = vtk.vtkTransform()
def animate():
numSteps = 250
min_t = interpolator.GetMinimumT()
max_t = interpolator.GetMaximumT()
i = 0
while i <= numSteps:
t = float(i)*(max_t-min_t)/float(numSteps)
interpolator.InterpolateTransform(t,xform)
cubeActor.SetUserMatrix(xform.GetMatrix())
renWin.Render()
i = i + 1
interpolator.InterpolateTransform(13.2,xform)
cubeActor.SetUserMatrix(xform.GetMatrix())
renWin.Render()
#animate()
# --- end of script --
|
the-stack_0_4607 | #!/usr/bin/env python3
"""Shows the crash in the faucet log produced by given input"""
import logging
import os
import sys
from faucet import faucet
from ryu.controller import dpset
from faucet import faucet_experimental_api
import Fake
def main():
# read the given crash file and store its contents in a string
with open(sys.argv[1]) as pkt:
packet_data = pkt.read()
# start faucet
application = faucet.Faucet(dpset=dpset.DPSet(), faucet_experimental_api=faucet_experimental_api.FaucetExperimentalAPI())
application.start()
# make sure dps are running
for dp_id, valve in list(application.valves.items()):
valve.dp.running = True
# create data from read file
byte_data = None
try:
byte_data = bytearray.fromhex(packet_data)
except (ValueError, TypeError):
pass
if byte_data is not None:
# create fake packet
dp = Fake.Datapath(1)
msg = Fake.Message(datapath=dp, cookie=1524372928, port=1, data=byte_data, in_port=1)
pkt = Fake.RyuEvent(msg)
# send packet to faucet and display error produced
application.packet_in_handler(pkt)
if __name__ == "__main__":
# make sure the user specifies an AFL crash file
if len(sys.argv) == 2:
main()
else:
print('USAGE: python3 display_packet_crash.py <AFL_CRASH_FILE>')
os._exit(0)
|
the-stack_0_4609 | # Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Optional
from prometheus_client import Counter
from synapse.logging.context import make_deferred_yieldable
from synapse.util import json_decoder, json_encoder
if TYPE_CHECKING:
from synapse.server import HomeServer
set_counter = Counter(
"synapse_external_cache_set",
"Number of times we set a cache",
labelnames=["cache_name"],
)
get_counter = Counter(
"synapse_external_cache_get",
"Number of times we get a cache",
labelnames=["cache_name", "hit"],
)
logger = logging.getLogger(__name__)
class ExternalCache:
"""A cache backed by an external Redis. Does nothing if no Redis is
configured.
"""
def __init__(self, hs: "HomeServer"):
self._redis_connection = hs.get_outbound_redis_connection()
def _get_redis_key(self, cache_name: str, key: str) -> str:
return "cache_v1:%s:%s" % (cache_name, key)
def is_enabled(self) -> bool:
"""Whether the external cache is used or not.
It's safe to use the cache when this returns false, the methods will
just no-op, but the function is useful to avoid doing unnecessary work.
"""
return self._redis_connection is not None
async def set(self, cache_name: str, key: str, value: Any, expiry_ms: int) -> None:
"""Add the key/value to the named cache, with the expiry time given."""
if self._redis_connection is None:
return
set_counter.labels(cache_name).inc()
# txredisapi requires the value to be string, bytes or numbers, so we
# encode stuff in JSON.
encoded_value = json_encoder.encode(value)
logger.debug("Caching %s %s: %r", cache_name, key, encoded_value)
return await make_deferred_yieldable(
self._redis_connection.set(
self._get_redis_key(cache_name, key),
encoded_value,
pexpire=expiry_ms,
)
)
async def get(self, cache_name: str, key: str) -> Optional[Any]:
"""Look up a key/value in the named cache."""
if self._redis_connection is None:
return None
result = await make_deferred_yieldable(
self._redis_connection.get(self._get_redis_key(cache_name, key))
)
logger.debug("Got cache result %s %s: %r", cache_name, key, result)
get_counter.labels(cache_name, result is not None).inc()
if not result:
return None
# For some reason the integers get magically converted back to integers
if isinstance(result, int):
return result
return json_decoder.decode(result)
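# Usage sketch (hypothetical, not part of the original module): because both
# methods no-op when Redis is not configured, callers can use the cache
# unconditionally. `hs` is assumed to be a configured HomeServer, and the
# cache name/key/value below are placeholders.
#
#   cache = ExternalCache(hs)
#   await cache.set("example_cache", key, {"some": "value"}, expiry_ms=30 * 60 * 1000)
#   cached = await cache.get("example_cache", key)   # None on a miss or when disabled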
|
the-stack_0_4610 | import logging
from django.contrib import auth
from django.urls import reverse
from django.utils.deprecation import MiddlewareMixin # https://stackoverflow.com/questions/42232606/django
# -exception-middleware-typeerror-object-takes-no-parameters
from django.conf import settings
from ...permissions import is_authenticated
from ...utils import set_session_key
from ... import app_settings
logger = logging.getLogger('django_sso_app')
ADMIN_URL = '/{}'.format(getattr(settings, 'ADMIN_URL', 'admin/'))
PROFILE_INCOMPLETE_ENABLED_PATHS = [
reverse('javascript-catalog'),
reverse('profile.complete'),
]
USER_TO_SUBSCRIBE_ENABLED_PATHS = PROFILE_INCOMPLETE_ENABLED_PATHS
class DjangoSsoAppAuthenticationBaseMiddleware(MiddlewareMixin):
"""
See django.contrib.auth.middleware.RemoteUserMiddleware.
"""
# Name of request header to grab username from. This will be the key as
# used in the request.META dictionary, i.e. the normalization of headers to
# all uppercase and the addition of "HTTP_" prefix apply.
consumer_id_header = app_settings.APIGATEWAY_CONSUMER_CUSTOM_ID_HEADER
anonymous_consumer_custom_ids = app_settings.APIGATEWAY_ANONYMOUS_CONSUMER_IDS
anonymous_consumer_header = app_settings.APIGATEWAY_ANONYMOUS_CONSUMER_HEADER
anonymous_consumer_header_value = app_settings.APIGATEWAY_ANONYMOUS_CONSUMER_HEADER_VALUE
@staticmethod
def _clear_response_jwt(request):
set_session_key(request, '__dssoa__clear_response_jwt', True)
@staticmethod
def _remove_invalid_user(request):
"""
Removes the current authenticated user in the request which is invalid.
"""
if is_authenticated(request.user):
logger.info('removing invalid user "{}"'.format(request.user))
auth.logout(request)
@staticmethod
def _is_admin_path(request):
return request.path.startswith(ADMIN_URL)
@staticmethod
def _request_path_is_disabled_for_incomplete_users(request):
request_path = request.path
return (request_path not in PROFILE_INCOMPLETE_ENABLED_PATHS) and \
not (request_path.startswith('/static/')) and \
not (request_path.startswith('/media/')) and \
not (request_path.startswith('/logout/')) and \
not (request_path.startswith('/password/reset/')) and \
not (request_path.startswith('/confirm-email/')) and \
not (request_path.startswith('/__debug__/')) and \
not (request_path.startswith('/api/v1/')) # keep api endpoints enabled
@staticmethod
def _request_path_is_disabled_for_users_to_subscribe(request):
request_path = request.path
return (request_path not in USER_TO_SUBSCRIBE_ENABLED_PATHS) and \
not (request_path.startswith('/static/')) and \
not (request_path.startswith('/media/')) and \
not (request_path.startswith('/logout/')) and \
not (request_path.startswith('/password/reset/')) and \
not (request_path.startswith('/confirm-email/')) and \
not (request_path.startswith('/api/v1/')) # keep api endpoints enabled
def process_request(self, request):
raise NotImplementedError('process_request')
def process_response(self, request, response):
raise NotImplementedError('process_response')
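# A minimal concrete subclass sketch (hypothetical, for illustration only): the
# base class leaves process_request/process_response abstract, so a gateway
# backend would typically look roughly like the outline below. The class name
# and the lookup of the user behind `consumer_id` are assumptions, not part of
# this module.
#
#   class ApiGatewayAuthenticationMiddleware(DjangoSsoAppAuthenticationBaseMiddleware):
#       def process_request(self, request):
#           consumer_id = request.META.get(self.consumer_id_header)
#           if consumer_id is None or consumer_id in self.anonymous_consumer_custom_ids:
#               self._remove_invalid_user(request)
#               return
#           # ... resolve and authenticate the user mapped to consumer_id ...
#       def process_response(self, request, response):
#           return response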
|
the-stack_0_4612 | from ripsaw.genetics.selection import roulette
from ripsaw.genetics.crossovers import point_crossover
from ripsaw.genetics.genotype import Chromosome
from ripsaw.util.logging import Logger
import math
import time
import logging
import multiprocessing as mp
from datetime import datetime
class Optimiser:
def __init__(self, population_size, chromosome_function,
num_xovers, num_xover_points,
p_gene_mutate, p_total_mutate,
cwd, parallel_exe, exe_file_path, target_dir_path,
input_file_path, region_identifier,
output_score_func, output_file_path,
output_log_func, output_log_file,
target_score=math.inf, num_epochs=math.inf, max_time=math.inf,
population=list()):
# Object parameterisation
self.population_size = population_size
self.chromosome_function = chromosome_function
self.num_xovers = num_xovers
self.num_xover_points = num_xover_points
self.p_gene_mutate = p_gene_mutate
self.p_total_mutate = p_total_mutate
self.cwd = cwd
self.exe_file_path = exe_file_path
self.parallel_exe = parallel_exe
self.target_dir_path = target_dir_path
self.input_file_path = input_file_path
self.region_identifier = region_identifier
self.output_score_func = output_score_func
self.output_file_path = output_file_path
self.output_log_func = output_log_func
self.output_log_file = output_log_file
self.target_score = target_score
self.num_epochs = num_epochs
self.max_time = max_time
self.population = population
# Internal Fields
self.epoch_number = None
self.logger = None
self.best_score = None
self.mean_score = None
self.std_dev_score = None
self.internal_dict = {"epoch_num": 0}
@staticmethod
def evaluate(chromosome):
""" For the purposes of multiprocessing, this is a mapped function for a list of chromosomes."""
chromosome.evaluate()
return chromosome
@staticmethod
def sort_chromosome_key(chromosome):
""" Designed to put None before lowest fitness. None at the end was interfering with immortal logic on sort."""
fitness = chromosome.get_fitness()
if fitness is None:
return -math.inf
else:
return fitness
@staticmethod
def stopping_criteria_met(start_time, max_time, current_epoch, max_epochs, best_score, target_score):
""" We go through some stopping criteria. If any are met, True is returned."""
if best_score >= target_score:
return True
if time.time() - start_time > max_time:
return True
if current_epoch >= max_epochs:
return True
return False
def epoch(self, chromosomes=list()):
""" Going through the Evaluate -> Selection -> Crossover -> Mutation process once as an epoch."""
logging.debug("At start of epoch - Chromo fitness in order:" +
str([chromosome.fitness for chromosome in chromosomes]))
# 1. Generate new chromosomes for each missing
to_generate = self.population_size - len(chromosomes)
for _ in range(to_generate):
chromosomes.append(Chromosome(chromosome_function=self.chromosome_function))
# 2. Evaluate every chromosome which doesn't have a fitness.
for chromosome in chromosomes:
chromosome.setup(self.cwd, self.exe_file_path, self.target_dir_path,
self.input_file_path, self.region_identifier,
self.output_score_func, self.output_file_path,
self.output_log_func, self.output_log_file,
self.internal_dict
)
if self.parallel_exe:
with mp.Pool(max(1, mp.cpu_count() - 2)) as p:  # leave a couple of cores free, but never fewer than one worker
chromosomes = p.map(Optimiser.evaluate, chromosomes)
else:
for chromosome in chromosomes:
chromosome.evaluate()
# 3. Logging
for chromosome in chromosomes:
optimiser_log = [self.internal_dict["epoch_num"]]
optimiser_log.extend(chromosome.get_log_row())
self.logger.log_to_csv(optimiser_log)
chromosomes.sort(key=Optimiser.sort_chromosome_key)
logging.debug("Before Crossover - Chromo fitness in order:" +
str([chromosome.fitness for chromosome in chromosomes]))
scores = [chromosome.get_fitness() for chromosome in chromosomes]
self.best_score = max(scores)
self.mean_score = sum([chromosome.get_fitness() for chromosome in chromosomes]) / self.population_size
self.std_dev_score = sum([abs(self.mean_score - score) for score in scores]) / self.population_size
# 4. Crossovers
selection = roulette(population=chromosomes, num_samples=self.num_xovers)
offspring = point_crossover(chromosomes=selection, num_points=self.num_xover_points)
chromosomes = chromosomes[len(offspring):] # Cull the weakest.
chromosomes.extend(offspring)
# 5. Mutate
chromosomes.sort(key=Optimiser.sort_chromosome_key)
logging.debug("After Crossover - Chromo fitness in order:" +
str([chromosome.fitness for chromosome in chromosomes]))
for i, chromosome in enumerate(chromosomes):
if chromosome.fitness != self.best_score: # Protect the current best (the "immortal") chromosome from mutation.
chromosome.mutate(p_gene_mutate=self.p_gene_mutate,
p_total_mutate=self.p_total_mutate)
else:
logging.debug("Immortal protected, fitness:" + str(chromosomes[i].get_fitness()))
return chromosomes
def run(self):
""" In Charge of running epochs until a stopping criteria is met."""
self.best_score = -math.inf
start_time_s = time.time()
start_time_dt = datetime.now()
start_time_hhmmss = start_time_dt.strftime("%H:%M:%S")
self.logger = Logger()
print("Starting the optimiser...")
print("\tStart Time: ", start_time_hhmmss)
while Optimiser.stopping_criteria_met(start_time=start_time_s, max_time=self.max_time,
current_epoch=self.internal_dict["epoch_num"], max_epochs=self.num_epochs,
best_score=self.best_score, target_score=self.target_score) is not True:
self.population = self.epoch(chromosomes=self.population)
self.internal_dict["epoch_num"] += 1
current_time_dt = datetime.now()
print("Epoch", str(self.internal_dict["epoch_num"]), "done.")
print("\tBest score: ", self.best_score)
print("\tAverage Score: ", self.mean_score)
print("\tStandard Deviation: ", self.std_dev_score)
print("\tTime elapsed: ", current_time_dt - start_time_dt)
|
the-stack_0_4614 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.ai.luis import LuisApplication, LuisRecognizer
from botbuilder.core import Recognizer, RecognizerResult, TurnContext
class FlightBookingRecognizer(Recognizer):
def __init__(self, configuration: object):
self._recognizer = None
luis_is_configured = (
configuration.luis_app_id
and configuration.luis_api_key
and configuration.luis_api_host_name
)
if luis_is_configured:
luis_application = LuisApplication(
configuration.luis_app_id,
configuration.luis_api_key,
"https://" + configuration.luis_api_host_name,
)
self._recognizer = LuisRecognizer(luis_application)
@property
def is_configured(self) -> bool:
# Returns true if luis is configured in the config.py and initialized.
return self._recognizer is not None
async def recognize(self, turn_context: TurnContext) -> RecognizerResult:
return await self._recognizer.recognize(turn_context)
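# Usage sketch (hypothetical): `DefaultConfig` stands in for any object exposing
# the three LUIS settings checked in __init__; recognition should only be
# attempted when is_configured is True. `turn_context` comes from the bot's
# turn handler.
#
#   recognizer = FlightBookingRecognizer(DefaultConfig())
#   if recognizer.is_configured:
#       result = await recognizer.recognize(turn_context)
#       intent = LuisRecognizer.top_intent(result)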
|
the-stack_0_4616 | """
This script shows how to count all files in a specific directory.
"""
from argparse import ArgumentParser
import os
from collections import Counter
parser = ArgumentParser()
parser.add_argument('dir', type=str, help='target path')
args = parser.parse_args()
def get_extention(file_name=None):
"""
Return the file name extention, or None if the file doesn't have one.
"""
crumbs = file_name.split(".")
crumbs_num = len(crumbs)
if crumbs_num == 1:
return None
else:
return crumbs[-1]
def count_files(directory=None):
"""
Count all files under the directory and return a Counter of their extensions.
"""
file_extentions = []
none_extentions_num = 0
for _, _, files in os.walk(directory):
for file in files:
extention = get_extention(file)
if extention is None:
none_extentions_num += 1
else:
file_extentions.append(extention)
ext_counter = Counter(file_extentions)
if none_extentions_num != 0:
ext_counter.update({"None": none_extentions_num})
return ext_counter
def main():
"""
The main entry point.
"""
extention_dict = dict(count_files(args.dir))
total_count = sum(extention_dict.values())
print("Total files:", total_count)
print(extention_dict)
print("Done!")
if __name__ == '__main__':
main()
|
the-stack_0_4617 | from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from rankings.views import athlete_redirect_athlete_id_to_slug, athlete_redirect_event_id_to_slug, \
redirect_event_id_to_slug, add_result, report_duplicate, request_competition
from . import views
urlpatterns = [
url(
regex=r'^athlete/(?P<slug>[a-z0-9\-]+)/(?P<event_id>[0-9]+)$',
view=athlete_redirect_event_id_to_slug,
name='athlete-event-redirect'
),
url(
regex=r'^athlete/add-time/(?P<athlete_slug>[a-z0-9\-]+)/',
view=add_result,
name='athlete-add-time'
),
url(
regex=r'^athlete/(?P<slug>[a-z0-9\-]+)/(?P<event_name>[a-z0-9\-()]+)/(?P<pool_length>25|50)$',
view=views.EventByAthlete.as_view(),
name='athlete-event'
),
url(
regex=r'^athlete/(?P<athlete_id>[0-9]+)',
view=athlete_redirect_athlete_id_to_slug,
name='athlete-redirect'
),
url(
regex=r'^athlete/(?P<slug>[a-z0-9\-]+)',
view=views.PersonalBests.as_view(),
name='athlete-overview'
),
url(
regex=r'^events',
view=views.EventOverview.as_view(),
name='event-overview'
),
url(
regex=r'^top/(?P<event_id>[0-9]+)/(?P<gender>\bmen\b|\bwomen\b)',
view=redirect_event_id_to_slug,
name='best-by-event-redirect'
),
url(
regex=r'^top/(?P<event_name>[a-z0-9\-()]+)/(?P<gender>\bmen\b|\bwomen\b)',
view=views.BestByEvent.as_view(),
name='best-by-event'
),
url(
regex=r'^merge-athletes',
view=views.merge_athletes,
name='merge-athletes'
),
url(
regex=r'^list-empty-athletes',
view=views.DeleteEmptyAthletes.as_view(),
name='list-empty-athletes'
),
url(
regex=r'^delete-empty-athletes',
view=views.delete_empty_athletes,
name='delete-empty-athletes'
),
url(
regex=r'^search',
view=views.Search.as_view(),
name='search'
),
url(
regex=r'^competitions/request',
view=request_competition,
name='request-competition'
),
url(
regex=r'^competitions',
view=views.CompetitionListView.as_view(),
name='competition-list'
),
url(
regex=r'^competition/(?P<competition_slug>[a-z0-9\-]+)/(?P<event_name>[a-z0-9\-()]+)/(?P<gender>\bmen\b|\bwomen\b)',
view=views.CompetitionEvent.as_view(),
name='competition-event'
),
url(
regex=r'^competition/(?P<competition_slug>[a-z0-9\-]+)',
view=views.CompetitionOverview.as_view(),
name='competition-overview'
),
url(
regex=r'^report-duplicate',
view=report_duplicate
)
]
|
the-stack_0_4618 | import logging
from typing import Collection, Container, Iterable, Optional, Tuple, Type
from eth.abc import AtomicDatabaseAPI
from eth.exceptions import BlockNotFound
from eth_utils import to_dict, toolz
from eth2._utils.ssz import validate_imported_block_unchanged
from eth2.beacon.chains.abc import BaseBeaconChain
from eth2.beacon.chains.exceptions import ParentNotFoundError, SlashableBlockError
from eth2.beacon.constants import FAR_FUTURE_SLOT, GENESIS_SLOT
from eth2.beacon.db.abc import BaseBeaconChainDB
from eth2.beacon.db.chain2 import BeaconChainDB, StateNotFound
from eth2.beacon.fork_choice.abc import BaseForkChoice, BlockSink
from eth2.beacon.state_machines.forks.medalla.eth2fastspec import get_attesting_indices
from eth2.beacon.state_machines.forks.medalla.state_machine import (
MedallaStateMachineFast,
MedallaStateMachineTest,
)
from eth2.beacon.tools.misc.ssz_vector import override_lengths
from eth2.beacon.types.attestations import Attestation
from eth2.beacon.types.blocks import (
BaseBeaconBlock,
BaseSignedBeaconBlock,
BeaconBlock,
SignedBeaconBlock,
)
from eth2.beacon.types.checkpoints import Checkpoint, default_checkpoint
from eth2.beacon.types.states import BeaconState
from eth2.beacon.typing import Root, Slot, ValidatorIndex
from eth2.clock import Tick
StateMachineConfiguration = Tuple[Tuple[Slot, Type[MedallaStateMachineFast]], ...]
def _sm_configuration_has_increasing_slot(
sm_configuration: StateMachineConfiguration
) -> bool:
last_slot = GENESIS_SLOT
for (slot, _state_machine_class) in sm_configuration:
if slot < last_slot:
return False
else:
last_slot = slot
return True
def _validate_sm_configuration(sm_configuration: StateMachineConfiguration) -> None:
if not sm_configuration:
raise ValueError(
"The Chain class cannot be instantiated with an empty `sm_configuration`"
)
if not _sm_configuration_has_increasing_slot(sm_configuration):
raise ValueError(
"The Chain class requires a state machine configuration"
" with monotonically increasing slot number"
)
@to_dict
def _load_state_machines(
sm_configuration: StateMachineConfiguration
) -> Iterable[Tuple[Container[int], MedallaStateMachineFast]]:
sm_configuration += ((FAR_FUTURE_SLOT, None),)
for (first_fork, second_fork) in toolz.sliding_window(2, sm_configuration):
valid_range = range(first_fork[0], second_fork[0])
valid_sm = first_fork[1]()
yield (valid_range, valid_sm)
class ChainDBBlockSink(BlockSink):
def __init__(self, chain_db: BaseBeaconChainDB) -> None:
self._chain_db = chain_db
def on_pruned_block(self, block: BaseBeaconBlock, canonical: bool) -> None:
if canonical:
self._chain_db.mark_canonical_block(block)
class BeaconChain(BaseBeaconChain):
logger = logging.getLogger("eth2.beacon.chains.BeaconChain")
_chain_db: BaseBeaconChainDB
_sm_configuration = ((GENESIS_SLOT, MedallaStateMachineFast),)
_fork_choice: BaseForkChoice
_current_head: BeaconBlock
_justified_checkpoint: Checkpoint = default_checkpoint
_finalized_checkpoint: Checkpoint = default_checkpoint
def __init__(
self, chain_db: BaseBeaconChainDB, fork_choice: BaseForkChoice
) -> None:
self._chain_db = chain_db
_validate_sm_configuration(self._sm_configuration)
self._state_machines_by_range = _load_state_machines(self._sm_configuration)
self._fork_choice = fork_choice
self._current_head = fork_choice.find_head()
head_state = self._chain_db.get_state_by_root(
self._current_head.state_root, BeaconState
)
self._reconcile_justification_and_finality(head_state)
@classmethod
def from_genesis(
cls, base_db: AtomicDatabaseAPI, genesis_state: BeaconState
) -> "BeaconChain":
for starting_slot, state_machine_class in cls._sm_configuration:
if starting_slot == GENESIS_SLOT:
signed_block_class = state_machine_class.signed_block_class
fork_choice_class = state_machine_class.fork_choice_class
config = state_machine_class.config
# NOTE: important this happens as soon as it can...
override_lengths(config)
break
else:
raise Exception("state machine configuration missing genesis era")
assert genesis_state.slot == GENESIS_SLOT
chain_db = BeaconChainDB.from_genesis(
base_db, genesis_state, signed_block_class, config
)
block_sink = ChainDBBlockSink(chain_db)
fork_choice = fork_choice_class.from_genesis(genesis_state, config, block_sink)
return cls(chain_db, fork_choice)
@property
def db(self) -> BaseBeaconChainDB:
return self._chain_db
def _get_fork_choice(self, slot: Slot) -> BaseForkChoice:
# NOTE: ignoring slot polymorphism for now...
expected_class = self.get_state_machine(slot).fork_choice_class
if expected_class == self._fork_choice.__class__:
return self._fork_choice
else:
raise NotImplementedError(
"a fork choice different than the one implemented was requested by slot"
)
def get_state_machine(self, slot: Slot) -> MedallaStateMachineFast:
"""
Return the ``StateMachine`` instance for the given slot number.
"""
# TODO iterate over ``reversed(....items())`` once we support only >=py3.8
for (slot_range, state_machine) in self._state_machines_by_range.items():
if slot in slot_range:
return state_machine
else:
raise Exception("state machine configuration was incorrect")
def get_canonical_head(self) -> BeaconBlock:
return self._current_head
def get_canonical_head_state(self) -> BeaconState:
head = self.get_canonical_head()
return self._chain_db.get_state_by_root(head.state_root, BeaconState)
def on_tick(self, tick: Tick) -> None:
if tick.is_first_in_slot():
fork_choice = self._get_fork_choice(tick.slot)
head = fork_choice.find_head()
self._update_head_if_new(head)
def get_block_by_slot(self, slot: Slot) -> Optional[BaseSignedBeaconBlock]:
state_machine = self.get_state_machine(slot)
return self._get_block_by_slot(slot, state_machine.signed_block_class)
def _get_block_by_slot(
self, slot: Slot, block_class: Type[SignedBeaconBlock]
) -> Optional[SignedBeaconBlock]:
# check in db first, implying a finalized chain
block = self._chain_db.get_block_by_slot(slot, block_class.block_class)
if block:
signature = self._chain_db.get_block_signature_by_root(block.hash_tree_root)
return SignedBeaconBlock.create(message=block, signature=signature)
else:
# check in the canonical chain according to fork choice
# NOTE: likely want a more efficient way to determine block by slot...
for block in self._fork_choice.get_canonical_chain():
if block.slot == slot:
signature = self._chain_db.get_block_signature_by_root(
block.hash_tree_root
)
return SignedBeaconBlock.create(message=block, signature=signature)
else:
return None
def _import_block(
self, block: BaseSignedBeaconBlock, perform_validation: bool = True
) -> BaseSignedBeaconBlock:
try:
# NOTE: would need to introduce a "root to slot" look up here for polymorphism
parent_block = self._chain_db.get_block_by_root(
block.parent_root, BeaconBlock
)
except BlockNotFound:
raise ParentNotFoundError(
f"attempt to import block {block} but missing parent block"
)
# NOTE: check if block is in the canonical chain
# First, see if we have a block already at that slot...
existing_block = self._get_block_by_slot(block.slot, block.__class__)
if existing_block:
if existing_block != block:
# NOTE: we want to keep the block but avoid heavy state transition for now...
# Rationale: this block may simply be a slashable block. It could also be on
# a fork. Choose to defer the state materialization until we re-org via fork choice.
self._chain_db.persist_block(block)
raise SlashableBlockError(
block,
f"attempt to import {block} but canonical chain"
" already has a block at this slot",
)
else:
# NOTE: block already imported!
return block
else:
head = self.get_canonical_head()
extension_of_head = block.parent_root == head.hash_tree_root
if not extension_of_head:
# NOTE: this arm implies we received a block for a slot _ahead_ of our head
# on the canonical chain...
# NOTE: block validity _should_ reject a block before it gets to this layer
# but we will double-check in the event that invariant is violated or does not hold
# NOTE: we elect to keep the block in the event of a
# re-org later, but do no further processing.
self._chain_db.persist_block(block)
raise SlashableBlockError(
block,
f"attempt to import {block} but canonical chain is not as far ahead",
)
state_machine = self.get_state_machine(block.slot)
state_class = state_machine.state_class
pre_state = self._chain_db.get_state_by_root(
parent_block.state_root, state_class
)
state, imported_block = state_machine.apply_state_transition(
pre_state, block, check_proposer_signature=perform_validation
)
if perform_validation:
validate_imported_block_unchanged(imported_block, block)
# NOTE: if we have a valid block/state, then record in the database.
self._chain_db.persist_block(block)
self._chain_db.persist_state(state, state_machine.config)
self._reconcile_justification_and_finality(state)
return imported_block
def _reconcile_justification_and_finality(self, state: BeaconState) -> None:
justified_checkpoint = state.current_justified_checkpoint
finalized_checkpoint = state.finalized_checkpoint
if justified_checkpoint.epoch > self._justified_checkpoint.epoch:
self._justified_checkpoint = justified_checkpoint
self._fork_choice.update_justified(state)
if finalized_checkpoint.epoch > self._finalized_checkpoint.epoch:
self._finalized_checkpoint = finalized_checkpoint
finalized_head = self._chain_db.get_block_by_root(
self._finalized_checkpoint.root, BeaconBlock
)
self._chain_db.mark_finalized_head(finalized_head)
def _update_head_if_new(self, block: BeaconBlock) -> None:
if block != self._current_head:
self._current_head = block
self.logger.debug("new head of chain: %s", block)
def _update_fork_choice_with_block(self, block: BeaconBlock) -> None:
"""
NOTE: it is assumed that ``_import_block`` has successfully been called
before this method is run as the fork choice shares state with the underlying
chain db.
Adding a new ``block`` likely updates the head so we also call
``_update_head_if_new`` after registering the new data with the
fork choice module.
"""
fork_choice = self._get_fork_choice(block.slot)
fork_choice.on_block(block)
for attestation in block.body.attestations:
self._update_fork_choice_with_attestation(fork_choice, attestation)
head = fork_choice.find_head()
self._update_head_if_new(head)
def _update_fork_choice_with_attestation(
self, fork_choice: BaseForkChoice, attestation: Attestation
) -> None:
block_root = attestation.data.beacon_block_root
target_epoch = attestation.data.target.epoch
indices = self._get_indices_from_attestation(attestation)
fork_choice.on_attestation(block_root, target_epoch, *indices)
def _find_present_ancestor_state(
self, block_root: Root
) -> Tuple[BeaconState, Tuple[SignedBeaconBlock, ...]]:
"""
Find the first state we have persisted that is an ancestor of the block with ``block_root``.
"""
try:
block = self._chain_db.get_block_by_root(block_root, BeaconBlock)
blocks: Tuple[SignedBeaconBlock, ...] = ()
# NOTE: re: bounds here; worst case, we return the genesis state.
for slot in range(block.slot, GENESIS_SLOT - 1, -1):
try:
state_machine = self.get_state_machine(Slot(slot))
state = self._chain_db.get_state_by_root(
block.state_root, state_machine.state_class
)
return (state, blocks)
except StateNotFound:
signature = self._chain_db.get_block_signature_by_root(
block.hash_tree_root
)
blocks += (
SignedBeaconBlock.create(message=block, signature=signature),
)
block = self._chain_db.get_block_by_root(
block.parent_root, BeaconBlock
)
except BlockNotFound:
raise Exception(
"invariant violated: querying a block that has not been persisted"
)
# NOTE: `mypy` complains without this although execution should never get here...
return (None, ())
def _compute_missing_state(self, target_block: BeaconBlock) -> BeaconState:
"""
Calculate the state for the ``target_block``.
The chain persists states for canonical blocks.
In the event that we need a state that has not been persisted,
for example, if we are executing a re-org, then we will
need to compute it.
NOTE: this method will persist the new (potentially non-canonical) states.
"""
state, blocks = self._find_present_ancestor_state(target_block.parent_root)
for block in reversed(blocks):
state_machine = self.get_state_machine(block.slot)
state, _ = state_machine.apply_state_transition(state, block)
self._chain_db.persist_state(state, state_machine.config)
return state
def _get_indices_from_attestation(
self, attestation: Attestation
) -> Collection[ValidatorIndex]:
sm = self.get_state_machine(attestation.data.slot)
return get_attesting_indices(
sm._epochs_ctx, attestation.data, attestation.aggregation_bits
)
def on_block(
self, block: BaseSignedBeaconBlock, perform_validation: bool = True
) -> None:
self.logger.debug("attempting import of block %s", block)
try:
imported_block = self._import_block(block, perform_validation)
self.logger.debug("imported new block: %s", imported_block)
self._update_fork_choice_with_block(block.message)
except SlashableBlockError:
# still register a block if it is a duplicate, in event of a re-org
# other exceptions should not add the block to the fork choice
self._update_fork_choice_with_block(block.message)
raise
def on_attestation(self, attestation: Attestation) -> None:
"""
This method expects ``attestation`` to come from the wire, not one in a
(valid) block; attestations in blocks are handled in ``on_block``
"""
fork_choice = self._get_fork_choice(attestation.data.slot)
self._update_fork_choice_with_attestation(fork_choice, attestation)
class BeaconChainTest(BeaconChain):
_sm_configuration = ((GENESIS_SLOT, MedallaStateMachineTest),) # type: ignore
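# Usage sketch (hypothetical, heavy dependencies elided): the chain is built
# from a genesis state over an atomic database and then fed clock ticks plus
# blocks/attestations arriving from the network. `base_db`, `genesis_state`,
# `tick`, `signed_block` and `attestation` are assumed to be provided by the
# surrounding node.
#
#   chain = BeaconChain.from_genesis(base_db, genesis_state)
#   chain.on_tick(tick)                  # update the head at slot boundaries
#   chain.on_block(signed_block)         # import a block from the wire
#   chain.on_attestation(attestation)    # register a gossiped attestation
#   head = chain.get_canonical_head()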
|
the-stack_0_4621 | import urllib.request
import os, sys
content = ''
attempts = 0
protocol = 'http'
subdomain = 'web135/'
domain = 'j3'
'''
lvl3 = ['']
lvl4 = ['']
lvl5 = ['']
lvl6 = ['']
lvl7 = ['']
lvl8 = ['']'''
root_dir = '../inetpub/wwwroot/wwwlive'
links = ['',
'news',
'emergency-info',
'county-departments',
'contact-us',
'elected-officials',
'commissioners-and-directors',
'other-appointed-officials',
'advisory-boards',
'directions',
'subscribe',
'foilform',
'contact-us/e-mail-ce',
'emergency-info/emergency-alert-sign-up',
]
'''while attempts < 3:
try:
for link in links:
final_link = protocol +'://'+ subdomain + '.' + domain +'/'+ link
print('Downloading Webpage: ' + final_link)
response = urllib.request.urlopen( final_link , timeout = 5 )
content = str(response.read().decode('utf-8'))
f = open( link, 'w' )
print('Writing to: ' + link)
f.write( content )
f.close()
break
except urllib.request.URLError as e:
attempts += 1
print (type(e))'''
for link in links:
dir = root_dir +'/'+ link
file = dir + '/'+ 'index.html'
final_link = protocol +'://'+ subdomain + domain +'/'+ link
print('Downloading Webpage: ' + final_link)
try:
response = urllib.request.urlopen( final_link , timeout = 15 )
content = str(response.read().decode('utf-8'))
except urllib.request.URLError as e:
print (str(e))
continue  # skip writing this page if the download failed
if not os.path.isdir(dir):
os.mkdir( dir)
print ("Created " + dir + " Folder")
f = open( file , 'w' )
print('Writing to: ' + dir)
f.write( content )
f.close()
|
the-stack_0_4622 | import time
from basetestcase import BaseTestCase
from remote.remote_util import RemoteMachineShellConnection
from membase.api.rest_client import RestConnection, Bucket, RestHelper
from membase.api.exception import BucketCreationException
from membase.helper.bucket_helper import BucketOperationHelper
from couchbase_helper.documentgenerator import BlobGenerator
from testconstants import STANDARD_BUCKET_PORT
from testconstants import LINUX_COUCHBASE_BIN_PATH
from testconstants import LINUX_COUCHBASE_SAMPLE_PATH
from testconstants import WIN_COUCHBASE_BIN_PATH
from testconstants import WIN_COUCHBASE_SAMPLE_PATH_C
from testconstants import COUCHBASE_FROM_WATSON, COUCHBASE_FROM_4DOT6,\
COUCHBASE_FROM_SPOCK, COUCHBASE_FROM_VULCAN,\
COUCHBASE_FROM_CHESHIRE_CAT
from scripts.install import InstallerJob
class CreateBucketTests(BaseTestCase):
def setUp(self):
super(CreateBucketTests, self).setUp()
self._init_parameters()
def _init_parameters(self):
self.bucket_name = self.input.param("bucket_name", 'default')
self.bucket_type = self.input.param("bucket_type", 'sasl')
self.reset_node_services = self.input.param("reset_node_services", True)
self.bucket_size = self.quota
self.password = 'password'
self.server = self.master
self.rest = RestConnection(self.server)
self.node_version = self.rest.get_nodes_version()
self.total_items_travel_sample = 31569
if self.node_version[:5] in COUCHBASE_FROM_WATSON:
if self.node_version[:5] in COUCHBASE_FROM_CHESHIRE_CAT:
self.total_items_travel_sample = 63288
else:
self.total_items_travel_sample = 63182
shell = RemoteMachineShellConnection(self.master)
type = shell.extract_remote_info().distribution_type
shell.disconnect()
self.sample_path = LINUX_COUCHBASE_SAMPLE_PATH
self.bin_path = LINUX_COUCHBASE_BIN_PATH
if self.nonroot:
self.sample_path = "/home/%s%s" % (self.master.ssh_username,
LINUX_COUCHBASE_SAMPLE_PATH)
self.bin_path = "/home/%s%s" % (self.master.ssh_username,
LINUX_COUCHBASE_BIN_PATH)
if type.lower() == 'windows':
self.sample_path = WIN_COUCHBASE_SAMPLE_PATH_C
self.bin_path = WIN_COUCHBASE_BIN_PATH
elif type.lower() == "mac":
self.sample_path = MAC_COUCHBASE_SAMPLE_PATH
self.bin_path = MAC_COUCHBASE_BIN_PATH
def tearDown(self):
super(CreateBucketTests, self).tearDown()
# Bucket creation with names as mentioned in MB-5844(.delete, _replicator.couch.1, _users.couch.1)
def test_banned_bucket_name(self, password='password'):
try:
if self.bucket_type == 'sasl':
self.rest.create_bucket(self.bucket_name, ramQuotaMB=200)
elif self.bucket_type == 'standard':
self.rest.create_bucket(self.bucket_name, ramQuotaMB=200, proxyPort=STANDARD_BUCKET_PORT + 1)
elif self.bucket_type == 'memcached':
self.rest.create_bucket(self.bucket_name, ramQuotaMB=200, proxyPort=STANDARD_BUCKET_PORT + 1, bucketType='memcached')
else:
self.log.error('Bucket type not specified')
return
self.fail('created a bucket with invalid name {0}'.format(self.bucket_name))
except BucketCreationException as ex:
self.log.info(ex)
def test_win_specific_names(self):
version = self._get_cb_version()
if self._get_cb_os() != 'windows':
self.log.warning('This test is windows specific')
return
try:
self.test_banned_bucket_name()
finally:
try:
self.log.info('Will check if ns_server is running')
rest = RestConnection(self.master)
self.assertTrue(RestHelper(rest).is_ns_server_running(timeout_in_seconds=60))
except:
self._reinstall(version)
self.fail("ns_server is not running after bucket '%s' creation" %(
self.bucket_name))
# Bucket creation with names as mentioned in MB-5844(isasl.pw, ns_log)
def test_valid_bucket_name(self, password='password'):
tasks = []
shared_params = self._create_bucket_params(server=self.server, size=self.bucket_size,
replicas=self.num_replicas)
if self.bucket_type == 'sasl':
self.cluster.create_sasl_bucket(name=self.bucket_name, password=password, bucket_params=shared_params)
self.buckets.append(Bucket(name=self.bucket_name, num_replicas=self.num_replicas,
bucket_size=self.bucket_size, master_id=self.server))
elif self.bucket_type == 'standard':
self.cluster.create_standard_bucket(name=self.bucket_name, port=STANDARD_BUCKET_PORT+1,
bucket_params=shared_params)
self.buckets.append(Bucket(name=self.bucket_name, num_replicas=self.num_replicas,
bucket_size=self.bucket_size, port=STANDARD_BUCKET_PORT + 1, master_id=self.server))
elif self.bucket_type == "memcached":
tasks.append(self.cluster.async_create_memcached_bucket(name=self.bucket_name,
port=STANDARD_BUCKET_PORT+1,
bucket_params=shared_params))
self.buckets.append(Bucket(name=self.bucket_name,
num_replicas=self.num_replicas, bucket_size=self.bucket_size,
port=STANDARD_BUCKET_PORT + 1, master_id=self.server, type='memcached'))
for task in tasks:
task.result()
else:
self.log.error('Bucket type not specified')
return
self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self.bucket_name, self.rest),
msg='failed to start up bucket with name "{0}'.format(self.bucket_name))
gen_load = BlobGenerator('buckettest', 'buckettest-', self.value_size, start=0, end=self.num_items)
self._load_all_buckets(self.server, gen_load, "create", 0)
self.cluster.bucket_delete(self.server, self.bucket_name)
self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(self.bucket_name, self.rest, timeout_in_seconds=60),
msg='bucket "{0}" was not deleted even after waiting for 30 seconds'.format(self.bucket_name))
""" put param like -p log_message="Created bucket". If test need a cluster,
put nodes_init=x in param to create cluster """
def test_log_message_in_log_page(self):
if self.log_message is not None:
self._load_doc_data_all_buckets(data_op="create", batch_size=5000)
serverInfo = self.servers[0]
shell = RemoteMachineShellConnection(serverInfo)
time.sleep(5)
output, error = shell.execute_command("curl -g -v -u Administrator:password \
http://{0}:8091/logs | grep '{1}'".format(serverInfo.ip,
self.log_message))
if not output:
self.log.info("message {0} is not in log".format(self.log_message))
elif output:
raise Exception("The message %s is in log." % self.log_message)
else:
raise Exception("No thing to test. You need to put log_message=something_to_test")
def test_travel_sample_bucket(self):
sample = "travel-sample"
if self.reset_node_services:
""" reset node to set services correctly: index,kv,n1ql """
self.rest.force_eject_node()
status = False
try:
status = self.rest.init_node_services(hostname=self.master.ip,
services= ["index,kv,n1ql,fts"])
init_node = self.cluster.async_init_node(self.master,
services = ["index,kv,n1ql,fts"])
except Exception as e:
if e:
print(e)
self.sleep(10)
self.log.info("Add new user after reset node! ")
self.add_built_in_server_user(node=self.master)
if status:
if self.node_version[:5] in COUCHBASE_FROM_WATSON:
self.rest.set_indexer_storage_mode(storageMode="memory_optimized")
shell = RemoteMachineShellConnection(self.master)
shell.execute_command("""curl -g -v -u Administrator:password \
-X POST http://{0}:8091/sampleBuckets/install \
-d '["travel-sample"]'""".format(self.master.ip))
shell.disconnect()
buckets = RestConnection(self.master).get_buckets()
for bucket in buckets:
if bucket.name != "travel-sample":
self.fail("travel-sample bucket did not create")
""" check for load data into travel-sample bucket """
end_time = time.time() + 120
while time.time() < end_time:
self.sleep(10)
num_actual = self.get_item_count(self.master, "travel-sample")
if int(num_actual) == self.total_items_travel_sample:
break
self.assertTrue(int(num_actual) == self.total_items_travel_sample,
"Items number expected %s, actual %s" % (
self.total_items_travel_sample, num_actual))
""" check all indexes are completed """
index_name = []
index_count = 8
if self.cb_version[:5] in COUCHBASE_FROM_4DOT6:
index_count = 10
result = self.rest.index_tool_stats(False)
self.log.info("check if all %s indexes built." % index_count)
end_time_i = time.time() + 60
while time.time() < end_time_i and len(index_name) < index_count:
for map in result:
if result["indexes"]:
for x in result["indexes"]:
if x["bucket"] == "travel-sample":
if x["progress"] < 100:
self.sleep(7, "waiting for indexing {0} complete"
.format(x["index"]))
result = self.rest.index_tool_stats(False)
elif x["progress"] == 100:
if x["index"] not in index_name:
index_name.append(x["index"])
self.sleep(7, "waiting for other indexing complete")
result = self.rest.index_tool_stats(False)
else:
self.sleep(7, "waiting for indexing start")
result = self.rest.index_tool_stats()
if time.time() >= end_time_i and len(index_name) < index_count:
self.log.info("index list {0}".format(index_name))
self.fail("some indexing may not complete")
elif len(index_name) == index_count:
self.log.info("travel-sample bucket is created and complete indexing")
self.log.info("index list in travel-sample bucket: {0}"
.format(index_name))
else:
self.log.info("There is extra index %s" % index_name)
def test_cli_travel_sample_bucket(self):
sample = "travel-sample"
""" couchbase-cli does not have option to reset the node yet
use rest to reset node to set services correctly: index,kv,n1ql """
self.rest.force_eject_node()
shell = RemoteMachineShellConnection(self.master)
set_index_storage_type = ""
if self.node_version[:5] in COUCHBASE_FROM_WATSON:
set_index_storage_type = " --index-storage-setting=memopt "
options = ' --cluster-port=8091 \
--cluster-ramsize=1000 \
--cluster-index-ramsize=300 \
--services=data,index,query,fts %s ' % set_index_storage_type
o, e = shell.execute_couchbase_cli(cli_command="cluster-init", options=options)
if self.node_version[:5] in COUCHBASE_FROM_SPOCK:
self.assertTrue(self._check_output("SUCCESS: Cluster initialized", o),
"Failed to initialize cluster")
else:
self.assertTrue(self._check_output("SUCCESS: init/edit localhost", o),
"Failed to init/edit localhost")
self.sleep(7, "wait for services up completely")
self.log.info("Add new user after reset node! ")
self.add_built_in_server_user(node=self.master)
shell = RemoteMachineShellConnection(self.master)
cluster_flag = "-n"
bucket_quota_flag = "-s"
data_set_location_flag = " "
if self.node_version[:5] in COUCHBASE_FROM_SPOCK:
cluster_flag = "-c"
bucket_quota_flag = "-m"
data_set_location_flag = "-d"
shell.execute_command("{0}cbdocloader -u Administrator -p password \
{3} {1}:{6} -b travel-sample {4} 200 {5} {2}travel-sample.zip" \
.format(self.bin_path,
self.master.ip,
self.sample_path,
cluster_flag,
bucket_quota_flag,
data_set_location_flag,
self.master.port))
shell.disconnect()
buckets = RestConnection(self.master).get_buckets()
for bucket in buckets:
if bucket.name != "travel-sample":
self.fail("travel-sample bucket did not create")
""" check for load data into travel-sample bucket """
end_time = time.time() + 120
while time.time() < end_time:
self.sleep(10)
num_actual = self.get_item_count(self.master, "travel-sample")
if int(num_actual) == self.total_items_travel_sample:
break
self.assertTrue(int(num_actual) == self.total_items_travel_sample,
"Items number expected %s, actual %s" % (
self.total_items_travel_sample, num_actual))
self.log.info("Total items %s " % num_actual)
""" check all indexes are completed """
index_name = []
index_count = 8
if self.cb_version[:5] in COUCHBASE_FROM_4DOT6:
index_count = 10
result = self.rest.index_tool_stats(False)
""" check all indexes are completed """
self.log.info("check if all %s indexes built." % index_count)
end_time_i = time.time() + 180
while time.time() < end_time_i and len(index_name) < index_count:
if result["indexes"]:
for x in result["indexes"]:
if x["bucket"] == "travel-sample":
if x["progress"] == 100 and \
x["index"] not in index_name:
index_name.append(x["index"])
self.sleep(7, "waiting for indexing complete")
result = self.rest.index_tool_stats(False)
else:
self.sleep(2, "waiting for indexing start")
result = self.rest.index_tool_stats(False)
if time.time() >= end_time_i and len(index_name) < index_count:
self.log.info("index list {0}".format(index_name))
self.fail("some indexing may not complete")
elif len(index_name) == index_count:
self.log.info("travel-sample bucket is created and complete indexing")
self.log.info("index list in travel-sample bucket: {0}"
.format(index_name))
else:
self.log.info("There is extra index %s" % index_name)
def test_cli_bucket_maxttl_setting(self):
""" couchbase-cli does not have option to reset the node yet
use rest to reset node to set services correctly: index,kv,n1ql """
if self.node_version[:5] in COUCHBASE_FROM_VULCAN:
self.rest.force_eject_node()
shell = RemoteMachineShellConnection(self.master)
set_index_storage_type = " --index-storage-setting=memopt "
options = ' --cluster-port=8091 \
--cluster-ramsize=300 \
--cluster-index-ramsize=300 \
--services=data,index,query %s ' \
% set_index_storage_type
o, e = shell.execute_couchbase_cli(cli_command="cluster-init",
options=options)
self.assertEqual(o[0], 'SUCCESS: Cluster initialized')
self.log.info("Add new user after reset node! ")
self.add_built_in_server_user(node=self.master)
shell = RemoteMachineShellConnection(self.master)
bucket_type = self.input.param("bucket_type", "couchbase")
options = ' --bucket=default \
--bucket-type={0} \
--bucket-ramsize=200 \
--max-ttl=400 \
--wait '.format(bucket_type)
o, e = shell.execute_couchbase_cli(cli_command="bucket-create",
options=options)
self.assertEqual(o[0], 'SUCCESS: Bucket created')
self.sleep(30, "Sleep before loading doc using cbdocloader")
cluster_flag = "-c"
bucket_quota_flag = "-m"
data_set_location_flag = "-d"
shell.execute_command(
"{0}cbdocloader -u Administrator -p password "
"{3} {1} -b default {4} 100 {5} {2}travel-sample.zip"
.format(self.bin_path, self.master.ip, self.sample_path,
cluster_flag, bucket_quota_flag,
data_set_location_flag))
shell.disconnect()
buckets = RestConnection(self.master).get_buckets()
for bucket in buckets:
if bucket.name != "default":
self.fail("default bucket did not get created")
""" check for load data into travel-sample bucket """
end_time = time.time() + 120
num_actual = 0
while time.time() < end_time:
self.sleep(10)
num_actual = self.get_item_count(self.master, "default")
if int(num_actual) == self.total_items_travel_sample:
break
self.assertTrue(int(num_actual) == self.total_items_travel_sample,
"Items number expected %s, actual %s"
% (self.total_items_travel_sample, num_actual))
self.log.info("Total items %s " % num_actual)
self.sleep(400, "Waiting for docs to expire as per maxttl")
self.expire_pager([self.master])
self.sleep(20, "Wait for expiry_purger to run")
num_actual = self.get_item_count(self.master, "default")
if int(num_actual) != 0:
self.fail("Item count is not 0 after maxttl has elapsed")
else:
self.log.info("SUCCESS: Item count is 0 after maxttl has elapsed")
else:
self.log.info("This test is not designed to run in pre-vulcan(5.5.0) versions")
# Start of tests for ephemeral buckets
#
def test_ephemeral_buckets(self):
eviction_policy = self.input.param("eviction_policy", 'noEviction')
shared_params = self._create_bucket_params(server=self.server, size=100,
replicas=self.num_replicas, bucket_type='ephemeral',
eviction_policy=eviction_policy)
# just do sasl for now, pending decision on support of non-sasl buckets in 5.0
self.cluster.create_sasl_bucket(name=self.bucket_name, password=self.sasl_password, bucket_params=shared_params)
self.buckets.append(Bucket(name=self.bucket_name,
num_replicas=self.num_replicas,
bucket_size=self.bucket_size, master_id=self.server))
self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self.bucket_name, self.rest),
msg='failed to start up bucket with name "{0}'.format(self.bucket_name))
gen_load = BlobGenerator('buckettest', 'buckettest-', self.value_size, start=0, end=self.num_items)
self._load_all_buckets(self.server, gen_load, "create", 0)
self.cluster.bucket_delete(self.server, self.bucket_name)
self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(self.bucket_name, self.rest, timeout_in_seconds=60),
msg='bucket "{0}" was not deleted even after waiting for 30 seconds'.format(self.bucket_name))
def _get_cb_version(self):
rest = RestConnection(self.master)
version = rest.get_nodes_self().version
return version[:version.rfind('-')]
def _get_cb_os(self):
rest = RestConnection(self.master)
return rest.get_nodes_self().os
def _reinstall(self, version):
servs = self.servers[:self.nodes_init]
params = {}
params['num_nodes'] = len(servs)
params['product'] = 'cb'
params['version'] = version
params['vbuckets'] = [self.input.param('vbuckets', 1024)]
self.log.info("will install {0} on {1}".format(version, [s.ip for s in servs]))
InstallerJob().parallel_install(servs, params)
if params['product'] in ["couchbase", "couchbase-server", "cb"]:
success = True
for server in servs:
success &= RemoteMachineShellConnection(server).is_couchbase_installed()
if not success:
self.input.test_params["stop-on-failure"] = True
self.log.error("Couchbase wasn't recovered. All downstream tests will be skipped")
self.fail("some nodes were not install successfully!")
def _check_output(self, word_check, output):
found = False
if len(output) >= 1:
if isinstance(word_check, list):
for ele in word_check:
for x in output:
if ele.lower() in str(x.lower()):
self.log.info("Found '{0} in CLI output".format(ele))
found = True
break
elif isinstance(word_check, str):
for x in output:
if word_check.lower() in str(x.lower()):
self.log.info("Found '{0}' in CLI output".format(word_check))
found = True
break
else:
self.log.error("invalid {0}".format(word_check))
return found |
the-stack_0_4623 |
import math
import numpy as np
from numpy import sqrt
from direct_kinematic import DirectKinematic
class FitnessFunctions(object):
def __init__(self, parameters={}):
        # Per-joint energy cost coefficients for joints 1-6, used by evaluate_energy()
        self.energy_constants = [31.1, 21.1, 26.6, 8.3, 5.00, 2.6]
        # Rated joint velocities in degrees per second for joints 1-6, used by evaluate_operation_time()
        self.velocity_constants = [90, 90, 90, 120, 120, 190]
def evaluate_energy(self, array_of_joints_coordinates, verbose=False):
trajectory_len=len(array_of_joints_coordinates)
total_energy=0
all_data=[0]*trajectory_len
for coo in range(0, trajectory_len):
for joint in range(0, 6):
angle_in_rad_tmp=abs(array_of_joints_coordinates[coo][joint]-array_of_joints_coordinates[(coo-1+trajectory_len)%trajectory_len][joint])
#if verbose:
# print(tmp)
all_data[coo]=all_data[coo]+angle_in_rad_tmp*self.energy_constants[joint]
total_energy=np.sum(all_data)
if verbose:
return total_energy, all_data
return total_energy
def evaluate_operation_time(self, array_of_joints_coordinates, verbose=False):
trajectory_len=len(array_of_joints_coordinates)
total_operation_time = 0
angles_in_rad_tmp= [0] * 6
all_data=[0]*trajectory_len
for coo in range(0, trajectory_len):
angles_in_rad=[]
for joint in range(0, 6):
# rotation=|joint_b-joint_a|
angles_in_rad.append(abs(array_of_joints_coordinates[coo][joint]-array_of_joints_coordinates[(coo-1+trajectory_len)%trajectory_len][joint]))
            # note: angles are in radians while velocity_constants are in degrees/second; math.degrees() converts before dividing
all_data[coo]=math.degrees(max(angles_in_rad))/self.velocity_constants[angles_in_rad.index(max(angles_in_rad))]
total_operation_time=np.sum(all_data)
if verbose:
return total_operation_time, all_data
return total_operation_time
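    # Note on evaluate_operation_time above: each trajectory segment's duration is the
    # largest joint rotation (in degrees) divided by that joint's rated speed in
    # degrees/second; the segment durations are then summed.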
def evaluate_rotations(self, array_of_joints_coordinates, verbose=False):
trajectory_len=len(array_of_joints_coordinates)
total_rotations = 0
all_data=[0]*trajectory_len
for joint in range(0, 6):
for coo in range(0, trajectory_len):
# rotation=|joint_b-joint_a|
all_data[coo]=all_data[coo]+abs(array_of_joints_coordinates[coo][joint]-array_of_joints_coordinates[(coo-1+trajectory_len)%trajectory_len][joint])
#print(str(array_of_joints_coordinates[coo][joint])+" - "+ str(array_of_joints_coordinates[(coo-1+trajectory_len)%trajectory_len][joint])+" = "+ str(abs(array_of_joints_coordinates[coo][joint]-array_of_joints_coordinates[(coo-1+trajectory_len)%trajectory_len][joint])))
total_rotations=np.sum(all_data)
if verbose:
return total_rotations, all_data
return total_rotations
def evaluate_position_accuracy(self, array_of_joints_coordinates, points, verbose=False):
trajectory_len=len(array_of_joints_coordinates)
directKinematics = DirectKinematic()
total_accuracy = 0
all_data=[]
for i in range(0, trajectory_len):
homogenousPred = directKinematics.evaluate(array_of_joints_coordinates[i]) # This is in homogenous coordinates
#homogenousPred = directKinematics.evaluate([0.345,0.720,-0.153, 2.120,0.874,1.620]) # This is in homogenous coordinates
predictedPosition = np.array([[homogenousPred[0][0] / homogenousPred[3][0]],
[homogenousPred[1][0] / homogenousPred[3][0]],
[homogenousPred[2][0] / homogenousPred[3][0]]]) #This is cartesian coordinates
x_diff = (points[i][0] - predictedPosition[0])**2
y_diff = (points[i][1] - predictedPosition[1])**2
z_diff = (points[i][2] - predictedPosition[2])**2
differences = x_diff + y_diff + z_diff
all_data.append(sqrt(differences)[0])
total_accuracy += all_data[i]
if verbose:
return total_accuracy, all_data
return total_accuracy
if __name__ == '__main__':
import os
import pickle
import pandas as pd
trajectory_points = [[2.25, 1.1, 0.25],
[0.9, 1.5, 0.25],
[-0.85, 1.14, 2.22],
[-1.8, 1.25, 1.17],
[1.8, 1.25, 1.17],
[-1.25, -1.1, 0.25],
[-2.25, -1.48, 0.25],
[0.45, -1.14, 2.22],
[0.8, -1.25, 2.35],
[0.8, -1.25, -1.35]]
outputs = [[0.473, 0.592, -0.230, 0.130, 0.008,-0.617],
[1.026, 0.293, -0.008, 0.132, 1.155, -0.617],
[2.086, -0.014, -0.270, 2.890, 1.155, -0.617],
[2.523, 0.179, -0.270, 2.890, -0.440, -0.617],
[0.597, 0.179, -0.270, 2.890, -0.440, -0.617],
[-2.417, 0.179, 0.434, 2.887, -0.665, -0.617],
[-2.465, 0.794, -0.459, 1.342, -0.665, -0.617],
[-1.087, -0.189, -0.462, 0.324, -0.665, -0.617],
[-0.951, -0.100, -0.462, 0.130, -0.526, -0.617],
[-0.966, 1.188, 0.215, 0.130, 0.008, -0.617]]
fitnessFunctions = FitnessFunctions()
total_accuracy, accuracies = fitnessFunctions.evaluate_position_accuracy(outputs, trajectory_points, True)
print("accuracy____" + str(total_accuracy))
print(accuracies)
fitnesses={"total":[],"rotation_A":[],"energy_E":[],"operation_T":[],"accuracy":[]}
data={}
verbose=True
data["outputs"]=outputs
total_rotation, data["rotation_A"] = fitnessFunctions.evaluate_rotations(outputs, verbose)
total_energy, data["energy_E"] = fitnessFunctions.evaluate_energy(outputs, verbose)
total_operation_time, data["operation_T"] = fitnessFunctions.evaluate_operation_time(outputs, verbose)
total_accuracy, data["accuracy"] = fitnessFunctions.evaluate_position_accuracy(outputs, trajectory_points, verbose)
data["total_rotation_A"]=total_rotation
data["total_energy_E"]=total_energy
data["total_operation_time"]=total_operation_time
data["total_accuracy"]=total_accuracy
fitness = -(total_accuracy)#ACCURACY OPTIMAL
#fitness = -(total_accuracy+20/200*total_operation_time)#TIME OPTIMAL
#fitness = -(total_accuracy+20*total_energy)#ENERGY OPTIMAL
#fitness = -(total_accuracy+20*total_rotation)#MINIMUM ROTATION
#fitness = -(total_accuracy+5*total_energy+10*total_operation_time+5*total_rotation)#COMBINED CONTROL
fitnesses["total"].append(-fitness)
fitnesses["rotation_A"].append(total_rotation)
fitnesses["energy_E"].append(total_energy)
fitnesses["operation_T"].append(total_operation_time)
fitnesses["accuracy"].append(total_accuracy)
if verbose:
data["fitness"]=total_accuracy
df = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in data.items() ]))
df.to_csv('tableResults_TEST_PAPER.csv', index=False)
|
the-stack_0_4625 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Pivot helper functions ."""
from collections import abc
from functools import wraps
from typing import Any, Callable, Dict, Optional, Union
import attr
import pandas as pd
from .._version import VERSION
from . import entities
__version__ = VERSION
__author__ = "Ian Hellen"
_DF_SRC_COL_PARAM_NAMES = [
"column",
"input_column",
"input_col",
"src_column",
"src_col",
]
@attr.s(auto_attribs=True)
class PivotRegistration:
"""
Pivot registration for function.
Notes
-----
src_module : str
The src_module to import
src_class : str, optional
class to import and instantiate that contains the function/method
(not needed if the target function is a pure Python function)
src_func_name: Callable
The function to wrap.
func_new_name: str, optional
Rename the function to this, defaults to `src_func_name`
input_type : str
The input data type that the function is expecting.
One of 'dataframe', 'iterable', 'value'
can_iterate: bool, optional
True if the function supports being called multiple times
(for iterable input). Default is True
entity_map: Dict[str, str]
dict of entities supported (keys) and attribute to use from
entity as input to the function
func_df_param_name: str
The name of the parameter that `func` takes the input value
e.g. func(ip=my_address) => 'ip' == func_df_col_param_name.
In the case of a DataFrame, this is usually 'data'
func_df_col_param_name: str
The name that the target function uses to identify the
column to use for input in the input DataFrame.
func_out_column_name: str, optional
The name of the column in the output DF to use as a key to join
to the input. If None, use `func_df_col_param_name`
func_static_params: Optional[Dict[str, Any]]
static parameters (kwargs) that are always passed
to the target function
func_input_value_arg: Optional[str]
The name of kwarg passed to the function that contain
the input value. If function supports DF input, `func_df_col_param_name`
will be used and this is not needed.
src_config_path : Optional[str]
The source path that the configuration was read from, default None.
src_config_entry : Optional[str]
The entry name in the configuration file, default None.
entity_container_name : Optional[str]
The name of the container in the entity that will hold
this pivot function.
return_raw_output : bool
Return raw output from the wrapped function, do not
try to format into a DataFrame. Default is False.
"""
input_type: str
entity_map: Dict[str, str]
func_df_param_name: Optional[str] = None
func_out_column_name: Optional[str] = None
func_df_col_param_name: Optional[str] = None
func_new_name: Optional[str] = None
src_module: Optional[str] = None
src_class: Optional[str] = None
src_func_name: Optional[str] = None
can_iterate: bool = True
func_static_params: Optional[Dict[str, Any]] = None
func_input_value_arg: Optional[str] = None
src_config_path: Optional[str] = None
src_config_entry: Optional[str] = None
entity_container_name: Optional[str] = None
return_raw_output: bool = False
def attr_for_entity(self, entity: Union[entities.Entity, str]) -> Optional[str]:
"""
Return the attribute to use for the specified entity.
Parameters
----------
entity : Union[entities.Entity, str]
Entity instance or name
Returns
-------
Optional[str]
Attribute name to use.
"""
if isinstance(entity, entities.Entity):
ent_name = entity.__class__.__name__
else:
ent_name = entity
return self.entity_map.get(ent_name)
def create_pivot_func(
target_func: Callable[[Any], Any],
pivot_reg: PivotRegistration,
) -> Callable[..., pd.DataFrame]:
"""
Create function wrapper for pivot function.
Parameters
----------
target_func: Callable
The target function to wrap.
pivot_reg : PivotRegistration
The pivot function registration object.
Returns
-------
Callable[[Any], pd.DataFrame]
The original `target_func` wrapped in pre-processing
and post-processing code.
"""
@wraps(target_func)
def pivot_lookup(*args, **kwargs) -> pd.DataFrame:
"""
Lookup Pivot function from Entity or parameter values.
Parameters
----------
data: Union[str, List[str], pd.DataFrame]
Not used if querying the entity value itself
Returns
-------
pd.DataFrame
DataFrame of Pivot function results.
"""
# remove and save the join kw, if specified (so it doesn't interfere
# with other operations and doesn't get sent to the function)
join_type = kwargs.pop("join", None)
input_value = _get_input_value(*args, pivot_reg=pivot_reg, parent_kwargs=kwargs)
_check_valid_settings_for_input(input_value, pivot_reg)
# If the input_value is not a DF convert it into one and return the DF,
# the column with the input value(s) plus the param dict that we're going
# to send to the function. This is going to look like:
# {"data": input_df, "src_column": input_column}
input_df, input_column, param_dict = _create_input_df(
input_value, pivot_reg, parent_kwargs=kwargs
)
# Add any static parameters for the function to our params dict
param_dict.update(pivot_reg.func_static_params or {})
# Call the target function and collect the results
if pivot_reg.input_type == "value":
if not pivot_reg.can_iterate and len(input_df) > 1:
raise TypeError(
"The function does not support multiple input values.",
"Try again with a single row/value as input.",
"E.g. func(data=df.iloc[N], column=...)",
)
result_df = _iterate_func(
target_func, input_df, input_column, pivot_reg, **kwargs
)
else:
result_df = target_func(**param_dict, **kwargs) # type: ignore
merge_key = pivot_reg.func_out_column_name or input_column
# If requested to join to input
# and this function is returning a DataFrame
if join_type and not pivot_reg.return_raw_output:
return input_df.merge(
result_df,
left_on=input_column,
right_on=merge_key,
how=join_type,
)
return result_df
return pivot_lookup
def _get_entity_attr_or_self(obj, attrib):
"""Return entity attribute or obj if not an entity."""
if isinstance(obj, entities.Entity):
return getattr(obj, attrib)
return obj
def _get_input_value(
*args, pivot_reg: PivotRegistration, parent_kwargs: Dict[str, Any]
) -> Any:
"""Extract input value from args or kwargs."""
if args:
input_value = args[0]
else:
# Search possible input arg names
poss_args = [
arg
for arg in [
pivot_reg.func_df_param_name,
pivot_reg.func_input_value_arg,
"value",
"data",
"input",
]
if arg
]
for arg_name in poss_args:
input_value = parent_kwargs.pop(arg_name, None)
if input_value is not None:
break
else:
raise AttributeError(
"Required keyword argument not found.",
f"One of {', '.join(poss_args)} required.",
)
if isinstance(input_value, entities.Entity):
src_entity_attrib = pivot_reg.attr_for_entity(input_value)
input_value = _get_entity_attr_or_self(input_value, src_entity_attrib)
return input_value
def _check_valid_settings_for_input(input_value: Any, pivot_reg: PivotRegistration):
"""Check input against settings in `pivot_reg`."""
# Must have one of these specified
if not (pivot_reg.func_df_col_param_name or pivot_reg.func_input_value_arg):
raise ValueError(
"A value for one of 'func_df_col_param_name' ",
"or 'func_input_value_arg' must be given",
)
# If the function accepts only value type and cannot iterate. Make sure
# that the input_value is a simple value
if pivot_reg.input_type == "value":
if not pivot_reg.func_input_value_arg:
raise ValueError("No value for pivot func input argument was given")
if not pivot_reg.can_iterate and (
isinstance(input_value, pd.DataFrame)
or (
# pylint: disable=isinstance-second-argument-not-valid-type
                isinstance(input_value, abc.Iterable)
and not isinstance(input_value, str)
# pylint: enable=isinstance-second-argument-not-valid-type
)
):
raise ValueError(
f"This function does not accept inputs of {type(input_value)}"
)
def _arg_to_dframe(arg_val, col_name: str = "param_value"):
"""
Convert a scalar or Iterable value to a DataFrame.
Parameters
----------
arg_val: Any
The value to be converted
col_name: Optional[str]
The name to assign to the DataFrame column
Returns
-------
pd.DataFrame
The resulting DataFrame
Notes
-----
If `arg_val` is already a DataFrame it is returned as is.
"""
if isinstance(arg_val, pd.DataFrame):
return arg_val
if isinstance(arg_val, str) or not isinstance(arg_val, abc.Iterable):
return pd.DataFrame([arg_val], columns=[col_name])
return pd.DataFrame(arg_val, columns=[col_name])
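# For example (illustrative only): _arg_to_dframe("10.0.0.1", "ip") yields a one-row
# DataFrame with a single "ip" column, while an existing DataFrame is returned unchanged.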
def _create_input_df(input_value, pivot_reg, parent_kwargs):
"""Create input_df and params from input."""
# If input_value type is not already a dataframe, convert it.
# If the DF column param is specified, use that or fall back
# to using the function input value arg.
input_column = pivot_reg.func_df_col_param_name or pivot_reg.func_input_value_arg
# If input_value is already a DF, this call just returns the original DF
input_df = _arg_to_dframe(input_value, input_column) # type: ignore
if isinstance(input_value, pd.DataFrame):
# If the original input_value is a DataFrame
# try to find the column name specification in kwargs
for col_param in (
pivot_reg.func_df_col_param_name,
pivot_reg.func_input_value_arg,
*_DF_SRC_COL_PARAM_NAMES,
):
if col_param in parent_kwargs and parent_kwargs[col_param] in input_df:
input_column = parent_kwargs.pop(col_param)
break
else:
raise KeyError(
f"'{input_column}' is not in the input dataframe",
"Please specify the column when calling the function."
"You can use one of the parameter names for this:",
_DF_SRC_COL_PARAM_NAMES,
)
# we want to get rid of data=xyz parameters from kwargs, since we're adding them
# below
parent_kwargs.pop("data", None)
parent_kwargs.pop(pivot_reg.func_df_param_name, None)
if input_column not in input_df:
raise KeyError(f"'{input_column}' is not in the input dataframe")
if input_column:
param_dict = {
pivot_reg.func_df_param_name: input_df,
pivot_reg.func_df_col_param_name: input_column,
}
else:
# If no column was specified, the user will have to specify
# this in the call to the method - we just add the DF parameter
param_dict = {pivot_reg.func_df_param_name: input_df}
return input_df, input_column, param_dict
def _iterate_func(target_func, input_df, input_column, pivot_reg, **kwargs):
"""Call `target_func` function with values of each row in `input_df`."""
results = []
# Add any static parameters to all_rows_kwargs
all_rows_kwargs = kwargs.copy()
all_rows_kwargs.update((pivot_reg.func_static_params or {}))
res_key_col_name = pivot_reg.func_out_column_name or pivot_reg.func_input_value_arg
for row in input_df[[input_column]].itertuples(index=False):
# Get rid of any conflicting arguments from kwargs
func_kwargs = all_rows_kwargs.copy()
func_kwargs.pop(pivot_reg.func_input_value_arg, None)
# Create a param dictionary with the value parameter for this row
param_dict = {pivot_reg.func_input_value_arg: row[0]}
# run the function
        result = target_func(**param_dict, **func_kwargs)
# Process the output, if it is a DataFrame
if not pivot_reg.return_raw_output and not isinstance(result, pd.DataFrame):
col_value = next(iter(row._asdict().values()))
if isinstance(result, dict):
# if result is a dict - make that into a row.
result = pd.DataFrame(pd.Series(result)).T
result[res_key_col_name] = col_value
else:
# just make the result into a string and use that as a single col
result = pd.DataFrame(
[[col_value, str(result)]], columns=[res_key_col_name, "result"]
)
results.append(result)
if pivot_reg.return_raw_output:
if len(results) == 1:
return results[0]
return results
return pd.concat(results, ignore_index=True)
# _PARENT_SELF = "parent_self"
# def query_cont_member_wrap(func: Callable[[Any], Any]) -> Callable[[Any], Any]:
# """
# Wrap a func to work as instance method in a QueryContainer.
# Parameters
# ----------
# func : Callable[[Any], Any]
# Function to wrap as method
# Returns
# -------
# Callable[[Any], Any]
# Wrapped function
# Notes
# -----
# This is designed to be used inside a `QueryContainer`. The wrapped
# function checks to see if its arg[0] is a QueryContainer - meaning
# it has been called as an instance function of that class.
# If so, and the parent class has a _parent_self attribute, it will
# replace the original arg[0] (the self of QueryContainer) with
# the self of the containing class (_parent_self).
# It relies containing class setting `_parent_self` as an attribute
# in any QueryContainer attributes that it has. The msticpy Entity
# class does this.
# If these conditions don't apply it simply passed through the call
# to the original function.
# See Also
# --------
# QueryContainer
# Entity
# """
# @wraps(func)
# def _wrapped_member(*args, **kwargs):
# if (
# args
# and args[0].__class__.__name__ == "QueryContainer"
# and hasattr(args[0], _PARENT_SELF)
# ):
# parent_self = getattr(args[0], _PARENT_SELF)
# return func(parent_self, *args[1:], **kwargs)
# return func(*args, **kwargs)
# return _wrapped_member
|
the-stack_0_4626 | from scipy.spatial import distance as dist
from imutils import face_utils
import threading
import cv2
import imutils
EYE_AR_THRESH = 0.2
EYE_AR_CONSEC_FRAMES = 48
class FatigueBackgroundWorker:
def __init__(self, vs, predictor, detector):
self.thread = threading.Thread(target=self.run, args=())
self.thread.daemon = True
self.vs = vs
self.predictor = predictor
self.detector = detector
self.drowsinessDetected = False
def start(self):
self.thread.start()
def get_result(self):
return self.drowsinessDetected
def calculate_landmarks(self):
# Calculate face landmarks
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
return lStart, lEnd, rStart, rEnd
def eye_aspect_ratio(self, eye):
# Calculate euclidean distance between the two sets of vertical eye landmarks (x y coordinates)
A = dist.euclidean(eye[1], eye[5])
B = dist.euclidean(eye[2], eye[4])
# Calculate euclidean distance between horizontal eye landmarks
C = dist.euclidean(eye[0], eye[3])
# Calculate and return eye aspect ratio (ear)
return (A + B) / (2.0 * C)
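    # Note on eye_aspect_ratio above: for an open eye the ratio typically sits around
    # 0.25-0.3 and drops sharply toward zero as the eye closes, which is why
    # EYE_AR_THRESH is set just below that range at 0.2.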
def calculate_ear(self, shape, lStart, lEnd, rStart, rEnd):
# Detect eye positions for ear calculation
leftEye = shape[lStart:lEnd]
rightEye = shape[rStart:rEnd]
leftEAR = self.eye_aspect_ratio(leftEye)
rightEAR = self.eye_aspect_ratio(rightEye)
# Calculate average EAR for both eyes
return (leftEAR + rightEAR) / 2
def run(self):
(lStart, lEnd, rStart, rEnd) = self.calculate_landmarks()
counter = 0
while True:
frame = self.vs.read()
frame = imutils.resize(frame, width=450)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = self.detector(gray, 0)
for face in faces:
shape = self.predictor(gray, face)
shape = face_utils.shape_to_np(shape)
ear = self.calculate_ear(shape, lStart, lEnd, rStart, rEnd)
                # Check if EAR is lower than threshold value
if ear < EYE_AR_THRESH:
counter += 1
# If eyes were closed for a sufficient number of frames, drowsiness is detected
if counter >= EYE_AR_CONSEC_FRAMES:
self.drowsinessDetected = True
                else:
                    # Eyes reopened: reset the consecutive-frame counter and clear the flag
                    counter = 0
                    self.drowsinessDetected = False
|
the-stack_0_4627 | """Some commonly used functions, like helpers"""
import lvgl as lv
import qrcode
import math
from micropython import const
import gc
from .components import QRCode, styles
PADDING = const(20)
BTN_HEIGHT = const(70)
HOR_RES = const(480)
VER_RES = const(800)
QR_PADDING = const(40)
def init_styles(dark=True):
if dark:
# Set theme
th = lv.theme_night_init(210, lv.font_roboto_22)
# adjusting theme
# background color
cbg = lv.color_hex(0x192432)
# ctxt = lv.color_hex(0x7f8fa4)
ctxt = lv.color_hex(0xffffff)
cbtnrel = lv.color_hex(0x506072)
cbtnpr = lv.color_hex(0x405062)
chl = lv.color_hex(0x313E50)
else:
# Set theme to light
# TODO: work in progress...
th = lv.theme_material_init(210, lv.font_roboto_22)
# adjusting theme
# background color
cbg = lv.color_hex(0xeeeeee)
# ctxt = lv.color_hex(0x7f8fa4)
ctxt = lv.color_hex(0)
cbtnrel = lv.color_hex(0x506072)
cbtnpr = lv.color_hex(0x405062)
chl = lv.color_hex(0x313E50)
th.style.label.sec.text.color = cbtnrel
th.style.scr.body.main_color = cbg
th.style.scr.body.grad_color = cbg
# text color
th.style.scr.text.color = ctxt
# buttons
# btn released
th.style.btn.rel.body.main_color = cbtnrel
th.style.btn.rel.body.grad_color = cbtnrel
th.style.btn.rel.body.shadow.width = 0
th.style.btn.rel.body.border.width = 0
th.style.btn.rel.body.radius = 10
# btn pressed
lv.style_copy(th.style.btn.pr, th.style.btn.rel)
th.style.btn.pr.body.main_color = cbtnpr
th.style.btn.pr.body.grad_color = cbtnpr
# button map released
th.style.btnm.btn.rel.body.main_color = cbg
th.style.btnm.btn.rel.body.grad_color = cbg
th.style.btnm.btn.rel.body.radius = 0
th.style.btnm.btn.rel.body.border.width = 0
th.style.btnm.btn.rel.body.shadow.width = 0
th.style.btnm.btn.rel.text.color = ctxt
# button map pressed
lv.style_copy(th.style.btnm.btn.pr, th.style.btnm.btn.rel)
th.style.btnm.btn.pr.body.main_color = chl
th.style.btnm.btn.pr.body.grad_color = chl
# button map inactive
lv.style_copy(th.style.btnm.btn.ina, th.style.btnm.btn.rel)
th.style.btnm.btn.ina.text.opa = 80
# button map background
th.style.btnm.bg.body.opa = 0
th.style.btnm.bg.body.border.width = 0
th.style.btnm.bg.body.shadow.width = 0
# textarea
th.style.ta.oneline.body.opa = 0
th.style.ta.oneline.body.border.width = 0
th.style.ta.oneline.text.font = lv.font_roboto_28
th.style.ta.oneline.text.color = ctxt
# slider
th.style.slider.knob.body.main_color = cbtnrel
th.style.slider.knob.body.grad_color = cbtnrel
th.style.slider.knob.body.radius = 5
th.style.slider.knob.body.border.width = 0
# page
th.style.page.bg.body.opa = 0
th.style.page.scrl.body.opa = 0
th.style.page.bg.body.border.width = 0
th.style.page.bg.body.padding.left = 0
th.style.page.bg.body.padding.right = 0
th.style.page.bg.body.padding.top = 0
th.style.page.bg.body.padding.bottom = 0
th.style.page.scrl.body.border.width = 0
th.style.page.scrl.body.padding.left = 0
th.style.page.scrl.body.padding.right = 0
th.style.page.scrl.body.padding.top = 0
th.style.page.scrl.body.padding.bottom = 0
lv.theme_set_current(th)
styles["theme"] = th
# Title style - just a default style with larger font
styles["title"] = lv.style_t()
lv.style_copy(styles["title"], th.style.label.prim)
styles["title"].text.font = lv.font_roboto_28
styles["title"].text.color = ctxt
styles["hint"] = lv.style_t()
lv.style_copy(styles["hint"], th.style.label.sec)
styles["hint"].text.font = lv.font_roboto_16
def add_label(text, y=PADDING, scr=None, style=None, width=None):
"""Helper functions that creates a title-styled label"""
if width is None:
width = HOR_RES-2*PADDING
if scr is None:
scr = lv.scr_act()
lbl = lv.label(scr)
lbl.set_text(text)
if style in styles:
lbl.set_style(0, styles[style])
lbl.set_long_mode(lv.label.LONG.BREAK)
lbl.set_width(width)
lbl.set_x((HOR_RES-width)//2)
lbl.set_align(lv.label.ALIGN.CENTER)
lbl.set_y(y)
return lbl
def add_button(text=None, callback=None, scr=None, y=700):
"""Helper function that creates a button with a text label"""
if scr is None:
scr = lv.scr_act()
btn = lv.btn(scr)
btn.set_width(HOR_RES-2*PADDING)
btn.set_height(BTN_HEIGHT)
if text is not None:
lbl = lv.label(btn)
lbl.set_text(text)
lbl.set_align(lv.label.ALIGN.CENTER)
btn.align(scr, lv.ALIGN.IN_TOP_MID, 0, 0)
btn.set_y(y)
if callback is not None:
btn.set_event_cb(callback)
return btn
def add_button_pair(text1, callback1, text2, callback2, scr=None, y=700):
"""Helper function that creates a button with a text label"""
btn1 = add_button(text1, callback1, scr=scr, y=y)
btn2 = add_button(text2, callback2, scr=scr, y=y)
align_button_pair(btn1, btn2)
return btn1, btn2
def align_button_pair(btn1, btn2):
"""Aligns two buttons in a row"""
w = (HOR_RES-3*PADDING)//2
btn1.set_width(w)
btn2.set_width(w)
btn2.set_x(HOR_RES//2+PADDING//2)
def add_qrcode(text, y=QR_PADDING, scr=None, style=None, width=None):
"""Helper functions that creates a title-styled label"""
if scr is None:
scr = lv.scr_act()
if width is None:
width = 350
qr = QRCode(scr)
qr.set_text(text)
qr.set_size(width)
qr.set_text(text)
qr.align(scr, lv.ALIGN.IN_TOP_MID, 0, y)
return qr
def separate(addr, letters=6, separator=" "):
extra = ""
if len(addr) % letters > 0:
extra = " "*(letters-(len(addr) % letters))
return separator.join([
addr[i:i+letters]
for i in range(0, len(addr), letters)
])+extra
def format_addr(addr, letters=6, words=3):
return separate(
separate(
addr, letters=letters, separator=" "
),
letters=(words*(letters+1)), separator="\n"
)
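# For example (illustrative only): separate("abcdefgh", letters=3) returns "abc def gh "
# (padded so every group has the same width), and format_addr() additionally wraps the
# groups onto lines of `words` groups each for display.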
|
the-stack_0_4628 | from functools import lru_cache, singledispatch
from typing import Any, Callable, List, Tuple, Union
import attr
@attr.s
class _DispatchNotFound:
"""A dummy object to help signify a dispatch not found."""
pass
class MultiStrategyDispatch:
"""
MultiStrategyDispatch uses a combination of exact-match dispatch,
singledispatch, and FunctionDispatch.
"""
__slots__ = (
"_direct_dispatch",
"_function_dispatch",
"_single_dispatch",
"_generators",
"dispatch",
)
def __init__(self, fallback_func):
self._direct_dispatch = {}
self._function_dispatch = FunctionDispatch()
self._function_dispatch.register(lambda _: True, fallback_func)
self._single_dispatch = singledispatch(_DispatchNotFound)
self.dispatch = lru_cache(maxsize=None)(self._dispatch)
def _dispatch(self, cl):
try:
dispatch = self._single_dispatch.dispatch(cl)
if dispatch is not _DispatchNotFound:
return dispatch
except Exception:
pass
direct_dispatch = self._direct_dispatch.get(cl)
if direct_dispatch is not None:
return direct_dispatch
return self._function_dispatch.dispatch(cl)
def register_cls_list(self, cls_and_handler, direct: bool = False):
"""register a class to direct or singledispatch"""
for cls, handler in cls_and_handler:
if direct:
self._direct_dispatch[cls] = handler
else:
self._single_dispatch.register(cls, handler)
self.clear_direct()
self.dispatch.cache_clear()
def register_func_list(
self,
func_and_handler: List[
Union[
Tuple[Callable[[Any], bool], Any],
Tuple[Callable[[Any], bool], Any, bool],
]
],
):
"""register a function to determine if the handle
should be used for the type
"""
for tup in func_and_handler:
if len(tup) == 2:
func, handler = tup
self._function_dispatch.register(func, handler)
else:
func, handler, is_gen = tup
self._function_dispatch.register(
func, handler, is_generator=is_gen
)
self.clear_direct()
self.dispatch.cache_clear()
def clear_direct(self):
"""Clear the direct dispatch."""
self._direct_dispatch.clear()
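# Rough usage sketch (illustration only; the handler lambdas below are placeholders,
# not part of this module):
#
#   dispatcher = MultiStrategyDispatch(fallback_func=lambda cls: None)
#   dispatcher.register_cls_list([(int, lambda v: "int handler")])
#   dispatcher.register_func_list([(lambda cls: cls is float, lambda v: "float handler")])
#   dispatcher.dispatch(int)    # -> the int handler (via singledispatch)
#   dispatcher.dispatch(float)  # -> the float handler (via the predicate function)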
class FunctionDispatch:
"""
FunctionDispatch is similar to functools.singledispatch, but
instead dispatches based on functions that take the type of the
first argument in the method, and return True or False.
objects that help determine dispatch should be instantiated objects.
"""
__slots__ = ("_handler_pairs",)
def __init__(self):
self._handler_pairs = []
def register(
self, can_handle: Callable[[Any], bool], func, is_generator=False
):
self._handler_pairs.insert(0, (can_handle, func, is_generator))
def dispatch(self, typ):
"""
returns the appropriate handler, for the object passed.
"""
for can_handle, handler, is_generator in self._handler_pairs:
# can handle could raise an exception here
# such as issubclass being called on an instance.
# it's easier to just ignore that case.
try:
ch = can_handle(typ)
except Exception:
continue
if ch:
if is_generator:
return handler(typ)
else:
return handler
raise KeyError("unable to find handler for {0}".format(typ))
|
the-stack_0_4629 | #!/usr/bin/env python3
# Quantopian, Inc. licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Try to identify a running sshd process in two ways: (1) look for a process
# with the name "sshd" or "in.sshd"; (2) look for a process listening on port
# 22. If neither of those is found, then assume that sshd is stopped.
#
# Separately, check the sshd configuration for password-based logins by
# invoking sshd with the "-T" argument to get its effective configuration. If
# we were able to identify a running sshd process as described above, then use
# that process to determine the path to the sshd binary and/or the non-default
# configuration file name. Otherwise, just do "sshd -T", hoping that it'll be
# in our search path.
import json
import psutil
import re
import subprocess
sshd_name_re = re.compile(r'\bsshd\b|\bin\.sshd\b')
results = {}
try:
sshd_process = None
for p in psutil.process_iter():
try:
if sshd_name_re.search(p.exe()) or \
any(c for c in p.connections('tcp')
if c.laddr[1] == 22 and not len(c.raddr)):
sshd_process = p.as_dict(attrs=('exe', 'cmdline'))
break
except (FileNotFoundError, psutil.NoSuchProcess):
continue
else:
raise StopIteration()
except StopIteration:
sshd_process = None
results['status'] = 'stopped'
sshd_config_command = ['sshd', '-T']
else:
results['status'] = 'running'
sshd_config_command = [sshd_process['exe'], '-T']
sshd_cmdline = sshd_process['cmdline']
try:
sshd_config_file = sshd_cmdline[sshd_cmdline.index('-f') + 1]
sshd_config_command.extend(['-f', sshd_config_file])
except:
pass
try:
sshd_config = subprocess.check_output(
sshd_config_command, stderr=open('/dev/null', 'w')).decode('utf8')
except FileNotFoundError:
if not sshd_process:
results['status'] = 'missing'
sshd_config = ''
except:
sshd_config = ''
results['config'] = {}
if sshd_config:
for config in sshd_config.strip().split('\n'):
key, value = config.split(' ', 1)
results['config'][key] = value
print(json.dumps(results))
|
the-stack_0_4630 | #!/usr/bin/env python3
"""Mininet tests for FAUCET."""
# pylint: disable=too-many-lines
# pylint: disable=missing-docstring
# pylint: disable=too-many-arguments
# pylint: disable=unbalanced-tuple-unpacking
import binascii
import collections
import copy
import itertools
import ipaddress
import json
import os
import random
import re
import shutil
import socket
import threading
import time
import unittest
from http.server import SimpleHTTPRequestHandler
from http.server import HTTPServer
import scapy.all
import yaml # pytype: disable=pyi-error
from mininet.log import error
from mininet.util import pmonitor
from clib import mininet_test_base
from clib import mininet_test_util
from clib import mininet_test_topo
from clib.mininet_test_base import PEER_BGP_AS, IPV4_ETH, IPV6_ETH
MIN_MBPS = 100
CONFIG_BOILER_UNTAGGED = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
CONFIG_TAGGED_BOILER = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
class QuietHTTPServer(HTTPServer):
allow_reuse_address = True
timeout = None
@staticmethod
def handle_error(_request, _client_address):
return
class PostHandler(SimpleHTTPRequestHandler):
@staticmethod
def log_message(_format, *_args):
return
def _log_post(self):
content_len = int(self.headers.get('content-length', 0))
content = self.rfile.read(content_len).decode().strip()
if content and hasattr(self.server, 'influx_log'):
with open(self.server.influx_log, 'a') as influx_log:
influx_log.write(content + '\n')
class InfluxPostHandler(PostHandler):
def do_POST(self): # pylint: disable=invalid-name
self._log_post()
return self.send_response(204)
class SlowInfluxPostHandler(PostHandler):
def do_POST(self): # pylint: disable=invalid-name
self._log_post()
time.sleep(self.server.timeout * 3)
return self.send_response(500)
class FaucetTest(mininet_test_base.FaucetTestBase):
pass
class FaucetUntaggedTest(FaucetTest):
"""Basic untagged VLAN test."""
HOST_NAMESPACE = {}
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
EVENT_SOCK_HEARTBEAT = '5'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
# pylint: disable=invalid-name
CONFIG = CONFIG_BOILER_UNTAGGED
def setUp(self): # pylint: disable=invalid-name
super(FaucetUntaggedTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid,
host_namespace=self.HOST_NAMESPACE)
self.start_net()
def verify_events_log(self, event_log, timeout=10):
required_events = {'CONFIG_CHANGE', 'PORT_CHANGE', 'L2_LEARN', 'PORTS_STATUS', 'EVENT_SOCK_HEARTBEAT'}
for _ in range(timeout):
prom_event_id = self.scrape_prometheus_var('faucet_event_id', dpid=False)
event_id = None
with open(event_log, 'r') as event_log_file:
for event_log_line in event_log_file.readlines():
event = json.loads(event_log_line.strip())
event_id = event['event_id']
required_events -= set(event.keys())
if prom_event_id == event_id:
return
time.sleep(1)
self.assertEqual(prom_event_id, event_id)
self.assertFalse(required_events)
def test_untagged(self):
"""All hosts on the same untagged VLAN should have connectivity."""
self._enable_event_log()
self.ping_all_when_learned()
self.flap_all_switch_ports()
self.verify_traveling_dhcp_mac()
self.gauge_smoke_test()
self.prometheus_smoke_test()
self.assertGreater(os.path.getsize(self.event_log), 0)
self.verify_events_log(self.event_log)
class Faucet8021XBaseTest(FaucetTest):
HOST_NAMESPACE = {3: False}
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
RADIUS_PORT = None
DOT1X_EXPECTED_EVENTS = []
SESSION_TIMEOUT = 3600
LOG_LEVEL = 'DEBUG'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: 100
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="user"
password="microphone"
}
"""
wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="admin"
password="megaphone"
}
"""
freeradius_user_conf = """user Cleartext-Password := "microphone"
Session-timeout = {0}
admin Cleartext-Password := "megaphone"
Session-timeout = {0}
vlanuser1001 Cleartext-Password := "password"
Tunnel-Type = "VLAN",
Tunnel-Medium-Type = "IEEE-802",
Tunnel-Private-Group-id = "radiusassignedvlan1"
vlanuser2222 Cleartext-Password := "milliphone"
Tunnel-Type = "VLAN",
Tunnel-Medium-Type = "IEEE-802",
Tunnel-Private-Group-id = "radiusassignedvlan2"
filter_id_user_accept Cleartext-Password := "accept_pass"
Filter-Id = "accept_acl"
filter_id_user_deny Cleartext-Password := "deny_pass"
Filter-Id = "deny_acl"
"""
eapol1_host = None
eapol2_host = None
ping_host = None
nfv_host = None
nfv_intf = None
nfv_portno = None
@staticmethod
def _priv_mac(host_id):
two_byte_port_num = '%04x' % host_id
two_byte_port_num_formatted = ':'.join((two_byte_port_num[:2], two_byte_port_num[2:]))
return '00:00:00:00:%s' % two_byte_port_num_formatted
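    # e.g. _priv_mac(1) -> '00:00:00:00:00:01'; these synthetic per-port MACs are used
    # by wait_8021x_flows() below to steer EAPOL frames between each supplicant port
    # and the NFV port.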
def _init_faucet_config(self):
self.eapol1_host, self.eapol2_host, self.ping_host, self.nfv_host = self.hosts_name_ordered()
switch = self.first_switch()
last_host_switch_link = switch.connectionsTo(self.nfv_host)[0]
nfv_intf = [
intf for intf in last_host_switch_link if intf in switch.intfList()][0]
self.nfv_intf = str(nfv_intf)
nfv_intf = self.nfv_host.intf()
self.RADIUS_PORT = mininet_test_util.find_free_udp_port(self.ports_sock, self._test_name())
self.CONFIG = self.CONFIG.replace('NFV_INTF', str(nfv_intf))
self.CONFIG = self.CONFIG.replace('RADIUS_PORT', str(self.RADIUS_PORT))
super(Faucet8021XBaseTest, self)._init_faucet_config()
def setUp(self):
super(Faucet8021XBaseTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid,
host_namespace=self.HOST_NAMESPACE)
self.start_net()
self.nfv_portno = self.port_map['port_4']
self.host_drop_all_ips(self.nfv_host)
self.nfv_pids = []
tcpdump_args = '-e -n -U'
self.eapol1_host.cmd(
mininet_test_util.timeout_cmd(
'tcpdump -w %s/%s-start.pcap %s ether proto 0x888e &' % (
self.tmpdir, self.eapol1_host.name, tcpdump_args), 300))
self.nfv_host.cmd(
mininet_test_util.timeout_cmd(
'tcpdump -i %s-eth0 -w %s/eap-lo.pcap %s ether proto 0x888e &' % (
self.nfv_host.name, self.tmpdir, tcpdump_args), 300))
self.nfv_pids.append(int(self.nfv_host.lastPid))
self.nfv_host.cmd(
mininet_test_util.timeout_cmd(
'tcpdump -i lo -w %s/radius.pcap %s udp port %d &' % (
self.tmpdir, tcpdump_args, self.RADIUS_PORT), 300))
self.nfv_pids.append(int(self.nfv_host.lastPid))
self.radius_log_path = self.start_freeradius()
self.nfv_pids.append(int(self.nfv_host.lastPid))
self._enable_event_log(300)
def tearDown(self, ignore_oferrors=False):
for pid in self.nfv_pids:
self.nfv_host.cmd('kill %u' % pid)
super(Faucet8021XBaseTest, self).tearDown(ignore_oferrors=ignore_oferrors)
def post_test_checks(self):
self.assertGreater(os.path.getsize(self.event_log), 0)
self.verify_dot1x_events_log()
def verify_dot1x_events_log(self):
def replace_mac(host_no):
replacement_macs = {
'HOST1_MAC': self.eapol1_host.MAC(),
'HOST2_MAC': self.eapol2_host.MAC(),
'HOST3_MAC': self.ping_host.MAC(),
'HOST4_MAC': self.nfv_host.MAC(),
}
return replacement_macs.get(host_no, None)
def insert_dynamic_values(dot1x_expected_events):
for dot1x_event in dot1x_expected_events:
top_level_key = list(dot1x_event.keys())[0]
dot1x_params = {'dp_id': int(self.dpid)}
for key, val in dot1x_event[top_level_key].items():
if key == 'port':
dot1x_params[key] = self.port_map[val]
elif key == 'eth_src':
dot1x_params[key] = replace_mac(val)
dot1x_event[top_level_key].update(dot1x_params)
if not self.DOT1X_EXPECTED_EVENTS:
return
dot1x_expected_events = copy.deepcopy(self.DOT1X_EXPECTED_EVENTS)
insert_dynamic_values(dot1x_expected_events)
with open(self.event_log, 'r') as event_file:
events_that_happened = []
for event_log_line in event_file.readlines():
if 'DOT1X' not in event_log_line:
continue
event = json.loads(event_log_line.strip())
events_that_happened.append(event['DOT1X'])
for expected_event in dot1x_expected_events:
self.assertTrue(expected_event in events_that_happened,
msg='expected event: {} not in events_that_happened {}'.format(
expected_event, events_that_happened))
def try_8021x(self, host, port_num, conf, and_logoff=False, terminate_wpasupplicant=False,
wpasup_timeout=180, tcpdump_timeout=15, tcpdump_packets=10,
expect_success=True):
if expect_success:
self.wait_8021x_flows(port_num)
port_labels = self.port_labels(port_num)
success_total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels, default=0)
failure_total = self.scrape_prometheus_var(
'port_dot1x_failure_total', labels=port_labels, default=0)
logoff_total = self.scrape_prometheus_var(
'port_dot1x_logoff_total', labels=port_labels, default=0)
dp_success_total = self.scrape_prometheus_var(
'dp_dot1x_success_total', default=0)
dp_failure_total = self.scrape_prometheus_var(
'dp_dot1x_failure_total', default=0)
dp_logoff_total = self.scrape_prometheus_var(
'dp_dot1x_logoff_total', default=0)
tcpdump_filter = 'ether proto 0x888e'
tcpdump_txt = self.tcpdump_helper(
host, tcpdump_filter, [
lambda: self.wpa_supplicant_callback(
host, port_num, conf, and_logoff,
timeout=wpasup_timeout,
terminate_wpasupplicant=terminate_wpasupplicant)],
timeout=tcpdump_timeout, vflags='-vvv', packets=tcpdump_packets)
if expect_success:
self.wait_for_eap_success(host, self.get_wpa_ctrl_path(host))
if not and_logoff:
self.wait_8021x_success_flows(host, port_num)
success = 'Success' in tcpdump_txt
new_success_total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels, default=0)
new_failure_total = self.scrape_prometheus_var(
'port_dot1x_failure_total', labels=port_labels, default=0)
new_logoff_total = self.scrape_prometheus_var(
'port_dot1x_logoff_total', labels=port_labels, default=0)
new_dp_success_total = self.scrape_prometheus_var(
'dp_dot1x_success_total', default=0)
new_dp_failure_total = self.scrape_prometheus_var(
'dp_dot1x_failure_total', default=0)
new_dp_logoff_total = self.scrape_prometheus_var(
'dp_dot1x_logoff_total', default=0)
if expect_success != success:
return False
if expect_success and success:
self.assertGreater(new_success_total, success_total)
self.assertGreater(new_dp_success_total, dp_success_total)
self.assertEqual(failure_total, new_failure_total)
self.assertEqual(dp_failure_total, new_dp_failure_total)
logoff = 'logoff' in tcpdump_txt
if logoff != and_logoff:
return False
if and_logoff:
self.assertGreater(new_logoff_total, logoff_total)
return True
self.assertEqual(logoff_total, new_logoff_total)
self.assertEqual(dp_logoff_total, new_dp_logoff_total)
self.assertEqual(dp_success_total, new_dp_success_total)
self.assertGreaterEqual(new_failure_total, failure_total)
self.assertGreaterEqual(new_dp_failure_total, dp_failure_total)
return False
def retry_8021x(self, host, port_num, conf, and_logoff=False, retries=2, expect_success=True):
for _ in range(retries):
if self.try_8021x(host, port_num, conf, and_logoff, expect_success=expect_success):
return True
time.sleep(1)
return False
def wait_8021x_flows(self, port_no):
port_actions = [
'SET_FIELD: {eth_dst:%s}' % self._priv_mac(port_no), 'OUTPUT:%u' % self.nfv_portno]
from_nfv_actions = [
'SET_FIELD: {eth_src:01:80:c2:00:00:03}', 'OUTPUT:%d' % port_no]
from_nfv_match = {
'in_port': self.nfv_portno, 'dl_src': self._priv_mac(port_no), 'dl_type': 0x888e}
self.wait_until_matching_flow(None, table_id=0, actions=port_actions)
self.wait_until_matching_flow(from_nfv_match, table_id=0, actions=from_nfv_actions)
def wait_8021x_success_flows(self, host, port_no):
from_host_actions = [
'GOTO_TABLE:1']
from_host_match = {
'in_port': port_no, 'dl_src': host.MAC()}
self.wait_until_matching_flow(from_host_match, table_id=0, actions=from_host_actions)
def verify_host_success(self, eapol_host, port_no, wpasupplicant_conf, and_logoff):
self.one_ipv4_ping(
eapol_host, self.ping_host.IP(), require_host_learned=False, expected_result=False)
self.assertTrue(
self.try_8021x(
eapol_host, port_no, wpasupplicant_conf, and_logoff=and_logoff))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(), require_host_learned=False, expected_result=True)
def wpa_supplicant_callback(self, host, port_num, conf, and_logoff, timeout=10, terminate_wpasupplicant=False):
wpa_ctrl_path = self.get_wpa_ctrl_path(host)
if os.path.exists(wpa_ctrl_path):
self.terminate_wpasupplicant(host)
for pid in host.cmd('lsof -t %s' % wpa_ctrl_path).splitlines():
try:
os.kill(int(pid), 15)
except (ValueError, ProcessLookupError):
pass
try:
shutil.rmtree(wpa_ctrl_path)
except FileNotFoundError:
pass
log_prefix = host.name + '_'
self.start_wpasupplicant(
host, conf, timeout=timeout,
wpa_ctrl_socket_path=wpa_ctrl_path, log_prefix=log_prefix)
if and_logoff:
self.wait_for_eap_success(host, wpa_ctrl_path)
self.wait_until_matching_flow(
{'eth_src': host.MAC(), 'in_port': port_num}, table_id=0)
self.one_ipv4_ping(
host, self.ping_host.IP(), require_host_learned=False)
host.cmd('wpa_cli -p %s logoff' % wpa_ctrl_path)
self.wait_until_no_matching_flow(
{'eth_src': host.MAC(), 'in_port': port_num}, table_id=0)
self.one_ipv4_ping(
host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
if terminate_wpasupplicant:
self.terminate_wpasupplicant(host)
def terminate_wpasupplicant(self, host):
wpa_ctrl_path = self.get_wpa_ctrl_path(host)
host.cmd('wpa_cli -p %s terminate' % wpa_ctrl_path)
def get_wpa_ctrl_path(self, host):
wpa_ctrl_path = os.path.join(
self.tmpdir, '%s/%s-wpasupplicant' % (self.tmpdir, host.name))
return wpa_ctrl_path
@staticmethod
def get_wpa_status(host, wpa_ctrl_path):
status = host.cmdPrint('wpa_cli -p %s status' % wpa_ctrl_path)
for line in status.splitlines():
if line.startswith('EAP state'):
return line.split('=')[1].strip()
return None
def wait_for_eap_success(self, host, wpa_ctrl_path, timeout=5):
for _ in range(timeout):
eap_state = self.get_wpa_status(host, wpa_ctrl_path)
if eap_state == 'SUCCESS':
return
time.sleep(1)
self.fail('did not get EAP success: %s' % eap_state)
def wait_for_radius(self, radius_log_path):
self.wait_until_matching_lines_from_file(
r'.*Ready to process requests', radius_log_path)
def start_freeradius(self):
radius_log_path = '%s/radius.log' % self.tmpdir
listen_match = r'(listen {[^}]*(limit {[^}]*})[^}]*})|(listen {[^}]*})'
listen_config = """listen {
type = auth
ipaddr = *
port = %s
}
listen {
type = acct
ipaddr = *
port = %d
}""" % (self.RADIUS_PORT, self.RADIUS_PORT + 1)
if os.path.isfile('/etc/freeradius/users'):
# Assume we are dealing with freeradius 2 configuration
shutil.copytree('/etc/freeradius/', '%s/freeradius' % self.tmpdir)
users_path = '%s/freeradius/users' % self.tmpdir
with open('%s/freeradius/radiusd.conf' % self.tmpdir, 'r+') as default_site:
default_config = default_site.read()
default_config = re.sub(listen_match, '', default_config)
default_site.seek(0)
default_site.write(default_config)
default_site.write(listen_config)
default_site.truncate()
else:
# Assume we are dealing with freeradius >=3 configuration
freerad_version = os.popen(
r'freeradius -v | egrep -o -m 1 "Version ([0-9]\.[0.9])"').read().rstrip()
freerad_major_version = freerad_version.split(' ')[1]
shutil.copytree('/etc/freeradius/%s/' % freerad_major_version,
'%s/freeradius' % self.tmpdir)
users_path = '%s/freeradius/mods-config/files/authorize' % self.tmpdir
with open('%s/freeradius/sites-enabled/default' % self.tmpdir, 'r+') as default_site:
default_config = default_site.read()
default_config = re.sub(
listen_match, '', default_config)
default_config = re.sub(
r'server default {', 'server default {\n'+listen_config, default_config)
default_site.seek(0)
default_site.write(default_config)
default_site.truncate()
with open(users_path, 'w') as users_file:
users_file.write(self.freeradius_user_conf.format(self.SESSION_TIMEOUT))
with open('%s/freeradius/clients.conf' % self.tmpdir, 'w') as clients:
clients.write("""client localhost {
ipaddr = 127.0.0.1
secret = SECRET
}""")
with open('%s/freeradius/sites-enabled/inner-tunnel' % self.tmpdir, 'r+') as innertunnel_site:
tunnel_config = innertunnel_site.read()
listen_config = """listen {
ipaddr = 127.0.0.1
port = %d
type = auth
}""" % (self.RADIUS_PORT + 2)
tunnel_config = re.sub(listen_match, listen_config, tunnel_config)
innertunnel_site.seek(0)
innertunnel_site.write(tunnel_config)
innertunnel_site.truncate()
os.system('chmod o+rx %s' % self.root_tmpdir)
os.system('chown -R root:freerad %s/freeradius/' % self.tmpdir)
self.nfv_host.cmd(
mininet_test_util.timeout_cmd(
'freeradius -X -l %s -d %s/freeradius &' % (radius_log_path, self.tmpdir),
300))
self.wait_for_radius(radius_log_path)
return radius_log_path
class Faucet8021XSuccessTest(Faucet8021XBaseTest):
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'logoff'}}]
SESSION_TIMEOUT = 3600
def test_untagged(self):
self.verify_host_success(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, False)
self.verify_host_success(
self.eapol2_host, self.port_map['port_2'], self.wpasupplicant_conf_1, True)
self.post_test_checks()
class Faucet8021XFailureTest(Faucet8021XBaseTest):
"""Failure due to incorrect identity/password"""
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="user"
password="wrongpassword"
}
"""
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'failure'}}]
def test_untagged(self):
self.assertFalse(
self.try_8021x(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, and_logoff=False, expect_success=False))
self.post_test_checks()
class Faucet8021XPortStatusTest(Faucet8021XBaseTest):
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}}]
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
port_no4 = self.port_map['port_4']
self.wait_8021x_flows(port_no1)
self.set_port_down(port_no1)
# self.wait_until_no_matching_flow(None, table_id=0, actions=actions)
self.set_port_up(port_no1)
self.wait_8021x_flows(port_no1)
self.set_port_down(port_no4)
# self.wait_until_no_matching_flow(match, table_id=0, actions=actions)
self.set_port_up(port_no4)
self.wait_8021x_flows(port_no1)
        # After port 1 goes down and the NFV port is flapped, only port 2's 802.1X rules should be installed.
self.set_port_down(port_no1)
self.flap_port(port_no4)
self.wait_8021x_flows(port_no2)
# no portno1
self.set_port_up(port_no1)
self.wait_8021x_flows(port_no1)
        # When the port goes down and up, the host should no longer be authenticated.
self.assertTrue(self.retry_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(), require_host_learned=False)
        # Terminate the supplicant so it does not automatically reauthenticate when the port comes back up.
self.terminate_wpasupplicant(self.eapol1_host)
self.flap_port(port_no1)
self.wait_8021x_flows(port_no1)
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XPortFlapTest(Faucet8021XBaseTest):
def test_untagged(self):
port_no1 = self.port_map['port_1']
for _ in range(2):
self.set_port_up(port_no1)
self.assertTrue(self.retry_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
self.set_port_down(port_no1)
self.assertFalse(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False, expect_success=False))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
wpa_status = self.get_wpa_status(
self.eapol1_host, self.get_wpa_ctrl_path(self.eapol1_host))
self.assertNotEqual('SUCCESS', wpa_status)
        # Kill the supplicant so it cannot reply to the identity request when the port comes up.
self.terminate_wpasupplicant(self.eapol1_host)
self.post_test_checks()
class Faucet8021XIdentityOnPortUpTest(Faucet8021XBaseTest):
def test_untagged(self):
port_no1 = self.port_map['port_1']
        # Start the supplicant and log on, then bring the port up to trigger an identity request; two successes are expected.
self.set_port_up(port_no1)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False,
tcpdump_timeout=180, tcpdump_packets=6))
self.set_port_down(port_no1)
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
def port_up(port):
self.set_port_up(port)
self.wait_8021x_flows(port)
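        # Capture EAPOL frames while the port comes back up; FAUCET should send a fresh
        # identity request and the still-running supplicant should authenticate again.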
tcpdump_filter = 'ether proto 0x888e'
tcpdump_txt = self.tcpdump_helper(
self.eapol1_host, tcpdump_filter, [
lambda: port_up(port_no1)],
timeout=80, vflags='-vvv', packets=10)
for req_str in (
'len 5, Request (1)', # assume that this is the identity request
'Identity: user', # supplicant replies with username
'Success', # supplicant success
):
self.assertTrue(req_str in tcpdump_txt)
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True, retries=10)
self.post_test_checks()
class Faucet8021XPeriodicReauthTest(Faucet8021XBaseTest):
SESSION_TIMEOUT = 15
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_labels1 = self.port_labels(port_no1)
self.set_port_up(port_no1)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
last_total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels1, default=0)
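        # Expect the success counter to increment (a re-authentication) within two
        # session timeouts, four times in a row.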
for _ in range(4):
for _ in range(self.SESSION_TIMEOUT * 2):
total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels1, default=0)
if total > last_total:
break
time.sleep(1)
self.assertGreater(total, last_total, msg='failed to successfully re-auth')
last_total = total
self.post_test_checks()
class Faucet8021XConfigReloadTest(Faucet8021XBaseTest):
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
self.wait_8021x_flows(port_no1)
self.wait_8021x_flows(port_no2)
conf = self._get_faucet_conf()
conf['dps'][self.DP_NAME]['interfaces'][port_no1]['dot1x'] = False
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=True)
self.wait_8021x_flows(port_no2)
self.post_test_checks()
class Faucet8021XCustomACLLoginTest(Faucet8021XBaseTest):
"""Ensure that 8021X Port ACLs Work before and after Login"""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
auth_acl:
- rule:
dl_type: 0x800 # Allow ICMP / IPv4
ip_proto: 1
actions:
allow: True
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
noauth_acl:
- rule:
dl_type: 0x800 # Deny ICMP / IPv4
ip_proto: 1
actions:
allow: False
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
"""
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
auth_acl: auth_acl
noauth_acl: noauth_acl
interfaces:
%(port_1)d:
name: b1
description: "b1"
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_acl: True
%(port_2)d:
name: b2
description: "b2"
native_vlan: 100
# 802.1X client.
dot1x: True
dot1x_acl: True
%(port_3)d:
name: b3
description: "b3"
native_vlan: 100
# ping host.
%(port_4)d:
name: b4
description: "b4"
output_only: True
# "NFV host - interface used by controller."
"""
def test_untagged(self):
self.verify_host_success(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, False)
self.post_test_checks()
class Faucet8021XCustomACLLogoutTest(Faucet8021XCustomACLLoginTest):
"""Ensure that 8021X Port ACLs Work before and after Logout"""
def test_untagged(self):
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, and_logoff=True))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XMABTest(Faucet8021XSuccessTest):
"""Ensure that 802.1x Port Supports Mac Auth Bypass."""
DOT1X_EXPECTED_EVENTS = [{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC',
'status': 'success'}},
]
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_mab: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: 100
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""
def start_freeradius(self):
# Add the host mac address to the FreeRADIUS config
self.freeradius_user_conf += '\n{0} Cleartext-Password := "{0}"'.format(
str(self.eapol1_host.MAC()).replace(':', '')
)
return super(Faucet8021XMABTest, self).start_freeradius()
@staticmethod
def dhclient_callback(host, timeout):
dhclient_cmd = 'dhclient -d -1 %s' % host.defaultIntf()
return host.cmd(mininet_test_util.timeout_cmd(dhclient_cmd, timeout), verbose=True)
def test_untagged(self):
port_no1 = self.port_map['port_1']
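        # The host starts unauthenticated; the DHCP attempt below triggers MAC
        # authentication bypass against FreeRADIUS.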
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.dhclient_callback(self.eapol1_host, 10)
self.wait_until_matching_lines_from_file(r'.*AAA_SUCCESS.*', self.env['faucet']['FAUCET_LOG'])
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.assertEqual(
1,
self.scrape_prometheus_var('port_dot1x_success_total', labels=self.port_labels(port_no1), default=0))
self.post_test_checks()
class Faucet8021XDynACLLoginTest(Faucet8021XCustomACLLoginTest):
"""Ensure that 8021X Port ACLs Work before and after Logout"""
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'success'}},
]
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="filter_id_user_accept"
password="accept_pass"
}
"""
wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="filter_id_user_deny"
password="deny_pass"
}
"""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
accept_acl:
dot1x_assigned: True
rules:
- rule:
dl_type: 0x800 # Allow ICMP / IPv4
ip_proto: 1
actions:
allow: True
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
deny_acl:
dot1x_assigned: True
rules:
- rule:
dl_type: 0x800 # Deny ICMP / IPv4
ip_proto: 1
actions:
allow: False
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
"""
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
name: b1
description: "b1"
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_dyn_acl: True
%(port_2)d:
name: b2
description: "b2"
native_vlan: 100
# 802.1X client.
dot1x: True
dot1x_dyn_acl: True
%(port_3)d:
name: b3
description: "b3"
native_vlan: 100
# ping host.
%(port_4)d:
name: b4
description: "b4"
output_only: True
# "NFV host - interface used by controller."
"""
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.one_ipv4_ping(self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.assertTrue(self.try_8021x(
self.eapol2_host, port_no2, self.wpasupplicant_conf_2, and_logoff=False))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.one_ipv4_ping(self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XDynACLLogoutTest(Faucet8021XDynACLLoginTest):
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'logoff'}}
]
def test_untagged(self):
port_no1 = self.port_map['port_1']
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XVLANTest(Faucet8021XSuccessTest):
"""Test that two hosts are put into vlans.
Same VLAN, Logoff, diff VLANs, port flap."""
CONFIG_GLOBAL = """vlans:
100:
vid: 100
description: "untagged"
radiusassignedvlan1:
vid: %u
description: "untagged"
dot1x_assigned: True
radiusassignedvlan2:
vid: %u
description: "untagged"
dot1x_assigned: True
""" % (mininet_test_base.MAX_TEST_VID - 1,
mininet_test_base.MAX_TEST_VID)
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: radiusassignedvlan1
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""
RADIUS_PORT = 1940
DOT1X_EXPECTED_EVENTS = []
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="vlanuser1001"
password="password"
}
"""
wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="vlanuser2222"
password="milliphone"
}
"""
def test_untagged(self):
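        # OpenFlow VLAN matches carry the OFPVID_PRESENT bit (0x1000), so XOR it in
        # to build the expected vlan_vid values.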
vid = 100 ^ mininet_test_base.OFPVID_PRESENT
radius_vid1 = (mininet_test_base.MAX_TEST_VID - 1) ^ mininet_test_base.OFPVID_PRESENT
radius_vid2 = mininet_test_base.MAX_TEST_VID ^ mininet_test_base.OFPVID_PRESENT
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
port_no3 = self.port_map['port_3']
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.wait_until_matching_flow(
{'in_port': port_no1},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid1])
self.wait_until_matching_flow(
{'vlan_vid': radius_vid1},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no3])
self.wait_until_matching_flow(
{'vlan_vid': vid},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
self.wait_until_no_matching_flow(
{'vlan_vid': radius_vid2},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
# check ports are back in the right vlans.
self.wait_until_no_matching_flow(
{'in_port': port_no1},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid1])
self.wait_until_matching_flow(
{'in_port': port_no1},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % vid])
# check flood ports are in the right vlans
self.wait_until_no_matching_flow(
{'vlan_vid': radius_vid1},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no3])
self.wait_until_matching_flow(
{'vlan_vid': vid},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
        # check two 1x hosts play nicely (same dyn vlan).
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.one_ipv4_ping(
self.eapol1_host, self.eapol2_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol2_host, port_no2, self.wpasupplicant_conf_1, and_logoff=False))
self.one_ipv4_ping(
self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.one_ipv4_ping(
self.eapol2_host, self.eapol1_host.IP(),
require_host_learned=False, expected_result=True)
        # check two 1x hosts don't play (diff dyn vlan).
self.assertTrue(self.try_8021x(
self.eapol2_host, port_no2, self.wpasupplicant_conf_2, and_logoff=False))
self.one_ipv4_ping(
self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.one_ipv4_ping(
self.eapol2_host, self.eapol1_host.IP(),
require_host_learned=False, expected_result=False)
# move host1 to new VLAN
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_2, and_logoff=False))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.one_ipv4_ping(
self.eapol1_host, self.eapol2_host.IP(),
require_host_learned=False, expected_result=True)
self.wait_until_no_matching_flow(
{'eth_src': self.eapol1_host.MAC(),
'vlan_vid': vid},
table_id=self._ETH_SRC_TABLE)
self.wait_until_no_matching_flow(
{'eth_src': self.eapol1_host.MAC(),
'vlan_vid': radius_vid1},
table_id=self._ETH_SRC_TABLE)
self.wait_until_matching_flow(
{'eth_src': self.eapol1_host.MAC(),
'vlan_vid': radius_vid2},
table_id=self._ETH_SRC_TABLE)
self.wait_until_no_matching_flow(
{'eth_dst': self.eapol1_host.MAC(),
'vlan_vid': vid},
table_id=self._ETH_DST_TABLE)
self.wait_until_no_matching_flow(
{'eth_dst': self.eapol1_host.MAC(),
'vlan_vid': radius_vid1},
table_id=self._ETH_DST_TABLE)
self.wait_until_matching_flow(
{'eth_dst': self.eapol1_host.MAC(),
'vlan_vid': radius_vid2},
table_id=self._ETH_DST_TABLE)
# test port up/down. removes the dynamic vlan & host cache.
self.flap_port(port_no2)
self.wait_until_no_matching_flow(
{'eth_src': self.eapol2_host.MAC()},
table_id=self._ETH_SRC_TABLE)
self.wait_until_no_matching_flow(
{'eth_dst': self.eapol2_host.MAC(),
'vlan_vid': radius_vid1},
table_id=self._ETH_DST_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
# check ports are back in the right vlans.
self.wait_until_no_matching_flow(
{'in_port': port_no2},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid2])
self.wait_until_matching_flow(
{'in_port': port_no2},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % vid])
# check flood ports are in the right vlans
self.wait_until_no_matching_flow(
{'vlan_vid': radius_vid2},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
self.wait_until_matching_flow(
{'vlan_vid': vid},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
self.post_test_checks()
class FaucetUntaggedRandomVidTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
randvlan:
vid: 100
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: randvlan
%(port_2)d:
native_vlan: randvlan
%(port_3)d:
native_vlan: randvlan
%(port_4)d:
native_vlan: randvlan
"""
def test_untagged(self):
last_vid = None
for _ in range(5):
vid = random.randint(2, mininet_test_base.MAX_TEST_VID)
if vid == last_vid:
continue
self.change_vlan_config(
'randvlan', 'vid', vid, cold_start=True, hup=True)
self.ping_all_when_learned()
last_vid = vid
class FaucetUntaggedNoCombinatorialFlood(FaucetUntaggedTest):
CONFIG = """
combinatorial_port_flood: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetUntaggedControllerNfvTest(FaucetUntaggedTest):
# Name of switch interface connected to last host, accessible to controller.
last_host_switch_intf = None
def _init_faucet_config(self):
last_host = self.hosts_name_ordered()[-1]
switch = self.first_switch()
last_host_switch_link = switch.connectionsTo(last_host)[0]
self.last_host_switch_intf = [intf for intf in last_host_switch_link if intf in switch.intfList()][0]
# Now that interface is known, FAUCET config can be written to include it.
super(FaucetUntaggedControllerNfvTest, self)._init_faucet_config()
def test_untagged(self):
super(FaucetUntaggedControllerNfvTest, self).test_untagged()
# Confirm controller can see switch interface with traffic.
ifconfig_output = self.net.controllers[0].cmd('ifconfig %s' % self.last_host_switch_intf)
self.assertTrue(
re.search('(R|T)X packets[: ][1-9]', ifconfig_output),
msg=ifconfig_output)
class FaucetUntaggedBroadcastTest(FaucetUntaggedTest):
def test_untagged(self):
super(FaucetUntaggedBroadcastTest, self).test_untagged()
self.verify_broadcast()
self.verify_no_bcast_to_self()
self.verify_unicast_not_looped()
class FaucetUntaggedNSLoopTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
acls:
nsonly:
- rule:
dl_type: %u
ip_proto: 58
icmpv6_type: 135
actions:
allow: 1
- rule:
actions:
allow: 0
vlans:
100:
description: "untagged"
""" % IPV6_ETH
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: nsonly
%(port_2)d:
native_vlan: 100
acl_in: nsonly
%(port_3)d:
native_vlan: 100
acl_in: nsonly
%(port_4)d:
native_vlan: 100
acl_in: nsonly
"""
def test_untagged(self):
self.verify_no_bcast_to_self()
class FaucetUntaggedNoCombinatorialBroadcastTest(FaucetUntaggedBroadcastTest):
CONFIG = """
combinatorial_port_flood: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetExperimentalAPITest(FaucetUntaggedTest):
"""Test the experimental Faucet API."""
CONTROLLER_CLASS = mininet_test_topo.FaucetExperimentalAPI
results_file = None
def _set_static_vars(self):
super(FaucetExperimentalAPITest, self)._set_static_vars()
self._set_var_path('faucet', 'API_TEST_RESULT', 'result.txt')
self.results_file = self.env['faucet']['API_TEST_RESULT']
def test_untagged(self):
self.wait_until_matching_lines_from_file(r'.*pass.*', self.results_file)
class FaucetUntaggedLogRotateTest(FaucetUntaggedTest):
def test_untagged(self):
faucet_log = self.env['faucet']['FAUCET_LOG']
self.assertTrue(os.path.exists(faucet_log))
os.rename(faucet_log, faucet_log + '.old')
self.assertTrue(os.path.exists(faucet_log + '.old'))
self.flap_all_switch_ports()
self.assertTrue(os.path.exists(faucet_log))
class FaucetUntaggedLLDPTest(FaucetUntaggedTest):
CONFIG = """
lldp_beacon:
send_interval: 5
max_per_interval: 5
interfaces:
%(port_1)d:
native_vlan: 100
lldp_beacon:
enable: True
system_name: "faucet"
port_descr: "first_port"
org_tlvs:
- {oui: 0x12bb, subtype: 2, info: "01406500"}
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
@staticmethod
def wireshark_payload_format(payload_str):
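        """Format a hex payload string the way verbose tcpdump prints it, so it can be matched in capture output."""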
formatted_payload_str = ''
groupsize = 4
for payload_offset in range(len(payload_str) // groupsize):
char_count = payload_offset * 2
if char_count % 0x10 == 0:
formatted_payload_str += '0x%4.4x: ' % char_count
payload_fragment = payload_str[payload_offset * groupsize:][:groupsize]
formatted_payload_str += ' ' + payload_fragment
return formatted_payload_str
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = 'ether proto 0x88cc'
timeout = 5 * 3
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [
lambda: first_host.cmd('sleep %u' % timeout)],
timeout=timeout, vflags='-vv', packets=1)
oui_prefix = ''.join(self.FAUCET_MAC.split(':')[:3])
faucet_lldp_dp_id_attr = '%2.2x' % 1
expected_lldp_dp_id = ''.join((
oui_prefix,
faucet_lldp_dp_id_attr,
binascii.hexlify(str(self.dpid).encode('UTF-8')).decode()))
for lldp_required in (
r'%s > 01:80:c2:00:00:0e, ethertype LLDP' % self.FAUCET_MAC,
r'Application type \[voice\] \(0x01\), Flags \[Tagged\]Vlan id 50',
r'System Name TLV \(5\), length 6: faucet',
r'Port Description TLV \(4\), length 10: first_port',
self.wireshark_payload_format(expected_lldp_dp_id)):
self.assertTrue(
re.search(lldp_required, tcpdump_txt),
msg='%s: %s' % (lldp_required, tcpdump_txt))
class FaucetUntaggedLLDPDefaultFallbackTest(FaucetUntaggedTest):
CONFIG = """
lldp_beacon:
send_interval: 5
max_per_interval: 5
interfaces:
%(port_1)d:
native_vlan: 100
lldp_beacon:
enable: True
org_tlvs:
- {oui: 0x12bb, subtype: 2, info: "01406500"}
"""
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = 'ether proto 0x88cc'
timeout = 5 * 3
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [
lambda: first_host.cmd('sleep %u' % timeout)],
timeout=timeout, vflags='-vv', packets=1)
for lldp_required in (
r'%s > 01:80:c2:00:00:0e, ethertype LLDP' % self.FAUCET_MAC,
r'Application type \[voice\] \(0x01\), Flags \[Tagged\]Vlan id 50',
r'System Name TLV \(5\), length 8: faucet-1',
r'Port Description TLV \(4\), length [1-9]: b%u' % self.port_map['port_1']):
self.assertTrue(
re.search(lldp_required, tcpdump_txt),
msg='%s: %s' % (lldp_required, tcpdump_txt))
class FaucetUntaggedMeterParseTest(FaucetUntaggedTest):
REQUIRES_METERS = True
OVS_TYPE = 'user'
CONFIG_GLOBAL = """
meters:
lossymeter:
meter_id: 1
entry:
flags: "KBPS"
bands:
[
{
type: "DROP",
rate: 100
}
]
acls:
lossyacl:
- rule:
actions:
meter: lossymeter
allow: 1
vlans:
100:
description: "untagged"
"""
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
db: 'stats_file'
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
db: 'state_file'
meter_stats:
dps: ['%s']
type: 'meter_stats'
interval: 5
db: 'meter_file'
meter_stats_prom:
dps: ['%s']
type: 'meter_stats'
db: 'prometheus'
interval: 5
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME, self.DP_NAME)
GAUGE_CONFIG_DBS = """
prometheus:
type: 'prometheus'
prometheus_addr: '::1'
prometheus_port: %(gauge_prom_port)d
"""
config_ports = {'gauge_prom_port': None}
def get_gauge_config(self, faucet_config_file,
monitor_stats_file,
monitor_state_file,
monitor_meter_stats_file):
"""Build Gauge config."""
return """
faucet_configs:
- %s
watchers:
%s
dbs:
stats_file:
type: 'text'
file: %s
state_file:
type: 'text'
file: %s
meter_file:
type: 'text'
file: %s
%s
""" % (faucet_config_file, self.get_gauge_watcher_config(),
monitor_stats_file, monitor_state_file, monitor_meter_stats_file,
self.GAUGE_CONFIG_DBS)
def _init_gauge_config(self):
gauge_config = self.get_gauge_config(
self.faucet_config_path,
self.monitor_stats_file,
self.monitor_state_file,
self.monitor_meter_stats_file)
if self.config_ports:
gauge_config = gauge_config % self.config_ports
self._write_yaml_conf(self.gauge_config_path, yaml.safe_load(gauge_config))
def test_untagged(self):
"""All hosts on the same untagged VLAN should have connectivity."""
# TODO: userspace DP port status not reliable.
self.ping_all_when_learned()
class FaucetUntaggedApplyMeterTest(FaucetUntaggedMeterParseTest):
CONFIG = """
interfaces:
%(port_1)d:
acl_in: lossyacl
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
super(FaucetUntaggedApplyMeterTest, self).test_untagged()
first_host, second_host = self.hosts_name_ordered()[:2]
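        # Flood pings to exceed the 100 KBPS meter band so its DROP band counters increment.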
error('metered ping flood: %s' % first_host.cmd(
'ping -c 1000 -f %s' % second_host.IP()))
# Require meter band bytes to match.
self.wait_until_matching_lines_from_file(
r'.+faucet-1-1-byte-band-count.+[1-9].+',
self.monitor_meter_stats_file)
meter_labels = {
'dp_id': self.dpid,
'dp_name': self.DP_NAME,
'meter_id': 1
}
byte_band_count = self.scrape_prometheus_var(
'of_meter_byte_band_count', labels=meter_labels, controller='gauge')
self.assertTrue(byte_band_count)
class FaucetUntaggedHairpinTest(FaucetUntaggedTest):
NETNS = True
CONFIG = """
interfaces:
%(port_1)d:
hairpin: True
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# Create macvlan interfaces, with one in a separate namespace,
# to force traffic between them to be hairpinned via FAUCET.
first_host, second_host = self.hosts_name_ordered()[:2]
macvlan1_intf = 'macvlan1'
macvlan1_ipv4 = '10.0.0.100'
macvlan2_intf = 'macvlan2'
macvlan2_ipv4 = '10.0.0.101'
self.add_macvlan(first_host, macvlan1_intf, ipa=macvlan1_ipv4, mode='vepa')
self.add_macvlan(first_host, macvlan2_intf, mode='vepa')
macvlan2_mac = self.get_host_intf_mac(first_host, macvlan2_intf)
netns = self.hostns(first_host)
setup_cmds = []
setup_cmds.extend(
['ip link set %s netns %s' % (macvlan2_intf, netns)])
for exec_cmd in (
('ip address add %s/24 brd + dev %s' % (
macvlan2_ipv4, macvlan2_intf),
'ip link set %s up' % macvlan2_intf)):
setup_cmds.append('ip netns exec %s %s' % (netns, exec_cmd))
self.quiet_commands(first_host, setup_cmds)
self.one_ipv4_ping(first_host, macvlan2_ipv4, intf=macvlan1_ipv4)
self.one_ipv4_ping(first_host, second_host.IP())
# Verify OUTPUT:IN_PORT flood rules are exercised.
self.wait_nonzero_packet_count_flow(
{'in_port': self.port_map['port_1'],
'dl_dst': 'ff:ff:ff:ff:ff:ff'},
table_id=self._FLOOD_TABLE, actions=['OUTPUT:IN_PORT'])
self.wait_nonzero_packet_count_flow(
{'in_port': self.port_map['port_1'],
'dl_dst': macvlan2_mac},
table_id=self._ETH_DST_HAIRPIN_TABLE, actions=['OUTPUT:IN_PORT'])
class FaucetUntaggedGroupHairpinTest(FaucetUntaggedHairpinTest):
CONFIG = """
group_table: True
interfaces:
%(port_1)d:
hairpin: True
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetUntaggedTcpIPv4IperfTest(FaucetUntaggedTest):
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_ip = ipaddress.ip_address(first_host.IP())
second_host_ip = ipaddress.ip_address(second_host.IP())
for _ in range(3):
self.ping_all_when_learned()
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
self.flap_all_switch_ports()
class FaucetUntaggedTcpIPv6IperfTest(FaucetUntaggedTest):
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_ip = ipaddress.ip_interface('fc00::1:1/112')
second_host_ip = ipaddress.ip_interface('fc00::1:2/112')
self.add_host_ipv6_address(first_host, first_host_ip)
self.add_host_ipv6_address(second_host, second_host_ip)
for _ in range(3):
self.ping_all_when_learned()
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip.ip, second_host_ip.ip,
sync_counters_func=lambda: self.one_ipv6_ping(first_host, second_host_ip.ip))
self.flap_all_switch_ports()
class FaucetSanityTest(FaucetUntaggedTest):
"""Sanity test - make sure test environment is correct before running all tess."""
def verify_dp_port_healthy(self, dp_port, retries=5, min_mbps=MIN_MBPS):
for _ in range(retries):
port_desc = self.get_port_desc_from_dpid(self.dpid, dp_port)
port_name = port_desc['name']
port_state = port_desc['state']
port_config = port_desc['config']
port_speed_mbps = (port_desc['curr_speed'] * 1e3) / 1e6
            error('DP port %u is %s, at %u mbps\n' % (dp_port, port_name, port_speed_mbps))
if port_speed_mbps < min_mbps:
error('port speed %u below minimum %u mbps\n' % (
port_speed_mbps, min_mbps))
elif port_config != 0:
                error('port config %u must be 0 (all clear)\n' % port_config)
elif port_state not in (0, 4):
error('state %u must be 0 (all flags clear or live)\n' % (
port_state))
else:
return
time.sleep(1)
self.fail('DP port %u not healthy (%s)' % (dp_port, port_desc))
def test_portmap(self):
prom_desc = self.scrape_prometheus(
controller='faucet', var='of_dp_desc_stats')
self.assertIsNotNone(prom_desc, msg='Cannot scrape of_dp_desc_stats')
error('DP: %s\n' % prom_desc[0])
error('port_map: %s\n' % self.port_map)
for i, host in enumerate(self.hosts_name_ordered(), start=1):
in_port = 'port_%u' % i
dp_port = self.port_map[in_port]
if dp_port in self.switch_map:
error('verifying cabling for %s: host %s -> dp %u\n' % (
in_port, self.switch_map[dp_port], dp_port))
else:
error('verifying host %s -> dp %s\n' % (
in_port, dp_port))
self.verify_dp_port_healthy(dp_port)
self.require_host_learned(host, in_port=dp_port)
learned = self.prom_macs_learned()
self.assertEqual(
len(self.hosts_name_ordered()), len(learned),
msg='test requires exactly %u hosts learned (got %s)' % (
len(self.hosts_name_ordered()), learned))
def test_listening(self):
msg_template = (
            'Processes listening on test interfaces, or on all interfaces, may interfere with tests. '
'Please deconfigure them (e.g. configure interface as "unmanaged"):\n\n%s')
controller = self._get_controller()
ss_out = controller.cmd('ss -lnep').splitlines()
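        # Match sockets bound to a wildcard address, which could answer traffic on the test interfaces.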
listening_all_re = re.compile(r'^.+\s+(\*:\d+|:::\d+)\s+(:+\*|\*:\*).+$')
listening_all = [line for line in ss_out if listening_all_re.match(line)]
for test_intf in list(self.switch_map.values()):
int_re = re.compile(r'^.+\b%s\b.+$' % test_intf)
listening_int = [line for line in ss_out if int_re.match(line)]
self.assertFalse(
len(listening_int),
msg=(msg_template % '\n'.join(listening_int)))
if listening_all:
print('Warning: %s' % (msg_template % '\n'.join(listening_all)))
def test_silence(self):
# Make all test hosts silent and ensure we hear no other packets.
for host in self.hosts_name_ordered():
self.host_drop_all_ips(host)
host.cmd('echo 1 > /proc/sys/net/ipv6/conf/%s/disable_ipv6' % host.defaultIntf())
for host in self.hosts_name_ordered():
tcpdump_filter = ''
tcpdump_txt = self.tcpdump_helper(
host, tcpdump_filter, [], timeout=10, vflags='-vv', packets=1)
            self.assertTrue(
                self.tcpdump_rx_packets(tcpdump_txt, 0),
                msg='got unexpected packet from test switch: %s' % tcpdump_txt)
class FaucetUntaggedPrometheusGaugeTest(FaucetUntaggedTest):
"""Testing Gauge Prometheus"""
GAUGE_CONFIG_DBS = """
prometheus:
type: 'prometheus'
prometheus_addr: '::1'
prometheus_port: %(gauge_prom_port)d
"""
config_ports = {'gauge_prom_port': None}
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
db: 'prometheus'
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
db: 'prometheus'
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 5
db: 'prometheus'
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
def _start_gauge_check(self):
if not self.gauge_controller.listen_port(self.config_ports['gauge_prom_port']):
return 'gauge not listening on prometheus port'
return None
def test_untagged(self):
self.wait_dp_status(1, controller='gauge')
self.assertIsNotNone(self.scrape_prometheus_var(
'faucet_pbr_version', any_labels=True, controller='gauge', retries=3))
conf = self._get_faucet_conf()
cookie = conf['dps'][self.DP_NAME]['cookie']
if not self.wait_ports_updating(self.port_map.keys(), self.PORT_VARS):
self.fail(msg='Gauge Prometheus port counters not increasing')
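        # Poll Gauge's flow_table watcher until per-host eth_dst packet and byte counters are non-zero.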
for _ in range(self.DB_TIMEOUT * 3):
updated_counters = True
for host in self.hosts_name_ordered():
host_labels = {
'dp_id': self.dpid,
'dp_name': self.DP_NAME,
'cookie': cookie,
'eth_dst': host.MAC(),
'inst_count': str(1),
'table_id': str(self._ETH_DST_TABLE),
'vlan': str(100),
'vlan_vid': str(4196)
}
packet_count = self.scrape_prometheus_var(
'flow_packet_count_eth_dst', labels=host_labels, controller='gauge')
byte_count = self.scrape_prometheus_var(
'flow_byte_count_eth_dst', labels=host_labels, controller='gauge')
if packet_count is None or packet_count == 0:
updated_counters = False
if byte_count is None or byte_count == 0:
updated_counters = False
if updated_counters:
return
time.sleep(1)
self.fail(msg='Gauge Prometheus flow counters not increasing')
class FaucetUntaggedInfluxTest(FaucetUntaggedTest):
"""Basic untagged VLAN test with Influx."""
GAUGE_CONFIG_DBS = """
influx:
type: 'influx'
influx_db: 'faucet'
influx_host: '127.0.0.1'
influx_port: %(gauge_influx_port)d
influx_user: 'faucet'
influx_pwd: ''
influx_retries: 1
""" + """
influx_timeout: %u
""" % FaucetUntaggedTest.DB_TIMEOUT
config_ports = {'gauge_influx_port': None}
influx_log = None
server_thread = None
server = None
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 2
db: 'influx'
port_state:
dps: ['%s']
type: 'port_state'
interval: 2
db: 'influx'
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 2
db: 'influx'
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
def setup_influx(self):
self.influx_log = os.path.join(self.tmpdir, 'influx.log')
if self.server:
self.server.influx_log = self.influx_log
self.server.timeout = self.DB_TIMEOUT
def setUp(self): # pylint: disable=invalid-name
self.handler = InfluxPostHandler
super(FaucetUntaggedInfluxTest, self).setUp()
self.setup_influx()
def tearDown(self, ignore_oferrors=False): # pylint: disable=invalid-name
if self.server:
self.server.shutdown()
self.server.socket.close()
super(FaucetUntaggedInfluxTest, self).tearDown(ignore_oferrors=ignore_oferrors)
def _wait_error_shipping(self, timeout=None):
if timeout is None:
timeout = self.DB_TIMEOUT * 3 * 2
gauge_log_name = self.env['gauge']['GAUGE_LOG']
self.wait_until_matching_lines_from_file(
r'.+error shipping.+', gauge_log_name, timeout=timeout)
def _verify_influx_log(self, retries=3):
self.assertTrue(os.path.exists(self.influx_log))
expected_vars = {
'dropped_in', 'dropped_out', 'bytes_out', 'flow_packet_count',
'errors_in', 'bytes_in', 'flow_byte_count', 'port_state_reason',
'packets_in', 'packets_out'}
observed_vars = set()
for _ in range(retries):
with open(self.influx_log) as influx_log:
influx_log_lines = influx_log.readlines()
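            # Each InfluxDB line protocol point is '<measurement>,<tags> <field>=<value> <timestamp>'.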
for point_line in influx_log_lines:
point_fields = point_line.strip().split()
self.assertEqual(3, len(point_fields), msg=point_fields)
ts_name, value_field, _ = point_fields
value = float(value_field.split('=')[1])
ts_name_fields = ts_name.split(',')
self.assertGreater(len(ts_name_fields), 1)
observed_vars.add(ts_name_fields[0])
label_values = {}
for label_value in ts_name_fields[1:]:
label, value = label_value.split('=')
label_values[label] = value
if ts_name.startswith('flow'):
self.assertTrue('inst_count' in label_values, msg=point_line)
if 'vlan_vid' in label_values:
                        self.assertEqual(
                            int(label_values['vlan']),
                            int(label_values['vlan_vid']) ^ 0x1000)
if expected_vars == observed_vars:
break
time.sleep(1)
self.assertEqual(expected_vars, observed_vars)
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
def _wait_influx_log(self):
for _ in range(self.DB_TIMEOUT * 3):
if os.path.exists(self.influx_log):
return
time.sleep(1)
def _start_gauge_check(self):
influx_port = self.config_ports['gauge_influx_port']
try:
self.server = QuietHTTPServer(
(mininet_test_util.LOCALHOST, influx_port),
self.handler) # pytype: disable=attribute-error
self.server.timeout = self.DB_TIMEOUT
self.server_thread = threading.Thread(
target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
return None
except socket.error as err:
return 'cannot start Influx test server: %s' % err
def test_untagged(self):
self.ping_all_when_learned()
self.hup_gauge()
self.flap_all_switch_ports()
self._wait_influx_log()
self._verify_influx_log()
class FaucetUntaggedMultiDBWatcherTest(
FaucetUntaggedInfluxTest, FaucetUntaggedPrometheusGaugeTest):
GAUGE_CONFIG_DBS = """
prometheus:
type: 'prometheus'
prometheus_addr: '::1'
prometheus_port: %(gauge_prom_port)d
influx:
type: 'influx'
influx_db: 'faucet'
influx_host: '127.0.0.1'
influx_port: %(gauge_influx_port)d
influx_user: 'faucet'
influx_pwd: ''
influx_retries: 1
""" + """
influx_timeout: %u
""" % FaucetUntaggedTest.DB_TIMEOUT
config_ports = {
'gauge_prom_port': None,
'gauge_influx_port': None}
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
dbs: ['prometheus', 'influx']
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
dbs: ['prometheus', 'influx']
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 5
dbs: ['prometheus', 'influx']
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
@staticmethod
def test_tagged():
return
def test_untagged(self):
self.wait_dp_status(1, controller='gauge')
self.assertTrue(self.wait_ports_updating(self.port_map.keys(), self.PORT_VARS))
self.ping_all_when_learned()
self.hup_gauge()
self.flap_all_switch_ports()
self._wait_influx_log()
self._verify_influx_log()
class FaucetUntaggedInfluxDownTest(FaucetUntaggedInfluxTest):
def _start_gauge_check(self):
return None
def test_untagged(self):
self.ping_all_when_learned()
self._wait_error_shipping()
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetUntaggedInfluxUnreachableTest(FaucetUntaggedInfluxTest):
GAUGE_CONFIG_DBS = """
influx:
type: 'influx'
influx_db: 'faucet'
influx_host: '127.0.0.2'
influx_port: %(gauge_influx_port)d
influx_user: 'faucet'
influx_pwd: ''
influx_timeout: 2
"""
def _start_gauge_check(self):
return None
def test_untagged(self):
self.gauge_controller.cmd(
'route add 127.0.0.2 gw 127.0.0.1 lo')
self.ping_all_when_learned()
self._wait_error_shipping()
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetSingleUntaggedInfluxTooSlowTest(FaucetUntaggedInfluxTest):
def setUp(self): # pylint: disable=invalid-name
self.handler = SlowInfluxPostHandler
super().setUp()
self.setup_influx()
def test_untagged(self):
self.ping_all_when_learned()
self._wait_influx_log()
self.assertTrue(os.path.exists(self.influx_log))
self._wait_error_shipping()
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetNailedForwardingTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "0e:00:00:00:02:02"
actions:
output:
port: %(port_2)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.2"
actions:
output:
port: %(port_2)d
- rule:
actions:
allow: 0
2:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
port: %(port_1)d
- rule:
actions:
allow: 0
3:
- rule:
actions:
allow: 0
4:
- rule:
actions:
allow: 0
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 2
%(port_3)d:
native_vlan: 100
acl_in: 3
%(port_4)d:
native_vlan: 100
acl_in: 4
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
first_host.setMAC('0e:00:00:00:01:01')
second_host.setMAC('0e:00:00:00:02:02')
self.one_ipv4_ping(
first_host, second_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
second_host, first_host.IP(), require_host_learned=False)
class FaucetNailedFailoverForwardingTest(FaucetNailedForwardingTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "0e:00:00:00:02:02"
actions:
output:
failover:
group_id: 1001
ports: [%(port_2)d, %(port_3)d]
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.2"
actions:
output:
failover:
group_id: 1002
ports: [%(port_2)d, %(port_3)d]
- rule:
actions:
allow: 0
2:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
port: %(port_1)d
- rule:
actions:
allow: 0
3:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
port: %(port_1)d
- rule:
actions:
allow: 0
4:
- rule:
actions:
allow: 0
"""
def test_untagged(self):
first_host, second_host, third_host = self.hosts_name_ordered()[0:3]
first_host.setMAC('0e:00:00:00:01:01')
second_host.setMAC('0e:00:00:00:02:02')
third_host.setMAC('0e:00:00:00:02:02')
third_host.setIP(second_host.IP())
self.one_ipv4_ping(
first_host, second_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
second_host, first_host.IP(), require_host_learned=False)
self.set_port_down(self.port_map['port_2'])
self.one_ipv4_ping(
first_host, third_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
third_host, first_host.IP(), require_host_learned=False)
class FaucetUntaggedLLDPBlockedTest(FaucetUntaggedTest):
def test_untagged(self):
self.ping_all_when_learned()
self.verify_lldp_blocked()
        # Verify the reserved bridge multicast flood block rule (covers LLDP and 802.1X addresses) was hit.
self.wait_nonzero_packet_count_flow(
{'dl_dst': '01:80:c2:00:00:00/ff:ff:ff:ff:ff:f0'},
table_id=self._FLOOD_TABLE)
class FaucetUntaggedCDPTest(FaucetUntaggedTest):
def test_untagged(self):
self.ping_all_when_learned()
self.verify_cdp_blocked()
class FaucetTaggedAndUntaggedSameVlanTest(FaucetTest):
"""Test mixture of tagged and untagged hosts on the same VLAN."""
N_TAGGED = 1
N_UNTAGGED = 3
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "mixed"
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetTaggedAndUntaggedSameVlanTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=1, n_untagged=3, links_per_host=self.LINKS_PER_HOST,
hw_dpid=self.hw_dpid)
self.start_net()
def test_untagged(self):
"""Test connectivity including after port flapping."""
self.ping_all_when_learned()
self.flap_all_switch_ports()
self.ping_all_when_learned()
self.verify_broadcast()
self.verify_no_bcast_to_self()
class FaucetTaggedAndUntaggedSameVlanEgressTest(FaucetTaggedAndUntaggedSameVlanTest):
REQUIRES_METADATA = True
CONFIG = """
egress_pipeline: True
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetTaggedAndUntaggedSameVlanGroupTest(FaucetTaggedAndUntaggedSameVlanTest):
CONFIG = """
group_table: True
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetUntaggedMaxHostsTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
max_hosts: 2
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_untagged(self):
self.pingAll()
learned_hosts = [
host for host in self.hosts_name_ordered() if self.host_learned(host)]
self.assertEqual(2, len(learned_hosts))
self.assertEqual(2, self.scrape_prometheus_var(
'vlan_hosts_learned', {'vlan': '100'}))
self.assertGreater(
self.scrape_prometheus_var(
'vlan_learn_bans', {'vlan': '100'}), 0)
class FaucetMaxHostsPortTest(FaucetUntaggedTest):
MAX_HOSTS = 3
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
max_hosts: 3
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.ping_all_when_learned()
for i in range(10, 10+(self.MAX_HOSTS*2)):
mac_intf = 'mac%u' % i
mac_ipv4 = '10.0.0.%u' % i
self.add_macvlan(second_host, mac_intf, ipa=mac_ipv4)
ping_cmd = mininet_test_util.timeout_cmd(
'fping %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (
self.FPING_ARGS_SHORT, mac_intf, first_host.IP()),
2)
second_host.cmd(ping_cmd)
flows = self.get_matching_flows_on_dpid(
self.dpid,
{'dl_vlan': '100', 'in_port': int(self.port_map['port_2'])},
table_id=self._ETH_SRC_TABLE)
self.assertEqual(self.MAX_HOSTS, len(flows))
port_labels = self.port_labels(self.port_map['port_2'])
self.assertGreater(
self.scrape_prometheus_var(
'port_learn_bans', port_labels), 0)
learned_macs = [
mac for _, mac in self.scrape_prometheus_var(
'learned_macs', dict(port_labels, vlan=100),
multiple=True) if mac]
self.assertEqual(self.MAX_HOSTS, len(learned_macs))
class FaucetSingleHostsTimeoutPrometheusTest(FaucetUntaggedTest):
"""Test that hosts learned and reported in Prometheus, time out."""
TIMEOUT = 15
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
timeout: 25
arp_neighbor_timeout: 12
nd_neighbor_timeout: 12
ignore_learn_ins: 0
learn_jitter: 0
cache_update_guard_time: 1
""" + CONFIG_BOILER_UNTAGGED
def hosts_learned(self, hosts):
"""Check that hosts are learned by FAUCET on the expected ports."""
macs_learned = []
for mac, port in hosts.items():
if self.prom_mac_learned(mac, port=port):
self.mac_learned(mac, in_port=port)
macs_learned.append(mac)
return macs_learned
def verify_hosts_learned(self, first_host, second_host, mac_ips, hosts):
mac_ipv4s = [mac_ipv4 for mac_ipv4, _ in mac_ips]
fping_cmd = mininet_test_util.timeout_cmd(
'fping %s -c%u %s' % (
self.FPING_ARGS_SHORT, int(self.TIMEOUT / 3), ' '.join(mac_ipv4s)),
self.TIMEOUT / 2)
for _ in range(3):
fping_out = first_host.cmd(fping_cmd)
self.assertTrue(fping_out, msg='fping did not complete: %s' % fping_cmd)
macs_learned = self.hosts_learned(hosts)
if len(macs_learned) == len(hosts):
return
time.sleep(1)
first_host_diag = first_host.cmd('ifconfig -a ; arp -an')
second_host_diag = second_host.cmd('ifconfig -a ; arp -an')
        self.fail('%s cannot be learned (learned %s)\nfping output: %s\nfirst host %s\nsecond host %s\n' % (
            mac_ips, macs_learned, fping_out, first_host_diag, second_host_diag))
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
all_learned_mac_ports = {}
# learn batches of hosts, then down them
for base in (10, 20, 30):
def add_macvlans(base, count):
mac_intfs = []
mac_ips = []
learned_mac_ports = {}
for i in range(base, base + count):
mac_intf = 'mac%u' % i
mac_intfs.append(mac_intf)
mac_ipv4 = '10.0.0.%u' % i
self.add_macvlan(second_host, mac_intf, ipa=mac_ipv4)
macvlan_mac = self.get_mac_of_intf(mac_intf, second_host)
learned_mac_ports[macvlan_mac] = self.port_map['port_2']
mac_ips.append((mac_ipv4, macvlan_mac))
return (mac_intfs, mac_ips, learned_mac_ports)
def down_macvlans(macvlans):
for macvlan in macvlans:
second_host.cmd('ip link set dev %s down' % macvlan)
def learn_then_down_hosts(base, count):
mac_intfs, mac_ips, learned_mac_ports = add_macvlans(base, count)
self.verify_hosts_learned(first_host, second_host, mac_ips, learned_mac_ports)
down_macvlans(mac_intfs)
return learned_mac_ports
learned_mac_ports = learn_then_down_hosts(base, 5)
all_learned_mac_ports.update(learned_mac_ports)
# make sure at least one host still learned
learned_macs = self.hosts_learned(all_learned_mac_ports)
self.assertTrue(learned_macs)
before_expiry_learned_macs = learned_macs
# make sure they all eventually expire
for _ in range(self.TIMEOUT * 3):
learned_macs = self.hosts_learned(all_learned_mac_ports)
self.verify_learn_counters(
100, list(range(1, len(self.hosts_name_ordered()) + 1)))
if not learned_macs:
break
time.sleep(1)
self.assertFalse(learned_macs, msg='MACs did not expire: %s' % learned_macs)
self.assertTrue(before_expiry_learned_macs)
for mac in before_expiry_learned_macs:
self.wait_until_no_matching_flow({'eth_dst': mac}, table_id=self._ETH_DST_TABLE)
class FaucetSingleHostsNoIdleTimeoutPrometheusTest(FaucetSingleHostsTimeoutPrometheusTest):
"""Test broken reset idle timer on flow refresh workaround."""
CONFIG = """
timeout: 15
arp_neighbor_timeout: 4
nd_neighbor_timeout: 4
ignore_learn_ins: 0
learn_jitter: 0
cache_update_guard_time: 1
idle_dst: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetSingleL3LearnMACsOnPortTest(FaucetUntaggedTest):
    # TODO: currently set to accommodate the least capable hardware
def _max_hosts(): # pylint: disable=no-method-argument,no-self-use
return 512
MAX_HOSTS = _max_hosts()
TEST_IPV4_NET = '10.0.0.0'
TEST_IPV4_PREFIX = 16 # must hold more than MAX_HOSTS + 4
LEARN_IPV4 = '10.0.254.254'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
max_hosts: %u
faucet_vips: ["10.0.254.254/16"]
""" % (_max_hosts() + 4)
CONFIG = ("""
ignore_learn_ins: 0
metrics_rate_limit_sec: 3
table_sizes:
eth_src: %u
eth_dst: %u
ipv4_fib: %u
""" % (_max_hosts() + 64, _max_hosts() + 64, _max_hosts() + 64) +
"""
interfaces:
%(port_1)d:
native_vlan: 100
max_hosts: 4096
%(port_2)d:
native_vlan: 100
max_hosts: 4096
%(port_3)d:
native_vlan: 100
max_hosts: 4096
%(port_4)d:
native_vlan: 100
max_hosts: 4096
""")
def test_untagged(self):
test_net = ipaddress.IPv4Network(
'%s/%s' % (self.TEST_IPV4_NET, self.TEST_IPV4_PREFIX))
learn_ip = ipaddress.IPv4Address(self.LEARN_IPV4)
self.verify_learning(test_net, learn_ip, 64, self.MAX_HOSTS)
class FaucetSingleL2LearnMACsOnPortTest(FaucetUntaggedTest):
    # TODO: currently set to accommodate the least capable hardware
def _max_hosts(): # pylint: disable=no-method-argument,no-self-use
return 1024
MAX_HOSTS = _max_hosts()
TEST_IPV4_NET = '10.0.0.0'
TEST_IPV4_PREFIX = 16 # must hold more than MAX_HOSTS + 4
LEARN_IPV4 = '10.0.0.1'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
max_hosts: %u
""" % (_max_hosts() + 4)
CONFIG = ("""
ignore_learn_ins: 0
metrics_rate_limit_sec: 3
table_sizes:
eth_src: %u
eth_dst: %u
""" % (_max_hosts() + 64, _max_hosts() + 64) +
"""
interfaces:
%(port_1)d:
native_vlan: 100
max_hosts: 4096
%(port_2)d:
native_vlan: 100
max_hosts: 4096
%(port_3)d:
native_vlan: 100
max_hosts: 4096
%(port_4)d:
native_vlan: 100
max_hosts: 4096
""")
def test_untagged(self):
test_net = ipaddress.IPv4Network(
'%s/%s' % (self.TEST_IPV4_NET, self.TEST_IPV4_PREFIX))
learn_ip = ipaddress.IPv4Address(self.LEARN_IPV4)
self.verify_learning(test_net, learn_ip, 64, self.MAX_HOSTS)
class FaucetUntaggedHUPTest(FaucetUntaggedTest):
"""Test handling HUP signal without config change."""
def _configure_count_with_retry(self, expected_count):
for _ in range(3):
configure_count = self.get_configure_count()
if configure_count == expected_count:
return
time.sleep(1)
self.fail('configure count %u != expected %u' % (
configure_count, expected_count))
def test_untagged(self):
"""Test that FAUCET receives HUP signal and keeps switching."""
init_config_count = self.get_configure_count()
reload_type_vars = (
'faucet_config_reload_cold',
'faucet_config_reload_warm')
reload_vals = {}
for var in reload_type_vars:
reload_vals[var] = self.scrape_prometheus_var(
var, dpid=True, default=None)
for i in range(init_config_count, init_config_count+3):
self._configure_count_with_retry(i)
with open(self.faucet_config_path, 'a') as config_file:
config_file.write('\n')
self.verify_faucet_reconf(change_expected=False)
self._configure_count_with_retry(i+1)
self.assertEqual(
self.scrape_prometheus_var(
'of_dp_disconnections_total', dpid=True, default=None),
0)
self.assertEqual(
self.scrape_prometheus_var(
'of_dp_connections_total', dpid=True, default=None),
1)
self.wait_until_controller_flow()
self.ping_all_when_learned()
for var in reload_type_vars:
self.assertEqual(
reload_vals[var],
self.scrape_prometheus_var(var, dpid=True, default=None))
class FaucetIPv4TupleTest(FaucetTest):
MAX_RULES = 1024
ETH_TYPE = IPV4_ETH
NET_BASE = ipaddress.IPv4Network('10.0.0.0/16')
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
table_sizes:
port_acl: 1100
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
"""
START_ACL_CONFIG = """
acls:
1:
exact_match: True
rules:
- rule:
actions: {allow: 1}
eth_type: 2048
ip_proto: 6
ipv4_dst: 127.0.0.1
ipv4_src: 127.0.0.1
tcp_dst: 65535
tcp_src: 65535
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetIPv4TupleTest, self).setUp()
self.acl_config_file = os.path.join(self.tmpdir, 'acl.txt')
self.CONFIG = '\n'.join(
(self.CONFIG, 'include:\n - %s' % self.acl_config_file))
        with open(self.acl_config_file, 'w') as config_file:
            config_file.write(self.START_ACL_CONFIG)
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
def _push_tuples(self, eth_type, host_ips):
max_rules = len(host_ips)
rules = 1
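        # Double the number of exact-match tuples each iteration, reloading the ACL include
        # file and waiting for the last rule to be installed.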
while rules <= max_rules:
rules_yaml = []
for rule in range(rules):
host_ip = host_ips[rule]
port = (rule + 1) % 2**16
ip_match = str(host_ip)
rule_yaml = {
'eth_type': eth_type,
'ip_proto': 6,
'tcp_src': port,
'tcp_dst': port,
'ipv%u_src' % host_ip.version: ip_match,
'ipv%u_dst' % host_ip.version: ip_match,
'actions': {'allow': 1},
}
rules_yaml.append({'rule': rule_yaml})
yaml_acl_conf = {'acls': {1: {'exact_match': True, 'rules': rules_yaml}}}
tuple_txt = '%u IPv%u tuples\n' % (len(rules_yaml), host_ip.version)
error('pushing %s' % tuple_txt)
self.reload_conf(
yaml_acl_conf, self.acl_config_file, # pytype: disable=attribute-error
restart=True, cold_start=False)
error('pushed %s' % tuple_txt)
self.wait_until_matching_flow(
{'tp_src': port, 'ip_proto': 6, 'dl_type': eth_type}, table_id=0)
rules *= 2
def test_tuples(self):
host_ips = [host_ip for host_ip in itertools.islice(
self.NET_BASE.hosts(), self.MAX_RULES)]
self._push_tuples(self.ETH_TYPE, host_ips)
class FaucetIPv6TupleTest(FaucetIPv4TupleTest):
MAX_RULES = 1024
ETH_TYPE = IPV6_ETH
NET_BASE = ipaddress.IPv6Network('fc00::00/64')
START_ACL_CONFIG = """
acls:
1:
exact_match: True
rules:
- rule:
actions: {allow: 1}
eth_type: 34525
ip_proto: 6
ipv6_dst: ::1
ipv6_src: ::1
tcp_dst: 65535
tcp_src: 65535
"""
class FaucetConfigReloadTestBase(FaucetTest):
"""Test handling HUP signal with config change."""
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
200:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: allow
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
tagged_vlans: [200]
"""
ACL = """
acls:
1:
- rule:
description: "rule 1"
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
cookie: COOKIE
actions:
allow: 1
2:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 1
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 0
- rule:
cookie: COOKIE
actions:
allow: 1
3:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5003
actions:
allow: 0
4:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
deny:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 65535
actions:
allow: 0
- rule:
cookie: COOKIE
actions:
allow: 0
allow:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 65535
actions:
allow: 1
- rule:
cookie: COOKIE
actions:
allow: 1
"""
ACL_COOKIE = None
def setUp(self): # pylint: disable=invalid-name
super(FaucetConfigReloadTestBase, self).setUp()
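        # Use a random cookie so flows installed from this ACL can be identified unambiguously.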
self.ACL_COOKIE = random.randint(1, 2**16-1)
self.ACL = self.ACL.replace('COOKIE', str(self.ACL_COOKIE))
self.acl_config_file = '%s/acl.yaml' % self.tmpdir
with open(self.acl_config_file, 'w') as config_file:
config_file.write(self.ACL)
self.CONFIG = '\n'.join(
(self.CONFIG, 'include:\n - %s' % self.acl_config_file))
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
class FaucetDelPortTest(FaucetConfigReloadTestBase):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
200:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: allow
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 200
"""
def test_port_down_flow_gone(self):
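        # Deleting port 4 from the config should remove the eth_dst flow learned for its host.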
last_host = self.hosts_name_ordered()[-1]
self.require_host_learned(last_host)
second_host_dst_match = {'eth_dst': last_host.MAC()}
self.wait_until_matching_flow(
second_host_dst_match, table_id=self._ETH_DST_TABLE)
self.change_port_config(
self.port_map['port_4'], None, None,
restart=True, cold_start=True)
self.wait_until_no_matching_flow(
second_host_dst_match, table_id=self._ETH_DST_TABLE)
class FaucetConfigReloadTest(FaucetConfigReloadTestBase):
def test_add_unknown_dp(self):
conf = self._get_faucet_conf()
conf['dps']['unknown'] = {
'dp_id': int(self.rand_dpid()),
'hardware': 'Open vSwitch',
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
def test_tabs_are_bad(self):
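        # A config containing tabs must raise the config-load-error gauge and leave
        # the previous good config in force.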
self.ping_all_when_learned()
self.assertEqual(0, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
orig_conf = self._get_faucet_conf()
self.force_faucet_reload(
'\t'.join(('tabs', 'are', 'bad')))
self.assertEqual(1, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
self.ping_all_when_learned()
self.reload_conf(
orig_conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
self.assertEqual(0, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
def test_port_change_vlan(self):
first_host, second_host = self.hosts_name_ordered()[:2]
third_host, fourth_host = self.hosts_name_ordered()[2:]
self.ping_all_when_learned()
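        # Move ports 1 and 2 to native VLAN 200; only the second change triggers a cold start.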
self.change_port_config(
self.port_map['port_1'], 'native_vlan', 200,
restart=False, cold_start=False)
self.change_port_config(
self.port_map['port_2'], 'native_vlan', 200,
restart=True, cold_start=True)
for port_name in ('port_1', 'port_2'):
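            # 4296 == 0x1000 (OFPVID_PRESENT) | 200, the OpenFlow encoding of the new native VLAN.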
self.wait_until_matching_flow(
{'in_port': int(self.port_map[port_name])},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:4296}'])
self.one_ipv4_ping(first_host, second_host.IP(), require_host_learned=False)
# hosts 1 and 2 now in VLAN 200, so they shouldn't see floods for 3 and 4.
self.verify_vlan_flood_limited(
third_host, fourth_host, first_host)
def test_port_change_acl(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
orig_conf = self._get_faucet_conf()
self.change_port_config(
self.port_map['port_1'], 'acl_in', 1,
cold_start=False)
self.wait_until_matching_flow(
{'in_port': int(self.port_map['port_1']),
'eth_type': IPV4_ETH, 'tcp_dst': 5001, 'ip_proto': 6},
table_id=self._PORT_ACL_TABLE, cookie=self.ACL_COOKIE)
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
self.reload_conf(
orig_conf, self.faucet_config_path,
restart=True, cold_start=False, host_cache=100)
self.verify_tp_dst_notblocked(
5001, first_host, second_host, table_id=None)
self.verify_tp_dst_notblocked(
5002, first_host, second_host, table_id=None)
def test_port_change_perm_learn(self):
first_host, second_host, third_host = self.hosts_name_ordered()[0:3]
self.change_port_config(
self.port_map['port_1'], 'permanent_learn', True,
restart=True, cold_start=False)
self.ping_all_when_learned(hard_timeout=0)
original_third_host_mac = third_host.MAC()
third_host.setMAC(first_host.MAC())
self.assertEqual(100.0, self.ping((second_host, third_host)))
self.retry_net_ping(hosts=(first_host, second_host))
third_host.setMAC(original_third_host_mac)
self.ping_all_when_learned(hard_timeout=0)
self.change_port_config(
self.port_map['port_1'], 'acl_in', 1,
restart=True, cold_start=False)
self.wait_until_matching_flow(
{'in_port': int(self.port_map['port_1']),
'eth_type': IPV4_ETH, 'tcp_dst': 5001, 'ip_proto': 6},
table_id=self._PORT_ACL_TABLE)
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetDeleteConfigReloadTest(FaucetConfigReloadTestBase):
def test_delete_interface(self):
# With all ports changed, we should cold start.
conf = self._get_faucet_conf()
del conf['dps'][self.DP_NAME]['interfaces']
conf['dps'][self.DP_NAME]['interfaces'] = {
int(self.port_map['port_1']): {
'native_vlan': '100',
'tagged_vlans': ['200'],
}
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=True, change_expected=True)
class FaucetRouterConfigReloadTest(FaucetConfigReloadTestBase):
def test_router_config_reload(self):
conf = self._get_faucet_conf()
conf['routers'] = {
'router-1': {
'vlans': ['100', '200'],
}
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=True, change_expected=True)
class FaucetConfigReloadAclTest(FaucetConfigReloadTestBase):
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acls_in: [allow]
%(port_2)d:
native_vlan: 100
acl_in: allow
%(port_3)d:
native_vlan: 100
acl_in: deny
%(port_4)d:
native_vlan: 100
acl_in: deny
"""
def _verify_hosts_learned(self, hosts):
self.pingAll()
for host in hosts:
self.require_host_learned(host)
self.assertEqual(len(hosts), self.scrape_prometheus_var(
'vlan_hosts_learned', {'vlan': '100'}))
def test_port_acls(self):
hup = not self.STAT_RELOAD
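        # With stat-based reload (STAT_RELOAD set), config changes are picked up
        # without sending HUP; otherwise reload via HUP.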
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
self._verify_hosts_learned((first_host, second_host))
self.change_port_config(
self.port_map['port_3'], 'acl_in', 'allow',
restart=True, cold_start=False, hup=hup)
self.change_port_config(
self.port_map['port_1'], 'acls_in', [3, 4, 'allow'],
restart=True, cold_start=False, hup=hup)
self.coldstart_conf(hup=hup)
self._verify_hosts_learned((first_host, second_host, third_host))
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
self.verify_tp_dst_blocked(5003, first_host, second_host)
class FaucetConfigStatReloadAclTest(FaucetConfigReloadAclTest):
# Use the stat-based reload method.
STAT_RELOAD = '1'
class FaucetUntaggedBGPDualstackDefaultRouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and import default route from BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24", "fc00::1:254/112"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1", "::1"]
neighbor_addresses: ["127.0.0.1", "::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route 0.0.0.0/0 next-hop 10.0.0.1 local-preference 100;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes received."""
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_alias_ip = ipaddress.ip_interface('10.99.99.99/24')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.host_ipv4_alias(first_host, first_host_alias_ip)
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.add_host_route(
second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
for _ in range(2):
self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
self.one_ipv4_controller_ping(first_host)
self.coldstart_conf()
class FaucetUntaggedBGPIPv4DefaultRouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and import default route from BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1"]
neighbor_addresses: ["127.0.0.1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route 0.0.0.0/0 next-hop 10.0.0.1 local-preference 100;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes received."""
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_alias_ip = ipaddress.ip_interface('10.99.99.99/24')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.host_ipv4_alias(first_host, first_host_alias_ip)
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.add_host_route(
second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
self.one_ipv4_controller_ping(first_host)
self.coldstart_conf()
class FaucetUntaggedBGPIPv4RouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and import from BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
routes:
- route:
ip_dst: 10.99.99.0/24
ip_gw: 10.0.0.1
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1"]
neighbor_addresses: ["127.0.0.1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route 10.0.1.0/24 next-hop 10.0.0.1 local-preference 100;
route 10.0.2.0/24 next-hop 10.0.0.2 local-preference 100;
route 10.0.3.0/24 next-hop 10.0.0.2 local-preference 100;
route 10.0.4.0/24 next-hop 10.0.0.254;
route 10.0.5.0/24 next-hop 10.10.0.1;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes received."""
first_host, second_host = self.hosts_name_ordered()[:2]
# wait until 10.0.0.1 has been resolved
self.wait_for_route_as_flow(
first_host.MAC(), ipaddress.IPv4Network('10.99.99.0/24'))
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.verify_invalid_bgp_route(r'.+10.0.4.0\/24.+cannot be us$')
self.verify_invalid_bgp_route(r'.+10.0.5.0\/24.+because nexthop not in VLAN.+')
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.0.3.0/24'))
self.verify_ipv4_routing_mesh()
self.flap_all_switch_ports()
self.verify_ipv4_routing_mesh()
for host in first_host, second_host:
self.one_ipv4_controller_ping(host)
self.verify_traveling_dhcp_mac()
class FaucetUntaggedIPv4RouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and export to BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1"]
neighbor_addresses: ["127.0.0.1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(mininet_test_util.LOCALHOST)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes sent."""
self.verify_ipv4_routing_mesh()
self.flap_all_switch_ports()
self.verify_ipv4_routing_mesh()
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
# exabgp should have received our BGP updates
updates = self.exabgp_updates(self.exabgp_log)
for route_string in (
'10.0.0.0/24 next-hop 10.0.0.254',
'10.0.1.0/24 next-hop 10.0.0.1',
'10.0.2.0/24 next-hop 10.0.0.2',
                '10.0.3.0/24 next-hop 10.0.0.2'):
self.assertTrue(re.search(route_string, updates), msg=updates)
# test nexthop expired when port goes down
first_host = self.hosts_name_ordered()[0]
match, table = self.match_table(ipaddress.IPv4Network('10.0.0.1/32'))
ofmsg = None
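        # Ping the controller until the /32 host route for the nexthop appears as a flow.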
for _ in range(5):
self.one_ipv4_controller_ping(first_host)
ofmsg = self.get_matching_flow(match, table_id=table)
if ofmsg:
break
time.sleep(1)
self.assertTrue(ofmsg, msg=match)
self.set_port_down(self.port_map['port_1'])
for _ in range(5):
if not self.get_matching_flow(match, table_id=table):
return
time.sleep(1)
self.fail('host route %s still present' % match)
class FaucetUntaggedRestBcastIPv4RouteTest(FaucetUntaggedIPv4RouteTest):
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_2)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_3)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_4)d:
native_vlan: 100
restricted_bcast_arpnd: true
"""
class FaucetUntaggedVLanUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: True
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_untagged(self):
self.ping_all_when_learned()
self.assertTrue(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedNoVLanUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_untagged(self):
self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedPortUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
unicast_flood: True
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
        # VLAN-level config disabling flooding takes precedence;
        # port-only flooding cannot be enabled.
self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedNoPortUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: True
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
unicast_flood: False
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedHostMoveTest(FaucetUntaggedTest):
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.retry_net_ping(hosts=(first_host, second_host))
self.swap_host_macs(first_host, second_host)
self.ping((first_host, second_host))
for host, in_port in (
(first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])):
self.require_host_learned(host, in_port=in_port)
self.retry_net_ping(hosts=(first_host, second_host))
class FaucetUntaggedHostPermanentLearnTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
permanent_learn: True
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
self.ping_all_when_learned(hard_timeout=0)
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
self.assertTrue(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_1']))
# 3rd host impersonates 1st but 1st host still OK
original_third_host_mac = third_host.MAC()
third_host.setMAC(first_host.MAC())
self.assertEqual(100.0, self.ping((second_host, third_host)))
self.assertTrue(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_1']))
self.assertFalse(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_3']))
self.retry_net_ping(hosts=(first_host, second_host))
# 3rd host stops impersonating, now everything fine again.
third_host.setMAC(original_third_host_mac)
self.ping_all_when_learned(hard_timeout=0)
class FaucetCoprocessorTest(FaucetUntaggedTest):
N_UNTAGGED = 3
N_TAGGED = 1
CONFIG = """
interfaces:
%(port_1)d:
coprocessor: {strategy: vlan_vid}
mirror: %(port_4)d
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# Inject packet into pipeline using coprocessor.
coprocessor_host, first_host, second_host, _ = self.hosts_name_ordered()
self.one_ipv4_ping(first_host, second_host.IP())
tcpdump_filter = ' and '.join((
'ether dst %s' % first_host.MAC(),
'ether src %s' % coprocessor_host.MAC(),
'icmp'))
cmds = [
lambda: coprocessor_host.cmd(
'arp -s %s %s' % (first_host.IP(), first_host.MAC())),
lambda: coprocessor_host.cmd(
'fping %s -c3 %s' % (self.FPING_ARGS_SHORT, first_host.IP())),
]
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, cmds, timeout=5, vflags='-vv', packets=1)
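        # Expect the capture to be non-empty: the ICMP injected via the coprocessor
        # port should reach first_host.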
self.assertFalse(self.tcpdump_rx_packets(tcpdump_txt, packets=0))
class FaucetUntaggedLoopTest(FaucetTest):
NUM_DPS = 1
N_TAGGED = 0
N_UNTAGGED = 2
LINKS_PER_HOST = 2
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
loop_protect: True
%(port_4)d:
native_vlan: 100
loop_protect: True
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetUntaggedLoopTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
def total_port_bans(self):
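        # Sum learning-ban counts across all host-facing ports.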
total_bans = 0
for i in range(self.LINKS_PER_HOST * self.N_UNTAGGED):
port_labels = self.port_labels(self.port_map['port_%u' % (i + 1)])
total_bans += self.scrape_prometheus_var(
'port_learn_bans', port_labels, dpid=True, default=0)
return total_bans
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()
# Normal learning works
self.one_ipv4_ping(first_host, second_host.IP())
start_bans = self.total_port_bans()
# Create a loop between interfaces on second host - a veth pair,
# with two bridges, each connecting one leg of the pair to a host
# interface.
self.quiet_commands(second_host, (
'ip link add name veth-loop1 type veth peer name veth-loop2',
'ip link set veth-loop1 up',
'ip link set veth-loop2 up',
# TODO: tune for loop mitigation performance.
'tc qdisc add dev veth-loop1 root tbf rate 1000kbps latency 10ms burst 1000',
'tc qdisc add dev veth-loop2 root tbf rate 1000kbps latency 10ms burst 1000',
            # Connect one leg of the veth pair to the host's first interface (eth0).
'brctl addbr br-loop1',
'brctl setfd br-loop1 0',
'ip link set br-loop1 up',
'brctl addif br-loop1 veth-loop1',
'brctl addif br-loop1 %s-eth0' % second_host.name,
# Connect other leg of veth pair.
'brctl addbr br-loop2',
'brctl setfd br-loop2 0',
'ip link set br-loop2 up',
'brctl addif br-loop2 veth-loop2',
'brctl addif br-loop2 %s-eth1' % second_host.name))
# Flood some traffic into the loop
for _ in range(3):
first_host.cmd('fping %s -c3 10.0.0.254' % self.FPING_ARGS_SHORT)
end_bans = self.total_port_bans()
if end_bans > start_bans:
return
time.sleep(1)
self.assertGreater(end_bans, start_bans)
# Break the loop, and learning should work again
self.quiet_commands(second_host, (
'ip link set veth-loop1 down',
'ip link set veth-loop2 down',))
self.one_ipv4_ping(first_host, second_host.IP())
class FaucetUntaggedIPv4LACPTest(FaucetTest):
NUM_DPS = 1
N_TAGGED = 0
N_UNTAGGED = 2
LINKS_PER_HOST = 2
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
lacp_timeout: 3
interfaces:
%(port_1)d:
native_vlan: 100
lacp: 1
%(port_2)d:
native_vlan: 100
lacp: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetUntaggedIPv4LACPTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
bond = 'bond0'
# Linux driver should have this state (0x3f/63)
#
# Actor State: 0x3f, LACP Activity, LACP Timeout, Aggregation, Synchronization, Collecting, Distributing
# .... ...1 = LACP Activity: Active
# .... ..1. = LACP Timeout: Short Timeout
# .... .1.. = Aggregation: Aggregatable
# .... 1... = Synchronization: In Sync
# ...1 .... = Collecting: Enabled
# ..1. .... = Distributing: Enabled
# .0.. .... = Defaulted: No
# 0... .... = Expired: No
# [Actor State Flags: **DCSGSA]
# FAUCET should have this state (0x3e/62)
# Actor State: 0x3e, LACP Timeout, Aggregation, Synchronization, Collecting, Distributing
# .... ...0 = LACP Activity: Passive
# .... ..1. = LACP Timeout: Short Timeout
# .... .1.. = Aggregation: Aggregatable
# .... 1... = Synchronization: In Sync
# ...1 .... = Collecting: Enabled
# ..1. .... = Distributing: Enabled
# .0.. .... = Defaulted: No
# 0... .... = Expired: No
# [Actor State Flags: **DCSGS*]
lag_ports = (1, 2)
synced_state_txt = r"""
Slave Interface: \S+-eth0
MII Status: up
Speed: \d+ Mbps
Duplex: full
Link Failure Count: \d+
Permanent HW addr: \S+
Slave queue ID: 0
Aggregator ID: \d+
Actor Churn State: monitoring
Partner Churn State: monitoring
Actor Churned Count: 0
Partner Churned Count: 0
details actor lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:99
port key: \d+
port priority: 255
port number: \d+
port state: 63
details partner lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:01
oper key: 1
port priority: 255
port number: %d
port state: 62
Slave Interface: \S+-eth1
MII Status: up
Speed: \d+ Mbps
Duplex: full
Link Failure Count: \d+
Permanent HW addr: \S+
Slave queue ID: 0
Aggregator ID: \d+
Actor Churn State: monitoring
Partner Churn State: monitoring
Actor Churned Count: 0
Partner Churned Count: 0
details actor lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:99
port key: \d+
port priority: 255
port number: \d+
port state: 63
details partner lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:01
oper key: 1
port priority: 255
port number: %d
port state: 62
""".strip() % tuple([self.port_map['port_%u' % i] for i in lag_ports])
lacp_timeout = 5
def prom_lacp_up_ports():
lacp_up_ports = 0
for lacp_port in lag_ports:
port_labels = self.port_labels(self.port_map['port_%u' % lacp_port])
lacp_state = self.scrape_prometheus_var('port_lacp_state', port_labels, default=0)
lacp_up_ports += 1 if lacp_state == 3 else 0
return lacp_up_ports
def require_lag_up_ports(expected_up_ports):
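            # Poll until the expected number of LAG member ports report LACP up (port_lacp_state == 3).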
for _ in range(lacp_timeout*10):
if prom_lacp_up_ports() == expected_up_ports:
break
time.sleep(1)
self.assertEqual(prom_lacp_up_ports(), expected_up_ports)
def require_linux_bond_up():
for _retries in range(lacp_timeout*2):
result = first_host.cmd('cat /proc/net/bonding/%s|sed "s/[ \t]*$//g"' % bond)
result = '\n'.join([line.rstrip() for line in result.splitlines()])
with open(os.path.join(self.tmpdir, 'bonding-state.txt'), 'w') as state_file:
state_file.write(result)
if re.search(synced_state_txt, result):
break
time.sleep(1)
self.assertTrue(
re.search(synced_state_txt, result),
msg='LACP did not synchronize: %s\n\nexpected:\n\n%s' % (
result, synced_state_txt))
# Start with ports down.
for port in lag_ports:
self.set_port_down(self.port_map['port_%u' % port])
require_lag_up_ports(0)
orig_ip = first_host.IP()
switch = self.first_switch()
bond_members = [pair[0].name for pair in first_host.connectionsTo(switch)]
# Deconfigure bond members
for bond_member in bond_members:
self.quiet_commands(first_host, (
'ip link set %s down' % bond_member,
'ip address flush dev %s' % bond_member))
# Configure bond interface
self.quiet_commands(first_host, (
('ip link add %s address 0e:00:00:00:00:99 '
'type bond mode 802.3ad lacp_rate fast miimon 100') % bond,
'ip add add %s/24 dev %s' % (orig_ip, bond),
'ip link set %s up' % bond))
# Add bond members
for bond_member in bond_members:
self.quiet_commands(first_host, (
'ip link set dev %s master %s' % (bond_member, bond),))
for _flaps in range(2):
# All ports down.
for port in lag_ports:
self.set_port_down(self.port_map['port_%u' % port])
require_lag_up_ports(0)
# Pick a random port to come up.
up_port = random.choice(lag_ports)
self.set_port_up(self.port_map['port_%u' % up_port])
require_lag_up_ports(1)
# We have connectivity with only one port.
self.one_ipv4_ping(
first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond)
for port in lag_ports:
self.set_port_up(self.port_map['port_%u' % port])
# We have connectivity with two ports.
require_lag_up_ports(2)
require_linux_bond_up()
self.one_ipv4_ping(
first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond)
# We have connectivity if that random port goes down.
self.set_port_down(self.port_map['port_%u' % up_port])
require_lag_up_ports(1)
self.one_ipv4_ping(
first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond)
for port in lag_ports:
self.set_port_up(self.port_map['port_%u' % port])
class FaucetUntaggedIPv4LACPMismatchTest(FaucetUntaggedIPv4LACPTest):
"""Ensure remote LACP system ID mismatch is logged."""
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
orig_ip = first_host.IP()
switch = self.first_switch()
bond_members = [pair[0].name for pair in first_host.connectionsTo(switch)]
for i, bond_member in enumerate(bond_members):
bond = 'bond%u' % i
self.quiet_commands(first_host, (
'ip link set %s down' % bond_member,
'ip address flush dev %s' % bond_member,
('ip link add %s address 0e:00:00:00:00:%2.2x '
'type bond mode 802.3ad lacp_rate fast miimon 100') % (bond, i*2+i),
'ip add add %s/24 dev %s' % (orig_ip, bond),
'ip link set %s up' % bond,
'ip link set dev %s master %s' % (bond_member, bond)))
log_file = os.path.join(self.tmpdir, 'faucet.log')
self.wait_until_matching_lines_from_file(r'.+actor system mismatch.+', log_file)
class FaucetUntaggedIPv4ControlPlaneFuzzTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_ping_fragment_controller(self):
first_host = self.hosts_name_ordered()[0]
first_host.cmd('ping -s 1476 -c 3 %s' % self.FAUCET_VIPV4.ip)
self.one_ipv4_controller_ping(first_host)
def test_fuzz_controller(self):
first_host = self.hosts_name_ordered()[0]
self.one_ipv4_controller_ping(first_host)
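        # Fuzz ICMP echo request/reply and ARP towards the controller VIP,
        # then verify the controller still answers pings.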
packets = 1000
fuzz_template = 'python3 -c \"from scapy.all import * ; scapy.all.send(%s, count=%u)\"'
for fuzz_cmd in (
fuzz_template % ('IP(dst=\'%s\')/fuzz(%s(type=0))' % (self.FAUCET_VIPV4.ip, 'ICMP'), packets),
fuzz_template % ('IP(dst=\'%s\')/fuzz(%s(type=8))' % (self.FAUCET_VIPV4.ip, 'ICMP'), packets),
fuzz_template % ('fuzz(%s(pdst=\'%s\'))' % ('ARP', self.FAUCET_VIPV4.ip), packets)):
fuzz_out = first_host.cmd(mininet_test_util.timeout_cmd(fuzz_cmd, 180))
self.assertTrue(
re.search('Sent %u packets' % packets, fuzz_out), msg='%s: %s' % (
fuzz_cmd, fuzz_out))
self.one_ipv4_controller_ping(first_host)
def test_flap_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
for _ in range(5):
self.one_ipv4_ping(first_host, second_host.IP())
for host in first_host, second_host:
self.one_ipv4_controller_ping(host)
self.flap_all_switch_ports()
class FaucetUntaggedIPv4ControlPlaneTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_fping_controller(self):
first_host = self.hosts_name_ordered()[0]
self.one_ipv4_controller_ping(first_host)
# Try 64 byte icmp packets
self.verify_controller_fping(first_host, self.FAUCET_VIPV4)
# Try 128 byte icmp packets
self.verify_controller_fping(first_host, self.FAUCET_VIPV4, size=128)
class FaucetUntaggedIPv6RATest(FaucetUntaggedTest):
FAUCET_MAC = "0e:00:00:00:00:99"
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fe80::1:254/64", "fc00::1:254/112", "fc00::2:254/112", "10.0.0.254/24"]
faucet_mac: "%s"
""" % FAUCET_MAC
CONFIG = """
advertise_interval: 5
""" + CONFIG_BOILER_UNTAGGED
def test_ndisc6(self):
first_host = self.hosts_name_ordered()[0]
for vip in ('fe80::1:254', 'fc00::1:254', 'fc00::2:254'):
self.assertEqual(
self.FAUCET_MAC.upper(),
first_host.cmd('ndisc6 -q %s %s' % (vip, first_host.defaultIntf())).strip())
def test_rdisc6(self):
first_host = self.hosts_name_ordered()[0]
rdisc6_results = sorted(list(set(first_host.cmd(
'rdisc6 -q %s' % first_host.defaultIntf()).splitlines())))
self.assertEqual(
['fc00::1:0/112', 'fc00::2:0/112'],
rdisc6_results)
def test_ra_advertise(self):
first_host = self.hosts_name_ordered()[0]
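        # Capture one unsolicited RA to the all-nodes address and verify its advertised
        # prefixes and source link-layer address option.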
tcpdump_filter = ' and '.join((
'ether dst 33:33:00:00:00:01',
'ether src %s' % self.FAUCET_MAC,
'icmp6',
'ip6[40] == 134',
'ip6 host fe80::1:254'))
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [], timeout=30, vflags='-vv', packets=1)
for ra_required in (
r'ethertype IPv6 \(0x86dd\), length 142',
r'fe80::1:254 > ff02::1:.+ICMP6, router advertisement',
r'fc00::1:0/112, Flags \[onlink, auto\]',
r'fc00::2:0/112, Flags \[onlink, auto\]',
r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC):
self.assertTrue(
re.search(ra_required, tcpdump_txt),
msg='%s: %s' % (ra_required, tcpdump_txt))
def test_rs_reply(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = ' and '.join((
'ether src %s' % self.FAUCET_MAC,
'ether dst %s' % first_host.MAC(),
'icmp6',
'ip6[40] == 134',
'ip6 host fe80::1:254'))
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [
lambda: first_host.cmd(
'rdisc6 -1 %s' % first_host.defaultIntf())],
timeout=30, vflags='-vv', packets=1)
for ra_required in (
r'fe80::1:254 > fe80::.+ICMP6, router advertisement',
r'fc00::1:0/112, Flags \[onlink, auto\]',
r'fc00::2:0/112, Flags \[onlink, auto\]',
r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC):
self.assertTrue(
re.search(ra_required, tcpdump_txt),
msg='%s: %s (%s)' % (ra_required, tcpdump_txt, tcpdump_filter))
class FaucetUntaggedIPv6ControlPlaneFuzzTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_flap_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
for _ in range(5):
self.one_ipv6_ping(first_host, 'fc00::1:2')
for host in first_host, second_host:
self.one_ipv6_controller_ping(host)
self.flap_all_switch_ports()
def test_fuzz_controller(self):
first_host = self.hosts_name_ordered()[0]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.one_ipv6_controller_ping(first_host)
fuzz_success = False
packets = 1000
count = 0
abort = False
def note(*args):
error('%s:' % self._test_name(), *args + tuple('\n'))
        # Some of these tests have been slowing down and timing out,
        # so this code is intended to allow some debugging and analysis.
for fuzz_class in dir(scapy.all):
if fuzz_class.startswith('ICMPv6'):
fuzz_cmd = ("from scapy.all import * ;"
"scapy.all.send(IPv6(dst='%s')/fuzz(%s()),count=%u)" %
(self.FAUCET_VIPV6.ip, fuzz_class, packets))
out, start, too_long = '', time.time(), 30 # seconds
popen = first_host.popen('python3', '-c', fuzz_cmd)
for _, line in pmonitor({first_host: popen}):
out += line
if time.time() - start > too_long:
note('stopping', fuzz_class, 'after >', too_long, 'seconds')
note('output was:', out)
popen.terminate()
abort = True
break
popen.wait()
if 'Sent %u packets' % packets in out:
count += packets
elapsed = time.time() - start
note('sent', packets, fuzz_class, 'packets in %.2fs' % elapsed)
fuzz_success = True
if abort:
break
note('successfully sent', count, 'packets')
self.assertTrue(fuzz_success)
note('pinging', first_host)
self.one_ipv6_controller_ping(first_host)
note('test_fuzz_controller() complete')
class FaucetUntaggedIPv6ControlPlaneTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_fping_controller(self):
first_host = self.hosts_name_ordered()[0]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.one_ipv6_controller_ping(first_host)
# Try 64 byte icmp6 packets
self.verify_controller_fping(first_host, self.FAUCET_VIPV6)
# Try 128 byte icmp6 packets
self.verify_controller_fping(first_host, self.FAUCET_VIPV6, size=128)
class FaucetTaggedAndUntaggedDiffVlanTest(FaucetTest):
N_TAGGED = 2
N_UNTAGGED = 4
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
101:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
native_vlan: 101
%(port_4)d:
native_vlan: 101
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetTaggedAndUntaggedDiffVlanTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=2, n_untagged=2, links_per_host=self.LINKS_PER_HOST,
hw_dpid=self.hw_dpid)
self.start_net()
def test_separate_untagged_tagged(self):
tagged_host_pair = self.hosts_name_ordered()[:2]
untagged_host_pair = self.hosts_name_ordered()[2:]
self.verify_vlan_flood_limited(
tagged_host_pair[0], tagged_host_pair[1], untagged_host_pair[0])
self.verify_vlan_flood_limited(
untagged_host_pair[0], untagged_host_pair[1], tagged_host_pair[0])
# hosts within VLANs can ping each other
self.retry_net_ping(hosts=tagged_host_pair)
self.retry_net_ping(hosts=untagged_host_pair)
# hosts cannot ping hosts in other VLANs
self.assertEqual(
100, self.ping([tagged_host_pair[0], untagged_host_pair[0]]))
class FaucetUntaggedACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_port5001_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5001, first_host, second_host)
def test_port5002_notblocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetUntaggedEgressACLTest(FaucetUntaggedTest):
REQUIRES_METADATA = True
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acl_out: 1
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_port5001_blocked(self):
egress_acl_table = self.scrape_prometheus_var(
'faucet_config_table_names',
labels={'table_name': 'egress_acl'}
)
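        # The egress ACL table id is discovered by name via the faucet_config_table_names metric.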
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(
5001, first_host, second_host, table_id=egress_acl_table)
self.ping_all_when_learned()
self.verify_tp_dst_blocked(
5001, first_host, second_host, table_id=egress_acl_table)
def test_port5002_notblocked(self):
egress_acl_table = self.scrape_prometheus_var(
'faucet_config_table_names',
labels={'table_name': 'egress_acl'}
)
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(
5002, first_host, second_host, table_id=egress_acl_table)
class FaucetUntaggedDPACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
dp_acls: [1]
""" + CONFIG_BOILER_UNTAGGED
def test_port5001_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5001, first_host, second_host)
def test_port5002_notblocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetUntaggedNoReconfACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
opstatus_reconf: False
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
matches = {
'in_port': int(self.port_map['port_1']),
'tcp_dst': 5001,
'eth_type': IPV4_ETH,
'ip_proto': 6}
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.wait_until_matching_flow(
matches, table_id=self._PORT_ACL_TABLE, actions=[])
self.set_port_down(self.port_map['port_1'])
self.wait_until_matching_flow(
matches, table_id=self._PORT_ACL_TABLE, actions=[])
self.set_port_up(self.port_map['port_1'])
self.ping_all_when_learned()
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.wait_until_matching_flow(
matches, table_id=self._PORT_ACL_TABLE, actions=[])
class FaucetUntaggedACLTcpMaskTest(FaucetUntaggedACLTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
dl_type: 0x800
ip_proto: 6
# Match packets > 1023
tcp_dst: 1024/1024
actions:
allow: 0
- rule:
actions:
allow: 1
"""
def test_port_gt1023_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(1024, first_host, second_host, mask=1024)
self.verify_tp_dst_notblocked(1023, first_host, second_host, table_id=None)
class FaucetUntaggedVLANACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
actions:
allow: 1
vlans:
100:
description: "untagged"
acl_in: 1
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_port5001_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(
5001, first_host, second_host, table_id=self._VLAN_ACL_TABLE)
def test_port5002_notblocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(
5002, first_host, second_host, table_id=self._VLAN_ACL_TABLE)
class FaucetUntaggedOutputOnlyTest(FaucetUntaggedTest):
CONFIG = """
interfaces:
%(port_1)d:
output_only: True
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
self.wait_until_matching_flow(
{'in_port': int(self.port_map['port_1'])},
table_id=self._VLAN_TABLE,
actions=[])
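        # Port 1 is output_only, so its host cannot source traffic (100% loss),
        # while the other hosts communicate normally.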
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
self.assertEqual(100.0, self.ping((first_host, second_host)))
self.assertEqual(0, self.ping((third_host, second_host)))
class FaucetUntaggedACLMirrorTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
allow: 1
mirror: %(port_3)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
def test_eapol_mirrored(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_eapol_mirrored(first_host, second_host, mirror_host)
class FaucetUntaggedACLOutputMirrorTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
allow: 1
output:
ports: [%(port_3)d]
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
class FaucetUntaggedACLMirrorDefaultAllowTest(FaucetUntaggedACLMirrorTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
mirror: %(port_3)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetMultiOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
200:
acls:
multi_out:
- rule:
actions:
output:
ports: [%(port_2)d, %(port_3)d]
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: multi_out
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 200
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, third_host, fourth_host = self.hosts_name_ordered()[0:4]
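        # Traffic from port 1 should be copied to ports 2 and 3 (even across VLANs),
        # but never output to port 4.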
tcpdump_filter = ('icmp')
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
tcpdump_txt = self.tcpdump_helper(
third_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (third_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, third_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % third_host.IP(), tcpdump_txt))
tcpdump_txt = self.tcpdump_helper(
fourth_host, tcpdump_filter, [
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, fourth_host.IP())))])
self.assertFalse(re.search(
'%s: ICMP echo request' % fourth_host.IP(), tcpdump_txt))
class FaucetUntaggedOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
vlan_vid: 123
set_fields:
- eth_dst: "06:06:06:06:06:06"
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
        # We expect to see the rewritten address and VLAN.
tcpdump_filter = ('icmp and ether dst 06:06:06:06:06:06')
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 123', tcpdump_txt))
class FaucetUntaggedMultiVlansOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
set_fields:
- eth_dst: "06:06:06:06:06:06"
vlan_vids: [123, 456]
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
        # We expect to see the rewritten address and VLAN.
tcpdump_filter = 'vlan'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 456.+vlan 123', tcpdump_txt))
class FaucetUntaggedMultiConfVlansOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
set_fields:
- eth_dst: "06:06:06:06:06:06"
vlan_vids: [{vid: 123, eth_type: 0x88a8}, 456]
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
        # We expect to see the rewritten address and VLAN.
tcpdump_filter = 'ether proto 0x88a8'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
packets=1)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt), msg=tcpdump_txt)
self.assertTrue(re.search(
'vlan 456.+ethertype 802.1Q-QinQ, vlan 123', tcpdump_txt), msg=tcpdump_txt)
class FaucetUntaggedMirrorTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
# port 3 will mirror port 1
mirror: %(port_1)d
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.flap_all_switch_ports()
self.verify_ping_mirrored(first_host, second_host, mirror_host)
self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
first_host_ip = ipaddress.ip_address(first_host.IP())
second_host_ip = ipaddress.ip_address(second_host.IP())
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
class FaucetUntaggedMultiMirrorTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
output_only: True
%(port_4)d:
output_only: True
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[:3]
ping_pairs = (
(first_host, second_host),
(second_host, first_host))
self.flap_all_switch_ports()
self.change_port_config(
self.port_map['port_3'], 'mirror',
[self.port_map['port_1'], self.port_map['port_2']],
restart=True, cold_start=False, hup=True)
self.verify_ping_mirrored_multi(
ping_pairs, mirror_host, both_mirrored=True)
class FaucetUntaggedMultiMirrorSepTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
mirror: %(port_1)d
%(port_4)d:
mirror: %(port_1)d
"""
def test_untagged(self):
self.flap_all_switch_ports()
        # Make sure both mirror ports (3 and 4) receive traffic mirrored from port 1.
first_host, second_host = self.hosts_name_ordered()[0:2]
mirror_host = self.hosts_name_ordered()[2]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
mirror_host = self.hosts_name_ordered()[3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
class FaucetTaggedTest(FaucetTest):
N_UNTAGGED = 0
N_TAGGED = 4
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
"""
CONFIG = CONFIG_TAGGED_BOILER
def setUp(self): # pylint: disable=invalid-name
super(FaucetTaggedTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=4, links_per_host=self.LINKS_PER_HOST,
hw_dpid=self.hw_dpid)
self.start_net()
def test_tagged(self):
self.ping_all_when_learned()
class FaucetTaggedMirrorTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
# port 3 will mirror port 1
mirror: %(port_1)d
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.flap_all_switch_ports()
self.verify_ping_mirrored(first_host, second_host, mirror_host)
self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
first_host_ip = ipaddress.ip_address(first_host.IP())
second_host_ip = ipaddress.ip_address(second_host.IP())
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
class FaucetTaggedVLANPCPTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
acls:
1:
- rule:
vlan_vid: 100
vlan_pcp: 1
actions:
output:
set_fields:
- vlan_pcp: 2
allow: 1
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.quiet_commands(
first_host,
['ip link set %s type vlan egress %u:1' % (
first_host.defaultIntf(), i) for i in range(0, 8)])
self.one_ipv4_ping(first_host, second_host.IP())
self.wait_nonzero_packet_count_flow(
{'vlan_vid': 100, 'vlan_pcp': 1}, table_id=self._PORT_ACL_TABLE)
tcpdump_filter = 'ether dst %s' % second_host.MAC()
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'ping -c3 %s' % second_host.IP())], root_intf=True, packets=1)
self.assertTrue(re.search('vlan 100, p 2,', tcpdump_txt))
class FaucetTaggedGlobalIPv4RouteTest(FaucetTaggedTest):
def _vids(): # pylint: disable=no-method-argument,no-self-use
return list(range(100, 148))
def global_vid(): # pylint: disable=no-method-argument,no-self-use
return 2047
IPV = 4
NETPREFIX = 24
ETH_TYPE = IPV4_ETH
NETNS = True
VIDS = _vids()
GLOBAL_VID = global_vid()
STR_VIDS = [str(i) for i in _vids()]
NEW_VIDS = VIDS[1:]
@staticmethod
def netbase(vid, host):
return ipaddress.ip_interface('192.168.%u.%u' % (vid, host))
def fping(self, macvlan_int, ipg):
return 'fping %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (
self.FPING_ARGS_SHORT, macvlan_int, ipg)
def fib_table(self):
return self._IPV4_FIB_TABLE
def macvlan_ping(self, host, ipa, macvlan_int):
return self.one_ipv4_ping(host, ipa, intf=macvlan_int)
def run_ip(self, args):
return 'ip -%u %s' % (self.IPV, args)
CONFIG_GLOBAL = """
routers:
global:
vlans: [%s]
vlans:
%s
""" % (
','.join(STR_VIDS),
'\n'.join(['\n'.join(
(' %u:',
' description: "tagged"',
' faucet_vips: ["192.168.%u.254/24"]')) % (i, i) for i in VIDS]))
CONFIG = """
global_vlan: %u
proactive_learn_v4: True
max_wildcard_table_size: 1024
table_sizes:
vlan: %u
vip: %u
flood: %u
interfaces:
%s:
mirror: %s
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
""" % (global_vid(),
len(STR_VIDS) * 3, # VLAN
len(STR_VIDS) * 2, # VIP
len(STR_VIDS) * 12, # Flood
'%(port_3)d', '%(port_1)d', '%(port_1)d',
','.join(STR_VIDS), '%(port_2)d', ','.join(STR_VIDS))
def configure_mesh(self, first_host, second_host):
hosts = (first_host, second_host)
required_ipds = set()
ipd_to_macvlan = {}
for i, host in enumerate(hosts, start=1):
setup_commands = []
for vid in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vid)
macvlan_int = 'macvlan%u' % vid
ipa = self.netbase(vid, i)
ipg = self.netbase(vid, 254)
ipd = self.netbase(vid, 253)
required_ipds.add(str(ipd.ip))
ipd_to_macvlan[str(ipd.ip)] = (macvlan_int, host)
setup_commands.extend([
self.run_ip('link add link %s name %s type vlan id %u' % (
host.intf_root_name, vlan_int, vid)),
self.run_ip('link set dev %s up' % vlan_int),
self.run_ip('link add %s link %s type macvlan mode vepa' % (macvlan_int, vlan_int)),
self.run_ip('link set dev %s up' % macvlan_int),
self.run_ip('address add %s/%u dev %s' % (ipa.ip, self.NETPREFIX, macvlan_int)),
self.run_ip('route add default via %s table %u' % (ipg.ip, vid)),
self.run_ip('rule add from %s table %u priority 100' % (ipa, vid)),
# stimulate learning attempts for down host.
self.run_ip('neigh add %s lladdr %s dev %s' % (ipd.ip, self.FAUCET_MAC, macvlan_int))])
                # Route to the other host in the same connected subnet via FAUCET,
                # so that routing (not just switching) is exercised.
for j, _ in enumerate(hosts, start=1):
if j != i:
other_ip = self.netbase(vid, j)
setup_commands.append(
self.run_ip('route add %s via %s table %u' % (other_ip, ipg.ip, vid)))
for ipa in (ipg.ip, ipd.ip):
setup_commands.append(self.fping(macvlan_int, ipa))
self.quiet_commands(host, setup_commands)
return required_ipds, ipd_to_macvlan
def verify_drop_rules(self, required_ipds, ipd_to_macvlan):
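        # Every unresolved neighbor should eventually be represented by a drop rule in
        # the FIB table; re-ping to stimulate resolution attempts until all appear.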
for _ in range(10):
if not required_ipds:
break
drop_rules = self.get_matching_flows_on_dpid(
self.dpid, {'dl_type': self.ETH_TYPE, 'dl_vlan': str(self.GLOBAL_VID)},
table_id=self.fib_table(), actions=[])
if drop_rules:
for drop_rule in drop_rules:
match = drop_rule['match']
del match['dl_type']
del match['dl_vlan']
self.assertEqual(1, len(match))
ipd = list(match.values())[0].split('/')[0]
if ipd in required_ipds:
required_ipds.remove(ipd)
for ipd in required_ipds:
macvlan_int, host = ipd_to_macvlan[ipd]
host.cmd(self.fping(macvlan_int, ipd))
time.sleep(1)
self.assertFalse(required_ipds, msg='no drop rules for %s' % required_ipds)
def verify_routing_performance(self, first_host, second_host):
for first_host_ip, second_host_ip in (
(self.netbase(self.NEW_VIDS[0], 1), self.netbase(self.NEW_VIDS[0], 2)),
(self.netbase(self.NEW_VIDS[0], 1), self.netbase(self.NEW_VIDS[-1], 2)),
(self.netbase(self.NEW_VIDS[-1], 1), self.netbase(self.NEW_VIDS[0], 2))):
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip.ip, second_host_ip.ip,
sync_counters_func=lambda: self.scapy_bcast(first_host))
def verify_l3_mesh(self, first_host, second_host):
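        """Verify the two hosts can ping each other via every macvlan/VID."""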
for vid in self.NEW_VIDS:
macvlan_int = 'macvlan%u' % vid
first_host_ip = self.netbase(vid, 1)
second_host_ip = self.netbase(vid, 2)
self.macvlan_ping(first_host, second_host_ip.ip, macvlan_int)
self.macvlan_ping(second_host, first_host_ip.ip, macvlan_int)
def verify_l3_hairpin(self, first_host):
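        """Move the second macvlan into a netns on the same host and verify
        traffic between the two VIDs hairpins through FAUCET."""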
macvlan1_int = 'macvlan%u' % self.NEW_VIDS[0]
macvlan2_int = 'macvlan%u' % self.NEW_VIDS[1]
macvlan2_ip = self.netbase(self.NEW_VIDS[1], 1)
macvlan1_gw = self.netbase(self.NEW_VIDS[0], 254)
macvlan2_gw = self.netbase(self.NEW_VIDS[1], 254)
netns = self.hostns(first_host)
setup_cmds = []
setup_cmds.extend(
[self.run_ip('link set %s netns %s' % (macvlan2_int, netns))])
for exec_cmd in (
(self.run_ip('address add %s/%u dev %s' % (macvlan2_ip.ip, self.NETPREFIX, macvlan2_int)),
self.run_ip('link set %s up' % macvlan2_int),
self.run_ip('route add default via %s' % macvlan2_gw.ip))):
setup_cmds.append('ip netns exec %s %s' % (netns, exec_cmd))
setup_cmds.append(
self.run_ip('route add %s via %s' % (macvlan2_ip, macvlan1_gw.ip)))
self.quiet_commands(first_host, setup_cmds)
self.macvlan_ping(first_host, macvlan2_ip.ip, macvlan1_int)
def test_tagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[:3]
required_ipds, ipd_to_macvlan = self.configure_mesh(first_host, second_host)
self.verify_drop_rules(required_ipds, ipd_to_macvlan)
self.verify_routing_performance(first_host, second_host)
self.verify_l3_mesh(first_host, second_host)
self.verify_l3_hairpin(first_host)
self.verify_ping_mirrored(first_host, second_host, mirror_host)
self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
class FaucetTaggedGlobalIPv6RouteTest(FaucetTaggedGlobalIPv4RouteTest):
IPV = 6
NETPREFIX = 112
ETH_TYPE = IPV6_ETH
def _vids(): # pylint: disable=no-method-argument,no-self-use
return list(range(100, 103))
def global_vid(): # pylint: disable=no-method-argument,no-self-use
return 2047
VIDS = _vids()
GLOBAL_VID = global_vid()
STR_VIDS = [str(i) for i in _vids()]
NEW_VIDS = VIDS[1:]
def netbase(self, vid, host):
return ipaddress.ip_interface('fc00::%u:%u' % (vid, host))
def fib_table(self):
return self._IPV6_FIB_TABLE
def fping(self, macvlan_int, ipg):
return 'fping6 %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (
self.FPING_ARGS_SHORT, macvlan_int, ipg)
def macvlan_ping(self, host, ipa, macvlan_int):
return self.one_ipv6_ping(host, ipa, intf=macvlan_int)
def run_ip(self, args):
return 'ip -%u %s' % (self.IPV, args)
CONFIG_GLOBAL = """
routers:
global:
vlans: [%s]
vlans:
%s
""" % (
','.join(STR_VIDS),
'\n'.join(['\n'.join(
(' %u:',
' description: "tagged"',
' faucet_vips: ["fc00::%u:254/112"]')) % (i, i) for i in VIDS]))
CONFIG = """
global_vlan: %u
proactive_learn_v6: True
max_wildcard_table_size: 512
table_sizes:
vlan: 256
vip: 128
flood: 384
interfaces:
%s:
mirror: %s
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
""" % (global_vid(), '%(port_3)d', '%(port_1)d', '%(port_1)d',
','.join(STR_VIDS), '%(port_2)d', ','.join(STR_VIDS))
class FaucetTaggedScaleTest(FaucetTaggedTest):
def _vids(): # pylint: disable=no-method-argument,no-self-use
return list(range(100, 148))
VIDS = _vids()
STR_VIDS = [str(i) for i in _vids()]
NEW_VIDS = VIDS[1:]
CONFIG_GLOBAL = """
vlans:
""" + '\n'.join(['\n'.join(
(' %u:',
' description: "tagged"')) % i for i in VIDS])
CONFIG = """
interfaces:
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
""" % ('%(port_1)d', ','.join(STR_VIDS),
'%(port_2)d', ','.join(STR_VIDS),
'%(port_3)d', ','.join(STR_VIDS),
'%(port_4)d', ','.join(STR_VIDS))
def test_tagged(self):
self.ping_all_when_learned()
for host in self.hosts_name_ordered():
setup_commands = []
for vid in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vid)
setup_commands.extend([
'ip link add link %s name %s type vlan id %u' % (
host.intf_root_name, vlan_int, vid),
'ip link set dev %s up' % vlan_int])
self.quiet_commands(host, setup_commands)
for host in self.hosts_name_ordered():
rdisc6_commands = []
for vid in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vid)
rdisc6_commands.append(
'rdisc6 -r2 -w1 -q %s 2> /dev/null' % vlan_int)
self.quiet_commands(host, rdisc6_commands)
        for vlan in self.NEW_VIDS:
            for _ in range(3):
                for host in self.hosts_name_ordered():
                    vlan_int = '%s.%u' % (host.intf_root_name, vlan)
                    self.quiet_commands(
                        host,
                        ['rdisc6 -r2 -w1 -q %s 2> /dev/null' % vlan_int])
vlan_hosts_learned = self.scrape_prometheus_var(
'vlan_hosts_learned', {'vlan': str(vlan)})
if vlan_hosts_learned == len(self.hosts_name_ordered()):
break
time.sleep(1)
self.assertGreater(
vlan_hosts_learned, 1,
msg='not all VLAN %u hosts learned (%u)' % (vlan, vlan_hosts_learned))
class FaucetTaggedBroadcastTest(FaucetTaggedTest):
def test_tagged(self):
super(FaucetTaggedBroadcastTest, self).test_tagged()
self.verify_broadcast()
self.verify_no_bcast_to_self()
class FaucetTaggedExtLoopProtectTest(FaucetTaggedTest):
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
loop_protect_external: True
%(port_2)d:
tagged_vlans: [100]
loop_protect_external: True
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
ext_port1, ext_port2, int_port1, int_port2 = self.hosts_name_ordered()
self.verify_broadcast((ext_port1, ext_port2), False)
self.verify_broadcast((int_port1, int_port2), True)
self.verify_unicast((int_port1, int_port2), True)
class FaucetTaggedWithUntaggedTest(FaucetTaggedTest):
N_UNTAGGED = 0
N_TAGGED = 4
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
200:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 200
tagged_vlans: [100]
%(port_2)d:
native_vlan: 200
tagged_vlans: [100]
%(port_3)d:
native_vlan: 200
tagged_vlans: [100]
%(port_4)d:
native_vlan: 200
tagged_vlans: [100]
"""
def test_tagged(self):
self.ping_all_when_learned()
native_ips = [
ipaddress.ip_interface('10.99.99.%u/24' % (i + 1)) for i in range(len(self.hosts_name_ordered()))]
for native_ip, host in zip(native_ips, self.hosts_name_ordered()):
self.host_ipv4_alias(host, native_ip, intf=host.intf_root_name)
for own_native_ip, host in zip(native_ips, self.hosts_name_ordered()):
for native_ip in native_ips:
if native_ip != own_native_ip:
self.one_ipv4_ping(host, native_ip.ip, intf=host.intf_root_name)
class FaucetTaggedSwapVidMirrorTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
101:
description: "tagged"
acls:
1:
- rule:
vlan_vid: 100
actions:
mirror: %(port_3)d
force_port_vlan: 1
output:
swap_vid: 101
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
def test_acl(tcpdump_host, tcpdump_filter):
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
tcpdump_filter, tcpdump_txt))
        # expect to see the swapped VID on the second host
test_acl(second_host, 'vlan 101')
        # expect to see the original VID on the mirror host
test_acl(third_host, 'vlan 100')
class FaucetTaggedSwapVidOutputTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
101:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
actions:
output:
swap_vid: 101
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
        # we expect to see the swapped VLAN VID
tcpdump_filter = 'vlan 101'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 101', tcpdump_txt))
class FaucetTaggedPopVlansOutputTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
dl_dst: "01:02:03:04:05:06"
actions:
output:
set_fields:
- eth_dst: "06:06:06:06:06:06"
pop_vlans: 1
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
tcpdump_filter = 'not vlan and icmp and ether dst 06:06:06:06:06:06'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(
' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
packets=10, root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
class FaucetTaggedIPv4ControlPlaneTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER
def test_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.one_ipv4_ping(first_host, second_host.IP())
for host in first_host, second_host:
self.one_ipv4_controller_ping(host)
class FaucetTaggedIPv6ControlPlaneTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER
def test_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
self.one_ipv6_ping(first_host, 'fc00::1:2')
for host in first_host, second_host:
self.one_ipv6_controller_ping(host)
class FaucetTaggedICMPv6ACLTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
acls:
1:
- rule:
dl_type: %u
vlan_vid: 100
ip_proto: 58
icmpv6_type: 135
ipv6_nd_target: "fc00::1:2"
actions:
output:
port: %s
- rule:
actions:
allow: 1
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
""" % (IPV6_ETH, '%(port_2)d')
CONFIG = """
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_icmpv6_acl_match(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
self.one_ipv6_ping(first_host, 'fc00::1:2')
self.wait_nonzero_packet_count_flow(
{'dl_type': IPV6_ETH, 'ip_proto': 58, 'icmpv6_type': 135,
'ipv6_nd_target': 'fc00::1:2'}, table_id=self._PORT_ACL_TABLE)
class FaucetTaggedIPv4RouteTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
200:
description: "not used"
300:
description: "not used"
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
native_vlan: 200
"""
def test_tagged(self):
self._enable_event_log()
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_routed_ip = ipaddress.ip_interface('10.0.1.1/24')
second_host_routed_ip = ipaddress.ip_interface('10.0.2.1/24')
for _coldstart in range(2):
for _swaps in range(3):
self.verify_ipv4_routing(
first_host, first_host_routed_ip,
second_host, second_host_routed_ip)
self.swap_host_macs(first_host, second_host)
self.coldstart_conf()
        # changing a VLAN/port not involved in routing should be a warm start.
for vid in (300, 200):
self.change_port_config(
self.port_map['port_4'], 'native_vlan', vid,
restart=True, cold_start=False)
self.wait_until_matching_lines_from_file(
r'.+L3_LEARN.+10.0.0.[12].+', self.event_log)
class FaucetTaggedTargetedResolutionIPv4RouteTest(FaucetTaggedIPv4RouteTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
targeted_gw_resolution: True
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
"""
class FaucetTaggedProactiveNeighborIPv4RouteTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
""" + CONFIG_TAGGED_BOILER
def test_tagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_alias_ip = ipaddress.ip_interface('10.0.0.99/24')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.host_ipv4_alias(first_host, first_host_alias_ip)
self.add_host_route(second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
self.assertGreater(
self.scrape_prometheus_var(
'vlan_neighbors', {'ipv': '4', 'vlan': '100'}),
1)
class FaucetTaggedProactiveNeighborIPv6RouteTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:3/64"]
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v6: True
""" + CONFIG_TAGGED_BOILER
def test_tagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_alias_ip = ipaddress.ip_interface('fc00::1:99/64')
faucet_vip_ip = ipaddress.ip_interface('fc00::1:3/126')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.add_host_ipv6_address(first_host, ipaddress.ip_interface('fc00::1:1/64'))
# We use a narrower mask to force second_host to use the /128 route,
# since otherwise it would realize :99 is directly connected via ND and send direct.
self.add_host_ipv6_address(second_host, ipaddress.ip_interface('fc00::1:2/126'))
self.add_host_ipv6_address(first_host, first_host_alias_ip)
self.add_host_route(second_host, first_host_alias_host_ip, faucet_vip_ip.ip)
self.one_ipv6_ping(second_host, first_host_alias_ip.ip)
self.assertGreater(
self.scrape_prometheus_var(
'vlan_neighbors', {'ipv': '6', 'vlan': '100'}),
1)
class FaucetUntaggedIPv4GlobalInterVLANRouteTest(FaucetUntaggedTest):
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24"]
200:
faucet_vips: ["10.200.0.254/24"]
faucet_mac: "%s"
""" % FAUCET_MAC2 + """
routers:
global:
vlans: [100, 200]
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1", "::1"]
neighbor_addresses: ["127.0.0.1", "::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
global_vlan: 300
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 200
%(port_3)d:
native_vlan: 200
%(port_4)d:
native_vlan: 200
"""
exabgp_peer_conf = """
static {
route 10.99.99.0/24 next-hop 10.200.0.1 local-preference 100;
route 10.0.5.0/24 next-hop 127.0.0.1;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host = self.hosts_name_ordered()[:2]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.99.99.0/24'), vlan_vid=300)
self.verify_invalid_bgp_route(r'.+10.0.5.0\/24.+because nexthop not in VLAN.+')
class FaucetUntaggedIPv4InterVLANRouteTest(FaucetUntaggedTest):
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24", "169.254.1.1/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24", "169.254.2.1/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host = self.hosts_name_ordered()[:2]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
for vlanb_vid in (300, 200):
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
self.change_vlan_config(
'vlanb', 'vid', vlanb_vid, restart=True, cold_start=True)
class FaucetUntaggedPortSwapIPv4InterVLANRouteTest(FaucetUntaggedTest):
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
vlana:
vid: 100
faucet_vips: ["10.100.0.254/24", "169.254.1.1/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24", "169.254.2.1/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [vlana, vlanb]
""" % FAUCET_MAC2
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: vlana
%(port_2)d:
native_vlan: vlanb
"""
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
# Delete port 2
self.change_port_config(
self.port_map['port_2'], None, None,
restart=False, cold_start=False)
# Add port 3
self.add_port_config(
self.port_map['port_3'], {'native_vlan': 'vlanb'},
restart=True, cold_start=True)
third_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(third_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(third_host, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(third_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
class FaucetUntaggedExpireIPv4InterVLANRouteTest(FaucetUntaggedTest):
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
max_host_fib_retry_count: 2
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host = self.hosts_name_ordered()[:2]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
second_host.cmd('ifconfig %s down' % second_host.defaultIntf().name)
log_file = os.path.join(self.tmpdir, 'faucet.log')
expired_re = r'.+expiring dead route %s.+' % second_host_ip.ip
self.wait_until_matching_lines_from_file(expired_re, log_file)
second_host.cmd('ifconfig %s up' % second_host.defaultIntf().name)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
class FaucetUntaggedIPv6InterVLANRouteTest(FaucetUntaggedTest):
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["fc00::1:254/112", "fe80::1:254/112"]
vlanb:
vid: 200
faucet_vips: ["fc01::1:254/112", "fe80::2:254/112"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v6: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""
def test_untagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_net = ipaddress.ip_interface('fc00::1:1/64')
second_host_net = ipaddress.ip_interface('fc01::1:1/64')
self.add_host_ipv6_address(first_host, first_host_net)
self.add_host_ipv6_address(second_host, second_host_net)
self.add_host_route(
first_host, second_host_net, self.FAUCET_VIPV6.ip)
self.add_host_route(
second_host, first_host_net, self.FAUCET_VIPV6_2.ip)
self.one_ipv6_ping(first_host, second_host_net.ip)
self.one_ipv6_ping(second_host, first_host_net.ip)
class FaucetUntaggedIPv4PolicyRouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "100"
faucet_vips: ["10.0.0.254/24"]
acl_in: pbr
200:
description: "200"
faucet_vips: ["10.20.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.20.0.2"
300:
description: "300"
faucet_vips: ["10.30.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.30.0.3"
acls:
pbr:
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.2"
actions:
allow: 1
output:
swap_vid: 300
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.0/24"
actions:
allow: 1
output:
swap_vid: 200
- rule:
actions:
allow: 1
routers:
router-100-200:
vlans: [100, 200]
router-100-300:
vlans: [100, 300]
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 200
%(port_3)d:
native_vlan: 300
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# 10.99.0.1 is on b2, and 10.99.0.2 is on b3
        # we want to route 10.99.0.0/24 to b2, but we
        # want to PBR 10.99.0.2/32 to b3.
first_host_ip = ipaddress.ip_interface('10.0.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.0.0.254/24')
second_host_ip = ipaddress.ip_interface('10.20.0.2/24')
second_faucet_vip = ipaddress.ip_interface('10.20.0.254/24')
third_host_ip = ipaddress.ip_interface('10.30.0.3/24')
third_faucet_vip = ipaddress.ip_interface('10.30.0.254/24')
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
remote_ip = ipaddress.ip_interface('10.99.0.1/24')
remote_ip2 = ipaddress.ip_interface('10.99.0.2/24')
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
third_host.setIP(str(third_host_ip.ip), prefixLen=24)
self.host_ipv4_alias(second_host, remote_ip)
self.host_ipv4_alias(third_host, remote_ip2)
self.add_host_route(first_host, remote_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.add_host_route(third_host, first_host_ip, third_faucet_vip.ip)
# ensure all nexthops resolved.
self.one_ipv4_ping(first_host, first_faucet_vip.ip)
self.one_ipv4_ping(second_host, second_faucet_vip.ip)
self.one_ipv4_ping(third_host, third_faucet_vip.ip)
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=200)
self.wait_for_route_as_flow(
third_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=300)
# verify b1 can reach 10.99.0.1 and .2 on b2 and b3 respectively.
self.one_ipv4_ping(first_host, remote_ip.ip)
self.one_ipv4_ping(first_host, remote_ip2.ip)
class FaucetUntaggedMixedIPv4RouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["172.16.0.254/24", "10.0.0.254/24"]
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_untagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_net = ipaddress.ip_interface('10.0.0.1/24')
second_host_net = ipaddress.ip_interface('172.16.0.1/24')
second_host.setIP(str(second_host_net.ip), prefixLen=24)
self.one_ipv4_ping(first_host, self.FAUCET_VIPV4.ip)
self.one_ipv4_ping(second_host, self.FAUCET_VIPV4_2.ip)
self.add_host_route(
first_host, second_host_net, self.FAUCET_VIPV4.ip)
self.add_host_route(
second_host, first_host_net, self.FAUCET_VIPV4_2.ip)
self.one_ipv4_ping(first_host, second_host_net.ip)
self.one_ipv4_ping(second_host, first_host_net.ip)
class FaucetUntaggedMixedIPv6RouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112", "fc01::1:254/112"]
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_untagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_net = ipaddress.ip_interface('fc00::1:1/64')
second_host_net = ipaddress.ip_interface('fc01::1:1/64')
self.add_host_ipv6_address(first_host, first_host_net)
self.one_ipv6_ping(first_host, self.FAUCET_VIPV6.ip)
self.add_host_ipv6_address(second_host, second_host_net)
self.one_ipv6_ping(second_host, self.FAUCET_VIPV6_2.ip)
self.add_host_route(
first_host, second_host_net, self.FAUCET_VIPV6.ip)
self.add_host_route(
second_host, first_host_net, self.FAUCET_VIPV6_2.ip)
self.one_ipv6_ping(first_host, second_host_net.ip)
self.one_ipv6_ping(second_host, first_host_net.ip)
class FaucetUntaggedBGPIPv6DefaultRouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["::1"]
neighbor_addresses: ["::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route ::/0 next-hop fc00::1:1 local-preference 100;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf('::1', self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
first_host_alias_ip = ipaddress.ip_interface('fc00::50:1/112')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.add_host_ipv6_address(first_host, first_host_alias_ip)
self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.add_host_route(
second_host, first_host_alias_host_ip, self.FAUCET_VIPV6.ip)
self.one_ipv6_ping(second_host, first_host_alias_ip.ip)
self.one_ipv6_controller_ping(first_host)
self.coldstart_conf()
class FaucetUntaggedBGPIPv6RouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["::1"]
neighbor_addresses: ["::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route fc00::10:0/112 next-hop fc00::1:1 local-preference 100;
route fc00::20:0/112 next-hop fc00::1:2 local-preference 100;
route fc00::30:0/112 next-hop fc00::1:2 local-preference 100;
route fc00::40:0/112 next-hop fc00::1:254;
route fc00::50:0/112 next-hop fc00::2:2;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf('::1', self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.verify_invalid_bgp_route(r'.+fc00::40:0\/112.+cannot be us$')
self.verify_ipv6_routing_mesh()
self.flap_all_switch_ports()
self.verify_ipv6_routing_mesh()
for host in first_host, second_host:
self.one_ipv6_controller_ping(host)
self.verify_traveling_dhcp_mac()
class FaucetUntaggedSameVlanIPv6RouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::10:1/112", "fc00::20:1/112"]
routes:
- route:
ip_dst: "fc00::10:0/112"
ip_gw: "fc00::10:2"
- route:
ip_dst: "fc00::20:0/112"
ip_gw: "fc00::20:2"
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_ip = ipaddress.ip_interface('fc00::10:2/112')
first_host_ctrl_ip = ipaddress.ip_address('fc00::10:1')
second_host_ip = ipaddress.ip_interface('fc00::20:2/112')
second_host_ctrl_ip = ipaddress.ip_address('fc00::20:1')
self.add_host_ipv6_address(first_host, first_host_ip)
self.add_host_ipv6_address(second_host, second_host_ip)
self.add_host_route(
first_host, second_host_ip, first_host_ctrl_ip)
self.add_host_route(
second_host, first_host_ip, second_host_ctrl_ip)
self.wait_for_route_as_flow(
first_host.MAC(), first_host_ip.network)
self.wait_for_route_as_flow(
second_host.MAC(), second_host_ip.network)
self.one_ipv6_ping(first_host, second_host_ip.ip)
self.one_ipv6_ping(first_host, second_host_ctrl_ip)
self.one_ipv6_ping(second_host, first_host_ip.ip)
self.one_ipv6_ping(second_host, first_host_ctrl_ip)
class FaucetUntaggedIPv6RouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
routes:
- route:
ip_dst: "fc00::10:0/112"
ip_gw: "fc00::1:1"
- route:
ip_dst: "fc00::20:0/112"
ip_gw: "fc00::1:2"
- route:
ip_dst: "fc00::30:0/112"
ip_gw: "fc00::1:2"
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["::1"]
neighbor_addresses: ["::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf('::1')
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
self.verify_ipv6_routing_mesh()
second_host = self.hosts_name_ordered()[1]
self.flap_all_switch_ports()
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv6Network('fc00::30:0/112'))
self.verify_ipv6_routing_mesh()
self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
0)
updates = self.exabgp_updates(self.exabgp_log)
for route_string in (
'fc00::1:0/112 next-hop fc00::1:254',
'fc00::10:0/112 next-hop fc00::1:1',
'fc00::20:0/112 next-hop fc00::1:2',
'fc00::30:0/112 next-hop fc00::1:2'):
self.assertTrue(re.search(route_string, updates), msg=updates)
class FaucetUntaggedRestBcastIPv6RouteTest(FaucetUntaggedIPv6RouteTest):
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_2)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_3)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_4)d:
native_vlan: 100
restricted_bcast_arpnd: true
"""
class FaucetTaggedIPv6RouteTest(FaucetTaggedTest):
"""Test basic IPv6 routing without BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
routes:
- route:
ip_dst: "fc00::10:0/112"
ip_gw: "fc00::1:1"
- route:
ip_dst: "fc00::20:0/112"
ip_gw: "fc00::1:2"
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER
def test_tagged(self):
"""Test IPv6 routing works."""
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_ip = ipaddress.ip_interface('fc00::1:1/112')
second_host_ip = ipaddress.ip_interface('fc00::1:2/112')
first_host_routed_ip = ipaddress.ip_interface('fc00::10:1/112')
second_host_routed_ip = ipaddress.ip_interface('fc00::20:1/112')
for _coldstart in range(2):
for _swaps in range(5):
self.verify_ipv6_routing_pair(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip)
self.swap_host_macs(first_host, second_host)
self.coldstart_conf()
class FaucetStringOfDPTest(FaucetTest):
MAX_HOSTS = 4
NUM_HOSTS = 4
LINKS_PER_HOST = 1
VID = 100
CONFIG = None
GROUP_TABLE = False
dpids = None
topo = None
def non_host_links(self, dpid):
return self.topo.dpid_peer_links(dpid)
@staticmethod
def get_config_header(_config_global, _debug_log, _dpid, _hardware):
"""Don't generate standard config file header."""
return ''
@staticmethod
def acls():
return {}
@staticmethod
def acl_in_dp():
return {}
def build_net(self, stack=False, n_dps=1,
n_tagged=0, tagged_vid=100,
untagged_hosts=None,
include=None, include_optional=None,
switch_to_switch_links=1, hw_dpid=None,
stack_ring=False, lacp=False, use_external=False,
router=None, dp_options=None):
"""Set up Mininet and Faucet for the given topology."""
if include is None:
include = []
if include_optional is None:
include_optional = []
self.dpids = [str(self.rand_dpid()) for _ in range(n_dps)]
self.dpids[0] = self.dpid
self.topo = mininet_test_topo.FaucetStringOfDPSwitchTopo(
self.OVS_TYPE,
self.ports_sock,
dpids=self.dpids,
n_tagged=n_tagged,
tagged_vid=tagged_vid,
untagged_hosts=untagged_hosts,
links_per_host=self.LINKS_PER_HOST,
switch_to_switch_links=switch_to_switch_links,
test_name=self._test_name(),
hw_dpid=hw_dpid, switch_map=self.switch_map,
stack_ring=stack_ring,
port_order=self.port_order
)
self.port_maps = {dpid: self.create_port_map(dpid) for dpid in self.dpids}
self.port_map = self.port_maps[self.dpid]
self.CONFIG = self.get_config(
self.dpids,
hw_dpid,
stack,
self.hardware,
self.debug_log_path,
n_tagged,
tagged_vid,
untagged_hosts,
include,
include_optional,
self.acls(),
self.acl_in_dp(),
lacp,
use_external,
router,
dp_options
)
def get_config(self, dpids=None, hw_dpid=None, stack=False, hardware=None, ofchannel_log=None,
n_tagged=0, tagged_vid=0, untagged_hosts=None,
include=None, include_optional=None, acls=None, acl_in_dp=None,
lacp=False, use_external=False, router=None, dp_options=None):
"""Build a complete Faucet configuration for each datapath, using the given topology."""
if dpids is None:
dpids = []
if include is None:
include = []
if include_optional is None:
include_optional = []
if acls is None:
acls = {}
if acl_in_dp is None:
acl_in_dp = {}
dpid_names = {}
dpname_to_dpkey = {}
def dp_name(i):
return 'faucet-%i' % (i + 1)
def add_vlans(n_tagged, tagged_vid, untagged_hosts, router):
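            """Build the vlans config from the tagged/untagged VIDs in use,
            merging any faucet_mac/faucet_vips from the router config."""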
vlans_config = {}
if untagged_hosts:
for vid in untagged_hosts.keys():
vlans_config[vid] = {
'description': 'untagged',
}
if ((n_tagged and not untagged_hosts) or
(n_tagged and untagged_hosts and tagged_vid not in untagged_hosts)):
vlans_config[tagged_vid] = {
'description': 'tagged',
}
if router:
for vid in router.keys():
if vid in vlans_config:
if 'faucet_mac' in router[vid]:
vlans_config[vid]['faucet_mac'] = router[vid]['faucet_mac']
if 'faucet_vips' in router[vid]:
vlans_config[vid]['faucet_vips'] = router[vid]['faucet_vips']
return vlans_config
def add_router(router):
router_config = {}
if router:
router_config['router-1'] = {
'vlans': list(router.keys()),
}
return router_config
def add_acl_to_port(name, port, interfaces_config):
if name in acl_in_dp and port in acl_in_dp[name]:
interfaces_config[port]['acl_in'] = acl_in_dp[name][port]
def add_dp_to_dp_ports(name, dpid, dp_config, interfaces_config, stack,
n_tagged, tagged_vid, untagged_hosts):
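            """Configure switch-to-switch links as stack ports (when stacking)
            or VLAN trunks, and mark the first DP as the stack root."""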
for link in self.topo.dpid_peer_links(dpid):
port, peer_dpid, peer_port = link.port, link.peer_dpid, link.peer_port
interfaces_config[port] = {}
if stack:
# make this a stacking link.
interfaces_config[port].update(
{
'stack': {
'dp': dpid_names[peer_dpid],
'port': peer_port}
})
else:
# not a stack - make this a trunk.
tagged_vlans = []
if n_tagged:
tagged_vlans.append(tagged_vid)
if untagged_hosts:
for vid in untagged_hosts.keys():
if vid not in tagged_vlans:
tagged_vlans.append(vid)
if tagged_vlans:
interfaces_config[port]['tagged_vlans'] = tagged_vlans
if lacp:
interfaces_config[port].update(
{'lacp': 1, 'lacp_active': True})
add_acl_to_port(name, port, interfaces_config)
# TODO: make per test configurable
dp_config['lacp_timeout'] = 10
# TODO: make the stacking root configurable
first_dp = dpid == self.dpid
if stack and first_dp:
dp_config['stack'] = {
'priority': 1
}
def add_dp(name, dpid, hw_dpid, i, stack,
n_tagged, tagged_vid, untagged_hosts,
use_external, dp_options):
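            """Build the config for one DP: tagged then untagged host ports,
            then switch-to-switch ports, plus any dp_options."""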
dp_config = {
'dp_id': int(dpid),
'hardware': hardware if dpid == hw_dpid else 'Open vSwitch',
'ofchannel_log': ofchannel_log + str(i) if ofchannel_log else None,
'interfaces': {},
'group_table': self.GROUP_TABLE,
}
interfaces_config = {}
index = 1
for n_port in range(n_tagged):
port = self.port_maps[dpid]['port_%d' % index]
interfaces_config[port] = {
'tagged_vlans': [tagged_vid],
'loop_protect_external': (use_external and n_port != n_tagged - 1),
}
add_acl_to_port(name, port, interfaces_config)
index += 1
if untagged_hosts:
n_port = 0
for vid, num_hosts in untagged_hosts.items():
for _ in range(num_hosts):
port = self.port_maps[dpid]['port_%d' % index]
interfaces_config[port] = {
'native_vlan': vid,
'loop_protect_external': (use_external and n_port != num_hosts - 1),
}
add_acl_to_port(name, port, interfaces_config)
index += 1
n_port += 1
add_dp_to_dp_ports(name, dpid, dp_config, interfaces_config, stack,
n_tagged, tagged_vid, untagged_hosts)
for portno, config in list(interfaces_config.items()):
stack = config.get('stack', None)
if stack and 'stack' in interfaces_config[portno]:
peer_portno = stack['port']
interfaces_config[portno]['stack'].update({
'port': 'b%u' % peer_portno})
dp_config['interfaces'] = interfaces_config
if dp_options:
for key, value in dp_options.items():
dp_config[key] = value
return dp_config
# Create config
config = {'version': 2}
if include:
config['include'] = list(include)
if include_optional:
config['include-optional'] = list(include_optional)
config['vlans'] = add_vlans(n_tagged, tagged_vid, untagged_hosts, router)
if router:
config['routers'] = add_router(router)
config['acls'] = acls.copy()
config['dps'] = {}
for i, dpid in enumerate(dpids):
dpid_names[dpid] = name = dp_name(i)
dpname_to_dpkey[name] = dpid
self.set_dpid_names(dpid_names)
for i, dpid in enumerate(dpids):
name = dpid_names[dpid]
config['dps'][name] = add_dp(
name, dpid, hw_dpid, i, stack,
n_tagged, tagged_vid, untagged_hosts,
use_external, dp_options)
config_text = yaml.dump(config, default_flow_style=False)
return config_text
def verify_no_cable_errors(self):
i = 0
for dpid in self.dpids:
i += 1
labels = {'dp_id': '0x%x' % int(dpid), 'dp_name': 'faucet-%u' % i}
self.assertEqual(
0, self.scrape_prometheus_var(
var='stack_cabling_errors_total', labels=labels, default=None))
self.assertGreater(
self.scrape_prometheus_var(
var='stack_probes_received_total', labels=labels), 0)
def verify_stack_hosts(self, verify_bridge_local_rule=True, retries=3):
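        """Verify LLDP is not flooded to hosts and all hosts in the stack can ping each other."""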
lldp_cap_files = []
for host in self.hosts_name_ordered():
lldp_cap_file = os.path.join(self.tmpdir, '%s-lldp.cap' % host)
lldp_cap_files.append(lldp_cap_file)
host.cmd(mininet_test_util.timeout_cmd(
'tcpdump -U -n -c 1 -i %s -w %s ether proto 0x88CC and not ether src %s &' % (
                    host.defaultIntf(), lldp_cap_file, host.MAC()), 60))
# should not flood LLDP from hosts
self.verify_lldp_blocked(self.hosts_name_ordered())
# hosts should see no LLDP probes
self.verify_empty_caps(lldp_cap_files)
if verify_bridge_local_rule:
# Verify 802.1x flood block triggered.
for dpid in self.dpids:
self.wait_nonzero_packet_count_flow(
{'dl_dst': '01:80:c2:00:00:00/ff:ff:ff:ff:ff:f0'},
dpid=dpid, table_id=self._FLOOD_TABLE, ofa_match=False)
self.retry_net_ping(retries=retries)
def stack_port_status(self, dpid, dp_name, port_no):
labels = self.port_labels(port_no)
labels.update({'dp_id': '0x%x' % int(dpid), 'dp_name': dp_name})
return self.scrape_prometheus_var(
'port_stack_state', labels=labels,
default=None, dpid=False)
def wait_for_stack_port_status(self, dpid, dp_name, port_no, status, timeout=25):
labels = self.port_labels(port_no)
labels.update({'dp_id': '0x%x' % int(dpid), 'dp_name': dp_name})
if not self.wait_for_prometheus_var(
'port_stack_state', status, labels=labels,
default=None, dpid=False, timeout=timeout):
self.fail('did not get expected dpid %x port %u port_stack_state %u' % (
int(dpid), port_no, status))
def one_stack_port_down(self, dpid, dp_name, port):
self.set_port_down(port, dpid, wait=False)
self.wait_for_stack_port_status(dpid, dp_name, port, 4)
def one_stack_port_up(self, dpid, dp_name, port):
self.set_port_up(port, dpid, wait=False)
self.wait_for_stack_port_status(dpid, dp_name, port, 3)
def verify_stack_up(self, prop=1.0, timeout=25):
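        """Wait until at least the given proportion of stack links report up."""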
for _ in range(timeout):
links = 0
links_up = 0
for i, dpid in enumerate(self.dpids, start=1):
dp_name = 'faucet-%u' % i
for link in self.non_host_links(dpid):
status = self.stack_port_status(dpid, dp_name, link.port)
links += 1
if status == 3: # up
links_up += 1
prop_up = links_up / links
if prop_up >= prop:
return
time.sleep(1)
self.fail('not enough links up: %f / %f' % (links_up, links))
def verify_one_stack_down(self, stack_offset_port, coldstart=False):
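        """Down one stack link (both ends) and verify hosts stay reachable and the
        first switch sees no extra broadcast packet-ins from the stack."""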
self.retry_net_ping()
stack_port = self.non_host_links(self.dpid)[stack_offset_port].port
remote_stack_port = self.non_host_links(self.dpid)[stack_offset_port].peer_port
self.set_port_down(stack_port, wait=False)
# self.dpids[1] is the intermediate switch.
self.set_port_down(remote_stack_port, self.dpids[1], wait=False)
# test case where one link is down when coldstarted.
if coldstart:
self.coldstart_conf()
self.verify_stack_up(prop=0.75)
self.verify_stack_hosts(verify_bridge_local_rule=False)
# Broadcast works, and first switch doesn't see broadcast packet ins from stack.
packet_in_before_broadcast = self.scrape_prometheus_var('of_vlan_packet_ins')
self.verify_broadcast()
packet_in_after_broadcast = self.scrape_prometheus_var('of_vlan_packet_ins')
self.assertEqual(
packet_in_before_broadcast,
packet_in_after_broadcast)
# TODO: re-enable.
# self.verify_no_cable_errors()
def verify_no_arp_storm(self, ping_host, tcpdump_host):
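        """Verify ARP requests are not amplified: at most 2 requests seen per switch-to-switch link."""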
num_arp_expected = self.topo.switch_to_switch_links * 2
tcpdump_filter = 'arp and ether src %s' % ping_host.MAC()
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: ping_host.cmd('arp -d %s' % tcpdump_host.IP()),
lambda: ping_host.cmd(' '.join((self.FPINGS_ARGS_ONE, tcpdump_host.IP())))],
packets=(num_arp_expected+1))
num_arp_received = len(re.findall(
'who-has %s tell %s' % (tcpdump_host.IP(), ping_host.IP()), tcpdump_txt))
self.assertTrue(num_arp_received)
self.assertLessEqual(num_arp_received, num_arp_expected)
def verify_stack_has_no_loop(self):
for ping_host, tcpdump_host in (
(self.hosts_name_ordered()[0], self.hosts_name_ordered()[-1]),
(self.hosts_name_ordered()[-1], self.hosts_name_ordered()[0])):
self.verify_no_arp_storm(ping_host, tcpdump_host)
def verify_all_stack_hosts(self):
for _ in range(2):
self.verify_stack_up()
self.verify_no_cable_errors()
self.verify_stack_hosts()
self.verify_traveling_dhcp_mac()
self.verify_unicast_not_looped()
self.verify_no_bcast_to_self()
self.verify_stack_has_no_loop()
self.flap_all_switch_ports()
def verify_tunnel_established(self, src_host, dst_host, other_host, packets=3):
"""Verify ICMP packets tunnelled from src to dst."""
icmp_match = {'eth_type': IPV4_ETH, 'ip_proto': 1}
self.wait_until_matching_flow(icmp_match, table_id=self._PORT_ACL_TABLE, ofa_match=False)
tcpdump_text = self.tcpdump_helper(
dst_host, 'icmp[icmptype] == 8', [
# need to set static ARP as only ICMP is tunnelled.
lambda: src_host.cmd('arp -s %s %s' % (other_host.IP(), other_host.MAC())),
lambda: src_host.cmd('ping -c%u -t1 %s' % (packets, other_host.IP()))
],
packets=1, timeout=(packets + 1),
)
self.wait_nonzero_packet_count_flow(
icmp_match, table_id=self._PORT_ACL_TABLE, ofa_match=False)
self.assertTrue(re.search(
'%s: ICMP echo request' % other_host.IP(), tcpdump_text
), 'Tunnel was not established')
def verify_one_broadcast(self, from_host, to_hosts):
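        """Verify a broadcast from from_host reaches exactly one of the given external hosts."""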
self.assertGreater(len(to_hosts), 1, 'Testing only one ext host is not useful')
received_broadcasts = []
for to_host in to_hosts:
if self.verify_broadcast(hosts=(from_host, to_host), broadcast_expected=None):
received_broadcasts.append(to_host)
received_names = {host.name: host for host in received_broadcasts}
self.assertEqual(len(received_broadcasts), 1,
'Received not exactly one broadcast from %s: %s' %
(from_host.name, received_names))
def map_int_ext_hosts(self):
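        """Classify hosts as internal or external (loop_protect_external) from the FAUCET config."""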
conf = self._get_faucet_conf()
host_name_map = {host.name: host for host in self.hosts_name_ordered()}
int_hosts = set()
ext_hosts = set()
dp_hosts = {}
for dp_name, dp_conf in conf['dps'].items():
dpid = int(dp_conf['dp_id'])
dp_int_hosts = set()
dp_ext_hosts = set()
for port, p_conf in dp_conf['interfaces'].items():
if 'stack' not in p_conf:
host = host_name_map[self.net.topo.dpid_port_host[dpid][port]]
if p_conf.get('loop_protect_external', False):
dp_ext_hosts.add(host)
else:
dp_int_hosts.add(host)
dp_hosts[dp_name] = (dp_int_hosts, dp_ext_hosts)
int_hosts.update(dp_int_hosts)
ext_hosts.update(dp_ext_hosts)
return int_hosts, ext_hosts, dp_hosts
def verify_protected_connectivity(self):
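        """Verify external loop protection: internal hosts reach all internal hosts but only
        one external host; external hosts cannot flood each other but reach internal hosts."""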
self.verify_stack_up()
int_hosts, ext_hosts, dp_hosts = self.map_int_ext_hosts()
for int_host in int_hosts:
# All internal hosts can reach other internal hosts.
for other_int_host in int_hosts - {int_host}:
self.verify_broadcast(hosts=(int_host, other_int_host), broadcast_expected=True)
self.one_ipv4_ping(int_host, other_int_host.IP())
# All internal hosts can reach exactly one external host.
self.verify_one_broadcast(int_host, ext_hosts)
for ext_host in ext_hosts:
# All external hosts cannot flood to each other.
for other_ext_host in ext_hosts - {ext_host}:
self.verify_broadcast(hosts=(ext_host, other_ext_host), broadcast_expected=False)
# All external hosts can reach internal hosts.
for int_host in int_hosts:
self.verify_broadcast(hosts=(ext_host, int_host), broadcast_expected=True)
self.one_ipv4_ping(ext_host, int_host.IP())
def set_externals_state(self, dp_name, externals_up):
"""Set the port up/down state of all external ports on a switch"""
dp_conf = self._get_faucet_conf()['dps'][dp_name]
for port_num, port_conf in dp_conf['interfaces'].items():
if port_conf.get('loop_protect_external'):
if externals_up:
self.set_port_up(port_num, dp_conf.get('dp_id'))
else:
self.set_port_down(port_num, dp_conf.get('dp_id'))
def validate_with_externals_down(self, dp_name):
"""Check situation when all externals on a given dp are down"""
self.set_externals_state(dp_name, False)
self.verify_protected_connectivity()
self.set_externals_state(dp_name, True)
def validate_with_externals_down_fails(self, dp_name):
"""Faucet code is not currently correct, so expect to fail."""
# TODO: Fix faucet so the test inversion is no longer required.
asserted = False
try:
self.validate_with_externals_down(dp_name)
except AssertionError:
asserted = True
self.assertTrue(asserted, 'Did not fail as expected for %s' % dp_name)
class FaucetSingleUntaggedIPV4RoutingWithStackingTest(FaucetStringOfDPTest):
"""IPV4 intervlan routing with stacking test"""
IPV = 4
NETPREFIX = 24
ETH_TYPE = IPV4_ETH
SWITCH_TO_SWITCH_LINKS = 1
NUM_DPS = 4
V100 = 100
V200 = 200
V300 = 300
V100_NUM_HOSTS = 1
V200_NUM_HOSTS = 1
V300_NUM_HOSTS = 0
FAUCET_MAC2 = '0e:00:00:00:00:02'
@staticmethod
def get_dp_options():
return {
'drop_spoofed_faucet_mac': False,
'arp_neighbor_timeout': 2,
'max_resolve_backoff_time': 2,
'proactive_learn_v4': True
}
def setUp(self):
pass
def set_up(self):
super(FaucetSingleUntaggedIPV4RoutingWithStackingTest, self).setUp()
router_info = {
self.V100: {
'faucet_mac': self.FAUCET_MAC,
'faucet_vips': [self.get_faucet_vip(1)],
'targeted_gw_resolution': False,
},
self.V200: {
'faucet_mac': self.FAUCET_MAC2,
'faucet_vips': [self.get_faucet_vip(2)],
'targeted_gw_resolution': False,
}
}
untagged_hosts = {self.V100: self.V100_NUM_HOSTS,
self.V200: self.V200_NUM_HOSTS,
self.V300: self.V300_NUM_HOSTS}
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts=untagged_hosts,
switch_to_switch_links=self.SWITCH_TO_SWITCH_LINKS,
hw_dpid=self.hw_dpid,
router=router_info,
dp_options=self.get_dp_options()
)
self.start_net()
@staticmethod
def get_faucet_mac(vindex):
"""Get the faucet MAC"""
return '0e:00:00:00:00:0%u' % vindex
def get_faucet_vip(self, vindex):
"""Get the IPV4 faucet vip"""
return '10.%u00.0.254/%u' % (vindex, self.NETPREFIX)
def get_ip(self, host_n, vindex):
"""Get the IPV4 host ip"""
return '10.%u00.0.%u/%u' % (vindex, host_n, self.NETPREFIX)
def host_ping(self, src_host, dst_ip):
"""ping host"""
self.one_ipv4_ping(src_host, dst_ip, require_host_learned=False, retries=5)
def set_host_ip(self, host, host_ip):
"""Set the host ip"""
host.setIP(str(host_ip.ip), prefixLen=self.NETPREFIX)
def verify_intervlan_routing(self):
"""Setup host routes and verify intervlan routing is possible"""
num_hosts = self.V100_NUM_HOSTS + self.V200_NUM_HOSTS + self.V300_NUM_HOSTS
first_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(1))
second_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(2))
v100_hosts = [(self.hosts_name_ordered()[i], ipaddress.ip_interface(
self.get_ip(i+1, 1))) for i in range(len(self.hosts_name_ordered())) if (i % num_hosts) == 0]
v200_hosts = [(self.hosts_name_ordered()[i], ipaddress.ip_interface(
self.get_ip(i+1, 2))) for i in range(len(self.hosts_name_ordered())) if (i % num_hosts) == 1]
for host_tuple in v100_hosts:
host, host_ip = host_tuple
self.set_host_ip(host, host_ip)
for host_tuple in v200_hosts:
host, host_ip = host_tuple
self.set_host_ip(host, host_ip)
for v100_host_tuple in v100_hosts:
v100_host, v100_host_ip = v100_host_tuple
for v200_host_tuple in v200_hosts:
v200_host, v200_host_ip = v200_host_tuple
self.add_host_route(v100_host, v200_host_ip, first_faucet_vip.ip)
self.add_host_route(v200_host, v100_host_ip, second_faucet_vip.ip)
# TODO: multi DP route resolver needs to flood out stack ports
self.host_ping(v100_host, first_faucet_vip.ip)
self.host_ping(v200_host, second_faucet_vip.ip)
self.host_ping(v100_host, v200_host_ip.ip)
self.host_ping(v200_host, v100_host_ip.ip)
self.assertEqual(
self._ip_neigh(v100_host, first_faucet_vip.ip, self.IPV), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(v200_host, second_faucet_vip.ip, self.IPV), self.FAUCET_MAC2)
for src_host_tuple in v100_hosts:
src_host, _ = src_host_tuple
for dst_host_tuple in v100_hosts:
_, dst_ip = dst_host_tuple
if src_host_tuple == dst_host_tuple:
continue
self.host_ping(src_host, dst_ip.ip)
for src_host_tuple in v200_hosts:
src_host, _ = src_host_tuple
for dst_host_tuple in v200_hosts:
_, dst_ip = dst_host_tuple
if src_host_tuple == dst_host_tuple:
continue
self.host_ping(src_host, dst_ip.ip)
def test_intervlan_routing_2stack(self):
"""Verify intervlan routing works with 2 DPs in a stack"""
self.NUM_DPS = 2
self.set_up()
self.verify_stack_up()
self.verify_intervlan_routing()
def test_intervlan_routing_3stack(self):
"""Verify intervlan routing works with 3 DPs in a stack"""
self.NUM_DPS = 3
self.set_up()
self.verify_stack_up()
self.verify_intervlan_routing()
def test_intervlan_routing_4stack(self):
"""Verify intervlan routing works with 4 DPs in a stack"""
self.NUM_DPS = 4
self.set_up()
self.verify_stack_up()
self.verify_intervlan_routing()
def test_path_no_vlans(self):
"""Test when a DP in the path of a intervlan route contains no routed VLANs"""
self.NUM_DPS = 3
self.set_up()
first_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(1))
second_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(2))
v100_host = self.hosts_name_ordered()[0]
v100_host_ip = ipaddress.ip_interface(self.get_ip(1, 1))
v200_host = self.hosts_name_ordered()[5]
v200_host_ip = ipaddress.ip_interface(self.get_ip(2, 2))
        # Remove all hosts on the middle DP by changing them to hosts on VLAN300;
        # the middle DP now contains no hosts on VLAN 100 or VLAN 200.
conf = self._get_faucet_conf()
interface_config = conf['dps']['faucet-2']['interfaces']
for port_key, port_dict in interface_config.items():
if 'stack' in port_dict:
continue
conf['dps']['faucet-2']['interfaces'][port_key]['native_vlan'] = self.V300
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=True)
self.verify_stack_up()
self.set_host_ip(v100_host, v100_host_ip)
self.set_host_ip(v200_host, v200_host_ip)
self.add_host_route(v100_host, v200_host_ip, first_faucet_vip.ip)
self.add_host_route(v200_host, v100_host_ip, second_faucet_vip.ip)
# TODO: multi DP route resolver needs to flood out stack ports
self.host_ping(v100_host, first_faucet_vip.ip)
self.host_ping(v200_host, second_faucet_vip.ip)
self.host_ping(v100_host, v200_host_ip.ip)
self.host_ping(v200_host, v100_host_ip.ip)
self.assertEqual(
self._ip_neigh(v100_host, first_faucet_vip.ip, self.IPV), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(v200_host, second_faucet_vip.ip, self.IPV), self.FAUCET_MAC2)
def test_dp_one_vlan_from_router(self):
"""Test when a DP has only one of the routed VLANs"""
self.NUM_DPS = 2
self.set_up()
first_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(1))
second_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(2))
v100_host = self.hosts_name_ordered()[0]
v100_host_ip = ipaddress.ip_interface(self.get_ip(1, 1))
v200_host = self.hosts_name_ordered()[3]
v200_host_ip = ipaddress.ip_interface(self.get_ip(2, 2))
        # Remove the host on VLAN100 by changing it to a host on VLAN300; there is now
        # only one host on the DP on a VLAN that is being routed (200).
conf = self._get_faucet_conf()
interface_config = conf['dps']['faucet-2']['interfaces']
for port_key, port_dict in interface_config.items():
if 'stack' in port_dict:
continue
if port_dict['native_vlan'] == self.V100:
conf['dps']['faucet-2']['interfaces'][port_key]['native_vlan'] = self.V300
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=True)
self.verify_stack_up()
self.set_host_ip(v100_host, v100_host_ip)
self.set_host_ip(v200_host, v200_host_ip)
self.add_host_route(v100_host, v200_host_ip, first_faucet_vip.ip)
self.add_host_route(v200_host, v100_host_ip, second_faucet_vip.ip)
# TODO: multi DP route resolver needs to flood out stack ports
self.host_ping(v100_host, first_faucet_vip.ip)
self.host_ping(v200_host, second_faucet_vip.ip)
self.host_ping(v100_host, v200_host_ip.ip)
self.host_ping(v200_host, v100_host_ip.ip)
self.assertEqual(
self._ip_neigh(v100_host, first_faucet_vip.ip, self.IPV), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(v200_host, second_faucet_vip.ip, self.IPV), self.FAUCET_MAC2)
class FaucetSingleUntaggedIPV6RoutingWithStackingTest(FaucetSingleUntaggedIPV4RoutingWithStackingTest):
"""IPV6 intervlan routing with stacking tests"""
IPV = 6
NETPREFIX = 64
ETH_TYPE = IPV6_ETH
def get_dp_options(self):
return {
'drop_spoofed_faucet_mac': False,
'nd_neighbor_timeout': 2,
'max_resolve_backoff_time': 1,
'proactive_learn_v6': True
}
def host_ping(self, src_host, dst_ip):
self.one_ipv6_ping(src_host, dst_ip, require_host_learned=False)
def set_host_ip(self, host, host_ip):
self.add_host_ipv6_address(host, host_ip)
def get_faucet_vip(self, vindex):
"""Get the IPV6 faucet vip"""
return 'fc0%u::1:254/112' % vindex
def get_ip(self, host_n, vindex):
"""Get the IPV6 host ip"""
return 'fc0%u::1:%u/64' % (vindex, host_n)
class FaucetStringOfDPUntaggedTest(FaucetStringOfDPTest):
NUM_DPS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetStringOfDPUntaggedTest, self).setUp()
self.build_net(
n_dps=self.NUM_DPS, untagged_hosts={self.VID: self.NUM_HOSTS})
self.start_net()
def test_untagged(self):
"""All untagged hosts in multi switch topology can reach one another."""
self.verify_stack_hosts()
self.verify_traveling_dhcp_mac()
class FaucetStringOfDPTaggedTest(FaucetStringOfDPTest):
NUM_DPS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetStringOfDPTaggedTest, self).setUp()
self.build_net(
n_dps=self.NUM_DPS, n_tagged=self.NUM_HOSTS, tagged_vid=self.VID)
self.start_net()
def test_tagged(self):
"""All tagged hosts in multi switch topology can reach one another."""
self.verify_stack_hosts(verify_bridge_local_rule=False)
self.verify_traveling_dhcp_mac()
class FaucetSingleStackStringOfDPTagged0Test(FaucetStringOfDPTest):
"""Test topology of stacked datapaths with tagged hosts."""
NUM_DPS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetSingleStackStringOfDPTagged0Test, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
n_tagged=self.NUM_HOSTS,
tagged_vid=self.VID,
switch_to_switch_links=2)
self.start_net()
def test_tagged(self):
"""All tagged hosts in stack topology can reach each other."""
self.verify_stack_up()
for coldstart in (False, True):
self.verify_one_stack_down(0, coldstart)
class FaucetSingleStackStringOfDPTagged1Test(FaucetStringOfDPTest):
"""Test topology of stacked datapaths with tagged hosts."""
NUM_DPS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetSingleStackStringOfDPTagged1Test, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
n_tagged=self.NUM_HOSTS,
tagged_vid=self.VID,
switch_to_switch_links=2)
self.start_net()
def test_tagged(self):
self.verify_stack_up()
for coldstart in (False, True):
self.verify_one_stack_down(1, coldstart)
class FaucetStringOfDPLACPUntaggedTest(FaucetStringOfDPTest):
"""Test topology of LACP-connected datapaths with untagged hosts."""
NUM_DPS = 2
NUM_HOSTS = 2
match_bcast = {'dl_vlan': '100', 'dl_dst': 'ff:ff:ff:ff:ff:ff'}
action_str = 'OUTPUT:%u'
def setUp(self): # pylint: disable=invalid-name
super(FaucetStringOfDPLACPUntaggedTest, self).setUp()
self.build_net(
stack=False,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=2,
hw_dpid=self.hw_dpid,
lacp=True)
self.start_net()
def lacp_ports(self):
first_link, second_link = sorted(self.non_host_links(self.dpid))
first_lacp_port, second_lacp_port = first_link.port, second_link.port
remote_first_lacp_port, remote_second_lacp_port = first_link.peer_port, second_link.peer_port
return (first_lacp_port, second_lacp_port,
remote_first_lacp_port, remote_second_lacp_port)
def wait_for_lacp_state(self, port_no, wanted_state, dpid, dp_name, timeout=30):
labels = self.port_labels(port_no)
labels.update({'dp_id': '0x%x' % int(dpid), 'dp_name': dp_name})
if not self.wait_for_prometheus_var(
'port_lacp_state', wanted_state,
labels=labels, dpid=False, timeout=timeout):
self.fail('wanted LACP state for %s to be %u' % (labels, wanted_state))
def wait_for_lacp_port_init(self, port_no, dpid, dp_name):
self.wait_for_lacp_state(port_no, 1, dpid, dp_name)
def wait_for_lacp_port_up(self, port_no, dpid, dp_name):
self.wait_for_lacp_state(port_no, 3, dpid, dp_name)
def wait_for_lacp_port_noact(self, port_no, dpid, dp_name):
self.wait_for_lacp_state(port_no, 5, dpid, dp_name)
# We sort non_host_links by port because FAUCET sorts its ports
# and only floods out of the first active LACP port in that list
def wait_for_all_lacp_up(self):
(first_lacp_port, second_lacp_port, remote_first_lacp_port, _) = self.lacp_ports()
self.wait_for_lacp_port_up(first_lacp_port, self.dpid, self.DP_NAME)
self.wait_for_lacp_port_up(second_lacp_port, self.dpid, self.DP_NAME)
self.wait_until_matching_flow(
self.match_bcast, self._FLOOD_TABLE, actions=[self.action_str % first_lacp_port])
self.wait_until_matching_flow(
self.match_bcast, self._FLOOD_TABLE, actions=[self.action_str % remote_first_lacp_port],
dpid=self.dpids[1])
def test_lacp_port_down(self):
"""LACP works with any member down."""
(first_lacp_port, second_lacp_port,
remote_first_lacp_port, remote_second_lacp_port) = self.lacp_ports()
local_ports = {first_lacp_port, second_lacp_port}
remote_ports = {remote_first_lacp_port, remote_second_lacp_port}
self.wait_for_all_lacp_up()
self.retry_net_ping()
for local_lacp_port, remote_lacp_port in (
(first_lacp_port, remote_first_lacp_port),
(second_lacp_port, remote_second_lacp_port)):
other_local_lacp_port = list(local_ports - {local_lacp_port})[0]
other_remote_lacp_port = list(remote_ports - {remote_lacp_port})[0]
self.set_port_down(local_lacp_port, wait=False)
self.wait_for_lacp_port_init(
local_lacp_port, self.dpid, self.DP_NAME)
self.wait_for_lacp_port_init(
remote_lacp_port, self.dpids[1], 'faucet-2')
self.wait_until_matching_flow(
self.match_bcast, self._FLOOD_TABLE, actions=[
self.action_str % other_local_lacp_port])
self.wait_until_matching_flow(
self.match_bcast, self._FLOOD_TABLE, actions=[
self.action_str % other_remote_lacp_port],
dpid=self.dpids[1])
self.retry_net_ping()
self.set_port_up(local_lacp_port)
self.wait_for_all_lacp_up()
def test_untagged(self):
"""All untagged hosts in stack topology can reach each other, LAG_CHANGE event emitted."""
self._enable_event_log()
for _ in range(3):
self.wait_for_all_lacp_up()
self.verify_stack_hosts()
self.flap_all_switch_ports()
# Check for presence of LAG_CHANGE event in event socket log
self.wait_until_matching_lines_from_file(r'.+LAG_CHANGE.+', self.event_log)
def test_dyn_fail(self):
"""Test lacp fail on reload with dynamic lacp status."""
conf = self._get_faucet_conf()
(src_port, dst_port, fail_port, _) = self.lacp_ports()
self.wait_for_lacp_port_up(src_port, self.dpids[0], 'faucet-1')
self.wait_for_lacp_port_up(dst_port, self.dpids[0], 'faucet-1')
interfaces_conf = conf['dps']['faucet-2']['interfaces']
interfaces_conf[fail_port]['lacp'] = 0
interfaces_conf[fail_port]['lacp_active'] = False
self.reload_conf(conf, self.faucet_config_path, restart=True,
cold_start=False, change_expected=False)
self.wait_for_lacp_port_init(src_port, self.dpids[0], 'faucet-1')
self.wait_for_lacp_port_up(dst_port, self.dpids[0], 'faucet-1')
def test_passthrough(self):
"""Test lacp passthrough on port fail."""
conf = self._get_faucet_conf()
(src_port, dst_port, fail_port, end_port) = self.lacp_ports()
interfaces_conf = conf['dps']['faucet-1']['interfaces']
interfaces_conf[dst_port]['lacp_passthrough'] = [src_port]
interfaces_conf[dst_port]['loop_protect_external'] = True
interfaces_conf[dst_port]['lacp'] = 2
interfaces_conf[src_port]['loop_protect_external'] = True
interfaces_conf = conf['dps']['faucet-2']['interfaces']
interfaces_conf[fail_port]['loop_protect_external'] = True
interfaces_conf[end_port]['loop_protect_external'] = True
interfaces_conf[end_port]['lacp'] = 2
self.reload_conf(conf, self.faucet_config_path, restart=True,
cold_start=False, change_expected=False)
self.wait_for_all_lacp_up()
self.verify_stack_hosts()
interfaces_conf[fail_port]['lacp'] = 0
interfaces_conf[fail_port]['lacp_active'] = False
self.reload_conf(conf, self.faucet_config_path, restart=True,
cold_start=False, change_expected=False)
self.wait_for_lacp_port_init(src_port, self.dpids[0], 'faucet-1')
self.wait_for_lacp_port_up(dst_port, self.dpids[0], 'faucet-1')
self.wait_for_lacp_port_init(end_port, self.dpids[1], 'faucet-2')
class FaucetStackStringOfDPUntaggedTest(FaucetStringOfDPTest):
"""Test topology of stacked datapaths with untagged hosts."""
NUM_DPS = 2
NUM_HOSTS = 2
def setUp(self): # pylint: disable=invalid-name
super(FaucetStackStringOfDPUntaggedTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=2,
hw_dpid=self.hw_dpid)
self.start_net()
def test_untagged(self):
"""All untagged hosts in stack topology can reach each other."""
self.verify_stack_hosts()
class FaucetSingleStackStringOfDPExtLoopProtUntaggedTest(FaucetStringOfDPTest):
"""Test topology of stacked datapaths with untagged hosts."""
NUM_DPS = 2
NUM_HOSTS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetSingleStackStringOfDPExtLoopProtUntaggedTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=2,
hw_dpid=self.hw_dpid,
use_external=True)
self.start_net()
def test_untagged(self):
"""Host can reach each other, unless both marked loop_protect_external"""
for host in self.hosts_name_ordered():
self.require_host_learned(host)
# Part 1: Make sure things are connected properly.
self.verify_protected_connectivity() # Before reload
# Part 2: Test the code on pipeline reconfiguration path.
conf = self._get_faucet_conf()
loop_interface = None
for interface, interface_conf in conf['dps']['faucet-2']['interfaces'].items():
if 'stack' in interface_conf:
continue
if not interface_conf.get('loop_protect_external', False):
loop_interface = interface
break
self._mark_external(loop_interface, True)
self._mark_external(loop_interface, False)
# Part 3: Make sure things are the same after reload.
self.verify_protected_connectivity() # After reload
def _mark_external(self, loop_interface, protect_external):
conf = self._get_faucet_conf()
conf['dps']['faucet-2']['interfaces'][loop_interface]['loop_protect_external'] = protect_external
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=True)
def test_missing_ext(self):
"""Test stacked dp with all external ports down on a switch"""
self.validate_with_externals_down_fails('faucet-1')
self.validate_with_externals_down_fails('faucet-2')
class FaucetSingleStackStringOf3DPExtLoopProtUntaggedTest(FaucetStringOfDPTest):
"""Test topology of stacked datapaths with untagged hosts."""
NUM_DPS = 3
NUM_HOSTS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetSingleStackStringOf3DPExtLoopProtUntaggedTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=2,
hw_dpid=self.hw_dpid,
use_external=True)
self.start_net()
def test_untagged(self):
self.verify_stack_up()
int_hosts, ext_hosts, dp_hosts = self.map_int_ext_hosts()
_, root_ext_hosts = dp_hosts[self.DP_NAME]
for int_host in int_hosts:
# All internal hosts can reach other internal hosts.
for other_int_host in int_hosts - {int_host}:
self.verify_broadcast(
hosts=(int_host, other_int_host), broadcast_expected=True)
self.verify_unicast(
hosts=(int_host, other_int_host), unicast_expected=True)
# All internal hosts should reach exactly one external host.
self.verify_one_broadcast(int_host, ext_hosts)
for ext_host in ext_hosts:
# All external hosts cannot flood to each other
for other_ext_host in ext_hosts - {ext_host}:
self.verify_broadcast(
hosts=(ext_host, other_ext_host), broadcast_expected=False)
remote_ext_hosts = ext_hosts - set(root_ext_hosts)
# int host should never be broadcast to an ext host that is not on the root.
for local_int_hosts, _ in dp_hosts.values():
for local_int_host in local_int_hosts:
for remote_ext_host in remote_ext_hosts:
self.verify_broadcast(
hosts=(local_int_host, remote_ext_host), broadcast_expected=False)
class FaucetGroupStackStringOfDPUntaggedTest(FaucetStackStringOfDPUntaggedTest):
"""Test topology of stacked datapaths with untagged hosts."""
GROUP_TABLE = True
class FaucetStackRingOfDPTest(FaucetStringOfDPTest):
NUM_DPS = 3
SOFTWARE_ONLY = True
def setUp(self): # pylint: disable=invalid-name
super(FaucetStackRingOfDPTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=1,
stack_ring=True)
self.start_net()
def test_untagged(self):
"""Stack loop prevention works and hosts can ping each other, STACK_TOPO_CHANGE event emitted."""
self._enable_event_log()
self.verify_stack_up()
self.verify_stack_has_no_loop()
self.retry_net_ping()
self.verify_traveling_dhcp_mac()
# Move through each DP breaking either side of the ring
for dpid_i in range(self.NUM_DPS):
dpid = self.dpids[dpid_i]
dp_name = 'faucet-%u' % (dpid_i + 1)
for link in self.non_host_links(dpid):
port = link.port
self.one_stack_port_down(dpid, dp_name, port)
self.retry_net_ping()
self.one_stack_port_up(dpid, dp_name, port)
# Check for presence of STACK_TOPO_CHANGE event in event socket log
self.wait_until_matching_lines_from_file(r'.+STACK_TOPO_CHANGE.+', self.event_log)
class FaucetSingleStack4RingOfDPTest(FaucetStackRingOfDPTest):
NUM_DPS = 4
class FaucetSingleStackAclControlTest(FaucetStringOfDPTest):
"""Test ACL control of stacked datapaths with untagged hosts."""
NUM_DPS = 3
NUM_HOSTS = 3
def acls(self):
map1, map2, map3 = [self.port_maps[dpid] for dpid in self.dpids]
return {
1: [
{'rule': {
'dl_type': IPV4_ETH,
'nw_dst': '10.0.0.2',
'actions': {
'output': {
'port': map1['port_2']
}
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'dl_dst': 'ff:ff:ff:ff:ff:ff',
'actions': {
'output': {
'ports': [
map1['port_2'],
map1['port_4']]
}
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'actions': {
'output': {
'port': map1['port_4']
}
},
}},
{'rule': {
'actions': {
'allow': 1,
},
}},
],
2: [
{'rule': {
'dl_type': IPV4_ETH,
'actions': {
'output': {
'port': map2['port_5']
}
},
}},
{'rule': {
'actions': {
'allow': 1,
},
}},
],
3: [
{'rule': {
'dl_type': IPV4_ETH,
'nw_dst': '10.0.0.7',
'actions': {
'output': {
'port': map3['port_1']
}
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'dl_dst': 'ff:ff:ff:ff:ff:ff',
'actions': {
'output': {
'ports': [map3['port_1']]
}
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'actions': {
'allow': 0,
},
}},
{'rule': {
'actions': {
'allow': 1,
},
}},
],
}
# DP-to-acl_in port mapping.
def acl_in_dp(self):
map1, map2, map3 = [self.port_maps[dpid] for dpid in self.dpids]
return {
'faucet-1': {
# Port 1, acl_in = 1
map1['port_1']: 1,
},
'faucet-2': {
# Port 4, acl_in = 2
map2['port_4']: 2,
},
'faucet-3': {
# Port 4, acl_in = 3
map3['port_4']: 3,
},
}
def setUp(self): # pylint: disable=invalid-name
super(FaucetSingleStackAclControlTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
)
self.start_net()
def test_unicast(self):
"""Hosts in stack topology can appropriately reach each other over unicast."""
hosts = self.hosts_name_ordered()
self.verify_stack_up()
self.verify_tp_dst_notblocked(5000, hosts[0], hosts[1], table_id=None)
self.verify_tp_dst_blocked(5000, hosts[0], hosts[3], table_id=None)
self.verify_tp_dst_notblocked(5000, hosts[0], hosts[6], table_id=None)
self.verify_tp_dst_blocked(5000, hosts[0], hosts[7], table_id=None)
self.verify_no_cable_errors()
def test_broadcast(self):
"""Hosts in stack topology can appropriately reach each other over broadcast."""
hosts = self.hosts_name_ordered()
self.verify_stack_up()
self.verify_bcast_dst_notblocked(5000, hosts[0], hosts[1])
self.verify_bcast_dst_blocked(5000, hosts[0], hosts[3])
self.verify_bcast_dst_notblocked(5000, hosts[0], hosts[6])
self.verify_bcast_dst_blocked(5000, hosts[0], hosts[7])
self.verify_no_cable_errors()
class FaucetStringOfDPACLOverrideTest(FaucetStringOfDPTest):
NUM_DPS = 1
NUM_HOSTS = 2
# ACL rules which will get overridden.
def acls(self):
return {
1: [
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 6,
'tcp_dst': 5001,
'actions': {
'allow': 1,
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 6,
'tcp_dst': 5002,
'actions': {
'allow': 0,
},
}},
{'rule': {
'actions': {
'allow': 1,
},
}},
],
}
# ACL rules which get put into an include-optional
# file, then reloaded into FAUCET.
@staticmethod
def acls_override():
return {
1: [
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 6,
'tcp_dst': 5001,
'actions': {
'allow': 0,
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 6,
'tcp_dst': 5002,
'actions': {
'allow': 1,
},
}},
{'rule': {
'actions': {
'allow': 1,
},
}},
],
}
# DP-to-acl_in port mapping.
def acl_in_dp(self):
port_1 = self.port_map['port_1']
return {
'faucet-1': {
# First port, acl_in = 1
port_1: 1,
},
}
def setUp(self): # pylint: disable=invalid-name
super(FaucetStringOfDPACLOverrideTest, self).setUp()
self.acls_config = os.path.join(self.tmpdir, 'acls.yaml')
missing_config = os.path.join(self.tmpdir, 'missing_config.yaml')
self.build_net(
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
include_optional=[self.acls_config, missing_config],
)
self.start_net()
def test_port5001_blocked(self):
"""Test that TCP port 5001 is blocked."""
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(5001, first_host, second_host)
with open(self.acls_config, 'w') as config_file:
config_file.write(self.get_config(acls=self.acls_override()))
self.verify_faucet_reconf(cold_start=False, change_expected=True)
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_no_cable_errors()
def test_port5002_notblocked(self):
"""Test that TCP port 5002 is not blocked."""
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5002, first_host, second_host)
with open(self.acls_config, 'w') as config_file:
config_file.write(self.get_config(acls=self.acls_override()))
self.verify_faucet_reconf(cold_start=False, change_expected=True)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
self.verify_no_cable_errors()
class FaucetTunnelSameDpTest(FaucetStringOfDPTest):
NUM_DPS = 2
NUM_HOSTS = 2
SWITCH_TO_SWITCH_LINKS = 2
VID = 100
def acls(self):
return {
1: [
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 1,
'actions': {
'allow': 0,
'output': {
'tunnel': {
'type': 'vlan',
'tunnel_id': 200,
'dp': 'faucet-1',
'port': 'b%(port_2)d'}
}
}
}}
]
}
# DP-to-acl_in port mapping.
def acl_in_dp(self):
port_1 = self.port_map['port_1']
return {
'faucet-1': {
# First port 1, acl_in = 1
port_1: 1,
}
}
def setUp(self): # pylint: disable=invalid-name
super(FaucetTunnelSameDpTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=self.SWITCH_TO_SWITCH_LINKS,
hw_dpid=self.hw_dpid,
)
self.start_net()
def test_tunnel_established(self):
"""Test a tunnel path can be created."""
self.verify_stack_up()
src_host, dst_host, other_host = self.hosts_name_ordered()[:3]
self.verify_tunnel_established(src_host, dst_host, other_host)
class FaucetTunnelTest(FaucetStringOfDPTest):
NUM_DPS = 2
NUM_HOSTS = 2
SWITCH_TO_SWITCH_LINKS = 2
VID = 100
def acls(self):
dpid2 = self.dpids[1]
port2_1 = self.port_maps[dpid2]['port_1']
return {
1: [
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 1,
'actions': {
'allow': 0,
'output': {
'tunnel': {
'type': 'vlan',
'tunnel_id': 200,
'dp': 'faucet-2',
'port': port2_1}
}
}
}}
]
}
# DP-to-acl_in port mapping.
def acl_in_dp(self):
port_1 = self.port_map['port_1']
return {
'faucet-1': {
# First port 1, acl_in = 1
port_1: 1,
}
}
def setUp(self): # pylint: disable=invalid-name
super(FaucetTunnelTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=self.SWITCH_TO_SWITCH_LINKS,
hw_dpid=self.hw_dpid,
)
self.start_net()
def test_tunnel_established(self):
"""Test a tunnel path can be created."""
self.verify_stack_up()
src_host, other_host, dst_host = self.hosts_name_ordered()[:3]
self.verify_tunnel_established(src_host, dst_host, other_host)
def test_tunnel_path_rerouted(self):
"""Test a tunnel path is rerouted when a stack is down."""
self.verify_stack_up()
first_stack_port = self.non_host_links(self.dpid)[0].port
self.one_stack_port_down(self.dpid, self.DP_NAME, first_stack_port)
src_host, other_host, dst_host = self.hosts_name_ordered()[:3]
self.verify_tunnel_established(src_host, dst_host, other_host, packets=10)
self.set_port_up(first_stack_port, self.dpid)
class FaucetGroupTableTest(FaucetUntaggedTest):
CONFIG = """
group_table: True
""" + CONFIG_BOILER_UNTAGGED
def test_group_exist(self):
self.assertEqual(
100,
self.get_group_id_for_matching_flow(
{'dl_vlan': '100', 'dl_dst': 'ff:ff:ff:ff:ff:ff'},
table_id=self._FLOOD_TABLE))
class FaucetTaggedGroupTableTest(FaucetTaggedTest):
CONFIG = """
group_table: True
""" + CONFIG_TAGGED_BOILER
def test_group_exist(self):
self.assertEqual(
100,
self.get_group_id_for_matching_flow(
{'dl_vlan': '100', 'dl_dst': 'ff:ff:ff:ff:ff:ff'},
table_id=self._FLOOD_TABLE))
class FaucetEthSrcMaskTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
eth_src: 0e:0d:00:00:00:00/ff:ff:00:00:00:00
actions:
allow: 1
- rule:
actions:
allow: 0
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
first_host.setMAC('0e:0d:00:00:00:99')
self.retry_net_ping(hosts=(first_host, second_host))
self.wait_nonzero_packet_count_flow(
{'dl_src': '0e:0d:00:00:00:00/ff:ff:00:00:00:00'},
table_id=self._PORT_ACL_TABLE)
class FaucetDestRewriteTest(FaucetUntaggedTest):
def override_mac(): # pylint: disable=no-method-argument,no-self-use
return '0e:00:00:00:00:02'
OVERRIDE_MAC = override_mac()
def rewrite_mac(): # pylint: disable=no-method-argument,no-self-use
return '0e:00:00:00:00:03'
REWRITE_MAC = rewrite_mac()
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "%s"
actions:
allow: 1
output:
set_fields:
- eth_dst: "%s"
- rule:
actions:
allow: 1
""" % (override_mac(), rewrite_mac())
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the rewritten mac address.
tcpdump_filter = ('icmp and ether dst %s' % self.REWRITE_MAC)
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), self.OVERRIDE_MAC)),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
timeout=5, packets=1)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
def verify_dest_rewrite(self, source_host, overridden_host, rewrite_host, tcpdump_host):
overridden_host.setMAC(self.OVERRIDE_MAC)
rewrite_host.setMAC(self.REWRITE_MAC)
rewrite_host.cmd('arp -s %s %s' % (overridden_host.IP(), overridden_host.MAC()))
rewrite_host.cmd(' '.join((self.FPINGS_ARGS_ONE, overridden_host.IP())))
self.wait_until_matching_flow(
{'dl_dst': self.REWRITE_MAC},
table_id=self._ETH_DST_TABLE,
actions=['OUTPUT:%u' % self.port_map['port_3']])
tcpdump_filter = ('icmp and ether src %s and ether dst %s' % (
source_host.MAC(), rewrite_host.MAC()))
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: source_host.cmd(
'arp -s %s %s' % (rewrite_host.IP(), overridden_host.MAC())),
# this will fail if no reply
lambda: self.one_ipv4_ping(
source_host, rewrite_host.IP(), require_host_learned=False)],
timeout=3, packets=1)
# ping from h1 to h2.mac should appear in third host, and not second host, as
# the acl should rewrite the dst mac.
self.assertFalse(re.search(
'%s: ICMP echo request' % rewrite_host.IP(), tcpdump_txt))
def test_switching(self):
"""Tests that a acl can rewrite the destination mac address,
and the packet will only go out the port of the new mac.
(Continues through faucet pipeline)
"""
source_host, overridden_host, rewrite_host = self.hosts_name_ordered()[0:3]
self.verify_dest_rewrite(
source_host, overridden_host, rewrite_host, overridden_host)
@unittest.skip('use_idle_timeout unreliable')
class FaucetWithUseIdleTimeoutTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
timeout: 1
use_idle_timeout: True
""" + CONFIG_BOILER_UNTAGGED
def wait_for_host_removed(self, host, in_port, timeout=5):
for _ in range(timeout):
if not self.host_learned(host, in_port=in_port, timeout=1):
return
self.fail('host %s still learned' % host)
def wait_for_flowremoved_msg(self, src_mac=None, dst_mac=None, timeout=30):
pattern = "OFPFlowRemoved"
mac = None
if src_mac:
pattern = "OFPFlowRemoved(.*)'eth_src': '%s'" % src_mac
mac = src_mac
if dst_mac:
pattern = "OFPFlowRemoved(.*)'eth_dst': '%s'" % dst_mac
mac = dst_mac
for _ in range(timeout):
for _, debug_log_name in self._get_ofchannel_logs():
with open(debug_log_name) as debug_log:
debug = debug_log.read()
if re.search(pattern, debug):
return
time.sleep(1)
self.fail('Not received OFPFlowRemoved for host %s' % mac)
def wait_for_host_log_msg(self, host_mac, msg):
log_file = self.env['faucet']['FAUCET_LOG']
host_log_re = r'.*%s %s.*' % (msg, host_mac)
self.wait_until_matching_lines_from_file(host_log_re, log_file)
def test_untagged(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[:2]
self.swap_host_macs(first_host, second_host)
for host, port in (
(first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])):
self.wait_for_flowremoved_msg(src_mac=host.MAC())
self.require_host_learned(host, in_port=int(port))
@unittest.skip('use_idle_timeout unreliable')
class FaucetWithUseIdleTimeoutRuleExpiredTest(FaucetWithUseIdleTimeoutTest):
def test_untagged(self):
"""Host that is actively sending should have its dst rule renewed as the
rule expires. Host that is not sending expires as usual.
"""
self.ping_all_when_learned()
first_host, second_host, third_host, fourth_host = self.hosts_name_ordered()
self.host_ipv4_alias(first_host, ipaddress.ip_interface('10.99.99.1/24'))
first_host.cmd('arp -s %s %s' % (second_host.IP(), second_host.MAC()))
first_host.cmd('timeout 120s ping -I 10.99.99.1 %s &' % second_host.IP())
for host in (second_host, third_host, fourth_host):
self.host_drop_all_ips(host)
self.wait_for_host_log_msg(first_host.MAC(), 'refreshing host')
self.assertTrue(self.host_learned(
first_host, in_port=int(self.port_map['port_1'])))
for host, port in (
(second_host, self.port_map['port_2']),
(third_host, self.port_map['port_3']),
(fourth_host, self.port_map['port_4'])):
self.wait_for_flowremoved_msg(src_mac=host.MAC())
self.wait_for_host_log_msg(host.MAC(), 'expiring host')
self.wait_for_host_removed(host, in_port=int(port))
class FaucetDisconnectTest(FaucetUntaggedTest):
"""Test that switch works properly after repeated disconnections
caused by DPID mismatch"""
def update_config(self, dpid):
"""Update config with good/bad DPID"""
conf = self._get_faucet_conf()
conf['dps'][self.DP_NAME]['dp_id'] = int(dpid)
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
def test_untagged(self):
"""Run untagged test after disconnects and config update"""
# We update the config with a bad DPID and then wait for
# 'unknown datapath' messages, indicating switch connections that
# FAUCET has rejected. The switch should see them as
# 'connection reset by peer'.
mask = int(16*'f', 16)
bad_dpid = (int(self.dpid) + 0xdeadbeef) & mask
faucet_log = self.env['faucet']['FAUCET_LOG']
self.update_config(dpid=bad_dpid)
self.wait_until_matching_lines_from_file(
r'.*ERROR.*unknown datapath', faucet_log, timeout=60, count=4)
self.update_config(dpid=self.dpid)
super().test_untagged()
class FaucetBadFlowModTest(FaucetUntaggedTest):
"""Test that switch and FAUCET still work after we send some bad flow_mods"""
def base_flow_mod(self):
"""Return a base flow mod that we mess with"""
return {'dpid': self.dpid,
'cookie': 0,
'cookie_mask': 0,
'table_id': 0,
'idle_timeout': 29,
'hard_timeout': 91,
'flags': 1,
'priority': 1,
'match': {'in_port': 1},
'actions': [{
'type': 'OUTPUT',
'port': 2}]}
# For now, the flow_mods are reasonably well-formed but with
# parameters that are incorrect for the switch and for FAUCET
def bad_dpid(self):
"""Return a random, bad dpid parameter"""
mask = int(16*'f', 16)
dpid = (int(self.dpid) + random.randint(0, 1 << 63)) & mask
return {'dpid': dpid}
@staticmethod
def bad_table():
"""Return a bad table ID parameter"""
# This should be higher than FAUCET's max table ID
bad_table_start = 32
return {'table_id': random.randint(bad_table_start, 100)}
def bad_port(self):
"""Return a (hopefully very) bad port number"""
max_port = max(self.port_map.values())
offset = random.randint(0x1000, 0xE0000000)
mask = 0xEFFFFFFF
return (max_port + offset) & mask
def bad_match(self):
"""Return a bad match field"""
matches = (
# Bad input port
{'in_port': self.bad_port()},
# IPv4 (broadcast) src with bad ('reserved') ethertype
{'nw_src': '255.255.255.255', 'dl_type': 0xFFFF},
# IPv4 with IPv6 ethertype:
{'nw_src': '1.2.3.4', 'dl_type': 0x86DD},
# IPv4 address as IPv6 dst
{'ipv6_dst': '1.2.3.4', 'dl_type': 0x86DD},
# IPv6 dst with Bad/reserved ip_proto
{'ipv6_dst': '2001::aaaa:bbbb:cccc:1111', 'ip_proto': 255},
# Destination port but no transport protocol
{'tp_dst': 80},
# ARP opcode on non-ARP packet
{'arp_op': 0x3, 'dl_type': 0x1234})
match = random.sample(matches, 1)[0]
return {'match': match}
def bad_actions(self, count=1):
"""Return a questionable actions parameter"""
actions = (
{'type': 'OUTPUT', 'port': self.bad_port()},
{'type': 'PUSH_MPLS', 'ethertype': 0x8BAD},
{'type': 'SET_QUEUE', 'queue_id':
random.randint(0x8000, 0xFFFFFFFF)})
return {'actions': random.sample(actions, count)}
# Possible options for bad parameters
bad_options = ('dpid', 'table', 'match', 'actions')
def bad_flow_mod(self):
"""Return a flow mod with some bad parameters"""
flow_mod = self.base_flow_mod()
# Add two or more bad options
options = random.sample(self.bad_options,
random.randint(2, len(self.bad_options)))
for option in options:
param = getattr(self, 'bad_%s' % option)()
flow_mod.update(param)
return flow_mod
def send_flow_mod(self, flow_mod, timeout=5):
"""Send flow_mod to switch via ofctl"""
int_dpid = mininet_test_util.str_int_dpid(self.dpid)
return self._ofctl_post(int_dpid, 'stats/flowentry/modify',
timeout=timeout, params=flow_mod)
def tearDown(self, ignore_oferrors=True):
"""Ignore OF errors on teardown"""
oferrors = super().tearDown(ignore_oferrors)
oferrors = re.findall(r'type: (\w+)', oferrors)
counter = collections.Counter(oferrors)
error('Ignored OF error count: %s\n' % dict(counter))
# TODO: ensure at least one error is always generated.
# pylint: disable=arguments-differ
def test_untagged(self, count=10):
"""Send a bunch of bad flow mods, then verify connectivity"""
for _ in range(count):
flow_mod = self.bad_flow_mod()
error('sending bad flow_mod', flow_mod, '\n')
self.send_flow_mod(flow_mod)
self.ping_all_when_learned()
class FaucetUntaggedMorePortsBase(FaucetUntaggedTest):
"""Base class for untagged test with more ports"""
# pylint: disable=invalid-name
N_UNTAGGED = 16 # Maximum number of ports to test
EVENT_LOGGER_TIMEOUT = 180 # Timeout for event logger process
# Config lines for additional ports
CONFIG_EXTRA_PORT = """
{port}:
native_vlan: 100""" + "\n"
def _init_faucet_config(self): # pylint: disable=invalid-name
"""Extend config with more ports if needed"""
self.assertTrue(self.CONFIG.endswith(CONFIG_BOILER_UNTAGGED))
# We know how to extend the config for more ports
base_port_count = len(re.findall('port', CONFIG_BOILER_UNTAGGED))
ports = self.topo.dpid_ports(self.dpid)
for port in ports[base_port_count:]:
self.CONFIG += self.CONFIG_EXTRA_PORT.format(port=port)
super()._init_faucet_config()
def setUp(self):
"""Make sure N_UNTAGGED doesn't exceed hw port count"""
if self.config and self.config.get('hw_switch', False):
self.N_UNTAGGED = min(len(self.config['dp_ports']),
self.N_UNTAGGED)
error('(%d ports) ' % self.N_UNTAGGED)
super().setUp()
class FaucetSingleUntagged32PortTest(FaucetUntaggedMorePortsBase):
"""Untagged test with up to 32 ports"""
# pylint: disable=invalid-name
N_UNTAGGED = 32 # Maximum number of ports to test
@unittest.skip('slow and potentially unreliable on travis')
class FaucetSingleUntagged48PortTest(FaucetUntaggedMorePortsBase):
"""Untagged test with up to 48 ports"""
# pylint: disable=invalid-name
N_UNTAGGED = 48 # Maximum number of ports to test
EVENT_LOGGER_TIMEOUT = 360 # Timeout for event logger process
|
the-stack_0_4632 | """
molssi_math.py
A sample repository for the MolSSI Workshop at UF.
Some math functions.
"""
def mean(num_list):
"""
Calculate the mean/average of a list of numbers.
Parameters
-----------
num_list : list
The list to take the average of
Returns
-----------
mean_list : float
The mean of the list
"""
# Check that input is type list
if not isinstance(num_list, list):
raise TypeError('Invalid input %s - must be type list' %(num_list))
# Check that list is not empty
if num_list == []:
raise ValueError('Cannot calculate the mean of an empty list.')
try:
mean_list = sum(num_list) / len(num_list)
except TypeError:
raise TypeError('Cannot calculate mean of list - all list elements must be numeric')
return mean_list
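# Illustrative usage of mean() -- the values below are hypothetical and only
# sketch the expected behaviour; they are not part of the original module:
#
#   >>> mean([1, 2, 3, 4])
#   2.5
#   >>> mean([])        # raises ValueError
#   >>> mean('abc')     # raises TypeError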
def factorial(n):
"""
Calculate a factorial
Parameters
-----------
n : int
The factorial parameter.
Returns
-------
factorial : int
The requested factorial
"""
fact = 1
for i in range(1, n+1):
fact = fact * i
return fact
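# Illustrative usage of factorial() (hypothetical values, shown only as a sketch):
#
#   >>> factorial(5)
#   120
#   >>> factorial(0)    # empty product by convention
#   1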
def canvas(with_attribution=True):
"""
Placeholder function to show example docstring (NumPy format)
Replace this function and doc string for your own project
Parameters
----------
with_attribution : bool, Optional, default: True
Set whether or not to display who the quote is from
Returns
-------
quote : str
Compiled string including quote and optional attribution
"""
quote = "The code is but a canvas to our imagination."
if with_attribution:
quote += "\n\t- Adapted from Henry David Thoreau"
return quote
if __name__ == "__main__":
# Do something if this file is invoked on its own
print(canvas())
|
the-stack_0_4633 | from re import L
import discord
from discord.ext import commands
from discord.ext.commands import bot
from settings import constants
owners = constants.owners
admins = constants.admins
def is_owner(ctx):
""" Checks if the author is one of the owners """
return ctx.author.id in owners
def is_admin(ctx):
if (
ctx.author.id in ctx.bot.constants.admins
or ctx.author.id in ctx.bot.constants.owners
):
return True
return False
async def check_permissions(ctx, perms, *, check=all):
""" Checks if author has permissions to a permission """
if ctx.author.id in owners:
return True
resolved = ctx.author.guild_permissions
guild_perm_checker = check(
getattr(resolved, name, None) == value for name, value in perms.items()
)
if guild_perm_checker is False:
# Try to see if the user has channel permissions that override
resolved = ctx.channel.permissions_for(ctx.author)
return check(
getattr(resolved, name, None) == value for name, value in perms.items()
)
async def check_bot_permissions(ctx, perms, *, check=all):
""" Checks if author has permissions to a permission """
if ctx.guild:
resolved = ctx.guild.me.guild_permissions
guild_perm_checker = check(
getattr(resolved, name, None) == value for name, value in perms.items()
)
if guild_perm_checker is False:
# Try to see if the user has channel permissions that override
resolved = ctx.channel.permissions_for(ctx.guild.me)
return check(
getattr(resolved, name, None) == value for name, value in perms.items()
)
else:
return True
def has_perms(*, check=all, **perms): # Decorator to check if a user has perms
async def pred(ctx):
result = await check_permissions(ctx, perms, check=check)
perm_list = [
x.title().replace("_", " ").replace("Tts", "TTS").replace("Guild", "Server")
for x in perms
]
if result is False:
raise commands.BadArgument(
message=f"You are missing the following permission{'' if len(perm_list) == 1 else 's'}: `{', '.join(perm_list)}`"
)
return result
return commands.check(pred)
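# Sketch of how has_perms() might be applied to a command. The 'purge' command
# below is purely illustrative (it is not defined in this module) and assumes a
# commands.Bot/Cog context where 'manage_messages' is the required permission:
#
#   @commands.command()
#   @has_perms(manage_messages=True)
#   async def purge(ctx, amount: int = 10):
#       await ctx.channel.purge(limit=amount)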
def bot_has_perms(*, check=all, **perms): # Decorator to check if the bot has perms
async def pred(ctx):
result = await check_bot_permissions(ctx, perms, check=check)
if (
result is False
): # We know its a guild because permissions failed in check_bot_permissions()
guild_perms = [x[0] for x in ctx.guild.me.guild_permissions if x[1] is True]
channel_perms = [
x[0] for x in ctx.channel.permissions_for(ctx.guild.me) if x[1] is True
]
botperms = guild_perms + channel_perms
perms_needed = []
for x in perms:
if not x in botperms: # Only complain about the perms we don't have
perms_needed.append(x)
perm_list = [
x.title().replace("_", " ").replace("Tts", "TTS") for x in perms_needed
]
raise commands.BadArgument(
message=f"I require the following permission{'' if len(perm_list) == 1 else 's'}: `{', '.join(perm_list)}`"
)
return result
return commands.check(pred)
def is_bot_admin(): # Decorator for bot admin commands
async def pred(ctx):
return is_admin(ctx)
return commands.check(pred)
async def check_priv(ctx, member):
"""
Handle permission hierarchy for commands
Return the reason for failure.
"""
try:
# Self checks
if member == ctx.author:
return f"You cannot {ctx.command.name} yourself."
if member.id == ctx.bot.user.id:
return f"I cannot {ctx.command.name} myself."
# Bot lacks permissions
if member.id == ctx.guild.owner.id:
return f"I cannot {ctx.command.name} the server owner."
if ctx.guild.me.top_role.position == member.top_role.position:
return f"I cannot {ctx.command.name} a user with equal permissions."
if ctx.guild.me.top_role.position < member.top_role.position:
return f"I cannot {ctx.command.name} a user with superior permissions."
if member.id in owners:
return f"I cannot {ctx.command.name} my creator."
# Check if user bypasses
if ctx.author.id == ctx.guild.owner.id:
return
if ctx.author.id in owners:
return
# Now permission check
if ctx.author.top_role.position == member.top_role.position:
return f"You cannot {ctx.command.name} a user with equal permissions."
if ctx.author.top_role.position < member.top_role.position:
return f"You cannot {ctx.command.name} a user with superior permissions."
except Exception as e:
print(e)
pass
async def checker(ctx, value):
if type(value) is list:
for x in value:
result = await check_priv(ctx, member=x)
if type(value) is not list:
result = await check_priv(ctx, member=value)
return result
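# Sketch of how check_priv()/checker() could guard a moderation command. The
# 'kick' command below is hypothetical and only illustrates the intended flow:
#
#   @commands.command()
#   @has_perms(kick_members=True)
#   async def kick(ctx, member: discord.Member, *, reason=None):
#       failure = await checker(ctx, member)
#       if failure:
#           return await ctx.send(failure)
#       await member.kick(reason=reason)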
def can_handle(ctx, permission: str):
""" Checks if bot has permissions or is in DMs right now """
return isinstance(ctx.channel, discord.DMChannel) or getattr(
ctx.channel.permissions_for(ctx.guild.me), permission
)
|
the-stack_0_4634 | import matplotlib as mlt
# mlt.use('TkAgg')
# mlt.use('Qt5Agg')
import matplotlib.pyplot as plt
import numpy as np
import pykep as pk
from utils import *
from conversions import *
class plotting(object):
"""
Provide visualization for hodographicShaping trajectories
Samples trajectory at initialization
Methods to plot various characteristics (3D trajectory, hodograph, etc.)
"""
def __init__(self, trajectory, samples=100, folder='graveyard', save=False,
ephemSource = 'jpl'):
'''
Create plotting object
trajectory needs to be of type 'hodographicShaping'
ephemSource needs to correspond to the one used for trajectory (due to
planet names)
'''
print('\nBegin plotting.')
print('Sampling at', samples, 'points.')
self.samples = samples
self.trajectory = trajectory
self.folder = folder
self.save = save
self.ephemSource = ephemSource
if self.save==True:
checkFolder(self.folder)
# sample planets and trajectory
self.plPosCart, self.plPosCyl, self.plVelCart, self.plVelCyl = \
self.samplePlanets(trajectory, samples=samples)
self.traPosCart, self.traPosCyl = \
self.sampleTrajectoryPosition(trajectory, samples=samples)
self.traVelCart, self.traVelCyl = \
self.sampleTrajectoryVelocity(trajectory, samples=samples)
self.traAccCyl = \
self.sampleTrajectoryAcceleration(trajectory, samples=samples)
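# Example usage of this class (illustrative sketch only; assumes a solved
# 'hodographicShaping' trajectory object named 'shaping' exists elsewhere):
#
#   plots = plotting(shaping, samples=200, folder='output', save=True)
#   plots.trajectory3D()
#   plots.hodograph(twoDplot=True)
#   plots.thrust()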
def trajectory3D(self, save=None, folder=None, scaling=True):
"""
Plot the given trajectory in 3D
"""
print('Plot 3D trajectory')
# start figure
fig = newFigure(height=6.4)
ax = fig.gca(projection='3d')
# Sun
ax.scatter([0], [0], [0], s=100, color='yellow', label='Sun', marker='o', edgecolor='orange',)
# Departure planet
ax.plot(self.plPosCart['xDep']/pk.AU, self.plPosCart['yDep']/pk.AU, self.plPosCart['zDep']/pk.AU, label='Departure planet', c='C0')
ax.scatter(self.plPosCart['xDep'][0]/pk.AU, self.plPosCart['yDep'][0]/pk.AU, self.plPosCart['zDep'][0]/pk.AU, c='k')
ax.scatter(self.plPosCart['xDep'][-1]/pk.AU, self.plPosCart['yDep'][-1]/pk.AU, self.plPosCart['zDep'][-1]/pk.AU, c='k')
# Arrival planet
ax.plot(self.plPosCart['xArr']/pk.AU, self.plPosCart['yArr']/pk.AU, self.plPosCart['zArr']/pk.AU, label='Arrival planet', c='C3')
ax.scatter(self.plPosCart['xArr'][0]/pk.AU, self.plPosCart['yArr'][0]/pk.AU, self.plPosCart['zArr'][0]/pk.AU, c='k')
ax.scatter(self.plPosCart['xArr'][-1]/pk.AU, self.plPosCart['yArr'][-1]/pk.AU, self.plPosCart['zArr'][-1]/pk.AU, c='k')
# Trajectory
ax.plot(self.traPosCart['x']/pk.AU, self.traPosCart['y']/pk.AU, self.traPosCart['z']/pk.AU, label='Trajectory', c='C1')
ax.scatter(self.traPosCart['x'][0]/pk.AU, self.traPosCart['y'][0]/pk.AU, self.traPosCart['z'][0]/pk.AU, label='launch', c='C2')
ax.scatter(self.traPosCart['x'][-1]/pk.AU, self.traPosCart['y'][-1]/pk.AU, self.traPosCart['z'][-1]/pk.AU, label='arrival', c='C3')
# formatting
if scaling:
axisEqual3D(ax)
# plt.title('Orbits and trajectory')
ax.set_xlabel('x [AU]', labelpad=15)
ax.set_ylabel('y [AU]', labelpad=15)
ax.set_zlabel('z [AU]', labelpad=15)
plt.grid()
plt.legend()
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
plt.savefig(os.path.join(os.getcwd(), folder, 'trajectory3D.pdf'), dpi=300)
plt.savefig(os.path.join(os.getcwd(), folder, 'trajectory3D.png'), dpi=300)
plt.show()
def trajectory2D(self, save=None, folder=None, quiver=False):
'''
Two dimensional plot in the ecliptic plane
'''
fig = newFigure(height=6.4)
# Sun
sun = plt.scatter([0], [0], s=100, color='yellow', label='Sun', marker='o', edgecolor='orange')
# arrival planet
plot1 = plt.plot(self.plPosCart['xArr']/pk.AU, self.plPosCart['yArr']/pk.AU, label='Arrival Planet', color='C3', zorder=1)
plot0 = plt.scatter(self.plPosCart['xArr'][0]/pk.AU, self.plPosCart['yArr'][0]/pk.AU, color='k', zorder=2)
plot0 = plt.scatter(self.plPosCart['xArr'][-1]/pk.AU, self.plPosCart['yArr'][-1]/pk.AU, color='k', zorder=2)
# departure planet
plot1 = plt.plot(self.plPosCart['xDep']/pk.AU, self.plPosCart['yDep']/pk.AU, label='Departure Planet', color='C0', zorder=1)
plot1 = plt.scatter(self.plPosCart['xDep'][0]/pk.AU, self.plPosCart['yDep'][0]/pk.AU, color='C2', label='launch', zorder=2)
plot1 = plt.scatter(self.plPosCart['xArr'][-1]/pk.AU, self.plPosCart['yArr'][-1]/pk.AU, color='C3', label='arrival', zorder=2)
# trajectory
plot1 = plt.plot(self.traPosCart['x']/pk.AU, self.traPosCart['y']/pk.AU, label='Trajectory', color='C1', zorder=1)
plot0 = plt.scatter(self.traPosCart['x'][0]/pk.AU, self.traPosCart['y'][0]/pk.AU, color='k', zorder=2)
plot0 = plt.scatter(self.traPosCart['x'][-1]/pk.AU, self.traPosCart['y'][-1]/pk.AU, color='k', zorder=2)
plt.xlabel('$x$ [AU]')
plt.ylabel('$y$ [AU]')
plt.grid()
ax = plt.gca()
ax.set_axisbelow(True)
plt.legend()
plt.axis('equal')
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
plt.savefig(os.path.join(os.getcwd(), folder, 'trajectory2D.pdf'), dpi=300)
plt.savefig(os.path.join(os.getcwd(), folder, 'trajectory2D.png'), dpi=300)
plt.show()
def trajectory3Danimation(self, save=None, folder=None):
"""
Animation of the flown trajectory
"""
print('Show animated trajectory.')
import matplotlib.animation as animation
# data = np.array([x, y])
data = np.vstack((self.traPosCart['x'],
self.traPosCart['y'],
self.traPosCart['z']))
dataDep = np.vstack((self.plPosCart['xDep'],
self.plPosCart['yDep'],
self.plPosCart['zDep']))
dataArr = np.vstack((self.plPosCart['xArr'],
self.plPosCart['yArr'],
self.plPosCart['zArr']))
data /= pk.AU
dataDep /= pk.AU
dataArr /= pk.AU
# create figure
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection='3d')
# start with an empty plot
line0, = plt.plot([], [], [], "C1-", zorder=3)
dot0, = plt.plot([], [], [], "C1o", zorder=3)
dot1, = plt.plot([], [], [], "C0o", zorder=3)
dot2, = plt.plot([], [], [], "C3o", zorder=3)
# Sun
sun = ax.scatter([0], [0], [0], s=100, color='yellow', label='Sun',
marker='o', edgecolor='orange')
# Departure planet
planet1 = ax.plot(self.plPosCart['xDep']/pk.AU,
self.plPosCart['yDep']/pk.AU,
self.plPosCart['zDep']/pk.AU,
label='Departure planet', c='C0')
# Arrival planet
planet2 = ax.plot(self.plPosCart['xArr']/pk.AU,
self.plPosCart['yArr']/pk.AU,
self.plPosCart['zArr']/pk.AU,
label='Arrival planet', c='C3')
# formatting
ax.set_xlabel('x [AU]', labelpad=15)
ax.set_ylabel('y [AU]', labelpad=15)
ax.set_zlabel('z [AU]', labelpad=15)
# ax.set_zlim(-0.05, 0.05)
axisEqual3D(ax)
plt.grid(True)
# plt.title("Low-thrust trajectory")
# this function will be called at every iteration
def update_line(num, data, line, dot0, dot1, dot2):
line.set_data(data[0:2, :num])
line.set_3d_properties(data[2, :num])
dot0.set_data(data[0:2, num])
dot0.set_3d_properties(data[2, num])
dot1.set_data(dataDep[0:2, num])
dot1.set_3d_properties(dataDep[2, num])
dot2.set_data(dataArr[0:2, num])
dot2.set_3d_properties(dataArr[2, num])
return line,
nFrame = int(len(self.traPosCart['x']))
line_ani = animation.FuncAnimation(fig, update_line, frames=nFrame,
fargs=(data, line0, dot0, dot1, dot2),
interval=20, repeat_delay=1e3)
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
Writer = animation.writers['ffmpeg']
writer = Writer(fps=60, metadata=dict(artist='Leon S'),
bitrate=1800)
line_ani.save(os.path.join(os.getcwd(), folder, 'trajectory3D.mp4'),
writer=writer)
# line_ani.save(os.path.join(os.getcwd(), folder,
# 'trajectory3D.mp4'), fps=30, extra_args=['-vcodec', 'libx264'])
plt.show()
def hodograph(self, twoDplot=False, save=None, folder=None):
"""
Plot the trajectory's hodograph
Plot the given trajectory in 2D as subplot if twoDplot is set to True
"""
print('Plot hodograph')
# Hoodgraph and orbits
if twoDplot:
figHodoOrbit = newFigure(height=7)
else:
figHodoOrbit = newFigure(height=3)
# Hodograph
if twoDplot:
plt.subplot(2, 1, 1)
#departure planet
plot1 = plt.plot(self.plVelCyl['VrDep']/1E3, self.plVelCyl['VtDep']/1E3, label='Departure Planet', color='C0', zorder=1)
plot0 = plt.scatter(self.plVelCyl['VrDep'][-1]/1E3, self.plVelCyl['VtDep'][-1]/1E3, color='k', zorder=2)
#arrival planet
plot1 = plt.plot(self.plVelCyl['VrArr']/1E3, self.plVelCyl['VtArr']/1E3, label='Arrival Planet', color='C3', zorder=1)
plot0 = plt.scatter(self.plVelCyl['VrArr'][0]/1E3, self.plVelCyl['VtArr'][0]/1E3, color='k', zorder=2)
# trajectory
plot1 = plt.plot(self.traVelCyl['vr']/1E3, self.traVelCyl['vt']/1E3, label='Trajectory', color='C1', zorder=1)
plot1 = plt.scatter(self.traVelCyl['vr'][0]/1E3, self.traVelCyl['vt'][0]/1E3, color='C2', label='launch', zorder=2)
plot1 = plt.scatter(self.traVelCyl['vr'][-1]/1E3, self.traVelCyl['vt'][-1]/1E3, color='C3', label='arrival', zorder=2)
plt.xlabel('$V_r$ [km/s]')
plt.ylabel('$V_t$ [km/s]')
plt.grid()
ax = plt.gca()
ax.set_axisbelow(True)
plt.legend()
plt.axis('equal')
# Positions
if twoDplot:
plt.title('Hodograph')
plt.subplot(2, 1, 2)
# trajectory
plot1 = plt.plot(self.traPosCart['x']/pk.AU, self.traPosCart['y']/pk.AU, label='Trajectory', color='C1', zorder=1)
plot0 = plt.scatter(self.traPosCart['x'][0]/pk.AU, self.traPosCart['y'][0]/pk.AU, color='k', zorder=2)
plot0 = plt.scatter(self.traPosCart['x'][-1]/pk.AU, self.traPosCart['y'][-1]/pk.AU, color='k', zorder=2)
# arrival planet
plot1 = plt.plot(self.plPosCart['xArr']/pk.AU, self.plPosCart['yArr']/pk.AU, label='Arrival Planet', color='C3', zorder=1)
plot0 = plt.scatter(self.plPosCart['xArr'][0]/pk.AU, self.plPosCart['yArr'][0]/pk.AU, color='k', zorder=2)
plot0 = plt.scatter(self.plPosCart['xArr'][-1]/pk.AU, self.plPosCart['yArr'][-1]/pk.AU, color='k', zorder=2)
# departure planet
plot1 = plt.plot(self.plPosCart['xDep']/pk.AU, self.plPosCart['yDep']/pk.AU, label='Departure Planet', color='C0', zorder=1)
plot1 = plt.scatter(self.plPosCart['xDep'][0]/pk.AU, self.plPosCart['yDep'][0]/pk.AU, color='C2', label='launch', zorder=2)
plot1 = plt.scatter(self.plPosCart['xArr'][-1]/pk.AU, self.plPosCart['yArr'][-1]/pk.AU, color='C3', label='arrival', zorder=2)
plt.xlabel('$x$ [AU]')
plt.ylabel('$y$ [AU]')
plt.grid()
ax = plt.gca()
ax.set_axisbelow(True)
plt.legend()
plt.title('Orbit')
plt.axis('equal')
plt.tight_layout()
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
plt.savefig(os.path.join(os.getcwd(), folder, 'hodograph.pdf'), dpi=300)
plt.savefig(os.path.join(os.getcwd(), folder, 'hodograph.png'), dpi=300)
plt.show()
def thrust(self, save=None, folder=None):
"""
Plot the thrust profile in Cylindrical coordinates
"""
print('Plot thrust')
fig = newFigure(height=3)
samplePoints = self.tSampleSec
# Cylindrical accelerations
plot1 = plt.plot(self.tSample, self.trajectory.fr(samplePoints), ':', label=r'$f_r$')
plot1 = plt.plot(self.tSample, self.trajectory.ft(samplePoints), '--', label=r'$f_\theta$')
plot1 = plt.plot(self.tSample, self.trajectory.fz(samplePoints), '-.', label=r'$f_z$')
plot1 = plt.plot(self.tSample, self.trajectory.fTotal(samplePoints), '-', label=r'$f_{\mathrm{total}}$', alpha=0.5)
plt.grid()
plt.xlabel('time [mjd2000]')
plt.ylabel(r'$f$ $[m/s^2]$')
plt.xlim([self.tSample[0], self.tSample[-1]])
# plt.ylim([-0.0004, 0.0005])
plt.title('Thrust acceleration')
plt.legend()
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
plt.savefig(os.path.join(os.getcwd(), folder, 'thrust.pdf'), dpi=300)
plt.savefig(os.path.join(os.getcwd(), folder, 'thrust.png'), dpi=300)
plt.show()
def figure119(self, save=None, folder=None):
"""
Plot the thrust profile next to the 3D trajectory
Recreates Figure 11.9 in [Gondelach, 2012]
"""
print('Plot trajectory and thrust, recreating Figure 11.9')
samplePoints = self.tSampleSec
# initialize figure
fig = plt.figure(figsize=(10, 4))
gs = mlt.gridspec.GridSpec(1, 2, width_ratios=[3, 2])
ax0 = plt.subplot(gs[0], projection='3d')
# plot 3D trajectory
ax0.plot(self.plPosCart['xDep']/pk.AU, self.plPosCart['yDep']/pk.AU, self.plPosCart['zDep']/pk.AU, label='Earth', c='b')
ax0.plot(self.plPosCart['xArr']/pk.AU, self.plPosCart['yArr']/pk.AU, self.plPosCart['zArr']/pk.AU, label='Mars', c='k')
ax0.plot(self.traPosCart['x']/pk.AU, self.traPosCart['y']/pk.AU, self.traPosCart['z']/pk.AU, label='Transfer', c='r')
# axis formatting
ax0.set_xlim(-2, 2)
ax0.set_xticks([-2, -1, 0, 1, 2])
ax0.set_ylim(-2, 2)
ax0.set_yticks([-2, 0, 2])
ax0.set_zlim(-0.06, 0.05)
ax0.view_init(30, -95)
ax0.xaxis.pane.fill = False
ax0.yaxis.pane.fill = False
ax0.zaxis.pane.fill = False
ax0.grid(False)
ax0.set_xlabel('x [AU]')
ax0.set_ylabel('y [AU]')
ax0.set_zlabel('z [AU]', labelpad=10)
ax0.tick_params(axis='z', pad=8)
# plt.legend()
# plot thrust profile
ax1 = plt.subplot(gs[1])
tDays = np.linspace(0, self.trajectory.tof, self.samples)
ax1.plot(tDays, self.trajectory.fr(samplePoints), '-b', label='Radial')
ax1.plot(tDays, self.trajectory.ft(samplePoints), '-r', label='Normal')
ax1.plot(tDays, self.trajectory.fz(samplePoints), '-g', label='Axial')
ax1.plot(tDays, self.trajectory.fTotal(samplePoints), '--k', label='Total')
ax1.set_xlabel('Time [days]')
ax1.set_xticks([0, 200, 400, 600, 800, 1000, 1200])
ax1.set_ylabel('Thrust acceleration [m/s^2]')
ax1.set_ylim([-5e-5, 20e-5])
ax1.set_xlim(left=tDays[0])
ax1.ticklabel_format(style='sci', axis='y', scilimits=(-5,-5))
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.legend()
fig.tight_layout()
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
plt.savefig(os.path.join(os.getcwd(), folder, '119.pdf'), dpi=300)
plt.savefig(os.path.join(os.getcwd(), folder, '119.png'), dpi=300)
plt.show()
def stateVectorsAll(self, save=None, folder=None):
"""
Plot the spacecraft's state vectors over time
Velocity and position in cylindrical and cartesian coordinates
"""
print('Plot position and velocity (cylindrical and cartesian)')
fig = plt.figure(figsize=(12, 15))
# Cartesian velocities
nPlots = 6
plt.subplot(nPlots, 2, 1)
plot1 = plt.plot(self.tSample, self.traVelCart['vx'], color='C0')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel('$V_x$ [m/s]')
plt.title('Cartesian Velocities')
plt.subplot(nPlots, 2, 3)
plot1 = plt.plot(self.tSample, self.traVelCart['vy'], color='C0')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel('$V_y$ [m/s]')
plt.subplot(nPlots, 2, 5)
plot1 = plt.plot(self.tSample, self.traVelCart['vz'], color='C0')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel('$V_z$ [m/s]')
# Cylindrical velocities
plt.subplot(nPlots, 2, 2)
plot1 = plt.plot(self.tSample, self.traVelCyl['vr'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$V_r$ [m/s]')
plt.title('Cylindrical Velocities')
plt.subplot(nPlots, 2, 4)
plot1 = plt.plot(self.tSample, self.traVelCyl['vt'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$V_\theta$ [m/s]')
plt.subplot(nPlots, 2, 6)
plot1 = plt.plot(self.tSample, self.traVelCyl['vz'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$V_z$ [m/s]')
# Cartesian positions
plt.subplot(nPlots, 2, 7)
plot1 = plt.plot(self.tSample, self.traPosCart['x']/pk.AU, color='C2')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel('$x$ [AU]')
plt.title('Cartesian Positions')
plt.subplot(nPlots, 2, 9)
plot1 = plt.plot(self.tSample, self.traPosCart['y']/pk.AU, color='C2')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel('$y$ [AU]')
plt.subplot(nPlots, 2, 11)
plot1 = plt.plot(self.tSample, self.traPosCart['z']/pk.AU, color='C2')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel('$z$ [AU]')
# Cylindrical positions
plt.subplot(nPlots, 2, 8)
plot1 = plt.plot(self.tSample, self.traPosCyl['r']/pk.AU, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$r$ [AU]')
plt.title('Cylindrical Positions')
plt.subplot(nPlots, 2, 10)
plot1 = plt.plot(self.tSample, self.traPosCyl['t']*180/np.pi, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$\theta$ [deg]')
plt.subplot(nPlots, 2, 12)
plot1 = plt.plot(self.tSample, self.traPosCyl['z']/pk.AU, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$z$ [AU]')
plt.tight_layout()
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
plt.savefig(os.path.join(os.getcwd(), folder, 'state.pdf'), dpi=300)
plt.savefig(os.path.join(os.getcwd(), folder, 'state.png'), dpi=300)
plt.show()
def stateVectorsCylindrical(self, save=None, folder=None):
"""
Plot the spacecraft's state vectors over time
Velocity and position in cylindrical coordinates
"""
print('Plot cylindrical state vectors')
fig = plt.figure(figsize=(12, 12))
nPlots = 3
# Cylindrical positions
plt.subplot(nPlots, 3, 1)
plot1 = plt.plot(self.tSample, self.traPosCyl['r']/pk.AU, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$r$ [AU]')
plt.title('Cylindrical Positions')
plt.subplot(nPlots, 3, 4)
plot1 = plt.plot(self.tSample, self.traPosCyl['t']*180/np.pi, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$\theta$ [deg]')
plt.subplot(nPlots, 3, 7)
plot1 = plt.plot(self.tSample, self.traPosCyl['z']/pk.AU, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$z$ [AU]')
# Cylindrical velocities
plt.subplot(nPlots, 3, 2)
plot1 = plt.plot(self.tSample, self.traVelCyl['vr'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$V_r$ [m/s]')
plt.title('Cylindrical Velocities')
plt.subplot(nPlots, 3, 5)
plot1 = plt.plot(self.tSample, self.traVelCyl['vt'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$V_\theta$ [m/s]')
plt.subplot(nPlots, 3, 8)
plot1 = plt.plot(self.tSample, self.traVelCyl['vz'], color='C1')
plt.grid()
plt.xlabel('time [days]')
        plt.ylabel(r'$V_z$ [m/s]')
        plt.tight_layout()
        if save==None:
            save = self.save
        if folder==None:
            folder = self.folder
        if save==True:
            checkFolder(folder)
            # NOTE: the output file name 'stateCylindrical' is assumed here
            plt.savefig(os.path.join(os.getcwd(), folder, 'stateCylindrical.pdf'), dpi=300)
            plt.savefig(os.path.join(os.getcwd(), folder, 'stateCylindrical.png'), dpi=300)
        plt.show()
def stateVectorsCylindricalInclPlanets(self, save=None, folder=None):
"""
Plot the spacecraft's and planets' state vectors over time
        Position, velocity and acceleration in cylindrical coordinates; position and velocity are shown for the spacecraft and both planets
"""
print('Plot cylindrical state vectors')
fig = plt.figure(figsize=(12, 12))
nPlots = 3
# Cylindrical positions
plt.subplot(nPlots, 3, 1)
plot1 = plt.plot(self.tSample, self.traPosCyl['r']/pk.AU, label='Trajectory', c='C1')
plot1 = plt.plot(self.tSample, self.plPosCyl['rDep']/pk.AU, label='Departure planet', c='C0')
plot1 = plt.plot(self.tSample, self.plPosCyl['rArr']/pk.AU, label='Arrival planet', c='C3')
plt.grid()
plt.legend()
plt.xlabel('time [days]')
plt.ylabel(r'$r$ [AU]')
plt.title('Cylindrical Positions')
plt.subplot(nPlots, 3, 4)
tsaw = self.traPosCyl['t']*180/np.pi
for i in range(0, 6):
tsaw[tsaw > 180] = tsaw[tsaw > 180] - 360 # make saw pattern
plot1 = plt.plot(self.tSample, tsaw, label='Trajectory', c='C1')
plot1 = plt.plot(self.tSample, self.plPosCyl['tDep']*180/np.pi, label='Departure planet', c='C0')
plot1 = plt.plot(self.tSample, self.plPosCyl['tArr']*180/np.pi, label='Arrival planet', c='C3')
plt.grid()
plt.legend()
plt.xlabel('time [days]')
plt.ylabel(r'$\theta$ [deg]')
plt.subplot(nPlots, 3, 7)
plot1 = plt.plot(self.tSample, self.traPosCyl['z']/pk.AU, label='Trajectory', c='C1')
plot1 = plt.plot(self.tSample, self.plPosCyl['zDep']/pk.AU, label='Departure planet', c='C0')
plot1 = plt.plot(self.tSample, self.plPosCyl['zArr']/pk.AU, label='Arrival planet', c='C3')
plt.grid()
plt.legend()
plt.xlabel('time [days]')
plt.ylabel(r'$z$ [AU]')
# Cylindrical velocities
plt.subplot(nPlots, 3, 2)
plot1 = plt.plot(self.tSample, self.traVelCyl['vr'], label='Trajectory', c='C1')
plot1 = plt.plot(self.tSample, self.plVelCyl['VrDep'], label='Departure planet', c='C0')
plot1 = plt.plot(self.tSample, self.plVelCyl['VrArr'], label='Arrival planet', c='C3')
plt.grid()
plt.legend()
plt.xlabel('time [days]')
plt.ylabel(r'$V_r$ [m/s]')
plt.title('Cylindrical Velocities')
plt.subplot(nPlots, 3, 5)
plot1 = plt.plot(self.tSample, self.traVelCyl['vt'], label='Trajectory', c='C1')
plot1 = plt.plot(self.tSample, self.plVelCyl['VtDep'], label='Departure planet', c='C0')
plot1 = plt.plot(self.tSample, self.plVelCyl['VtArr'], label='Arrival planet', c='C3')
plt.grid()
plt.legend()
plt.xlabel('time [days]')
plt.ylabel(r'$V_\theta$ [m/s]')
plt.subplot(nPlots, 3, 8)
plot1 = plt.plot(self.tSample, self.traVelCyl['vz'], label='Trajectory', c='C1')
plot1 = plt.plot(self.tSample, self.plVelCyl['VzDep'], label='Departure planet', c='C0')
plot1 = plt.plot(self.tSample, self.plVelCyl['VzArr'], label='Arrival planet', c='C3')
plt.grid()
plt.legend()
plt.xlabel('time [days]')
plt.ylabel(r'$V_z$ [m/s]')
# Cylindrical accelerations
plt.subplot(nPlots, 3, 3)
plot1 = plt.plot(self.tSample, self.traAccCyl['ar'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$a_r$ [m/s^2]')
plt.title('Cylindrical Accelerations')
plt.subplot(nPlots, 3, 6)
plot1 = plt.plot(self.tSample, self.traAccCyl['at'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$a_\theta$ [m/s^2]')
plt.subplot(nPlots, 3, 9)
plot1 = plt.plot(self.tSample, self.traAccCyl['az'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$a_z$ [m/s^2]')
plt.tight_layout()
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
plt.savefig(os.path.join(os.getcwd(), folder, 'stateCylindricalInclPlanets.pdf'), dpi=300)
plt.savefig(os.path.join(os.getcwd(), folder, 'stateCylindricalInclPlanets.png'), dpi=300)
plt.show()
def stateVectorsCartesian(self, save=None, folder=None):
"""
        Plot the spacecraft's state vectors over time:
        position and velocity in Cartesian coordinates
"""
print('Plot cartesian state vectors')
fig = plt.figure(figsize=(12, 12))
nPlots = 3
# Cartesian positions
plt.subplot(nPlots, 2, 1)
plot1 = plt.plot(self.tSample, self.traPosCart['x']/pk.AU, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$x$ [AU]')
plt.title('Cartesian Positions')
plt.subplot(nPlots, 2, 3)
plot1 = plt.plot(self.tSample, self.traPosCart['y']/pk.AU, color='C3')
plt.grid()
plt.xlabel('time [days]')
        plt.ylabel(r'$y$ [AU]')
plt.subplot(nPlots, 2, 5)
plot1 = plt.plot(self.tSample, self.traPosCart['z']/pk.AU, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$z$ [AU]')
# Cartesian velocities
plt.subplot(nPlots, 2, 2)
plot1 = plt.plot(self.tSample, self.traVelCart['vx'], color='C1')
plt.grid()
plt.xlabel('time [days]')
        plt.ylabel(r'$V_x$ [m/s]')
plt.title('Cartesian Velocities')
plt.subplot(nPlots, 2, 4)
plot1 = plt.plot(self.tSample, self.traVelCart['vy'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$V_y$ [m/s]')
plt.subplot(nPlots, 2, 6)
plot1 = plt.plot(self.tSample, self.traVelCart['vz'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$V_z$ [m/s]')
plt.tight_layout()
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
plt.savefig(os.path.join(os.getcwd(), folder, 'stateCartesian.pdf'), dpi=300)
plt.savefig(os.path.join(os.getcwd(), folder, 'stateCartesian.png'), dpi=300)
plt.show()
def samplePlanets(self, trajectory, samples=100):
"""
        Return dictionaries with sampled position and velocity vectors (Cartesian
        and cylindrical) of the departure and arrival planets of the given trajectory
"""
# define planets
if self.ephemSource == 'jpl':
planetDep = pk.planet.jpl_lp(trajectory.departureBody)
planetArr = pk.planet.jpl_lp(trajectory.arrivalBody)
elif self.ephemSource == 'spice':
planetDep = pk.planet.spice(trajectory.departureBody, 'sun', 'eclipj2000')
planetArr = pk.planet.spice(trajectory.arrivalBody, 'sun', 'eclipj2000')
        else:
            raise ValueError('Invalid ephemerides source: %s' % self.ephemSource)
# time variable [days]
self.tSample = np.linspace(self.trajectory.jdDep, self.trajectory.jdArr, samples)
tSample = self.tSample
# init planet velocity vectors
tof = self.trajectory.tof
VrDep = np.linspace(0, tof, samples)
VtDep = np.linspace(0, tof, samples)
VzDep = np.linspace(0, tof, samples)
VrArr = np.linspace(0, tof, samples)
VtArr = np.linspace(0, tof, samples)
VzArr = np.linspace(0, tof, samples)
VxDep = np.linspace(0, tof, samples)
VyDep = np.linspace(0, tof, samples)
VzDep = np.linspace(0, tof, samples)
VxArr = np.linspace(0, tof, samples)
VyArr = np.linspace(0, tof, samples)
VzArr = np.linspace(0, tof, samples)
# init position vectors
xDep = np.linspace(0, tof, samples)
yDep = np.linspace(0, tof, samples)
zDep = np.linspace(0, tof, samples)
xArr = np.linspace(0, tof, samples)
yArr = np.linspace(0, tof, samples)
zArr = np.linspace(0, tof, samples)
rDep = np.linspace(0, tof, samples)
tDep = np.linspace(0, tof, samples)
zDep = np.linspace(0, tof, samples)
rArr = np.linspace(0, tof, samples)
tArr = np.linspace(0, tof, samples)
zArr = np.linspace(0, tof, samples)
# retrieve and convert planet state vectors
for i in range(0, len(tSample)):
epochSample = pk.epoch(tSample[i], 'mjd2000')
# Departure planet
rCart, vCart = planetDep.eph(epochSample)
vCyl = Vcart2cyl(vCart, rCart)
rCyl = Pcart2cyl(rCart)
xDep[i] = rCart[0]
yDep[i] = rCart[1]
zDep[i] = rCart[2]
rDep[i] = rCyl[0]
tDep[i] = rCyl[1]
zDep[i] = rCyl[2]
VrDep[i] = vCyl[0]
VtDep[i] = vCyl[1]
VxDep[i] = vCart[0]
VyDep[i] = vCart[1]
VzDep[i] = vCart[2]
# Arrival planet
rCart, vCart = planetArr.eph(epochSample)
vCyl = Vcart2cyl(vCart, rCart)
rCyl = Pcart2cyl(rCart)
xArr[i] = rCart[0]
yArr[i] = rCart[1]
zArr[i] = rCart[2]
rArr[i] = rCyl[0]
tArr[i] = rCyl[1]
zArr[i] = rCyl[2]
VrArr[i] = vCyl[0]
VtArr[i] = vCyl[1]
VxArr[i] = vCart[0]
VyArr[i] = vCart[1]
VzArr[i] = vCart[2]
# dictionary with cartesian positions
planetCartesianPositions = {'xDep' : xDep,
'yDep' : yDep,
'zDep' : zDep,
'xArr' : xArr,
'yArr' : yArr,
'zArr' : zArr}
planetCylindricalPositions = {'rDep' : rDep,
'tDep' : tDep,
'zDep' : zDep,
'rArr' : rArr,
'tArr' : tArr,
'zArr' : zArr}
planetCartesianVelocities = {'VxDep' : VxDep,
'VyDep' : VyDep,
'VzDep' : VzDep,
'VxArr' : VxArr,
'VyArr' : VyArr,
'VzArr' : VzArr}
planetCylindricalVelocity = {'VrDep' : VrDep,
'VtDep' : VtDep,
'VzDep' : VzDep,
'VrArr' : VrArr,
'VtArr' : VtArr,
'VzArr' : VzArr}
print('Done sampling planets.')
return planetCartesianPositions, planetCylindricalPositions, planetCartesianVelocities, planetCylindricalVelocity
def sampleTrajectoryPosition(self, trajectory, samples=100):
"""
        Returns Cartesian and cylindrical position vectors of the full trajectory
I.e. from t=0 to t=tof
"""
# time vector
self.tSampleSec = np.linspace(0, self.trajectory.tofSec, samples)
tSampleSec = self.tSampleSec
# sample and compute position vectors
xTra = np.linspace(0, self.trajectory.tofSec, samples)
yTra = np.linspace(0, self.trajectory.tofSec, samples)
zTra = np.linspace(0, self.trajectory.tofSec, samples)
tTra = np.linspace(0, self.trajectory.tofSec, samples)
rTra = np.linspace(0, self.trajectory.tofSec, samples)
zTra = np.linspace(0, self.trajectory.tofSec, samples)
for i in range(0, len(tSampleSec)):
ti = tSampleSec[i]
rTra[i], tTra[i], zTra[i] = [self.trajectory.r(ti), self.trajectory.t(ti), self.trajectory.z(ti)]
xTra[i], yTra[i], zTra[i] = Pcyl2cart([rTra[i], tTra[i], zTra[i]])
# dictionary with cartesian positions
trajectoryCartPositions = {'x' : xTra,
'y' : yTra,
'z' : zTra}
trajectoryCylPositions = {'r' : rTra,
't' : tTra,
'z' : zTra}
print('Done sampling trajectory position.')
return trajectoryCartPositions, trajectoryCylPositions
def sampleTrajectoryVelocity(self, trajectory, samples=100):
"""
        Returns Cartesian and cylindrical velocity vectors of the full trajectory
I.e. from t=0 to t=tof
"""
# time vector
tSampleSec = self.tSampleSec
# cartesian velocities
xTraVel = np.linspace(0, self.trajectory.tofSec, samples)
yTraVel = np.linspace(0, self.trajectory.tofSec, samples)
zTraVel = np.linspace(0, self.trajectory.tofSec, samples)
rTraVel = np.linspace(0, self.trajectory.tofSec, samples)
tTraVel = np.linspace(0, self.trajectory.tofSec, samples)
zTraVel = np.linspace(0, self.trajectory.tofSec, samples)
for i in range(0, len(tSampleSec)):
vCyl = [self.trajectory.rDot(tSampleSec[i]), self.trajectory.tDot(tSampleSec[i]), self.trajectory.zDot(tSampleSec[i])]
rCyl = [self.trajectory.r(tSampleSec[i]), self.trajectory.t(tSampleSec[i]), self.trajectory.z(tSampleSec[i])]
vCart = Vcyl2cart(vCyl, rCyl)
xTraVel[i] = vCart[0]
yTraVel[i] = vCart[1]
zTraVel[i] = vCart[2]
rTraVel[i] = vCyl[0]
tTraVel[i] = vCyl[1]
zTraVel[i] = vCyl[2]
# dictionaries
trajectoryVelocitiesCart = {'vx' : xTraVel,
'vy' : yTraVel,
'vz' : zTraVel}
trajectoryVelocitiesCyl = {'vr' : rTraVel,
'vt' : tTraVel,
'vz' : zTraVel}
print('Done sampling trajectory velocity.')
return trajectoryVelocitiesCart, trajectoryVelocitiesCyl
def sampleTrajectoryAcceleration(self, trajectory, samples=100):
"""
Returns cylindrical acceleration vectors of the full trajectory
"""
# initialize vectors
rTraAcc = np.linspace(0, 1, samples)
tTraAcc = np.linspace(0, 1, samples)
zTraAcc = np.linspace(0, 1, samples)
xTraAcc = np.linspace(0, 1, samples)
yTraAcc = np.linspace(0, 1, samples)
totalTraAcc = np.linspace(0, 1, samples)
x = self.traPosCart['x']
y = self.traPosCart['y']
z = self.traPosCart['z']
# sample acceleration vectors
for i in range(0, len(self.tSampleSec)):
ti = self.tSampleSec[i]
aCyl = [self.trajectory.rDDot(ti), self.trajectory.tDDot(ti), self.trajectory.zDDot(ti)]
rTraAcc[i] = aCyl[0]
tTraAcc[i] = aCyl[1]
zTraAcc[i] = aCyl[2]
# dictionaries
trajectoryAccelerationsCyl = {'ar' : rTraAcc,
'at' : tTraAcc,
'az' : zTraAcc}
print('Done sampling trajectory acceleration.')
return trajectoryAccelerationsCyl
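# Assumed conventions for the conversion helpers used above (Pcart2cyl, Pcyl2cart,
# Vcart2cyl, Vcyl2cart), which are defined elsewhere in this project; they are taken
# to be the standard cylindrical <-> Cartesian conversions:
#   r = sqrt(x^2 + y^2),  theta = atan2(y, x),  z = z
#   v_r     = (x*vx + y*vy) / r
#   v_theta = (x*vy - y*vx) / r   (tangential component, so it carries units of m/s)
#   v_z     = vz
# and, in the other direction,
#   x = r*cos(theta),  y = r*sin(theta)
#   vx = v_r*cos(theta) - v_theta*sin(theta)
#   vy = v_r*sin(theta) + v_theta*cos(theta)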
|
the-stack_0_4635 | from django import http
from django.db.models import Q
from django.db.transaction import non_atomic_requests
from django.utils.encoding import force_bytes
from django.utils.translation import ugettext
from django.views.decorators.vary import vary_on_headers
import six
import olympia.core.logger
from olympia import amo
from olympia.addons.models import Addon, Category
from olympia.amo.decorators import json_view
from olympia.amo.templatetags.jinja_helpers import locale_url, urlparams
from olympia.amo.utils import render, sorted_groupby
from olympia.browse.views import personas_listing as personas_listing_view
from olympia.versions.compare import dict_from_int, version_dict, version_int
from .forms import ESSearchForm
DEFAULT_NUM_PERSONAS = 21 # Results appear in a grid of 3 personas x 7 rows.
log = olympia.core.logger.getLogger('z.search')
def _personas(request):
"""Handle the request for persona searches."""
initial = dict(request.GET.items())
    # Ignore these filters since they return the same results for Firefox
    # as for Thunderbird, etc.
initial.update(appver=None, platform=None)
form = ESSearchForm(initial, type=amo.ADDON_PERSONA)
form.is_valid()
qs = Addon.search_public()
filters = ['sort']
mapping = {
'downloads': '-weekly_downloads',
'users': '-average_daily_users',
'rating': '-bayesian_rating',
'created': '-created',
'name': 'name.raw',
'updated': '-last_updated',
'hotness': '-hotness'}
results = _filter_search(request, qs, form.cleaned_data, filters,
sorting=mapping,
sorting_default='-average_daily_users',
types=[amo.ADDON_PERSONA])
form_data = form.cleaned_data.get('q', '')
search_opts = {}
search_opts['limit'] = form.cleaned_data.get('pp', DEFAULT_NUM_PERSONAS)
page = form.cleaned_data.get('page') or 1
search_opts['offset'] = (page - 1) * search_opts['limit']
pager = amo.utils.paginate(request, results, per_page=search_opts['limit'])
categories, filter, base, category = personas_listing_view(request)
context = {
'pager': pager,
'form': form,
'categories': categories,
'query': form_data,
'filter': filter,
'search_placeholder': 'themes'}
return render(request, 'search/personas.html', context)
class BaseAjaxSearch(object):
"""Generates a list of dictionaries of add-on objects based on
ID or name matches. Safe to be served to a JSON-friendly view.
Sample output:
[
{
"id": 1865,
"name": "Adblock Plus",
"url": "http://path/to/details/page",
"icons": {
"32": "http://path/to/icon-32",
"64": "http://path/to/icon-64"
}
},
...
]
"""
def __init__(self, request, excluded_ids=(), ratings=False):
self.request = request
self.excluded_ids = excluded_ids
self.src = getattr(self, 'src', None)
self.types = getattr(self, 'types', amo.ADDON_TYPES.keys())
self.limit = 10
self.key = 'q' # Name of search field.
self.ratings = ratings
# Mapping of JSON key => add-on property.
default_fields = {
'id': 'id',
'name': 'name',
'url': 'get_url_path',
'icons': {
'32': ('get_icon_url', 32),
'64': ('get_icon_url', 64)
}
}
self.fields = getattr(self, 'fields', default_fields)
if self.ratings:
self.fields['rating'] = 'average_rating'
def queryset(self):
"""Get items based on ID or search by name."""
results = Addon.objects.none()
q = self.request.GET.get(self.key)
if q:
try:
pk = int(q)
except ValueError:
pk = None
qs = None
if pk:
qs = Addon.objects.public().filter(id=int(q))
elif len(q) > 2:
qs = Addon.search_public().filter_query_string(q.lower())
if qs:
results = qs.filter(type__in=self.types)
return results
def _build_fields(self, item, fields):
data = {}
for key, prop in six.iteritems(fields):
if isinstance(prop, dict):
data[key] = self._build_fields(item, prop)
else:
# prop is a tuple like: ('method', 'arg1, 'argN').
if isinstance(prop, tuple):
val = getattr(item, prop[0])(*prop[1:])
else:
val = getattr(item, prop, '')
if callable(val):
val = val()
data[key] = six.text_type(val)
return data
def build_list(self):
"""Populate a list of dictionaries based on label => property."""
results = []
for item in self.queryset()[:self.limit]:
if item.id in self.excluded_ids:
continue
d = self._build_fields(item, self.fields)
if self.src and 'url' in d:
d['url'] = urlparams(d['url'], src=self.src)
results.append(d)
return results
@property
def items(self):
return self.build_list()
class SearchSuggestionsAjax(BaseAjaxSearch):
src = 'ss'
class AddonSuggestionsAjax(SearchSuggestionsAjax):
# No personas.
types = [amo.ADDON_EXTENSION, amo.ADDON_THEME, amo.ADDON_DICT,
amo.ADDON_SEARCH, amo.ADDON_LPAPP]
class PersonaSuggestionsAjax(SearchSuggestionsAjax):
types = [amo.ADDON_PERSONA]
@json_view
@non_atomic_requests
def ajax_search(request):
"""This is currently used only to return add-ons for populating a
new collection. Themes (formerly Personas) are included by default, so
this can be used elsewhere.
"""
search_obj = BaseAjaxSearch(request)
search_obj.types = amo.ADDON_SEARCH_TYPES
return search_obj.items
@json_view
@non_atomic_requests
def ajax_search_suggestions(request):
cat = request.GET.get('cat', 'all')
suggesterClass = {
'all': AddonSuggestionsAjax,
'themes': PersonaSuggestionsAjax,
}.get(cat, AddonSuggestionsAjax)
suggester = suggesterClass(request, ratings=False)
return _build_suggestions(
request,
cat,
suggester)
def _build_suggestions(request, cat, suggester):
results = []
q = request.GET.get('q')
if q and (q.isdigit() or len(q) > 2):
q_ = q.lower()
if cat != 'apps':
# Applications.
for a in amo.APP_USAGE:
name_ = six.text_type(a.pretty).lower()
word_matches = [w for w in q_.split() if name_ in w]
if q_ in name_ or word_matches:
results.append({
'id': a.id,
'name': ugettext(u'{0} Add-ons').format(a.pretty),
'url': locale_url(a.short),
'cls': 'app ' + a.short
})
# Categories.
cats = Category.objects
cats = cats.filter(Q(application=request.APP.id) |
Q(type=amo.ADDON_SEARCH))
if cat == 'themes':
cats = cats.filter(type=amo.ADDON_PERSONA)
else:
cats = cats.exclude(type=amo.ADDON_PERSONA)
for c in cats:
if not c.name:
continue
name_ = six.text_type(c.name).lower()
word_matches = [w for w in q_.split() if name_ in w]
if q_ in name_ or word_matches:
results.append({
'id': c.id,
'name': six.text_type(c.name),
'url': c.get_url_path(),
'cls': 'cat'
})
results += suggester.items
return results
def _filter_search(request, qs, query, filters, sorting,
sorting_default='-weekly_downloads', types=None):
"""Filter an ES queryset based on a list of filters."""
if types is None:
types = []
APP = request.APP
# Intersection of the form fields present and the filters we want to apply.
show = [f for f in filters if query.get(f)]
if query.get('q'):
qs = qs.filter_query_string(query['q'])
if 'platform' in show and query['platform'] in amo.PLATFORM_DICT:
ps = (amo.PLATFORM_DICT[query['platform']].id, amo.PLATFORM_ALL.id)
# If we've selected "All Systems" don't filter by platform.
if ps[0] != ps[1]:
qs = qs.filter(platforms__in=ps)
if 'appver' in show:
# Get a min version less than X.0.
low = version_int(query['appver'])
# Get a max version greater than X.0a.
high = version_int(query['appver'] + 'a')
# Note: when strict compatibility is not enabled on add-ons, we
# fake the max version we index in compatible_apps.
qs = qs.filter(**{
'current_version.compatible_apps.%s.max__gte' % APP.id: high,
'current_version.compatible_apps.%s.min__lte' % APP.id: low
})
if 'atype' in show and query['atype'] in amo.ADDON_TYPES:
qs = qs.filter(type=query['atype'])
else:
qs = qs.filter(type__in=types)
if 'cat' in show:
cat = (Category.objects.filter(id=query['cat'])
.filter(Q(application=APP.id) | Q(type=amo.ADDON_SEARCH)))
if not cat.exists():
show.remove('cat')
if 'cat' in show:
qs = qs.filter(category=query['cat'])
if 'tag' in show:
qs = qs.filter(tags=query['tag'])
if 'sort' in show:
qs = qs.order_by(sorting[query['sort']])
elif not query.get('q'):
# Sort by a default if there was no query so results are predictable.
qs = qs.order_by(sorting_default)
return qs
@vary_on_headers('X-PJAX')
@non_atomic_requests
def search(request, tag_name=None):
APP = request.APP
types = (amo.ADDON_EXTENSION, amo.ADDON_THEME, amo.ADDON_DICT,
amo.ADDON_SEARCH, amo.ADDON_LPAPP)
category = request.GET.get('cat')
if category == 'collections':
extra_params = {'sort': {'newest': 'created'}}
else:
extra_params = None
fixed = fix_search_query(request.GET, extra_params=extra_params)
if fixed is not request.GET:
# We generally want a 301, except if it's a "type", because that's only
# here to support the new frontend, so a permanent redirect could mess
# things up when the user is going back and forth between the old and
# new frontend. https://github.com/mozilla/addons-server/issues/6846
status = 302 if 'type' in request.GET else 301
return http.HttpResponseRedirect(
urlparams(request.path, **fixed), status=status)
facets = request.GET.copy()
# In order to differentiate between "all versions" and an undefined value,
# we use "any" instead of "" in the frontend.
if 'appver' in facets and facets['appver'] == 'any':
facets['appver'] = ''
form = ESSearchForm(facets or {})
form.is_valid() # Let the form try to clean data.
form_data = form.cleaned_data
if tag_name:
form_data['tag'] = tag_name
if category == 'themes' or form_data.get('atype') == amo.ADDON_PERSONA:
return _personas(request)
sort, extra_sort = split_choices(form.sort_choices, 'created')
if form_data.get('atype') == amo.ADDON_SEARCH:
# Search add-ons should not be searched by ADU, so replace 'Users'
# sort with 'Weekly Downloads'.
sort, extra_sort = list(sort), list(extra_sort)
sort[1] = extra_sort[1]
del extra_sort[1]
# Perform search, using aggregation so that we can build the facets UI.
# Note that we don't need to aggregate on platforms, that facet it built
# from our constants directly, using the current application for this
# request (request.APP).
appversion_field = 'current_version.compatible_apps.%s.max' % APP.id
qs = (Addon.search_public().filter(app=APP.id)
.aggregate(tags={'terms': {'field': 'tags'}},
appversions={'terms': {'field': appversion_field}},
categories={'terms': {'field': 'category', 'size': 200}})
)
filters = ['atype', 'appver', 'cat', 'sort', 'tag', 'platform']
mapping = {'users': '-average_daily_users',
'rating': '-bayesian_rating',
'created': '-created',
'name': 'name.raw',
'downloads': '-weekly_downloads',
'updated': '-last_updated',
'hotness': '-hotness'}
qs = _filter_search(request, qs, form_data, filters, mapping, types=types)
pager = amo.utils.paginate(request, qs)
ctx = {
'is_pjax': request.META.get('HTTP_X_PJAX'),
'pager': pager,
'query': form_data,
'form': form,
'sort_opts': sort,
'extra_sort_opts': extra_sort,
'sorting': sort_sidebar(request, form_data, form),
'sort': form_data.get('sort'),
}
if not ctx['is_pjax']:
aggregations = pager.object_list.aggregations
ctx.update({
'tag': tag_name,
'categories': category_sidebar(request, form_data, aggregations),
'platforms': platform_sidebar(request, form_data),
'versions': version_sidebar(request, form_data, aggregations),
'tags': tag_sidebar(request, form_data, aggregations),
})
return render(request, 'search/results.html', ctx)
class FacetLink(object):
def __init__(self, text, urlparams, selected=False, children=None):
self.text = text
self.urlparams = urlparams
self.selected = selected
self.children = children or []
def sort_sidebar(request, form_data, form):
sort = form_data.get('sort')
return [FacetLink(text, {'sort': key}, key == sort)
for key, text in form.sort_choices]
def category_sidebar(request, form_data, aggregations):
APP = request.APP
qatype, qcat = form_data.get('atype'), form_data.get('cat')
cats = [f['key'] for f in aggregations['categories']]
categories = Category.objects.filter(id__in=cats)
if qatype in amo.ADDON_TYPES:
categories = categories.filter(type=qatype)
# Search categories don't have an application.
categories = categories.filter(Q(application=APP.id) |
Q(type=amo.ADDON_SEARCH))
# If category is listed as a facet but type is not, then show All.
if qcat in cats and not qatype:
qatype = True
# If category is not listed as a facet NOR available for this application,
# then show All.
if qcat not in categories.values_list('id', flat=True):
qatype = qcat = None
categories = [(_atype, sorted(_cats, key=lambda x: x.name))
for _atype, _cats in sorted_groupby(categories, 'type')]
rv = []
cat_params = {'cat': None}
all_label = ugettext(u'All Add-ons')
rv = [FacetLink(all_label, {'atype': None, 'cat': None}, not qatype)]
for addon_type, cats in categories:
selected = addon_type == qatype and not qcat
# Build the linkparams.
cat_params = cat_params.copy()
cat_params.update(atype=addon_type)
link = FacetLink(amo.ADDON_TYPES[addon_type],
cat_params, selected)
link.children = [
FacetLink(c.name, dict(cat_params, cat=c.id), c.id == qcat)
for c in cats]
rv.append(link)
return rv
def version_sidebar(request, form_data, aggregations):
appver = ''
# If appver is in the request, we read it cleaned via form_data.
if 'appver' in request.GET or form_data.get('appver'):
appver = form_data.get('appver')
app = six.text_type(request.APP.pretty)
exclude_versions = getattr(request.APP, 'exclude_versions', [])
# L10n: {0} is an application, such as Firefox. This means "any version of
# Firefox."
rv = [FacetLink(
ugettext(u'Any {0}').format(app), {'appver': 'any'}, not appver)]
vs = [dict_from_int(f['key']) for f in aggregations['appversions']]
# Insert the filtered app version even if it's not a facet.
av_dict = version_dict(appver)
if av_dict and av_dict not in vs and av_dict['major']:
vs.append(av_dict)
# Valid versions must be in the form of `major.minor`.
vs = set((v['major'], v['minor1'] if v['minor1'] not in (None, 99) else 0)
for v in vs)
versions = ['%s.%s' % v for v in sorted(vs, reverse=True)]
for version, floated in zip(versions, map(float, versions)):
if (floated not in exclude_versions and
floated > request.APP.min_display_version):
rv.append(FacetLink('%s %s' % (app, version), {'appver': version},
appver == version))
return rv
def platform_sidebar(request, form_data):
qplatform = form_data.get('platform')
    app_platforms = list(request.APP.platforms.values())
ALL = app_platforms.pop(0)
# The default is to show "All Systems."
selected = amo.PLATFORM_DICT.get(qplatform, ALL)
if selected != ALL and selected not in app_platforms:
# Insert the filtered platform even if it's not a facet.
app_platforms.append(selected)
# L10n: "All Systems" means show everything regardless of platform.
rv = [FacetLink(ugettext(u'All Systems'), {'platform': ALL.shortname},
selected == ALL)]
for platform in app_platforms:
rv.append(FacetLink(platform.name, {'platform': platform.shortname},
platform == selected))
return rv
def tag_sidebar(request, form_data, aggregations):
qtag = form_data.get('tag')
tags = [facet['key'] for facet in aggregations['tags']]
rv = [FacetLink(ugettext(u'All Tags'), {'tag': None}, not qtag)]
rv += [FacetLink(tag, {'tag': tag}, tag == qtag) for tag in tags]
if qtag and qtag not in tags:
rv += [FacetLink(qtag, {'tag': qtag}, True)]
return rv
def fix_search_query(query, extra_params=None):
rv = {force_bytes(k): v for k, v in query.items()}
changed = False
# Change old keys to new names.
keys = {
'lver': 'appver',
'pid': 'platform',
'type': 'atype',
}
for old, new in keys.items():
if old in query:
rv[new] = rv.pop(old)
changed = True
# Change old parameter values to new values.
params = {
'sort': {
'newest': 'updated',
'popularity': 'downloads',
'weeklydownloads': 'users',
'averagerating': 'rating',
'sortby': 'sort',
},
'platform': {
str(p.id): p.shortname
for p in amo.PLATFORMS.values()
},
'atype': {k: str(v) for k, v in amo.ADDON_SEARCH_SLUGS.items()},
}
if extra_params:
params.update(extra_params)
for key, fixes in params.items():
if key in rv and rv[key] in fixes:
rv[key] = fixes[rv[key]]
changed = True
return rv if changed else query
def split_choices(choices, split):
"""Split a list of [(key, title)] pairs after key == split."""
index = [idx for idx, (key, title) in enumerate(choices)
if key == split]
if index:
index = index[0] + 1
return choices[:index], choices[index:]
else:
return choices, []
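# Illustrative example (the choice labels are hypothetical), given
#   choices = [('users', 'Users'), ('rating', 'Rating'),
#              ('created', 'Created'), ('hotness', 'Hotness')]
# split_choices(choices, 'created') returns
#   ([('users', 'Users'), ('rating', 'Rating'), ('created', 'Created')],
#    [('hotness', 'Hotness')])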
|
the-stack_0_4636 | import gspread
from oauth2client.service_account import ServiceAccountCredentials
class GoogleSheetMGR(object):
def __init__(self, key, sheet=None, json_file=None):
self.key = key
self.sheet = sheet
if json_file:
self.json_file = json_file
else:
self.json_file = "/root/gspread2.json"
self._authorize()
self.keys = self.get_keys()
def _authorize(self):
"""
See: http://gspread.readthedocs.io/en/latest/oauth2.html
"""
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name(self.json_file, scope)
gc = gspread.authorize(credentials)
self.Worksheet = gc.open_by_key(self.key)
if self.sheet:
self.ws_obj = self.Worksheet.get_worksheet(self.sheet)
else:
self.ws_obj = self.Worksheet.sheet1
def get_all_values(self):
return self._get_all_values()
def _worksheet_action(self, action, *args):
"""
        Wrap gspread worksheet actions so that a failed call is retried after re-authorization
"""
retry = 0
max_retry = 5
while True:
try:
func = getattr(self.ws_obj, action)
return func(*args)
except Exception as details:
self._authorize()
retry += 1
if retry > max_retry:
raise details
def _get_all_values(self):
"""
Get all values from worksheet
"""
return self._worksheet_action('get_all_values')
def _update_cell(self, *args):
"""
Update a cell in spreadsheet
"""
return self._worksheet_action('update_cell', *args)
def get_keys(self):
"""
1st row is the key
"""
return self._get_all_values()[0]
def rework_sheet(self, data):
old_data = self.get_all_values()
        for i_row, row_data in enumerate(old_data):
            for i_cell, cell_data in enumerate(row_data):
                new_val = data[i_row][i_cell]
                if new_val != cell_data:
                    # gspread cells are 1-indexed, hence the +1 offsets
                    self._update_cell(i_row + 1, i_cell + 1, new_val)
def add_new_row(self, row_data, row=None):
if row:
new_row = row
else:
new_row = len(self._get_all_values()) + 1
for index, cell_data in enumerate(row_data):
self._update_cell(new_row, index + 1, cell_data)
def add_new_row_by_dict(self, data_dict, row=None):
if row:
new_row = row
else:
new_row = len(self._get_all_values()) + 1
for index, key in enumerate(self.keys):
if key in data_dict.keys():
self._update_cell(new_row, index + 1, data_dict[key])
def search_update_by_dict(self, search_dict, data_dict):
table = self._get_all_values()
index_dict = {}
index_dict2 = {}
for index, key in enumerate(self.keys):
if key in search_dict.keys():
index_dict[index] = search_dict[key]
if key in data_dict.keys():
index_dict2[index] = data_dict[key]
for row, i in enumerate(table):
for index, val in index_dict.items():
if i[index] != str(val):
found = False
break
else:
found = True
if not found:
continue
for key, val in index_dict2.items():
self._update_cell(row + 1, key + 1, val)
def search_info_by_dict(self, search_dict, table=None):
# TODO: merge code with search_update_by_dict
if not table:
table = self._get_all_values()
index_dict = {}
for index, key in enumerate(self.keys):
if key in search_dict.keys():
index_dict[index] = search_dict[key]
ret = []
for row, i in enumerate(table):
for index, val in index_dict.items():
if i[index] != str(val):
found = False
break
else:
found = True
if not found:
continue
tmp = {}
for index, key in enumerate(self.keys):
tmp[key] = i[index]
ret.append(tmp)
return ret
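# Minimal usage sketch; the spreadsheet key, service-account JSON path and column
# names below are placeholders, not values from this module.
if __name__ == '__main__':
    mgr = GoogleSheetMGR(key='<spreadsheet-key>',
                         json_file='/path/to/service_account.json')
    print(mgr.get_keys())
    mgr.add_new_row_by_dict({'name': 'example', 'status': 'ok'})
    print(mgr.search_info_by_dict({'name': 'example'}))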
|
the-stack_0_4637 | """
Train deterministic HM-DenseED
"""
from time import time
import torch
import os
import numpy as np
import scipy.io as io
import sys
import torch.optim as optim
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
from args import args, device
from models.model import DenseED
from utils.train_data_upload import train_load_data
from utils.test_data_upload import test_load_data
from models.bdsmm import bdsmm
from plot.velocity_plot import velocity_post_process
from plot.plot_PDF import mcs_test
from plot.basis_plot import basis_plot
import matplotlib.pyplot as plt
import scipy.io as io
import matplotlib.ticker as ticker
plt.switch_backend('agg')
import warnings
warnings.filterwarnings("ignore")
torch.set_default_tensor_type('torch.FloatTensor')
# initialize DenseED model
model = DenseED(in_channels=1, out_channels=1,
blocks=args.blocks,
growth_rate=args.growth_rate,
init_features=args.init_features,
drop_rate=args.drop_rate,
bn_size=args.bn_size,
bottleneck=args.bottleneck,
out_activation='Sigmoid').to(device)
print(model)
n_out_pixels_train = args.ntrain * args.imsize
n_out_pixels_test = args.ntest * args.imsize
dir = './plot'
# load data
train_loader = train_load_data()
test_loader = test_load_data()
print('...................Loaded data!...........................')
optimizer = optim.Adam(model.parameters(), lr=args.lr,weight_decay=args.weight_decay)
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10,
verbose=True, threshold=0.01, threshold_mode='rel',
cooldown=0, min_lr=0, eps=1e-8)
#train
def train(epoch):
model.train()
mse = 0.
mse_total = 0.
#===================================================================================
for batch_idx, (input,basis_patch,A_matrix, B_matrix,target_P, q_matrix) in enumerate(train_loader):
input_rr,output_basis,A1_transformed1,B1_transformed, target_pressure, q1_transformed \
= input.float(),basis_patch.float(),A_matrix.float(),B_matrix.float(), target_P.float(), q_matrix.float()
input_rr,output_basis,A1_transformed1,B1_transformed, target_pressure, q1_transformed \
= input_rr.to(device),output_basis.to(device),A1_transformed1.to(device),B1_transformed.to(device), target_pressure.to(device), q1_transformed.to(device)
#================================================================================
output_basis = output_basis.view(144,1,15,15)
input_rr = input_rr.view(144,1,15,15)
#load the A matrix from sparse to dense
A_app = []
for i in range(1):
A_torch = A1_transformed1[i,:,:]
A_torch1 = A_torch[:,0:2]
A_torch2 = A_torch[:,2]
A_torch1 = A_torch1.type(torch.LongTensor).to(device)
A_torch_final = torch.sparse.FloatTensor(A_torch1.t(), A_torch2, torch.Size([16384,16384]))
A_app.append(A_torch_final)
A1_transformed = torch.stack(A_app,dim=0)
#Obtain the indices for the non-interior support region
C = io.loadmat(dir+'/matlab_index_save_1.mat')
C = C['basis_save']
C = np.squeeze(C)
X = np.empty((C.shape[0], C[0].shape[0], C[0].shape[1]))
for i in range(X.shape[0]):
X[i] = C[i]
# -1 because of matlab and python
X1 = X.reshape(144,225)-1
#==============
X2 = np.zeros((144,225))
for i in range(144):
var2 = np.zeros((15,15))
ele = X1[i,0]
for varu in range(15):
var1 = ele+128*(varu)
for vm in range(15):
var2[varu,vm] = var1+vm
var3 = var2.reshape(1,225)
X2[i,:] = var3
X2 = torch.Tensor(X2)
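        # X2[s, :] holds the 225 flattened fine-grid indices covered by coarse
        # patch s: starting at X1[s, 0], stepping by 128 per row (the fine grid
        # is 128 x 128) and by 1 per column of the 15 x 15 patch.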
#================================================================================
model.zero_grad()
#obtain the output from the DenseED model
output = model(input_rr)
#================================================================================
output = output.view(1,144,225)
#============================================================================
for RRRR in range (1):
output_RR = output[RRRR,:,:]
output_RR = output_RR.reshape(144,225)
B1_p_out = B1_transformed[RRRR,:,:].reshape(16384, 256)
jjj=0
ss = 0
for ii in range(12):
jjj = 35+16*ii
for k in range (jjj,jjj+12):
ss =ss+1
s = ss-1
basis_temp = X2[s,:]
tem = B1_p_out[:,k-1]
basis_temp = basis_temp.type(torch.LongTensor)
tem = tem.type(torch.cuda.FloatTensor)
temp_variable = output_RR[143-s,:]/torch.max(output_RR[143-s,:])
tem[basis_temp] = temp_variable
B1_p_out[:,k-1] = tem
B1_transformed[RRRR,:,:] = B1_p_out
#====================================================
temp11 = []
for kkk in range(1):
B1_transformed_temp = B1_transformed[kkk,:,:]
B1_transformed1 = torch.transpose(B1_transformed_temp,0,1)
dim = torch.sum(B1_transformed1,dim=0)
B1_transformed2 = torch.div(B1_transformed1,dim)
B1_transformed22 = torch.transpose(B1_transformed2,0,1)
temp11.append(B1_transformed22)
B1_transformed = temp11
B1_transformed = torch.stack(temp11,dim=0)
#============================================
R1_transformed = torch.transpose(B1_transformed,1,2)
A1_transformed = torch.transpose(A1_transformed,1,2)
R1_transformed = torch.transpose(R1_transformed,1,2)
A_c_transformed = torch.matmul(torch.transpose(bdsmm(A1_transformed,R1_transformed),1,2),B1_transformed)
R1_transformed = torch.transpose(R1_transformed,1,2)
temp1_transformed = torch.matmul(R1_transformed,q1_transformed)
temp2_transformed,LU = torch.solve(temp1_transformed,A_c_transformed)
temp3_transformed = torch.matmul(B1_transformed,temp2_transformed)
predict_pressure = temp3_transformed
target_pressure = target_pressure.view(1,16384)
predict_pressure = predict_pressure.view(1,16384)
target_pressure = target_pressure.type(torch.cuda.FloatTensor)
predict_pressure = predict_pressure.type(torch.cuda.FloatTensor)
loss2 = F.mse_loss(predict_pressure,target_pressure, size_average=False)
loss2.backward()
optimizer.step()
mse += loss2.item()
rmse = np.sqrt(mse / n_out_pixels_train)
scheduler.step(rmse)
mse_total += mse
return mse_total
#test
def test(epoch):
model.eval()
mse = 0.
mse_total = 0.
final_target = []
final_predict = []
for batch_idx, (input,basis_patch,A_matrix, B_matrix,target_P, q_matrix, T_val, ft_val) in enumerate(test_loader):
input_rr,output_basis,A1_transformed1,B1_transformed, target_pressure, q1_transformed \
= input.float(),basis_patch.float(),A_matrix.float(),B_matrix.float(), target_P.float(), q_matrix.float()
input_rr,output_basis,A1_transformed1,B1_transformed, target_pressure, q1_transformed \
= input_rr.to(device),output_basis.to(device),A1_transformed1.to(device),B1_transformed.to(device), target_pressure.to(device), q1_transformed.to(device)
#================================================================================
output_basis = output_basis.view(144*64,1,15,15)
input_rr = input_rr.view(144*64,1,15,15)
A_app = []
for i in range(64):
A_torch = A1_transformed1[i,:,:]
A_torch1 = A_torch[:,0:2]
A_torch2 = A_torch[:,2]
A_torch1 = A_torch1.type(torch.LongTensor).to(device)
A_torch_final = torch.sparse.FloatTensor(A_torch1.t(), A_torch2, torch.Size([16384,16384]))
A_app.append(A_torch_final)
A1_transformed = torch.stack(A_app,dim=0).to(device)
C = io.loadmat(dir+'/matlab_index_save_1.mat')
C = C['basis_save']
C = np.squeeze(C)
X = np.empty((C.shape[0], C[0].shape[0], C[0].shape[1]))
for i in range(X.shape[0]):
X[i] = C[i]
# -1 because of matlab and python
X1 = X.reshape(144,225)-1
#==============
X2 = np.zeros((144,225))
for i in range(144):
var2 = np.zeros((15,15))
ele = X1[i,0]
for varu in range(15):
var1 = ele+128*(varu)
for vm in range(15):
var2[varu,vm] = var1+vm
var3 = var2.reshape(1,225)
X2[i,:] = var3
X2 = torch.Tensor(X2)
#================================================================================
with torch.no_grad():
output = model(input_rr)
#================================================================================
output = output.view(64,144,225)
output_basis = output_basis.view(64,144,225)
#============================================================================
for RRRR in range (64):
output_RR = output[RRRR,:,:].reshape(144,225)
B1_p_out = B1_transformed[RRRR,:,:].reshape(16384, 256)
jjj=0
ss = 0
for ii in range(12):
jjj = 35+16*ii
for k in range (jjj,jjj+12):
ss =ss+1
s = ss-1
basis_temp = X2[s,:]
tem = B1_p_out[:,k-1]
basis_temp = basis_temp.type(torch.LongTensor)
tem = tem.type(torch.cuda.FloatTensor)
temp_variable = output_RR[143-s,:]/torch.max(output_RR[143-s,:])
tem[basis_temp] = temp_variable
B1_p_out[:,k-1] = tem
B1_transformed[RRRR,:,:] = B1_p_out
#====================================================
temp11 = []
for kkk in range(64):
B1_transformed_temp = B1_transformed[kkk,:,:]
B1_transformed1 = torch.transpose(B1_transformed_temp,0,1)
dim = torch.sum(B1_transformed1,dim=0)
B1_transformed2 = torch.div(B1_transformed1,dim)
B1_transformed22 = torch.transpose(B1_transformed2,0,1)
temp11.append(B1_transformed22)
B1_transformed = temp11
B1_transformed = torch.stack(temp11,dim=0).to(device)
temp_save = B1_transformed.cpu().detach().numpy()
#============================================
R1_transformed = torch.transpose(B1_transformed,1,2)
A1_transformed = torch.transpose(A1_transformed,1,2)
R1_transformed = torch.transpose(R1_transformed,1,2)
A_c_transformed = torch.matmul(torch.transpose(bdsmm(A1_transformed,R1_transformed),1,2),B1_transformed)
R1_transformed = torch.transpose(R1_transformed,1,2)
temp1_transformed = torch.matmul(R1_transformed,q1_transformed)
temp2_transformed,LU = torch.solve(temp1_transformed,A_c_transformed)
temp3_transformed = torch.matmul(B1_transformed,temp2_transformed)
predict_pressure = temp3_transformed.view(64,16384)
target_pressure = target_pressure.view(64,16384).type(torch.cuda.FloatTensor)
predict_pressure = predict_pressure.type(torch.cuda.FloatTensor)
loss2 = F.mse_loss(predict_pressure,target_pressure, size_average=False)
predict_press = predict_pressure.cpu().detach().numpy()
target_press = target_pressure
target_press = target_press.cpu().detach().numpy()
if epoch % args.epochs == 0:
if batch_idx == 0:
interior_basis = output_basis.cpu().detach().numpy()
io.savemat('./result_data/test_interior_basis_%d.mat'%epoch, dict([('interior_basis',np.array(interior_basis))]))
io.savemat('./result_data/test_prolongation_%d.mat'%epoch, dict([('prolongation_operator',np.array(temp_save))]))
if args.kle == 100:
index_val = 28
elif args.kle == 1000:
index_val = 23
elif args.kle == 16384:
index_val = 28
elif args.data == 'channel':
index_val = 7
velocity_x_tar, velocity_y_tar, velocity_x_pred, velocity_y_pred = \
velocity_post_process(target_press[index_val,:], predict_press[index_val,:],T_val[index_val,:],ft_val[index_val,:],epoch,index_val)
mse += loss2.item()
final_target.append(target_press)
final_predict.append(predict_press)
final_target = np.array(final_target)
final_predict = np.array(final_predict)
output_new = output.view(64,144,15,15)
predict_new = output_basis.view(64,144,15,15)
output_new = output_new.cpu().detach().numpy()
predict_new = predict_new.cpu().detach().numpy()
mse_total += mse
return mse_total, final_target, final_predict
#plot rmse
def train_test_error(r2_train,r2_test,epoch):
plt.figure()
plt.plot(r2_train, label="Train: {:.3f}".format(np.mean(r2_train[-5:])))
plt.plot(r2_test, label="Test: {:.3f}".format(np.mean(r2_test[-5:])))
plt.xlabel('Epoch')
plt.ylabel(r'RMSE')
plt.legend(loc='lower right')
plt.savefig("./results/rmse.pdf", dpi=600)
plt.close()
np.savetxt("./results/rmse_train.txt", r2_train)
np.savetxt("./results/rmse_test.txt", r2_test)
#==========================================================
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
#==========================================================
#main
def main():
mkdir('results')
mkdir('result_data')
print('Start training and testing........................................................')
loss_train_all = []
loss_test_all = []
for epoch in range(1, args.epochs + 1):
print ('epoch number .......................................',epoch)
mse_tot = train(epoch)
with torch.no_grad():
mse_tot_test, final_target, final_predict = test(epoch)
rmse = np.sqrt(mse_tot / n_out_pixels_train)
rmse1 = np.sqrt(mse_tot_test / n_out_pixels_test)
loss_train_all.append(rmse)
loss_test_all.append(rmse1)
loss_train_all = np.array(loss_train_all)
loss_test_all = np.array(loss_test_all)
print('.............training and testing done....................')
print('\n')
print('saving the training error and testing error')
io.savemat('./result_data/training_loss.mat', dict([('training_loss',np.array(loss_train_all))]))
io.savemat('./result_data/test_loss.mat', dict([('testing_loss',np.array(loss_test_all))]))
print('.........................plotting results.........................................')
train_test_error(loss_train_all,loss_test_all, 1)
print('.........................saving model..............................................')
torch.save(model, 'KLE_100.pt')
print('.............................plotting basis.........................................')
basis_plot()
"""clean up gpu memory"""
torch.cuda.empty_cache()
print('.............................plotting PDF............................................')
mcs_test(model)
if __name__ == '__main__':
main()
|
the-stack_0_4638 | from __future__ import print_function
import torch
import os
import shutil
from collections import defaultdict
import numpy as np
def getNeigh(node_num, feature_map, knn):
similarity = np.dot(feature_map, feature_map.T)
sort_id = np.argsort(-similarity, axis=1)
adj_sets = defaultdict(set)
for n in range(node_num):
for k in range(1, knn+1):
adj_sets[n].add(sort_id[n, k])
return adj_sets
def collectGraphTrain(node_num, class_num, feat_dim = 2048, knn = 10, suffix = '_gem.npy'):
'''
    (training dataset)
    Collect graph info: node labels, features and neighborhood (adjacency)
    relationships. Neighborhood relationships are constructed from the k nearest
    neighbors by feature similarity.
'''
print('node_num:', node_num, '\nclass_num:', class_num)
feature_map = np.load('train_feature_map/feature_map' + suffix)
assert node_num == feature_map.shape[0], 'node_num does not match feature shape.'
assert feat_dim == feature_map.shape[1], 'feat_dim does not match feature shape.'
label = np.load('train_feature_map/label' + suffix)
# adj_sets = getNeigh(node_num, feature_map, knn)
neighs = np.load('train_feature_map/neighs' + suffix)
adj_sets = defaultdict(set)
for n in range(node_num):
adj_sets[n] = set(neighs[n, 1:knn+1])
return torch.from_numpy(label), torch.from_numpy(feature_map).float(), adj_sets
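# Illustrative call (node_num/class_num are placeholders; the .npy files under
# train_feature_map/ must already exist):
#   labels, features, adj_sets = collectGraphTrain(node_num=5000, class_num=100,
#                                                  feat_dim=2048, knn=10)
#   print(labels.shape, features.shape, len(adj_sets))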
def collectGraphTest(feature_path, node_num, feat_dim = 2048, knn = 10, suffix = '_gem.npy'):
print("node num.:", node_num)
feature_map = np.load(os.path.join(feature_path, 'feature_map' + suffix))
assert node_num == feature_map.shape[0], 'node_num does not match feature shape.'
assert feat_dim == feature_map.shape[1], 'feat_dim does not match feature shape.'
neighs = np.load(os.path.join(feature_path, 'neighs' + suffix))
adj_sets = defaultdict(set)
for n in range(node_num):
adj_sets[n] = set(neighs[n, 1:knn+1])
query_feature = np.load(os.path.join(feature_path, 'query' + suffix))
return torch.from_numpy(feature_map).float(), adj_sets, torch.from_numpy(query_feature).float() |
the-stack_0_4641 | """Constants for the FRITZ!Box Tools integration."""
DOMAIN = "fritz"
PLATFORMS = ["device_tracker"]
DATA_FRITZ = "fritz_data"
DEFAULT_DEVICE_NAME = "Unknown device"
DEFAULT_HOST = "192.168.178.1"
DEFAULT_PORT = 49000
DEFAULT_USERNAME = ""
ERROR_AUTH_INVALID = "invalid_auth"
ERROR_CONNECTION_ERROR = "connection_error"
ERROR_UNKNOWN = "unknown_error"
TRACKER_SCAN_INTERVAL = 30
|
the-stack_0_4643 | import lzma
import os
import subprocess
import re
import zipfile
from typing import Tuple
from unittest import mock
from unittest.mock import MagicMock
import pytest
from aiohttp.test_utils import TestClient
from otupdate import buildroot, common
from otupdate import openembedded
from otupdate.common.update_actions import Partition
from otupdate.openembedded import PartitionManager
from tests.common.config import FakeRootPartElem
HERE = os.path.abspath(os.path.dirname(__file__))
one_up = os.path.abspath(os.path.join(__file__, "../../"))
@pytest.fixture(params=[openembedded, buildroot])
async def test_cli(
aiohttp_client, otupdate_config, request, version_file_path, mock_name_synchronizer
) -> Tuple[TestClient, str]:
"""
Build an app using dummy versions, then build a test client and return it
"""
cli_client_pkg = request.param
app = cli_client_pkg.get_app(
name_synchronizer=mock_name_synchronizer,
system_version_file=version_file_path,
config_file_override=otupdate_config,
boot_id_override="dummy-boot-id-abc123",
)
client = await aiohttp_client(app)
return client, cli_client_pkg.__name__
@pytest.fixture
def downloaded_update_file_consolidated(request, extracted_update_file_consolidated):
"""
Return the path to a zipped update file
To exclude files, mark with ``exclude_rootfs_ext4``,
``exclude_rootfs_ext4_hash``, ``exclude_rootfs_ext4_hash_sig``.
This uses :py:meth:`extracted_update_file` to generate the contents, so
marks that fixture understands can be used when requesting this fixture
Can also be used by tests that will upload it to a test server, since
when the test server boots its download path will be somewhere else
"""
zip_path_arr = []
list_of_update_files = [
(
"rootfs.xz",
"rootfs.xz.sha256",
"rootfs.xz.hash.sig",
"tmp_uncomp_xz_hash_path",
"ot3-system.zip",
),
(
"rootfs.ext4",
"rootfs.ext4.hash",
"rootfs.ext4.hash.sig",
"tmp_uncomp_xz_hash_path",
"ot2-system.zip",
),
]
for index, (rootfs, sha256, sig, xz_hash, pkg) in enumerate(list_of_update_files):
rootfs_path = os.path.join(extracted_update_file_consolidated[index], rootfs)
hash_path = os.path.join(extracted_update_file_consolidated[index], sha256)
sig_path = os.path.join(extracted_update_file_consolidated[index], sig)
xz_hash_path = os.path.join(extracted_update_file_consolidated[index], xz_hash)
zip_path = os.path.join(extracted_update_file_consolidated[index], pkg)
with zipfile.ZipFile(zip_path, "w") as zf:
if not request.node.get_closest_marker("exclude_rootfs_ext4"):
zf.write(rootfs_path, rootfs)
if not request.node.get_closest_marker("exclude_rootfs_ext4_hash"):
zf.write(hash_path, sha256)
if not request.node.get_closest_marker("exclude_rootfs_ext4_hash_sig"):
zf.write(sig_path, sig)
zf.write(xz_hash_path, xz_hash)
zip_path_arr.append(zip_path)
os.unlink(rootfs_path)
os.unlink(hash_path)
os.unlink(sig_path)
return zip_path_arr
def write_fake_rootfs(
rootfs_name: str, rootfs_path: str, rootfs_contents: bytes, uncomp_xz_path: str
) -> str:
if rootfs_name == "rootfs.xz":
with lzma.open(rootfs_path, "w") as f:
f.write(rootfs_contents)
with lzma.open(rootfs_path, "rb") as fsrc, open(uncomp_xz_path, "wb") as fdst:
while True:
chunk = fsrc.read(1024)
fdst.write(chunk)
if len(chunk) != 1024:
break
return uncomp_xz_path
else:
with open(rootfs_path, "wb") as rfs:
rfs.write(rootfs_contents)
return rootfs_path
def gen_hash_val_direct(rfs_path: str) -> str:
try:
shasum_out = subprocess.check_output(
[
"shasum",
"-a",
"256",
rfs_path,
]
)
return shasum_out
except (subprocess.CalledProcessError, FileNotFoundError):
pytest.skip("no shasum invokeable on command line")
@pytest.fixture
def extracted_update_file_consolidated(request, tmpdir):
"""
Return the path to a dir containing an unzipped update file.
To make a bad hash, mark with ``bad_hash``. To make a bad
signature, mark with ``bad_sig``.
"""
extracted_files_dir_path_arr = []
list_of_extracted_files = [
(
"rootfs.xz",
"rootfs.xz.sha256",
"rootfs.xz.hash.sig",
),
(
"rootfs.ext4",
"rootfs.ext4.hash",
"rootfs.ext4.hash.sig",
),
]
for (rootfs, sha256, sig) in list_of_extracted_files:
rootfs_path = os.path.join(tmpdir, rootfs)
hash_path = os.path.join(tmpdir, sha256)
uncomp_xz_hash_path = os.path.join(tmpdir, "tmp_uncomp_xz_hash_path")
sig_path = os.path.join(tmpdir, sig)
uncomp_xz_path = os.path.join(tmpdir, "tmp_uncomp")
rootfs_contents = os.urandom(100000)
write_fake_rootfs(rootfs, rootfs_path, rootfs_contents, uncomp_xz_path)
if request.node.get_closest_marker("bad_hash"):
hashval = b"0oas0ajcs0asd0asjc0ans0d9ajsd0ian0s9djas"
else:
hashval = re.match(
b"^([a-z0-9]+) ",
gen_hash_val_direct(rootfs_path),
).group(1)
hashval2 = re.match(
b"^([a-z0-9]+) ",
gen_hash_val_direct(uncomp_xz_path),
).group(1)
with open(hash_path, "wb") as rfsh:
rfsh.write(hashval)
with open(uncomp_xz_hash_path, "wb") as rfsh:
rfsh.write(hashval2)
if not request.node.get_closest_marker("bad_sig"):
try:
subprocess.check_output(["openssl", "version"])
except (subprocess.CalledProcessError, FileNotFoundError):
pytest.skip("requires openssl binary to be installed")
subprocess.check_call(
[
"openssl",
"dgst",
"-sha256",
"-sign",
os.path.join(one_up, "ot-update-server-unit-tests.key"),
"-out",
sig_path,
hash_path,
]
)
else:
with open(sig_path, "wb") as sigfile:
sigfile.write(os.urandom(256))
extracted_files_dir_path_arr.append(tmpdir)
return extracted_files_dir_path_arr
@pytest.fixture
def testing_partition(monkeypatch, tmpdir):
partfile = os.path.join(tmpdir, "fake-partition")
find_unused = mock.Mock()
monkeypatch.setattr(buildroot.update_actions, "_find_unused_partition", find_unused)
find_unused.return_value = FakeRootPartElem(
"TWO", common.update_actions.Partition(2, partfile)
)
return partfile
@pytest.fixture
def mock_partition_manager_valid_switch(tmpdir) -> MagicMock:
"""Mock Partition Manager."""
partfile = os.path.join(tmpdir, "fake-partition")
mock_part = MagicMock(spec=PartitionManager)
mock_part.find_unused_partition.return_value = Partition(2, partfile)
mock_part.switch_partition.return_value = Partition(2, partfile)
mock_part.resize_partition.return_value = True
mock_part.mount_fs.return_value = True
mock_part.umount_fs.return_value = True
mock_part.mountpoint_root.return_value = "/mnt"
return mock_part
|
the-stack_0_4646 | #!/usr/bin/env python3
# Copyright (c) 2020 GBCR Developers
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of feefilter messages."""
from decimal import Decimal
import time
from test_framework.messages import msg_feefilter
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import GoldBCRTestFramework
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
time.sleep(1)
return False
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.txinvs = []
def on_inv(self, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
class FeeFilterTest(GoldBCRTestFramework):
def set_test_params(self):
self.num_nodes = 2
# We lower the various required feerates for this test
# to catch a corner-case where feefilter used to slightly undercut
# mempool and wallet feerate calculation based on GetFee
# rounding down 3 places, leading to stranded transactions.
# See issue #16499
self.extra_args = [["-minrelaytxfee=0.00000100", "-mintxfee=0.00000100"]]*self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
self.sync_blocks()
self.nodes[0].add_p2p_connection(TestP2PConn())
# Test that invs are received by test connection for all txs at
# feerate of .2 sat/byte
node1.settxfee(Decimal("0.00000200"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Set a filter of .15 sat/byte on test connection
self.nodes[0].p2p.send_and_ping(msg_feefilter(150))
# Test that txs are still being received by test connection (paying .15 sat/byte)
node1.settxfee(Decimal("0.00000150"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Change tx fee rate to .1 sat/byte and test they are no longer received
# by the test connection
node1.settxfee(Decimal("0.00000100"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
self.sync_mempools() # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that we
# we can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Remove fee filter and check that txs are received again
self.nodes[0].p2p.send_and_ping(msg_feefilter(0))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()
|
the-stack_0_4648 | #!/usr/bin/env python3
# <Copyright 2019, Argo AI, LLC. Released under the MIT license.>
from typing import Tuple
import numpy as np
class FrameRecord:
"""
Store representation of a bounding box in some timeframe, in different coordinate systems.
This bounding box comes from a track that shares the same color.
"""
def __init__(
self,
bbox_city_fr: np.ndarray,
bbox_ego_frame: np.ndarray,
occlusion_val: int,
color: Tuple[float, float, float],
track_uuid: str,
obj_class_str: str,
) -> None:
"""Initialize FrameRecord.
Args:
bbox_city_fr: bounding box for city frame.
bbox_ego_frame: bounding box for ego frame.
occlusion_val: occlusion value.
color: tuple representing color. RGB values should be within [0,1] range.
track_uuid: track uuid
obj_class_str: object class string
"""
self.bbox_city_fr = bbox_city_fr
self.bbox_ego_frame = bbox_ego_frame
self.occlusion_val = occlusion_val
self.color = color
self.track_uuid = track_uuid
self.obj_class_str = obj_class_str
|
the-stack_0_4649 | #!/usr/bin/env python3
import serial, sys, json, logging
import paho.mqtt.client as mqtt
CONFIG_FILE='config.json'
try:
with open(CONFIG_FILE) as config_file:
config = json.load(config_file)
except:
print("Config file not present or invalid JSON!")
sys.exit(1)
header_retained = b'#R'
header_unretained = b'#U'
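# Serial line format expected by this bridge (inferred from the parsing loop below):
#   b'#R<topic> <message>' -> publish <message> to <topic> with the MQTT retain flag set
#   b'#U<topic> <message>' -> publish <message> to <topic> without the retain flag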
ser = serial.Serial()
ser.baudrate = config['baudrate']
ser.port = config['port']
ser.timeout = 5
logging.basicConfig(level=logging.DEBUG if config['debug'] else logging.WARNING, format="%(levelname)s: %(message)s")
log = logging.getLogger("")
try:
ser.open()
except:
log.error("Failed to open serial port {}!".format(config['port']))
import serial.tools.list_ports
ports = serial.tools.list_ports.comports()
print("Available serial ports:")
for port, desc, hwid in sorted(ports):
print("{}: {}".format(port, desc))
sys.exit(1)
def onConnect(client, userdata, flags, rc):
log.info("Connected to MQTT server")
def onDisconnect(client, userdata, rc):
log.info("Disconnected from MQTT server")
def onLog(client, userdata, level, buf):
if (level == mqtt.MQTT_LOG_INFO or level == mqtt.MQTT_LOG_NOTICE):
log.info(buf)
elif (level == mqtt.MQTT_LOG_WARNING or level == mqtt.MQTT_LOG_ERR):
log.warning(buf)
def post_mqtt(topic, message, retain = False):
(rc, mid) = mqttc.publish(topic, message, qos=0, retain=retain)
if (rc != mqtt.MQTT_ERR_SUCCESS):
log.warning("MQTT Publish unsuccessful!")
mqttc = mqtt.Client()
mqttc.on_connect = onConnect
mqttc.on_disconnect = onDisconnect
mqttc.on_log = onLog
try:
mqttc.connect(config['mqtt_server'], config['mqtt_port'], 60)
except Exception as e:
log.error("Can't connect to the MQTT broker! {}".format(e))
if ser.is_open:
ser.close()
sys.exit(1)
mqttc.loop_start()
while True:
try:
line = ser.readline().rstrip()
if line != b'':
log.debug(line.decode("utf-8"))
if line.startswith(header_retained) or line.startswith(header_unretained):
topic = line[2:]
try:
(topic, message) = topic.split(b' ', 1)
except ValueError:
message = b''
log.info("Posting {} to topic {}".format(
message.decode("utf-8"), topic.decode("utf-8")))
post_mqtt(topic.decode('utf8'), message, line.startswith(header_retained))
except KeyboardInterrupt:
print('\n')
mqttc.disconnect()
if ser.is_open:
ser.close()
sys.exit(0)
except Exception as e:
log.error("{}".format(e))
if ser.is_open:
ser.close()
sys.exit(1)
|
the-stack_0_4650 | from PIL import Image
def bytes_to_bin(data):
return "".join(f"{bin(i)[2:]:>08}" for i in data)
def hide_lsb_image(image, binary):
pixels = image.load()
for i in range(image.height):
for j in range(image.width):
r, g, b = pixels[j, i]
bit = int(binary[i % len(binary)])
r = (r & (~0x01)) | bit
g = (g & (~0x01)) | bit
b = (b & (~0x01)) | bit
pixels[j, i] = (r, g, b)
return pixels
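# Note: as written, the payload bit is selected by the row index, so every pixel in a row
# receives the same bit in the least-significant bit of its R, G and B channels.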
data = b"donnuCTF{d52127b1b3f17805675280653e10fb66}"
image = Image.open("easy2.png")
hide_lsb_image(image, bytes_to_bin(data))
image.save("encoded.png") |
the-stack_0_4651 | import re
import numbers
from collections import namedtuple
from .shapes import *
LAYER_DESCRIPTORS = {
# Caffe Types
'AbsVal': shape_identity,
'Accuracy': shape_scalar,
'ArgMax': shape_not_implemented,
'BatchNorm': shape_identity,
'BNLL': shape_not_implemented,
'Concat': shape_concat,
'ContrastiveLoss': shape_scalar,
'Convolution': shape_convolution,
'Deconvolution': shape_not_implemented,
'Data': shape_data,
'Dropout': shape_identity,
'DummyData': shape_data,
'EuclideanLoss': shape_scalar,
'Eltwise': shape_identity,
'Exp': shape_identity,
'Flatten': flatten_shape,
'HDF5Data': shape_data,
'HDF5Output': shape_identity,
'HingeLoss': shape_scalar,
'Im2col': shape_not_implemented,
'ImageData': shape_data,
'InfogainLoss': shape_scalar,
'InnerProduct': shape_inner_product,
'Input': shape_data,
'LRN': shape_identity,
'MemoryData': shape_mem_data,
'MultinomialLogisticLoss': shape_scalar,
'MVN': shape_not_implemented,
'Pooling': shape_pool,
'Power': shape_identity,
'ReLU': shape_identity,
'Scale': shape_identity,
'Sigmoid': shape_identity,
'SigmoidCrossEntropyLoss': shape_scalar,
'Silence': shape_not_implemented,
'Softmax': shape_identity,
'SoftmaxWithLoss': shape_scalar,
'Split': shape_not_implemented,
'Slice': shape_not_implemented,
'TanH': shape_identity,
'WindowData': shape_not_implemented,
'Threshold': shape_identity,
'Reshape': reshape_shape
}
LAYER_TYPES = LAYER_DESCRIPTORS.keys()
LayerType = type('LayerType', (), {t: t for t in LAYER_TYPES})
class NodeKind(LayerType):
@staticmethod
def map_raw_kind(kind):
if kind in LAYER_TYPES:
return kind
return None
@staticmethod
def compute_output_shape(node):
try:
val = LAYER_DESCRIPTORS[node.kind](node)
return val
except NotImplementedError:
raise KaffeError('Output shape computation not implemented for type: %s' % node.kind)
class NodeDispatchError(KaffeError):
pass
class NodeDispatch(object):
@staticmethod
def get_handler_name(node_kind):
if len(node_kind) <= 4:
# A catch-all for things like ReLU and tanh
return node_kind.lower()
# Convert from CamelCase to under_scored
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', node_kind)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()
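# Illustrative: 'InnerProduct' -> 'inner_product'; short kinds such as 'LRN' take the
# early return above and become 'lrn'.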
def get_handler(self, node_kind, prefix):
name = self.get_handler_name(node_kind)
name = '_'.join((prefix, name))
try:
return getattr(self, name)
except AttributeError:
raise NodeDispatchError('No handler found for node kind: %s (expected: %s)' %
(node_kind, name))
class LayerAdapter(object):
def __init__(self, layer, kind):
self.layer = layer
self.kind = kind
@property
def parameters(self):
name = NodeDispatch.get_handler_name(self.kind)
name = '_'.join((name, 'param'))
try:
return getattr(self.layer, name)
except AttributeError:
raise NodeDispatchError('Caffe parameters not found for layer kind: %s' % (self.kind))
@staticmethod
def get_kernel_value(scalar, repeated, idx, default=None):
if scalar:
return scalar
if repeated:
if isinstance(repeated, numbers.Number):
return repeated
if len(repeated) == 1:
# Same value applies to all spatial dimensions
return int(repeated[0])
assert idx < len(repeated)
# Extract the value for the given spatial dimension
return repeated[idx]
if default is None:
raise ValueError('Unable to determine kernel parameter!')
return default
@property
def kernel_parameters(self):
assert self.kind in (NodeKind.Convolution, NodeKind.Pooling)
params = self.parameters
k_h = self.get_kernel_value(params.kernel_h, params.kernel_size, 0)
k_w = self.get_kernel_value(params.kernel_w, params.kernel_size, 1)
s_h = self.get_kernel_value(params.stride_h, params.stride, 0, default=1)
s_w = self.get_kernel_value(params.stride_w, params.stride, 1, default=1)
p_h = self.get_kernel_value(params.pad_h, params.pad, 0, default=0)
p_w = self.get_kernel_value(params.pad_w, params.pad, 1, default=0)
return KernelParameters(k_h, k_w, s_h, s_w, p_h, p_w)
KernelParameters = namedtuple('KernelParameters', ['kernel_h', 'kernel_w', 'stride_h', 'stride_w',
'pad_h', 'pad_w'])
|
the-stack_0_4652 | import os
import download
import settings
import translate
import utils
def create_profileicon_json(lang, path):
cdragon_profileicons = download.download_versioned_cdragon_profileicons_summary()
profileicon = {
"type": "profileicon",
"version": settings.patch['json'],
"data": {}
}
for x in cdragon_profileicons:
if "iconPath" not in x:
continue
icon_id = x["id"]
profileicon["data"][icon_id] = {
"id": icon_id,
"title": translate.t(lang, "summoner_icon_title_" + str(icon_id)),
"description": translate.t(lang, "summoner_icon_description_" + str(icon_id)),
"image": {
"full": str(icon_id) + ".png"
}
}
utils.save_json(profileicon, os.path.join(path, "profileicon.json"))
return profileicon
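# Illustrative shape of one generated entry (ids and strings depend on the CDragon summary
# and the translation files):
# profileicon["data"][685] = {"id": 685, "title": "...", "description": "...",
#                             "image": {"full": "685.png"}}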
def add_sprite_info(lang, path):
"""
Adds Sprite Info to JSONs
"""
data = utils.load_json(os.path.join(path, "spriter_output.json"))
profileicons = utils.load_json(os.path.join(path, f"data/{lang}/profileicon.json"))
for profileicon in profileicons['data']:
try:
profileicons['data'][profileicon]['image'].update({
'sprite': data['result']['profileicon'][profileicon]['regular']['texture'] + ".png",
'group': "profileicon",
'x': data['result']['profileicon'][profileicon]['regular']['x'],
'y': data['result']['profileicon'][profileicon]['regular']['y'],
'w': data['result']['profileicon'][profileicon]['regular']['width'],
'h': data['result']['profileicon'][profileicon]['regular']['height'],
})
except KeyError:
print("Failed to add sprite of profileicon: " + profileicon)
utils.save_json(profileicons, os.path.join(path, f"data/{lang}/profileicon.json"))
|
the-stack_0_4653 | # Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Author - Shivam Mishra <[email protected]>
from __future__ import unicode_literals
import frappe
from json import loads, dumps
from frappe import _, DoesNotExistError, ValidationError, _dict
from frappe.boot import get_allowed_pages, get_allowed_reports
from six import string_types
from functools import wraps
from frappe.cache_manager import (
build_domain_restriced_doctype_cache,
build_domain_restriced_page_cache,
build_table_count_cache
)
def handle_not_exist(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
except DoesNotExistError:
if frappe.message_log:
frappe.message_log.pop()
return []
return wrapper
class Workspace:
def __init__(self, page_name, minimal=False):
self.page_name = page_name
self.extended_links = []
self.extended_charts = []
self.extended_shortcuts = []
self.user = frappe.get_user()
self.allowed_modules = self.get_cached('user_allowed_modules', self.get_allowed_modules)
self.doc = self.get_page_for_user()
if self.doc.module and self.doc.module not in self.allowed_modules:
raise frappe.PermissionError
self.can_read = self.get_cached('user_perm_can_read', self.get_can_read_items)
self.allowed_pages = get_allowed_pages(cache=True)
self.allowed_reports = get_allowed_reports(cache=True)
if not minimal:
self.onboarding_doc = self.get_onboarding_doc()
self.onboarding = None
self.table_counts = get_table_with_counts()
self.restricted_doctypes = frappe.cache().get_value("domain_restricted_doctypes") or build_domain_restriced_doctype_cache()
self.restricted_pages = frappe.cache().get_value("domain_restricted_pages") or build_domain_restriced_page_cache()
def is_page_allowed(self):
cards = self.doc.get_link_groups() + get_custom_reports_and_doctypes(self.doc.module) + self.extended_links
shortcuts = self.doc.shortcuts + self.extended_shortcuts
for section in cards:
links = loads(section.get('links')) if isinstance(section.get('links'), string_types) else section.get('links')
for item in links:
if self.is_item_allowed(item.get('link_to'), item.get('link_type')):
return True
def _in_active_domains(item):
if not item.restrict_to_domain:
return True
else:
return item.restrict_to_domain in frappe.get_active_domains()
for item in shortcuts:
if self.is_item_allowed(item.link_to, item.type) and _in_active_domains(item):
return True
return False
def get_cached(self, cache_key, fallback_fn):
_cache = frappe.cache()
value = _cache.get_value(cache_key, user=frappe.session.user)
if value:
return value
value = fallback_fn()
# Expire every six hours
_cache.set_value(cache_key, value, frappe.session.user, 21600)
return value
def get_can_read_items(self):
if not self.user.can_read:
self.user.build_permissions()
return self.user.can_read
def get_allowed_modules(self):
if not self.user.allow_modules:
self.user.build_permissions()
return self.user.allow_modules
def get_page_for_user(self):
filters = {
'extends': self.page_name,
'for_user': frappe.session.user
}
user_pages = frappe.get_all("Workspace", filters=filters, limit=1)
if user_pages:
return frappe.get_cached_doc("Workspace", user_pages[0])
filters = {
'extends_another_page': 1,
'extends': self.page_name,
'is_default': 1
}
default_page = frappe.get_all("Workspace", filters=filters, limit=1)
if default_page:
return frappe.get_cached_doc("Workspace", default_page[0])
self.get_pages_to_extend()
return frappe.get_cached_doc("Workspace", self.page_name)
def get_onboarding_doc(self):
# Check if onboarding is enabled
if not frappe.get_system_settings("enable_onboarding"):
return None
if not self.doc.onboarding:
return None
if frappe.db.get_value("Module Onboarding", self.doc.onboarding, "is_complete"):
return None
doc = frappe.get_doc("Module Onboarding", self.doc.onboarding)
# Check if user is allowed
allowed_roles = set(doc.get_allowed_roles())
user_roles = set(frappe.get_roles())
if not allowed_roles & user_roles:
return None
# Check if already complete
if doc.check_completion():
return None
return doc
def get_pages_to_extend(self):
pages = frappe.get_all("Workspace", filters={
"extends": self.page_name,
'restrict_to_domain': ['in', frappe.get_active_domains()],
'for_user': '',
'module': ['in', self.allowed_modules]
})
pages = [frappe.get_cached_doc("Workspace", page['name']) for page in pages]
for page in pages:
self.extended_links = self.extended_links + page.get_link_groups()
self.extended_charts = self.extended_charts + page.charts
self.extended_shortcuts = self.extended_shortcuts + page.shortcuts
def is_item_allowed(self, name, item_type):
if frappe.session.user == "Administrator":
return True
item_type = item_type.lower()
if item_type == "doctype":
return (name in self.can_read or [] and name in self.restricted_doctypes or [])
if item_type == "page":
return (name in self.allowed_pages and name in self.restricted_pages)
if item_type == "report":
return name in self.allowed_reports
if item_type == "help":
return True
if item_type == "dashboard":
return True
return False
def build_workspace(self):
self.cards = {
'label': _(self.doc.cards_label),
'items': self.get_links()
}
self.charts = {
'label': _(self.doc.charts_label),
'items': self.get_charts()
}
self.shortcuts = {
'label': _(self.doc.shortcuts_label),
'items': self.get_shortcuts()
}
if self.onboarding_doc:
self.onboarding = {
'label': _(self.onboarding_doc.title),
'subtitle': _(self.onboarding_doc.subtitle),
'success': _(self.onboarding_doc.success_message),
'docs_url': self.onboarding_doc.documentation_url,
'items': self.get_onboarding_steps()
}
def _doctype_contains_a_record(self, name):
exists = self.table_counts.get(name, False)
if not exists and frappe.db.exists(name):
if not frappe.db.get_value('DocType', name, 'issingle'):
exists = bool(frappe.db.get_all(name, limit=1))
else:
exists = True
self.table_counts[name] = exists
return exists
def _prepare_item(self, item):
if item.dependencies:
dependencies = [dep.strip() for dep in item.dependencies.split(",")]
incomplete_dependencies = [d for d in dependencies if not self._doctype_contains_a_record(d)]
if len(incomplete_dependencies):
item.incomplete_dependencies = incomplete_dependencies
else:
item.incomplete_dependencies = ""
if item.onboard:
# Mark Spotlights for initial
if item.get("type") == "doctype":
name = item.get("name")
count = self._doctype_contains_a_record(name)
item["count"] = count
# Translate label
item["label"] = _(item.label) if item.label else _(item.name)
return item
@handle_not_exist
def get_links(self):
cards = self.doc.get_link_groups()
if not self.doc.hide_custom:
cards = cards + get_custom_reports_and_doctypes(self.doc.module)
if len(self.extended_links):
cards = merge_cards_based_on_label(cards + self.extended_links)
default_country = frappe.db.get_default("country")
new_data = []
for card in cards:
new_items = []
card = _dict(card)
links = card.get('links', [])
for item in links:
item = _dict(item)
# Condition: based on country
if item.country and item.country != default_country:
continue
# Check if user is allowed to view
if self.is_item_allowed(item.link_to, item.link_type):
prepared_item = self._prepare_item(item)
new_items.append(prepared_item)
if new_items:
if isinstance(card, _dict):
new_card = card.copy()
else:
new_card = card.as_dict().copy()
new_card["links"] = new_items
new_card["label"] = _(new_card["label"])
new_data.append(new_card)
return new_data
@handle_not_exist
def get_charts(self):
all_charts = []
if frappe.has_permission("Dashboard Chart", throw=False):
charts = self.doc.charts
if len(self.extended_charts):
charts = charts + self.extended_charts
for chart in charts:
if frappe.has_permission('Dashboard Chart', doc=chart.chart_name):
# Translate label
chart.label = _(chart.label) if chart.label else _(chart.chart_name)
all_charts.append(chart)
return all_charts
@handle_not_exist
def get_shortcuts(self):
def _in_active_domains(item):
if not item.restrict_to_domain:
return True
else:
return item.restrict_to_domain in frappe.get_active_domains()
items = []
shortcuts = self.doc.shortcuts
if len(self.extended_shortcuts):
shortcuts = shortcuts + self.extended_shortcuts
for item in shortcuts:
new_item = item.as_dict().copy()
if self.is_item_allowed(item.link_to, item.type) and _in_active_domains(item):
if item.type == "Report":
report = self.allowed_reports.get(item.link_to, {})
if report.get("report_type") in ["Query Report", "Script Report", "Custom Report"]:
new_item['is_query_report'] = 1
else:
new_item['ref_doctype'] = report.get('ref_doctype')
# Translate label
new_item["label"] = _(item.label) if item.label else _(item.link_to)
items.append(new_item)
return items
@handle_not_exist
def get_onboarding_steps(self):
steps = []
for doc in self.onboarding_doc.get_steps():
step = doc.as_dict().copy()
step.label = _(doc.title)
if step.action == "Create Entry":
step.is_submittable = frappe.db.get_value("DocType", step.reference_document, 'is_submittable', cache=True)
steps.append(step)
return steps
@frappe.whitelist()
@frappe.read_only()
def get_desktop_page(page):
"""Applies permissions, customizations and returns the configruration for a page
on desk.
Args:
page (string): page name
Returns:
dict: dictionary of cards, charts and shortcuts to be displayed on website
"""
try:
wspace = Workspace(page)
wspace.build_workspace()
return {
'charts': wspace.charts,
'shortcuts': wspace.shortcuts,
'cards': wspace.cards,
'onboarding': wspace.onboarding,
'allow_customization': not wspace.doc.disable_user_customization
}
except DoesNotExistError:
return {}
@frappe.whitelist()
def get_desk_sidebar_items():
"""Get list of sidebar items for desk"""
# don't get domain restricted pages
blocked_modules = frappe.get_doc('User', frappe.session.user).get_blocked_modules()
filters = {
'restrict_to_domain': ['in', frappe.get_active_domains()],
'extends_another_page': 0,
'for_user': '',
'module': ['not in', blocked_modules]
}
if not frappe.local.conf.developer_mode:
filters['developer_mode_only'] = '0'
# pages sorted based on pinned to top and then by name
order_by = "pin_to_top desc, pin_to_bottom asc, name asc"
all_pages = frappe.get_all("Workspace", fields=["name", "category", "icon", "module"],
filters=filters, order_by=order_by, ignore_permissions=True)
pages = []
# Filter Page based on Permission
for page in all_pages:
try:
wspace = Workspace(page.get('name'), True)
if wspace.is_page_allowed():
pages.append(page)
page['label'] = _(page.get('name'))
except frappe.PermissionError:
pass
return pages
def get_table_with_counts():
counts = frappe.cache().get_value("information_schema:counts")
if not counts:
counts = build_table_count_cache()
return counts
def get_custom_reports_and_doctypes(module):
return [
_dict({
"label": _("Custom Documents"),
"links": get_custom_doctype_list(module)
}),
_dict({
"label": _("Custom Reports"),
"links": get_custom_report_list(module)
}),
]
def get_custom_doctype_list(module):
doctypes = frappe.get_all("DocType", fields=["name"], filters={"custom": 1, "istable": 0, "module": module}, order_by="name")
out = []
for d in doctypes:
out.append({
"type": "Link",
"link_type": "doctype",
"link_to": d.name,
"label": _(d.name)
})
return out
def get_custom_report_list(module):
"""Returns list on new style reports for modules."""
reports = frappe.get_all("Report", fields=["name", "ref_doctype", "report_type"], filters=
{"is_standard": "No", "disabled": 0, "module": module},
order_by="name")
out = []
for r in reports:
out.append({
"type": "Link",
"link_type": "report",
"doctype": r.ref_doctype,
"is_query_report": 1 if r.report_type in ("Query Report", "Script Report", "Custom Report") else 0,
"label": _(r.name),
"link_to": r.name,
})
return out
def get_custom_workspace_for_user(page):
"""Get custom page from workspace if exists or create one
Args:
page (string): Page name
Returns:
Object: Document object
"""
filters = {
'extends': page,
'for_user': frappe.session.user
}
pages = frappe.get_list("Workspace", filters=filters)
if pages:
return frappe.get_doc("Workspace", pages[0])
doc = frappe.new_doc("Workspace")
doc.extends = page
doc.for_user = frappe.session.user
return doc
@frappe.whitelist()
def save_customization(page, config):
"""Save customizations as a separate doctype in Workspace per user
Args:
page (string): Name of the page to be edited
config (dict): Dictionary config of all widgets
Returns:
Boolean: Customization saving status
"""
original_page = frappe.get_doc("Workspace", page)
page_doc = get_custom_workspace_for_user(page)
# Update field values
page_doc.update({
"icon": original_page.icon,
"charts_label": original_page.charts_label,
"cards_label": original_page.cards_label,
"shortcuts_label": original_page.shortcuts_label,
"module": original_page.module,
"onboarding": original_page.onboarding,
"developer_mode_only": original_page.developer_mode_only,
"category": original_page.category
})
config = _dict(loads(config))
if config.charts:
page_doc.charts = prepare_widget(config.charts, "Workspace Chart", "charts")
if config.shortcuts:
page_doc.shortcuts = prepare_widget(config.shortcuts, "Workspace Shortcut", "shortcuts")
if config.cards:
page_doc.build_links_table_from_cards(config.cards)
# Set label
page_doc.label = page + '-' + frappe.session.user
try:
if page_doc.is_new():
page_doc.insert(ignore_permissions=True)
else:
page_doc.save(ignore_permissions=True)
except (ValidationError, TypeError) as e:
# Create a json string to log
json_config = dumps(config, sort_keys=True, indent=4)
# Error log body
log = \
"""
page: {0}
config: {1}
exception: {2}
""".format(page, json_config, e)
frappe.log_error(log, _("Could not save customization"))
return False
return True
def prepare_widget(config, doctype, parentfield):
"""Create widget child table entries with parent details
Args:
config (dict): Dictionary containing widget config
doctype (string): Doctype name of the child table
parentfield (string): Parent field for the child table
Returns:
TYPE: List of Document objects
"""
if not config:
return []
order = config.get('order')
widgets = config.get('widgets')
prepare_widget_list = []
for idx, name in enumerate(order):
wid_config = widgets[name].copy()
# Some cleanup
wid_config.pop("name", None)
# New Doc
doc = frappe.new_doc(doctype)
doc.update(wid_config)
# Manually Set IDX
doc.idx = idx + 1
# Set Parent Field
doc.parentfield = parentfield
prepare_widget_list.append(doc)
return prepare_widget_list
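# Illustrative `config` shape consumed above (keys inferred from the accesses in prepare_widget):
# {"order": ["widget-1", "widget-2"], "widgets": {"widget-1": {...}, "widget-2": {...}}}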
@frappe.whitelist()
def update_onboarding_step(name, field, value):
"""Update status of onboaridng step
Args:
name (string): Name of the doc
field (string): field to be updated
value: Value to be updated
"""
frappe.db.set_value("Onboarding Step", name, field, value)
@frappe.whitelist()
def reset_customization(page):
"""Reset workspace customizations for a user
Args:
page (string): Name of the page to be reset
"""
page_doc = get_custom_workspace_for_user(page)
page_doc.delete()
def merge_cards_based_on_label(cards):
"""Merge cards with common label."""
cards_dict = {}
for card in cards:
label = card.get('label')
if label in cards_dict:
links = cards_dict[label].links + card.links
cards_dict[label].update(dict(links=links))
cards_dict[label] = cards_dict.pop(label)
else:
cards_dict[label] = card
return list(cards_dict.values())
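# Illustrative: two cards labelled "Masters" with links [a] and [b] come back as a single
# card labelled "Masters" whose links are [a, b]; labels that occur once are returned unchanged.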
|
the-stack_0_4655 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
xrange = range
else:
from itertools import izip as zip, imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, TimestampType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
:param sampleRatio: the sample ratio of rows used for inferring the schema
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
.. autoattribute:: builder
:annotation:
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# This SparkContext may be an existing one.
for key, value in self._options.items():
# we need to propagate the confs
# before we create the SparkSession. Otherwise, confs like
# warehouse path and metastore url will not be set correctly (
# these confs cannot be changed once the SparkSession is created).
sc._conf.set(key, value)
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
for key, value in self._options.items():
session.sparkContext._conf.set(key, value)
return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances"""
_instantiatedSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
jsparkSession = self._jvm.SparkSession.builder().getOrCreate()
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
:return: :class:`Catalog`
"""
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.udf import UDFRegistration
return UDFRegistration(self)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data, names=None):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:param names: list of column names
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None, names=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first, names=names)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row, names=names))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio, names=schema)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data, names=schema)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
:return corrected dtype for a numpy.record or None if no correction needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
def _convert_from_pandas(self, pdf, schema, timezone):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
:return list of records
"""
if timezone is not None:
from pyspark.sql.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if s is not pdf[field.name]:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(series, timezone)
if s is not series:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records]
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
"""
Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
data types will be used to coerce the data in Pandas to Arrow conversion.
"""
from pyspark.serializers import ArrowSerializer, _create_batch
from pyspark.sql.types import from_arrow_schema, to_arrow_type, TimestampType
from pyspark.sql.utils import require_minimum_pandas_version, \
require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# Determine arrow types to coerce data when creating batches
if isinstance(schema, StructType):
arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
elif isinstance(schema, DataType):
raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
else:
# Any timestamps must be coerced to be compatible with Spark
arrow_types = [to_arrow_type(TimestampType())
if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
for t in pdf.dtypes]
# Slice the DataFrame to be batched
step = -(-len(pdf) // self.sparkContext.defaultParallelism) # round int up
pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))
# Create Arrow record batches
batches = [_create_batch([(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)],
timezone)
for pdf_slice in pdf_slices]
# Create the Spark schema from the first Arrow batch (always at least 1 batch after slicing)
if isinstance(schema, (list, tuple)):
struct = from_arrow_schema(batches[0].schema)
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
# Create the Spark DataFrame directly from the Arrow data and schema
jrdd = self._sc._serialize_to_jvm(batches, len(batches), ArrowSerializer())
jdf = self._jvm.PythonSQLUtils.arrowPayloadToDataFrame(
jrdd, schema.json(), self._wrapped._jsqlContext)
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
elif isinstance(schema, (list, tuple)):
# Must re-encode any unicode strings to be consistent with StructField names
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
if self.conf.get("spark.sql.execution.pandas.respectSessionTimeZone").lower() \
== "true":
timezone = self.conf.get("spark.sql.session.timeZone")
else:
timezone = None
# If no schema supplied by user then get the names of columns only
if schema is None:
schema = [str(x) if not isinstance(x, basestring) else
(x.encode('utf-8') if not isinstance(x, str) else x)
for x in data.columns]
if self.conf.get("spark.sql.execution.arrow.enabled", "false").lower() == "true" \
and len(data) > 0:
try:
return self._create_from_pandas_with_arrow(data, schema, timezone)
except Exception as e:
warnings.warn("Arrow will not be used in createDataFrame: %s" % str(e))
# Fallback to create DataFrame without arrow if raise some exception
data = self._convert_from_pandas(data, schema, timezone)
if isinstance(schema, StructType):
verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
verify_func = _make_type_verifier(
dataType, name="field value") if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj,
else:
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
SparkSession._instantiatedSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
the-stack_0_4656 | # This file contains globals used across the dottizer program
# ==============================================================================
# User Specified Params
# ==============================================================================
# drill_sizes = [] # Sets available drill bits when creating images
drill_sizes = [
0.0,
1.0/8.0,
9.0/64.0,
5.0/32.0,
3.0/16.0,
1.0/4.0,
5.0/16.0,
3.0/8.0,
1.0/2.0,
5.0/8.0,
3.0/4.0,
7.0/8.0,
1.0
]
# Function for generating drill_sizes global var
def generate_global_drill_sizes():
print('called generate_global_drill_sizes()')
global drill_sizes
drill_sizes = []
for size in optional_drill_sizes:
if size[1]:
drill_sizes.append(size[0])
print(drill_sizes)
# drill_sizes = np.asarray(temp_drill_sizes)
optional_drill_sizes = [
[0.0, True],
[1.0/8.0, True],
[9.0/64.0, True],
[5.0/32.0, True],
[3.0/16.0, True],
[1.0/4.0, True],
[5.0/16.0, True],
[3.0/8.0, True],
[1.0/2.0, True], # Largest regular drill bit
[5.0/8.0, False],
[3.0/4.0, False],
[7.0/8.0, False],
[1.0, False]
]
generate_global_drill_sizes()
dist_between_holes = 0.4 # Sets the distance between holes (edge to edge in inches)
border_x = 2.0 # Sets the border width (in inches)
border_y = 2.0 # Sets the border height (in inches)
input_image = "input.png" # Input image for dottizing
dots_wide = 50 # How many drill holes wide the image should be (dotsTall will be calculated from this)
pixels_per_inch = 40 # Sets scaling of rendered image(s)
pixels_per_inch_render = 80
# ==============================================================================
# Other Params (not exposed to user yet)
# ==============================================================================
font_size = 12 # Size of global font
# Image colors
font_color = (255, 255, 255, 255) # Color of global font
grid_color = (128, 128, 128, 128) # Color of background dot grid
inset_border_color = (0, 0, 255, 255) # Color of edge border line
# Params for making series of images
# series_base_width = dotsWide # dotWidth for initial image
# series_increment = 10 # How many dot increments to make images for
# ==============================================================================
# Internal Dottizer Stuff
# ==============================================================================
# workDir = 'dottizerFiles/' # Directory for temporary dottizer files
out_dir = 'out/' # Directory for output images
# Size of source image view
src_view_x = 300
src_view_y = 225
# Size of output image view
out_view_x = 1200/2
out_view_y = 900/2
# out_view_x = 1200
# out_view_y = 900
|
the-stack_0_4658 | import numpy as np
import pandas as pd
def clean_Data(data, fill=0, with_pages = False):
'''
Clean the input data: fill NaN values with `fill` and optionally drop the page column
Args:
data: input data for clean
type: ndarray, shape: (series, time_step)
fill: the number to fill
type: INT
with_pages: whether the first column is a page identifier that should be dropped
type: Boolean
Return:
data: clean data
type: ndarray, shape: (series, time_step)
'''
data = pd.DataFrame(data)
data = data.fillna(fill).values
if with_pages:
data = data[:,1:]
return data
def normalise_transform(data):
'''
This is for normalising the input data.
Args:
data: input data to transform
type: ndarray, shape: (series, time_step)
Return:
transformed_Data: transformed data
type: ndarray, shape: (series, time_step)
'''
transformed_Data = np.log1p(data*0.5).astype('float32')
return transformed_Data
def normalise_reverse(data):
'''
This is for rescaling the scaled data.
Args:
data: input data to rescale
type: ndarray, shape: (series, time_step)
Return:
reversed_data: rescaled data
type: ndarray, shape: (series, time_step)
'''
reversed_data = np.expm1(data)/0.5
return reversed_data
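# Sanity check (illustrative): normalise_reverse inverts normalise_transform up to float32 error.
# >>> x = np.array([0.0, 1.0, 10.0])
# >>> np.allclose(normalise_reverse(normalise_transform(x)), x)
# True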
def split_data(train, test, pred_days=60):
'''
This is for spliting raw data into train_X, train_y, test_X, and test_y
Args:
train: raw train data
type: ndarray, shape: (series, time_step)
test: raw test data
type: ndarray, shape: (series, time_step)
pred_day: number of days to forecast
type: INT
Return:
train_X: data of training input
type: ndarray, shape: (series, time_step)
train_y: data of training output
type: ndarray, shape: (series, pred_days)
test_X: data of testing input
type: ndarray, shape: (series, time_step)
test_y: raw data for validating testing (ground truth)
type: ndarray, shape: (series, pred_days)
'''
series_numb, total_length = train.shape
print(series_numb, total_length)
train_X = train[:, :(total_length - pred_days)]
train_y = train[:, -(pred_days):]
test_X = train[:, pred_days:total_length]
test_y = test[:, total_length:(total_length+pred_days)]
print(test_X.shape, test_y.shape)
series, train_step = train_X.shape
series, pred_step = train_y.shape
train_X = train_X.reshape(series,1,train_step)
train_y = train_y.reshape(series,1,pred_step)
test_X = test_X.reshape(series,1,train_step)
test_y = test_y.reshape(series,1,pred_step)
return train_X, train_y, test_X, test_y
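# Illustrative usage (assumes `test` has at least total_length + pred_days columns):
# train_X, train_y, test_X, test_y = split_data(train, test, pred_days=60)
# shapes: train_X/test_X -> (series, 1, time_step - 60); train_y/test_y -> (series, 1, 60)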
|
the-stack_0_4659 | """Functions for building the face recognition network.
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from subprocess import Popen, PIPE
import tensorflow as tf
import numpy as np
from scipy import misc
from sklearn.model_selection import KFold
from scipy import interpolate
from tensorflow.python.training import training
import random
import re
from tensorflow.python.platform import gfile
import math
from six import iteritems
import imageio
def triplet_loss(anchor, positive, negative, alpha):
"""Calculate the triplet loss according to the FaceNet paper
Args:
anchor: the embeddings for the anchor images.
positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
      alpha: margin between the anchor-positive and anchor-negative distances.
Returns:
the triplet loss according to the FaceNet paper as a float tensor.
"""
with tf.variable_scope('triplet_loss'):
pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)
loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
return loss
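# Hedged usage sketch (illustration only, not part of the original file): shows the
# tensor shapes triplet_loss expects. The batch size, embedding size, and margin
# below are hypothetical choices.
def _triplet_loss_example(batch_size=4, embedding_size=128):
    anchor = tf.placeholder(tf.float32, [batch_size, embedding_size])
    positive = tf.placeholder(tf.float32, [batch_size, embedding_size])
    negative = tf.placeholder(tf.float32, [batch_size, embedding_size])
    return triplet_loss(anchor, positive, negative, alpha=0.2)  # scalar loss tensor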
def center_loss(features, label, alfa, nrof_classes):
"""Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
(http://ydwen.github.io/papers/WenECCV16.pdf)
"""
nrof_features = features.get_shape()[1]
centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
initializer=tf.constant_initializer(0), trainable=False)
label = tf.reshape(label, [-1])
centers_batch = tf.gather(centers, label)
diff = (1 - alfa) * (centers_batch - features)
centers = tf.scatter_sub(centers, label, diff)
with tf.control_dependencies([centers]):
loss = tf.reduce_mean(tf.square(features - centers_batch))
return loss, centers
def get_image_paths_and_labels(dataset):
image_paths_flat = []
labels_flat = []
for i in range(len(dataset)):
image_paths_flat += dataset[i].image_paths
labels_flat += [i] * len(dataset[i].image_paths)
return image_paths_flat, labels_flat
def shuffle_examples(image_paths, labels):
shuffle_list = list(zip(image_paths, labels))
random.shuffle(shuffle_list)
image_paths_shuff, labels_shuff = zip(*shuffle_list)
return image_paths_shuff, labels_shuff
def random_rotate_image(image):
angle = np.random.uniform(low=-10.0, high=10.0)
return misc.imrotate(image, angle, 'bicubic')
# 1: Random rotate 2: Random crop 4: Random flip 8: Fixed image standardization 16: Flip
RANDOM_ROTATE = 1
RANDOM_CROP = 2
RANDOM_FLIP = 4
FIXED_STANDARDIZATION = 8
FLIP = 16
def create_input_pipeline(input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder):
images_and_labels_list = []
for _ in range(nrof_preprocess_threads):
filenames, label, control = input_queue.dequeue()
images = []
for filename in tf.unstack(filenames):
file_contents = tf.io.read_file(filename)
image = tf.image.decode_image(file_contents, 3)
image = tf.cond(get_control_flag(control[0], RANDOM_ROTATE),
lambda:tf.compat.v1.py_func(random_rotate_image, [image], tf.uint8),
lambda:tf.identity(image))
image = tf.cond(get_control_flag(control[0], RANDOM_CROP),
lambda:tf.compat.v1.random_crop(image, image_size + (3,)),
lambda:tf.image.resize_with_crop_or_pad(image, image_size[0], image_size[1]))
image = tf.cond(get_control_flag(control[0], RANDOM_FLIP),
lambda:tf.image.random_flip_left_right(image),
lambda:tf.identity(image))
image = tf.cond(get_control_flag(control[0], FIXED_STANDARDIZATION),
lambda:(tf.cast(image, tf.float32) - 127.5)/128.0,
lambda:tf.image.per_image_standardization(image))
image = tf.cond(get_control_flag(control[0], FLIP),
lambda:tf.image.flip_left_right(image),
lambda:tf.identity(image))
#pylint: disable=no-member
image.set_shape(image_size + (3,))
images.append(image)
images_and_labels_list.append([images, label])
image_batch, label_batch = tf.train.batch_join(
images_and_labels_list, batch_size=batch_size_placeholder,
shapes=[image_size + (3,), ()], enqueue_many=True,
capacity=4 * nrof_preprocess_threads * 100,
allow_smaller_final_batch=True)
return image_batch, label_batch
def get_control_flag(control, field):
return tf.equal(tf.compat.v1.mod(tf.compat.v1.floor_div(control, field), 2), 1)
def _add_loss_summaries(total_loss):
"""Add summaries for losses.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summmary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name +' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step, optimizer, learning_rate, moving_average_decay, update_gradient_vars, log_histograms=True):
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
if optimizer=='ADAGRAD':
opt = tf.train.AdagradOptimizer(learning_rate)
elif optimizer=='ADADELTA':
opt = tf.train.AdadeltaOptimizer(learning_rate, rho=0.9, epsilon=1e-6)
elif optimizer=='ADAM':
opt = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=0.1)
elif optimizer=='RMSPROP':
opt = tf.train.RMSPropOptimizer(learning_rate, decay=0.9, momentum=0.9, epsilon=1.0)
elif optimizer=='MOM':
opt = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True)
else:
raise ValueError('Invalid optimization algorithm')
grads = opt.compute_gradients(total_loss, update_gradient_vars)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
if log_histograms:
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
if log_histograms:
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
moving_average_decay, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def prewhiten(x):
mean = np.mean(x)
std = np.std(x)
std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
y = np.multiply(np.subtract(x, mean), 1/std_adj)
return y
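# Hedged usage sketch (illustration only): prewhitening a hypothetical uint8-range
# image yields an array with approximately zero mean and unit standard deviation.
def _prewhiten_example():
    img = np.random.randint(0, 256, size=(160, 160, 3)).astype(np.float32)
    white = prewhiten(img)
    return white.mean(), white.std()  # ~0.0 and ~1.0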
def crop(image, random_crop, image_size):
if image.shape[1]>image_size:
sz1 = int(image.shape[1]//2)
sz2 = int(image_size//2)
if random_crop:
diff = sz1-sz2
(h, v) = (np.random.randint(-diff, diff+1), np.random.randint(-diff, diff+1))
else:
(h, v) = (0,0)
image = image[(sz1-sz2+v):(sz1+sz2+v),(sz1-sz2+h):(sz1+sz2+h),:]
return image
def flip(image, random_flip):
if random_flip and np.random.choice([True, False]):
image = np.fliplr(image)
return image
def to_rgb(img):
w, h = img.shape
ret = np.empty((w, h, 3), dtype=np.uint8)
ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img
return ret
def load_data(image_paths, do_random_crop, do_random_flip, image_size, do_prewhiten=True):
nrof_samples = len(image_paths)
images = np.zeros((nrof_samples, image_size, image_size, 3))
data_type = type(image_paths[0])
for i in range(nrof_samples):
if data_type is str:
img = imageio.imread(image_paths[i])
else:
img = image_paths[i]
if img.ndim == 2:
img = to_rgb(img)
if do_prewhiten:
img = prewhiten(img)
img = crop(img, do_random_crop, image_size)
img = flip(img, do_random_flip)
images[i,:,:,:] = img
return images
def get_label_batch(label_data, batch_size, batch_index):
nrof_examples = np.size(label_data, 0)
j = batch_index*batch_size % nrof_examples
if j+batch_size<=nrof_examples:
batch = label_data[j:j+batch_size]
else:
x1 = label_data[j:nrof_examples]
x2 = label_data[0:nrof_examples-j]
batch = np.vstack([x1,x2])
batch_int = batch.astype(np.int64)
return batch_int
def get_batch(image_data, batch_size, batch_index):
nrof_examples = np.size(image_data, 0)
j = batch_index*batch_size % nrof_examples
if j+batch_size<=nrof_examples:
batch = image_data[j:j+batch_size,:,:,:]
else:
x1 = image_data[j:nrof_examples,:,:,:]
x2 = image_data[0:nrof_examples-j,:,:,:]
batch = np.vstack([x1,x2])
batch_float = batch.astype(np.float32)
return batch_float
def get_triplet_batch(triplets, batch_index, batch_size):
ax, px, nx = triplets
a = get_batch(ax, int(batch_size/3), batch_index)
p = get_batch(px, int(batch_size/3), batch_index)
n = get_batch(nx, int(batch_size/3), batch_index)
batch = np.vstack([a, p, n])
return batch
def get_learning_rate_from_file(filename, epoch):
with open(filename, 'r') as f:
for line in f.readlines():
line = line.split('#', 1)[0]
if line:
par = line.strip().split(':')
e = int(par[0])
if par[1]=='-':
lr = -1
else:
lr = float(par[1])
if e <= epoch:
learning_rate = lr
else:
return learning_rate
class ImageClass():
"Stores the paths to images for a given class"
def __init__(self, name, image_paths):
self.name = name
self.image_paths = image_paths
def __str__(self):
return self.name + ', ' + str(len(self.image_paths)) + ' images'
def __len__(self):
return len(self.image_paths)
def get_dataset(path, has_class_directories=True):
dataset = []
path_exp = os.path.expanduser(path)
classes = [path for path in os.listdir(path_exp) \
if os.path.isdir(os.path.join(path_exp, path))]
classes.sort()
nrof_classes = len(classes)
for i in range(nrof_classes):
class_name = classes[i]
facedir = os.path.join(path_exp, class_name)
image_paths = get_image_paths(facedir)
dataset.append(ImageClass(class_name, image_paths))
return dataset
def get_image_paths(facedir):
image_paths = []
if os.path.isdir(facedir):
images = os.listdir(facedir)
image_paths = [os.path.join(facedir,img) for img in images]
return image_paths
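# Hedged usage sketch (illustration only; the directory path is hypothetical).
# A dataset directory is expected to contain one sub-directory per identity,
# each holding that person's face images.
def _dataset_example(data_dir='~/datasets/faces_aligned'):
    dataset = get_dataset(data_dir)
    image_paths, labels = get_image_paths_and_labels(dataset)
    print('%d classes, %d images' % (len(dataset), len(image_paths)))
    return image_paths, labels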
def split_dataset(dataset, split_ratio, min_nrof_images_per_class, mode):
if mode=='SPLIT_CLASSES':
nrof_classes = len(dataset)
class_indices = np.arange(nrof_classes)
np.random.shuffle(class_indices)
split = int(round(nrof_classes*(1-split_ratio)))
train_set = [dataset[i] for i in class_indices[0:split]]
        test_set = [dataset[i] for i in class_indices[split:]]
elif mode=='SPLIT_IMAGES':
train_set = []
test_set = []
for cls in dataset:
paths = cls.image_paths
np.random.shuffle(paths)
nrof_images_in_class = len(paths)
split = int(math.floor(nrof_images_in_class*(1-split_ratio)))
if split==nrof_images_in_class:
split = nrof_images_in_class-1
if split>=min_nrof_images_per_class and nrof_images_in_class-split>=1:
train_set.append(ImageClass(cls.name, paths[:split]))
test_set.append(ImageClass(cls.name, paths[split:]))
else:
raise ValueError('Invalid train/test split mode "%s"' % mode)
return train_set, test_set
def load_model(model, input_map=None):
# Check if the model is a model directory (containing a metagraph and a checkpoint file)
# or if it is a protobuf file with a frozen graph
model_exp = os.path.expanduser(model)
if (os.path.isfile(model_exp)):
print('Model filename: %s' % model_exp)
with gfile.FastGFile(model_exp,'rb') as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, input_map=input_map, name='')
else:
print('Model directory: %s' % model_exp)
meta_file, ckpt_file = get_model_filenames(model_exp)
print('Metagraph file: %s' % meta_file)
print('Checkpoint file: %s' % ckpt_file)
saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file), input_map=input_map)
saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))
def get_model_filenames(model_dir):
files = os.listdir(model_dir)
meta_files = [s for s in files if s.endswith('.meta')]
if len(meta_files)==0:
raise ValueError('No meta file found in the model directory (%s)' % model_dir)
elif len(meta_files)>1:
raise ValueError('There should not be more than one meta file in the model directory (%s)' % model_dir)
meta_file = meta_files[0]
ckpt = tf.train.get_checkpoint_state(model_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_file = os.path.basename(ckpt.model_checkpoint_path)
return meta_file, ckpt_file
meta_files = [s for s in files if '.ckpt' in s]
max_step = -1
for f in files:
step_str = re.match(r'(^model-[\w\- ]+.ckpt-(\d+))', f)
if step_str is not None and len(step_str.groups())>=2:
step = int(step_str.groups()[1])
if step > max_step:
max_step = step
ckpt_file = step_str.groups()[0]
return meta_file, ckpt_file
def distance(embeddings1, embeddings2, distance_metric=0):
if distance_metric==0:
        # Squared Euclidean distance
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff),1)
elif distance_metric==1:
# Distance based on cosine similarity
dot = np.sum(np.multiply(embeddings1, embeddings2), axis=1)
norm = np.linalg.norm(embeddings1, axis=1) * np.linalg.norm(embeddings2, axis=1)
similarity = dot / norm
dist = np.arccos(similarity) / math.pi
else:
        raise ValueError('Undefined distance metric %d' % distance_metric)
return dist
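# Hedged usage sketch (illustration only): compares the two metrics on hypothetical
# L2-normalised embeddings.
def _distance_example():
    e1 = np.random.rand(5, 128).astype(np.float32)
    e2 = np.random.rand(5, 128).astype(np.float32)
    e1 /= np.linalg.norm(e1, axis=1, keepdims=True)
    e2 /= np.linalg.norm(e2, axis=1, keepdims=True)
    squared_l2 = distance(e1, e2, distance_metric=0)  # in [0, 4] for unit vectors
    angular = distance(e1, e2, distance_metric=1)     # arccos(similarity) / pi, in [0, 1]
    return squared_l2, angular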
def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10, distance_metric=0, subtract_mean=False):
assert(embeddings1.shape[0] == embeddings2.shape[0])
assert(embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds,nrof_thresholds))
fprs = np.zeros((nrof_folds,nrof_thresholds))
accuracy = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
if subtract_mean:
mean = np.mean(np.concatenate([embeddings1[train_set], embeddings2[train_set]]), axis=0)
else:
mean = 0.0
dist = distance(embeddings1-mean, embeddings2-mean, distance_metric)
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])
best_threshold_index = np.argmax(acc_train)
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx,threshold_idx], fprs[fold_idx,threshold_idx], _ = calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])
_, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])
tpr = np.mean(tprs,0)
fpr = np.mean(fprs,0)
return tpr, fpr, accuracy
def calculate_accuracy(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp+fn==0) else float(tp) / float(tp+fn)
fpr = 0 if (fp+tn==0) else float(fp) / float(fp+tn)
acc = float(tp+tn)/dist.size
return tpr, fpr, acc
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10, distance_metric=0, subtract_mean=False):
assert(embeddings1.shape[0] == embeddings2.shape[0])
assert(embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
if subtract_mean:
mean = np.mean(np.concatenate([embeddings1[train_set], embeddings2[train_set]]), axis=0)
else:
mean = 0.0
dist = distance(embeddings1-mean, embeddings2-mean, distance_metric)
# Find the threshold that gives FAR = far_target
far_train = np.zeros(nrof_thresholds)
for threshold_idx, threshold in enumerate(thresholds):
_, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
if np.max(far_train)>=far_target:
f = interpolate.interp1d(far_train, thresholds, kind='slinear')
threshold = f(far_target)
else:
threshold = 0.0
val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
val_mean = np.mean(val)
far_mean = np.mean(far)
val_std = np.std(val)
return val_mean, val_std, far_mean
def calculate_val_far(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
n_same = np.sum(actual_issame)
n_diff = np.sum(np.logical_not(actual_issame))
val = float(true_accept) / float(n_same)
far = float(false_accept) / float(n_diff)
return val, far
def store_revision_info(src_path, output_dir, arg_string):
try:
# Get git hash
cmd = ['git', 'rev-parse', 'HEAD']
gitproc = Popen(cmd, stdout = PIPE, cwd=src_path)
(stdout, _) = gitproc.communicate()
git_hash = stdout.strip()
except OSError as e:
git_hash = ' '.join(cmd) + ': ' + e.strerror
try:
# Get local changes
cmd = ['git', 'diff', 'HEAD']
gitproc = Popen(cmd, stdout = PIPE, cwd=src_path)
(stdout, _) = gitproc.communicate()
git_diff = stdout.strip()
except OSError as e:
git_diff = ' '.join(cmd) + ': ' + e.strerror
# Store a text file in the log directory
rev_info_filename = os.path.join(output_dir, 'revision_info.txt')
with open(rev_info_filename, "w") as text_file:
text_file.write('arguments: %s\n--------------------\n' % arg_string)
text_file.write('tensorflow version: %s\n--------------------\n' % tf.__version__) # @UndefinedVariable
text_file.write('git hash: %s\n--------------------\n' % git_hash)
text_file.write('%s' % git_diff)
def list_variables(filename):
reader = training.NewCheckpointReader(filename)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
return names
def put_images_on_grid(images, shape=(16,8)):
nrof_images = images.shape[0]
img_size = images.shape[1]
bw = 3
img = np.zeros((shape[1]*(img_size+bw)+bw, shape[0]*(img_size+bw)+bw, 3), np.float32)
for i in range(shape[1]):
x_start = i*(img_size+bw)+bw
for j in range(shape[0]):
img_index = i*shape[0]+j
if img_index>=nrof_images:
break
y_start = j*(img_size+bw)+bw
img[x_start:x_start+img_size, y_start:y_start+img_size, :] = images[img_index, :, :, :]
if img_index>=nrof_images:
break
return img
def write_arguments_to_file(args, filename):
with open(filename, 'w') as f:
for key, value in iteritems(vars(args)):
f.write('%s: %s\n' % (key, str(value)))
|
the-stack_0_4662 | from board import Board
from player import HumanPlayer, RandomPlayer, MinimaxPlayer
class Game:
__active_player__ = None
__inactive_player__ = None
__player_1__ = None
__player_2__ = None
__move_symbol__ = {}
def __init__(self, player_1, player_2, size):
self.__player_1__ = player_1
self.__player_2__ = player_2
self.__board__ = Board(size)
self.__active_player__ = player_1
self.__inactive_player__ = player_2
def switch_turn(self):
temp = self.__inactive_player__
self.__inactive_player__ = self.__active_player__
self.__active_player__ = temp
def play_game(self):
game_over = False
# self.__board__.board = [["O", "X", " "],
# ["X", " ", " "],
# [" ", " ", " "]]
# self.__board__.possible_moves = [(0,2), (1, 2), (2,0), (2,2)]
while not game_over:
print("======== {} turn! ========".format(self.__active_player__.get_name()))
move = self.__active_player__.move(self.__board__)
game_over, msg, player = self.__board__.apply_move(self.__active_player__, move)
if (game_over):
ending_msg = "><><><><><>< {} ><><><><><><".format(msg)
print("*" * len(ending_msg))
print(ending_msg)
print("*" * len(ending_msg))
else:
self.switch_turn()
player1 = HumanPlayer("O")
player2 = MinimaxPlayer("X")
board_size = 3
game = Game(player1, player2, board_size)
game.play_game() |
the-stack_0_4663 | #!/usr/bin/env python3
# MIT License
#
# Copyright (C) 2019-2020, Entynetproject. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import random
mails = (
"mail.ru",
"inbox.ru",
"list.ru",
"bk.ru",
"ya.ru",
"yandex.com",
"yandex.ua",
"yandex.ru",
"gmail.com"
)
# Get random service
def random_service(services):
    return random.choice(services)
# Create random name
def random_name():
with open("tools/SMS/names.json", 'r') as names:
names = json.load(names)["names"]
return random.choice(names)
# Create random suffix for email
# %random_name%SUFFIX@%random_email%
def random_suffix(int_range = 4):
numbers = []
for _ in range(int_range):
numbers.append(str(random.randint(1, 9)))
return "".join(numbers)
# Create random email by name, suffix, mail
# Example: [email protected]
def random_email():
return random_name() + random_suffix() + "@" + random.choice(mails)
# Create random password
# %random_name%%random_suffix%
def random_password():
return random_name() + random_suffix(int_range = 10)
# Get random user agent
def random_useragent():
with open("tools/SMS/user_agents.json", 'r') as agents:
user_agents = json.load(agents)["agents"]
return random.choice(user_agents)
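# Hedged usage sketch (illustration only): generates one throwaway identity.
# The JSON files referenced above must exist for this to run.
if __name__ == "__main__":
    print("Email:    " + random_email())
    print("Password: " + random_password())
    print("Agent:    " + random_useragent())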
|
the-stack_0_4664 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example script to train the DNC on a repeated copy task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import sonnet as snt
from tensorflow.contrib.layers.python.layers import initializers
from dnc import dnc
import numpy as np
import cv2
from scipy import ndimage as nd
from PIL import Image, ImageDraw
import os, sys
import time
from utility import alrc
experiment_number = 103
FLAGS = tf.flags.FLAGS
# Model parameters
tf.flags.DEFINE_integer("hidden_size", 128, "Size of LSTM hidden layer.")
tf.flags.DEFINE_integer("memory_size", 16, "The number of memory slots.")
tf.flags.DEFINE_integer("word_size", 64, "The width of each memory slot.")
tf.flags.DEFINE_integer("num_write_heads", 1, "Number of memory write heads.")
tf.flags.DEFINE_integer("num_read_heads", 4, "Number of memory read heads.")
tf.flags.DEFINE_integer("clip_value", 0, "Maximum absolute value of controller and dnc outputs.")
tf.flags.DEFINE_bool("use_batch_norm", True, "Use batch normalization in generator.")
tf.flags.DEFINE_string("model", "LSTM", "LSTM or DNC.")
tf.flags.DEFINE_integer("projection_size", 0, "Size of projection layer. Zero for no projection.")
tf.flags.DEFINE_bool("is_input_embedder", False, "Embed inputs before they are input.")
# Optimizer parameters.
tf.flags.DEFINE_integer("batch_size", 32, "Batch size for training.")
tf.flags.DEFINE_integer("replay_size", 25000, "Maximum examples in ring buffer.")
tf.flags.DEFINE_integer("avg_replays", 4, "Mean frequency each experience is used.")
tf.flags.DEFINE_float("max_grad_norm", 50, "Gradient clipping norm limit.")
tf.flags.DEFINE_float("learning_rate", 1e-4, "Optimizer learning rate.")
tf.flags.DEFINE_float("optimizer_epsilon", 1e-10, "Epsilon used for RMSProp optimizer.")
tf.flags.DEFINE_float("L2_norm", 1.e-4, "Decay rate for L2 regularization. 0 for no regularization.")
# Task parameters
tf.flags.DEFINE_integer("img_side", 96, "Number of image pixels for square image")
tf.flags.DEFINE_integer("num_steps", 20, "Number of image pixels for square image")
tf.flags.DEFINE_integer("step_size", 20, "Distance STEM probe moves at each step (in px).")
tf.flags.DEFINE_integer("num_actions", 2, "Number of parameters to describe actions.")
tf.flags.DEFINE_integer("shuffle_size", 2000, "Size of moving buffer to sample data from.")
tf.flags.DEFINE_integer("prefetch_size", 10, "Number of batches to prepare in advance.")
# Training options.
tf.flags.DEFINE_float("actor_lr", 0.001, "Actor learning rate.")
tf.flags.DEFINE_float("critic_lr", 0.001, "Critic learning rate.")
tf.flags.DEFINE_float("generator_lr", 0.003, "Generator learning rate.")
tf.flags.DEFINE_float("gamma", 0.97, "Reward/loss decay.")
tf.flags.DEFINE_bool("is_advantage_actor_critic", False, "Use advantage rather than Q errors for actor.")
tf.flags.DEFINE_bool("is_cyclic_generator_learning_rate", False, "Use advantage rather than Q errors for actor.")
tf.flags.DEFINE_integer("supervision_iters", 100_000, "Starting value for supeversion.")
tf.flags.DEFINE_float("supervision_start", 1., "Starting value for supeversion.")
tf.flags.DEFINE_float("supervision_end", 0., "Starting value for supeversion.")
if FLAGS.supervision_iters:
#Flag will not be used
tf.flags.DEFINE_float("supervision", 0.5, "Weighting for known discounted future reward.")
else:
#Flag will be used
tf.flags.DEFINE_float("supervision", 0.0, "Weighting for known discounted future reward.")
tf.flags.DEFINE_bool("is_target_actor", True and FLAGS.supervision != 1, "True to use target actor.")
tf.flags.DEFINE_bool("is_target_critic", True and FLAGS.supervision != 1, "True to use target critic.")
tf.flags.DEFINE_bool("is_target_generator", False, "True to use target generator.")
tf.flags.DEFINE_integer("update_frequency", 0, "Frequency of hard target network updates. Zero for soft updates.")
tf.flags.DEFINE_float("target_decay", 0.9997, "Decay rate for target network soft updates.")
tf.flags.DEFINE_bool("is_generator_batch_norm_tracked", False, "True to track generator batch normalization.")
tf.flags.DEFINE_bool("is_positive_qs", True, "Whether to clip qs to be positive.")
tf.flags.DEFINE_bool("is_infilled", True, "True to use infilling rather than generator.")
tf.flags.DEFINE_bool("is_prev_position_input", True, "True to input previous positions.")
tf.flags.DEFINE_bool("is_ornstein_uhlenbeck", True, "True for O-U exploration noise.")
tf.flags.DEFINE_bool("is_noise_decay", True, "Decay noise if true.")
tf.flags.DEFINE_float("ou_theta", 0.1, "Drift back to mean.")
tf.flags.DEFINE_float("ou_sigma", 0.2, "Size of random process.")
tf.flags.DEFINE_bool("is_rel_to_truth", False, "True to normalize losses using expected losses.")
tf.flags.DEFINE_bool("is_clipped_reward", True, "True to clip rewards.")
tf.flags.DEFINE_bool("is_clipped_critic", False, "True to clip critic predictions for actor training.")
tf.flags.DEFINE_float("over_edge_penalty", 0.05, "Penalty for action going over edge of image.")
tf.flags.DEFINE_bool("is_prioritized_replay", False, "True to prioritize the replay of difficult experiences.")
tf.flags.DEFINE_bool("is_biased_prioritized_replay", False, "Priority sampling without bias correction.")
tf.flags.DEFINE_bool("is_relative_to_spirals", False, "True to compare generator losses against losses for spirals.")
tf.flags.DEFINE_bool("is_self_competition", False, "Oh it is on. True to compete against past versions of itself.")
tf.flags.DEFINE_float("norm_generator_losses_decay", 0.999, "Divide generator losses by their running mean. Zero for no normalization.")
tf.flags.DEFINE_integer("start_iter", 0, "Starting iteration")
tf.flags.DEFINE_integer("train_iters", 500_000, "Training iterations")
tf.flags.DEFINE_integer("val_examples", 20_000, "Number of validation examples")
tf.flags.DEFINE_string("model_dir",
f"//ads.warwick.ac.uk/shared/HCSS6/Shared305/Microscopy/Jeffrey-Ede/models/recurrent_conv-1/{experiment_number}/",
"Working directory.")
tf.flags.DEFINE_string("data_file",
"//Desktop-sa1evjv/h/small_scans/96x96.npy",
"Datafile containing 19769 96x96 downsampled STEM crops.")
tf.flags.DEFINE_integer("report_freq", 10, "How often to print losses to the console.")
os.chdir(FLAGS.model_dir)
sys.path.insert(0, FLAGS.model_dir)
def norm_img(img, min=None, max=None, get_min_and_max=False):
    if min is None:
min = np.min(img)
    if max is None:
max = np.max(img)
if np.absolute(min-max) < 1.e-6:
img.fill(0.)
else:
a = 0.5*(min+max)
b = 0.5*(max-min)
img = (img-a) / b
if get_min_and_max:
return img.astype(np.float32), (min, max)
else:
return img.astype(np.float32)
def scale0to1(img):
"""Rescale image between 0 and 1"""
img = img.astype(np.float32)
min = np.min(img)
max = np.max(img)
if np.absolute(min-max) < 1.e-6:
img.fill(0.5)
else:
img = (img - min)/(max - min)
return img.astype(np.float32)
def disp(img):
#if len(img.shape) == 3:
# img = np.sum(img, axis=2)
cv2.namedWindow('CV_Window', cv2.WINDOW_NORMAL)
cv2.imshow('CV_Window', scale0to1(img))
cv2.waitKey(0)
return
def run_model(input_sequence, output_size):
"""Runs model on input sequence."""
access_config = {
"memory_size": FLAGS.memory_size,
"word_size": FLAGS.word_size,
"num_reads": FLAGS.num_read_heads,
"num_writes": FLAGS.num_write_heads,
}
controller_config = {
"hidden_size": FLAGS.hidden_size,
}
clip_value = FLAGS.clip_value
dnc_core = dnc.DNC(access_config, controller_config, output_size, clip_value)
initial_state = dnc_core.initial_state(FLAGS.batch_size)
output_sequence, _ = tf.nn.dynamic_rnn(
cell=dnc_core,
inputs=input_sequence,
time_major=True,
initial_state=initial_state)
return output_sequence
class RingBuffer(object):
def __init__(
self,
action_shape,
observation_shape,
full_scan_shape,
batch_size,
buffer_size=1000,
num_past_losses=None,
):
self.buffer_size = buffer_size
self.actions = np.zeros([buffer_size]+list(action_shape)[1:])
self.observations = np.zeros([buffer_size]+list(observation_shape)[1:])
self.full_scans = np.zeros([buffer_size]+list(full_scan_shape)[1:])
self.position = 0
self._batch_size = batch_size
if FLAGS.is_prioritized_replay or FLAGS.is_biased_prioritized_replay:
self.priorities = np.zeros([buffer_size])
self.indices = np.arange(buffer_size)
if FLAGS.is_self_competition:
self.past_losses = np.zeros([num_past_losses])
self.labels = np.zeros([buffer_size], np.int32)
def add(self, actions, observations, full_scans, labels=None):
i0 = self.position % self.buffer_size
num_before_cycle = min(self.buffer_size-i0, self._batch_size)
self.actions[i0:i0+num_before_cycle] = actions[:num_before_cycle]
self.observations[i0:i0+num_before_cycle] = observations[:num_before_cycle]
self.full_scans[i0:i0+num_before_cycle] = full_scans[:num_before_cycle]
num_remaining = self._batch_size - num_before_cycle
if num_remaining > 0:
self.actions[0:num_remaining] = actions[num_before_cycle:]
self.observations[:num_remaining] = observations[num_before_cycle:]
self.full_scans[:num_remaining] = full_scans[num_before_cycle:]
if FLAGS.is_prioritized_replay or FLAGS.is_biased_prioritized_replay:
if self.position:
mean_priority = np.sum(self.priorities) / min(self.position, self.buffer_size)
else:
mean_priority = 0.3
self.priorities[i0:i0+num_before_cycle] = mean_priority*np.ones([num_before_cycle])
if num_before_cycle < self._batch_size:
self.priorities[0:num_remaining] = mean_priority*np.ones([self._batch_size - num_before_cycle])
if FLAGS.is_self_competition:
self.labels[i0:i0+num_before_cycle] = labels[:num_before_cycle]
if num_remaining > 0:
self.labels[0:num_remaining] = labels[num_before_cycle:]
self.position += self._batch_size
def get(self):
limit = min(self.position, self.buffer_size)
if FLAGS.is_prioritized_replay:
sample_idxs = np.random.choice(
self.indices,
size=self._batch_size,
replace=False,
p=self.priorities/np.sum(self.priorities)
) #alpha=1
beta = 0.5 + 0.5*(FLAGS.train_iters - self.position)/FLAGS.train_iters
sampled_priority_weights = self.priorities[sample_idxs]**( -beta )
sampled_priority_weights /= np.max(sampled_priority_weights)
elif FLAGS.is_biased_prioritized_replay:
alpha = (FLAGS.train_iters - self.position)/FLAGS.train_iters
priorities = self.priorities**alpha
sample_idxs = np.random.choice(
self.indices,
size=self._batch_size,
replace=False,
                p=priorities/np.sum(priorities)
)
else:
sample_idxs = np.random.randint(0, limit, size=self._batch_size)
sampled_actions = np.stack([self.actions[i] for i in sample_idxs])
sampled_observations = np.stack([self.observations[i] for i in sample_idxs])
sampled_full_scans = np.stack([self.full_scans[i] for i in sample_idxs])
if FLAGS.is_prioritized_replay:
return sampled_actions, sampled_observations, sampled_full_scans, sample_idxs, sampled_priority_weights
elif FLAGS.is_biased_prioritized_replay:
return sampled_actions, sampled_observations, sampled_full_scans, sample_idxs
elif FLAGS.is_self_competition:
sampled_labels = np.stack([self.labels[i] for i in sample_idxs])
sampled_past_losses = np.stack([self.past_losses[i] for i in sampled_labels])
return sampled_actions, sampled_observations, sampled_full_scans, sampled_labels, sampled_past_losses
else:
return sampled_actions, sampled_observations, sampled_full_scans
def update_priorities(self, idxs, priorities):
"""For prioritized experience replay"""
self.priorities[idxs] = priorities
def update_past_losses(self, idxs, losses):
self.past_losses[idxs] = losses
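# Hedged usage sketch (illustration only; shapes follow the FLAGS defined above).
# Fills the buffer with one batch of zeros and samples a batch back out. With the
# default flags, get() returns (actions, observations, full_scans).
def _ring_buffer_example():
    buf = RingBuffer(
        action_shape=[FLAGS.batch_size, FLAGS.num_steps, FLAGS.num_actions],
        observation_shape=[FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size],
        full_scan_shape=[FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side, 1],
        batch_size=FLAGS.batch_size,
        buffer_size=FLAGS.replay_size,
        num_past_losses=1,
    )
    buf.add(
        np.zeros([FLAGS.batch_size, FLAGS.num_steps, FLAGS.num_actions]),
        np.zeros([FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size]),
        np.zeros([FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side, 1]),
    )
    return buf.get()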
class Agent(snt.AbstractModule):
def __init__(
self,
num_outputs,
name,
is_new=False,
noise_decay=None,
is_double_critic=False,
sampled_full_scans=None,
val_full_scans=None
):
super(Agent, self).__init__(name=name)
access_config = {
"memory_size": FLAGS.memory_size,
"word_size": FLAGS.word_size,
"num_reads": FLAGS.num_read_heads,
"num_writes": FLAGS.num_write_heads,
}
controller_config = {
"hidden_size": FLAGS.hidden_size,
"projection_size": FLAGS.projection_size or None,
}
clip_value = FLAGS.clip_value
with self._enter_variable_scope():
components = dnc.Components(access_config, controller_config, num_outputs)
self._dnc_core = dnc.DNC(components, num_outputs, clip_value, is_new=False, is_double_critic=is_double_critic)
if is_new:
self._dnc_core_new = dnc.DNC(
components,
num_outputs,
clip_value,
is_new=True,
noise_decay=noise_decay,
sampled_full_scans=sampled_full_scans,
is_noise=True
)
if not val_full_scans is None:
self._dnc_core_val = dnc.DNC(
components,
num_outputs,
clip_value,
is_new=True,
sampled_full_scans=val_full_scans
)
self._initial_state = self._dnc_core.initial_state(FLAGS.batch_size)
#self._action_embedder = snt.Linear(output_size=64)
#self._observation_embedder = snt.Linear(output_size=64)
def _build(self, observations, actions):
#Tiling here is a hack to make inputs the same size
num_tiles = 2 // (actions.get_shape().as_list()[-1] // FLAGS.num_actions)
tiled_actions = tf.tile(actions, [1, 1, num_tiles])
input_sequence = tf.concat([observations, tiled_actions], axis=-1)
output_sequence, _ = tf.nn.dynamic_rnn(
cell=self._dnc_core,
inputs=input_sequence,
time_major=False,
initial_state=self._initial_state
)
return output_sequence
def get_new_experience(self):
output_sequence, _ = tf.nn.dynamic_rnn(
cell=self._dnc_core_new,
inputs=tf.zeros([FLAGS.batch_size, FLAGS.num_steps, 1]),
time_major=False,
initial_state=self._initial_state
)
if hasattr(tf, 'ensure_shape'):
output_sequence = tf.ensure_shape(output_sequence, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size+FLAGS.num_actions])
else:
output_sequence = tf.reshape(output_sequence, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size+FLAGS.num_actions])
observations = output_sequence[:,:,:FLAGS.step_size]
actions = output_sequence[:,:,FLAGS.step_size:]
return observations, actions
def get_val_experience(self):
output_sequence, _ = tf.nn.dynamic_rnn(
cell=self._dnc_core_val,
inputs=tf.zeros([FLAGS.batch_size, FLAGS.num_steps, 1]),
time_major=False,
initial_state=self._initial_state
)
if hasattr(tf, 'ensure_shape'):
output_sequence = tf.ensure_shape(output_sequence, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size+FLAGS.num_actions])
else:
output_sequence = tf.reshape(output_sequence, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size+FLAGS.num_actions])
observations = output_sequence[:,:,:FLAGS.step_size]
actions = output_sequence[:,:,FLAGS.step_size:]
return observations, actions
def spectral_norm(w, iteration=1, in_place_updates=False):
"""Spectral normalization. It imposes Lipschitz continuity by constraining the
spectral norm (maximum singular value) of weight matrices.
Inputs:
w: Weight matrix to spectrally normalize.
iteration: Number of times to apply the power iteration method to
enforce spectral norm.
Returns:
Weight matrix with spectral normalization control dependencies.
"""
w0 = w
w_shape = w.shape.as_list()
w = tf.reshape(w, [-1, w_shape[-1]])
u = tf.get_variable(auto_name("u"),
[1, w_shape[-1]],
initializer=tf.random_normal_initializer(mean=0.,stddev=0.03),
trainable=False)
u_hat = u
v_hat = None
for i in range(iteration):
"""
power iteration
Usually iteration = 1 will be enough
"""
v_ = tf.matmul(u_hat, tf.transpose(w))
v_hat = tf.nn.l2_normalize(v_)
u_ = tf.matmul(v_hat, w)
u_hat = tf.nn.l2_normalize(u_)
u_hat = tf.stop_gradient(u_hat)
v_hat = tf.stop_gradient(v_hat)
sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
if in_place_updates:
        #In-place control dependencies bottleneck training
with tf.control_dependencies([u.assign(u_hat)]):
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
else:
#Execute control dependency in parallel with other update ops
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u.assign(u_hat))
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
return w_norm
def spectral_norm_conv(
inputs,
num_outputs,
stride=1,
kernel_size=3,
padding='VALID',
biases_initializer=tf.zeros_initializer()
):
"""Convolutional layer with spectrally normalized weights."""
w = tf.get_variable(auto_name("kernel"), shape=[kernel_size, kernel_size, inputs.get_shape()[-1], num_outputs])
x = tf.nn.conv2d(input=inputs, filter=spectral_norm(w),
strides=[1, stride, stride, 1], padding=padding)
if biases_initializer != None:
b = tf.get_variable(auto_name("bias"), [num_outputs], initializer=biases_initializer)
x = tf.nn.bias_add(x, b)
return x
def conv(
inputs,
num_outputs,
kernel_size=3,
stride=1,
padding='SAME',
data_format="NHWC",
actv_fn=tf.nn.relu,
is_batch_norm=True,
is_spectral_norm=False,
is_depthwise_sep=False,
extra_batch_norm=False,
biases_initializer=tf.zeros_initializer,
weights_initializer=initializers.xavier_initializer,
transpose=False,
is_training=True
):
"""Convenience function for a strided convolutional or transpositional
convolutional layer.
Intro: https://towardsdatascience.com/intuitively-understanding-convolutions-for-deep-learning-1f6f42faee1.
The order is: Activation (Optional) -> Batch Normalization (optional) -> Convolutions.
Inputs:
inputs: Tensor of shape `[batch_size, height, width, channels]` to apply
convolutions to.
num_outputs: Number of feature channels to output.
kernel_size: Side lenth of square convolutional kernels.
stride: Distance between convolutional kernel applications.
padding: 'SAME' for zero padding where kernels go over the edge.
'VALID' to discard features where kernels go over the edge.
activ_fn: non-linearity to apply after summing convolutions.
is_batch_norm: If True, add batch normalization after activation.
is_spectral_norm: If True, spectrally normalize weights.
is_depthwise_sep: If True, depthwise separate convolutions into depthwise
spatial convolutions, then 1x1 pointwise convolutions.
extra_batch_norm: If True and convolutions are depthwise separable, implement
batch normalization between depthwise and pointwise convolutions.
biases_initializer: Function to initialize biases with. None for no biases.
weights_initializer: Function to initialize weights with. None for no weights.
transpose: If True, apply convolutional layer transpositionally to the
described convolutional layer.
is_training: If True, use training specific operations e.g. batch normalization
update ops.
Returns:
Output of convolutional layer.
"""
x = inputs
num_spatial_dims = len(x.get_shape().as_list()) - 2
if biases_initializer == None:
biases_initializer = lambda: None
if weights_initializer == None:
weights_initializer = lambda: None
if not is_spectral_norm:
#Convolutional layer without spectral normalization
if transpose:
stride0 = 1
if type(stride) == list or is_depthwise_sep or stride % 1:
#Apparently there is no implementation of transpositional
#depthwise separable convolutions, so bilinearly upsample then
#depthwise separably convolute
if kernel_size != 1:
x = tf.image.resize_bilinear(
images=x,
size=stride if type(stride) == list else \
[int(stride*d) for d in x.get_shape().as_list()[1:3]],
align_corners=True
)
stride0 = stride
stride = 1
if type(stride0) == list and not is_depthwise_sep:
layer = tf.contrib.layers.conv2d
elif is_depthwise_sep:
layer = tf.contrib.layers.separable_conv2d
else:
layer = tf.contrib.layers.conv2d_transpose
x = layer(
inputs=x,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
padding=padding,
data_format=data_format,
activation_fn=None,
weights_initializer=weights_initializer(),
biases_initializer=biases_initializer())
if type(stride0) != list:
if (is_depthwise_sep or stride0 % 1) and kernel_size == 1:
x = tf.image.resize_bilinear(
images=x,
size=[int(stride0*d) for d in x.get_shape().as_list()[1:3]],
align_corners=True
)
else:
if num_spatial_dims == 1:
layer = tf.contrib.layers.conv1d
elif num_spatial_dims == 2:
if is_depthwise_sep:
layer = tf.contrib.layers.separable_conv2d
else:
layer = tf.contrib.layers.conv2d
x = layer(
inputs=x,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
padding=padding,
data_format=data_format,
activation_fn=None,
weights_initializer=weights_initializer(),
biases_initializer=biases_initializer())
else:
#Weights are spectrally normalized
x = spectral_norm_conv(
inputs=x,
num_outputs=num_outputs,
stride=stride,
kernel_size=kernel_size,
padding=padding,
biases_initializer=biases_initializer())
if actv_fn:
x = actv_fn(x)
if is_batch_norm and FLAGS.use_batch_norm:
x = tf.contrib.layers.batch_norm(x, is_training=is_training)
return x
def residual_block(inputs, skip=3, is_training=True):
"""Residual block whre the input is added to the signal after skipping some
layers. This architecture is good for learning purturbative transformations.
If no layer is provided, it defaults to a convolutional layer.
Deep residual learning: https://arxiv.org/abs/1512.03385.
Inputs:
inputs: Tensor to apply residual block to. Outputs of every layer will
have the same shape.
skip: Number of layers to skip before adding input to layer output.
layer: Layer to apply in residual block. Defaults to convolutional
layer. Custom layers must support `inputs`, `num_outputs` and `is_training`
arguments.
Returns:
Final output of residual block.
"""
x = x0 = inputs
    def layer(inputs, num_outputs, is_training, is_batch_norm, actv_fn):
        x = conv(
            inputs=inputs,
            num_outputs=num_outputs,
            is_batch_norm=is_batch_norm,
            is_training=is_training,
            actv_fn=actv_fn
        )
        return x
for i in range(skip):
x = layer(
inputs=x,
num_outputs=x.get_shape()[-1],
is_training=is_training,
is_batch_norm=i < skip - 1,
actv_fn=tf.nn.relu
)
x += x0
if FLAGS.use_batch_norm:
x = tf.contrib.layers.batch_norm(x, is_training=is_training)
return x
class Generator(snt.AbstractModule):
def __init__(self,
name,
is_training
):
super(Generator, self).__init__(name=name)
self._is_training = is_training
def _build(self, inputs):
x = inputs
std_actv = tf.nn.relu#lambda x: tf.nn.leaky_relu(x, alpha=0.1)
is_training = self._is_training
is_depthwise_sep = False
base_size = 32
#x = tf.contrib.layers.batch_norm(x, is_training=is_training)
x = conv(
x,
num_outputs=32,
is_training=is_training,
actv_fn=std_actv
)
#Encoder
for i in range(1, 3):
x = conv(
x,
num_outputs=base_size*2**i,
stride=2,
is_depthwise_sep=is_depthwise_sep,
is_training=is_training,
actv_fn=std_actv
)
if i == 2:
low_level = x
#Residual blocks
for _ in range(5): #Number of blocks
x = residual_block(
x,
skip=3,
is_training=is_training
)
#Decoder
for i in range(1, -1, -1):
x = conv(
x,
num_outputs=base_size*2**i,
stride=2,
is_depthwise_sep=is_depthwise_sep,
is_training=is_training,
transpose=True,
actv_fn=std_actv
)
x = conv(
x,
num_outputs=base_size,
is_depthwise_sep=is_depthwise_sep,
is_training=is_training
)
#Project features onto output image
x = conv(
x,
num_outputs=1,
biases_initializer=None,
actv_fn=None,
is_batch_norm=False,
is_training=is_training
)
return x
def construct_partial_scans(actions, observations):
"""
actions: [batch_size, num_steps, 2]
observations: [batch_size, num_steps, 10]
"""
#Last action unused and the first action is always the same
actions = np.concatenate((np.ones([FLAGS.batch_size, 1, 2]), actions[:,:-1,:]), axis=1)
starts = 0.5*FLAGS.img_side + FLAGS.step_size*(np.cumsum(actions, axis=1) - actions)
#starts = np.zeros(actions.shape)
#starts[:,0,:] = actions[:,0,:]
#for i in range(1, FLAGS.num_steps):
# starts[:,i,:] = actions[:,i,:] + starts[:,i-1,:]
#starts -= actions
#starts *= FLAGS.step_size
#starts += 0.5*FLAGS.img_side
positions = np.stack([starts + i*actions for i in range(FLAGS.step_size)], axis=-2)
x = np.minimum(np.maximum(positions, 0), FLAGS.img_side-1)
indices = []
for j in range(FLAGS.batch_size):
for k in range(FLAGS.num_steps):
for i in range(FLAGS.step_size):
indices.append( [j, int(x[j,k,i,0]), int(x[j,k,i,1])] )
indices = np.array(indices)
indices = tuple([indices[:,i] for i in range(3)])
partial_scans = np.zeros([FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side])
masks = np.zeros([FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side])
partial_scans[indices] = observations.reshape([-1])
masks[indices] = 1
partial_scans /= np.maximum(masks, 1)
masks = np.minimum(masks, 1)
partial_scans = np.stack([partial_scans, masks], axis=-1)
return partial_scans
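# Hedged usage sketch (illustration only): with the default flags, random unit-length
# actions and zero observations give partial scans of shape
# [batch_size, img_side, img_side, 2] (pixel values and a binary visit mask).
def _partial_scan_example():
    actions = np.random.randn(FLAGS.batch_size, FLAGS.num_steps, 2)
    actions /= np.linalg.norm(actions, axis=-1, keepdims=True)
    observations = np.zeros([FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size])
    return construct_partial_scans(actions, observations)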
def target_update_ops(target_network, network, decay=FLAGS.target_decay, l2_norm=False):
t_vars = target_network.variables
v_vars = network.variables
update_ops = []
for t, v in zip(t_vars, v_vars):
if FLAGS.is_generator_batch_norm_tracked or not "BatchNorm" in t.name: #Don't track batch normalization
if l2_norm:
v_new = (1-FLAGS.L2_norm)*v
op = v.assign(v_new)
update_ops.append(op)
op = t.assign(decay*t + (1-decay)*v_new)
update_ops.append(op)
else:
op = t.assign(decay*t + (1-decay)*v)
update_ops.append(op)
print(t.name.replace("target_", "") == v.name, t.name.replace("target_", ""), v.name)
return update_ops
def load_data(shape):
data_ph = tf.placeholder(tf.float32, shape=list(shape))
ds = tf.data.Dataset.from_tensor_slices(tuple([data_ph]))
if FLAGS.is_self_competition:
labels = tf.data.Dataset.range(0, list(shape)[0])
ds = tf.data.Dataset.zip((ds, labels))
ds = ds.shuffle(buffer_size=FLAGS.shuffle_size)
ds = ds.repeat()
ds = ds.batch(FLAGS.batch_size)
ds = ds.prefetch(FLAGS.prefetch_size)
iterator = ds.make_initializable_iterator()
return data_ph, iterator
@tf.custom_gradient
def overwrite_grads(x, y):
print("OG", x, y)
def grad(dy):
return y, None
return x, grad
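# Hedged usage sketch (illustration only): overwrite_grads forwards x unchanged but
# replaces dLoss/dx with y, so externally computed gradients can be injected.
def _overwrite_grads_example():
    x = tf.constant([1.0, 2.0])
    forced_grad = tf.constant([10.0, 20.0])
    out = overwrite_grads(x, forced_grad)
    [dx] = tf.gradients(tf.reduce_sum(out), [x])
    return dx  # evaluates to [10., 20.] rather than [1., 1.]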
def infill(data, mask):
return data[tuple(nd.distance_transform_edt(np.equal(mask, 0), return_distances=False, return_indices=True))]
#def infill(data, mask):
# x = np.zeros(data.shape)
# c = (cv2.GaussianBlur(mask.astype(np.float32), (7, 7), 3.5, None, 3.5) > 0).astype(np.float32)
# truth = data[tuple(nd.distance_transform_edt(np.equal(mask, 0), return_distances=False, return_indices=True))]
# x = (truth*c).astype(np.float32)
# return x
def fill(input):
return np.expand_dims(np.stack([infill(img, mask) for img, mask in zip(input[:,:,:,0], input[:,:,:,1])]), -1)
def flip_rotate(img, choice):
"""Applies a random flip || rotation to the image, possibly leaving it unchanged"""
if choice == 0:
return img
elif choice == 1:
return np.rot90(img, 1)
elif choice == 2:
return np.rot90(img, 2)
elif choice == 3:
return np.rot90(img, 3)
elif choice == 4:
return np.flip(img, 0)
elif choice == 5:
return np.flip(img, 1)
elif choice == 6:
return np.flip(np.rot90(img, 1), 0)
else:
return np.flip(np.rot90(img, 1), 1)
def draw_spiral(coverage, side, num_steps=10_000):
"""Duration spent at each location as a particle falls in a magnetic
field. Trajectory chosen so that the duration density is (approx.)
evenly distributed. Trajectory is calculated stepwise.
Args:
coverage: Average amount of time spent at a random pixel
side: Sidelength of square image that the motion is
inscribed on.
Returns:
A spiral
"""
#Use size that is larger than the image
size = int(np.ceil(np.sqrt(2)*side))
#Maximum radius of motion
R = size/2
#Get constant in equation of motion
k = 1/ (2*np.pi*coverage)
#Maximum theta that is in the image
theta_max = R / k
#Equispaced steps
theta = np.arange(0, theta_max, theta_max/num_steps)
r = k * theta
#Convert to cartesian, with (0,0) at the center of the image
x = r*np.cos(theta) + R
y = r*np.sin(theta) + R
#Draw spiral
z = np.empty((x.size + y.size,), dtype=x.dtype)
z[0::2] = x
z[1::2] = y
z = list(z)
img = Image.new('F', (size,size), "black")
img_draw = ImageDraw.Draw(img)
img_draw = img_draw.line(z)
img = np.asarray(img)
img = img[size//2-side//2:size//2+side//2+side%2,
size//2-side//2:size//2+side//2+side%2]
return img
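# Hedged usage sketch (illustration only): draws a spiral whose coverage matches the
# default scan (num_steps * step_size probe positions over an img_side x img_side image).
def _spiral_example():
    coverage = FLAGS.num_steps * FLAGS.step_size / FLAGS.img_side ** 2
    return draw_spiral(coverage=coverage, side=FLAGS.img_side)  # (img_side, img_side) array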
def main(unused_argv):
"""Trains the DNC and periodically reports the loss."""
graph = tf.get_default_graph()
action_shape = [FLAGS.batch_size, FLAGS.num_steps, FLAGS.num_actions]
observation_shape = [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size]
full_scan_shape = [FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side, 1]
partial_scan_shape = [FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side, 2]
images = np.load(FLAGS.data_file)
images[np.logical_not(np.isfinite(images))] = 0
images = np.stack([norm_img(x) for x in images])
train_images = images[:int(0.8*len(images))]
val_images = images[int(0.8*len(images)):]
train_data_ph, train_iterator = load_data(train_images.shape)
val_data_ph, val_iterator = load_data(val_images.shape)
if FLAGS.is_self_competition:
(full_scans, labels) = train_iterator.get_next()
(val_full_scans, val_labels) = val_iterator.get_next()
full_scans = full_scans[0]
val_full_scans = val_full_scans[0]
else:
(full_scans, ) = train_iterator.get_next()
(val_full_scans, ) = val_iterator.get_next()
if hasattr(tf, 'ensure_shape'):
full_scans = tf.ensure_shape(full_scans, full_scan_shape)
val_full_scans = tf.ensure_shape(val_full_scans, full_scan_shape)
else:
full_scans = tf.reshape(full_scans, full_scan_shape)
        val_full_scans = tf.reshape(val_full_scans, full_scan_shape)
replay = RingBuffer(
action_shape=action_shape,
observation_shape=observation_shape,
full_scan_shape=full_scan_shape,
batch_size=FLAGS.batch_size,
buffer_size=FLAGS.replay_size,
num_past_losses=train_images.shape[0],
)
replay_actions_ph = tf.placeholder(tf.float32, shape=action_shape, name="replay_action")
replay_observations_ph = tf.placeholder(tf.float32, shape=observation_shape, name="replay_observation")
replay_full_scans_ph = tf.placeholder(tf.float32, shape=full_scan_shape, name="replay_full_scan")
partial_scans_ph = tf.placeholder(tf.float32, shape=partial_scan_shape, name="replay_partial_scan")
is_training_ph = tf.placeholder(tf.bool, name="is_training")
if FLAGS.is_noise_decay:
noise_decay_ph = tf.placeholder(tf.float32, shape=(), name="noise_decay")
else:
noise_decay_ph = None
if FLAGS.supervision_iters:
supervision_ph = tf.placeholder(tf.float32, name="supervision")
else:
supervision_ph = FLAGS.supervision
if FLAGS.is_prioritized_replay:
priority_weights_ph = tf.placeholder(tf.float32, shape=[FLAGS.batch_size], name="priority_weights")
if FLAGS.is_self_competition:
past_losses_ph = tf.placeholder(tf.float32, shape=[FLAGS.batch_size], name="past_losses")
batch_size = FLAGS.batch_size
if FLAGS.is_relative_to_spirals:
coverage = FLAGS.num_steps*FLAGS.step_size/FLAGS.img_side**2
spiral = draw_spiral(coverage=coverage, side=FLAGS.img_side)
ys = [1/i**2 for i in range(9, 2, -1)]
xs = [np.sum(draw_spiral(coverage=c, side=FLAGS.img_side)) / FLAGS.img_side**2 for c in ys]
        ub_idx = next(i for i, x in enumerate(xs) if x > coverage)
        lb = xs[ub_idx-1]
        ub = xs[ub_idx]
        # NOTE: this branch (FLAGS.is_relative_to_spirals) expects spirals X and Y
        # bounding the target coverage; they are not defined in this file.
        input_coverage = ( (coverage - lb)*X + (ub - coverage)*Y ) / (lb - ub)
actor = Agent(
num_outputs=FLAGS.num_actions,
is_new=True,
noise_decay=noise_decay_ph,
sampled_full_scans=full_scans,
val_full_scans=val_full_scans,
name="actor"
)
target_actor = Agent(num_outputs=FLAGS.num_actions, name="target_actor")
critic = Agent(num_outputs=1, is_double_critic=True, name="critic")
target_critic = Agent(num_outputs=1, is_double_critic=True, name="target_critic")
new_observations, new_actions = actor.get_new_experience()
#Last actions are unused
replay_observations = replay_observations_ph[:,:-1,:]
replay_actions = replay_actions_ph[:,:-1,:]
#First action must be added for actors (not critics)
start_actions = tf.ones([FLAGS.batch_size, 1, FLAGS.num_actions])/np.sqrt(2)
started_replay_actions = tf.concat([start_actions, replay_actions[:,:-1,:]], axis=1)
actions = actor(replay_observations, started_replay_actions)
if FLAGS.is_target_actor:
target_actions = target_actor(replay_observations, started_replay_actions)
elif FLAGS.supervision != 1:
target_actions = tf.stop_gradient(actions)
#The last action is never used, and the first action is diagonally north-east
#Shifting because network expect actions from previous steps to be inputted
#start_actions = tf.ones([FLAGS.batch_size, 1, FLAGS.num_actions])/np.sqrt(2)
#actions = tf.concat([start_actions, actions[:, :-1, :]], axis=1)
#target_actions = tf.concat([start_actions, target_actions[:, :-1, :]], axis=1)
actor_actions = tf.concat([replay_actions, actions], axis=-1)
qs = critic(replay_observations, actor_actions)
critic_qs = qs[:,:,:1]
actor_qs = qs[:,:,1:]
if FLAGS.is_target_critic:
target_actor_actions = tf.concat([replay_actions, target_actions], axis=-1)
target_actor_qs = target_critic(replay_observations, target_actor_actions)[:,:,1:]
target_actor_qs = tf.stop_gradient(target_actor_qs)
elif FLAGS.supervision != 1:
target_actor_qs = actor_qs#critic(replay_observations, target_actor_actions)[:,:,1:]
target_actor_qs = tf.stop_gradient(target_actor_qs)
if not FLAGS.is_infilled:
generator = Generator(name="generator", is_training=is_training_ph)
generation = generator(partial_scans_ph)
else:
generation = tf.py_func(fill, [partial_scans_ph], tf.float32)
if hasattr(tf, 'ensure_shape'):
generation = tf.ensure_shape(generation, full_scan_shape)
else:
generation = tf.reshape(generation, full_scan_shape)
generator_losses = 10*tf.reduce_mean( (generation - replay_full_scans_ph)**2, axis=[1,2,3] )
if FLAGS.is_target_generator and not FLAGS.is_infilled:
target_generator = Generator(name="target_generator", is_training=is_training_ph)
target_generation = target_generator(partial_scans_ph)
target_generator_losses = 10*tf.reduce_mean( (target_generation - replay_full_scans_ph)**2, axis=[1,2,3] )
losses = target_generator_losses #For RL
else:
losses = generator_losses #For RL
val_observations, val_actions = actor.get_val_experience()
unclipped_losses = losses
if FLAGS.is_positive_qs and (FLAGS.is_target_critic or FLAGS.supervision != 1):
target_actor_qs = tf.nn.relu(target_actor_qs)
if FLAGS.norm_generator_losses_decay:
mu = tf.get_variable(name="loss_mean", initializer=tf.constant(1., dtype=tf.float32))
mu_op = mu.assign(FLAGS.norm_generator_losses_decay*mu+(1-FLAGS.norm_generator_losses_decay)*tf.reduce_mean(losses))
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mu_op)
losses /= tf.stop_gradient(mu)
if FLAGS.is_clipped_reward:
losses = alrc(losses)
if FLAGS.is_self_competition:
self_competition_losses = tf.where(
past_losses_ph > unclipped_losses,
tf.ones([FLAGS.batch_size]),
tf.zeros([FLAGS.batch_size])
)
losses += self_competition_losses
if FLAGS.over_edge_penalty:
positions = (
0.5 + #middle of image
FLAGS.step_size/(np.sqrt(2)*FLAGS.img_side) + #First step
(FLAGS.step_size/FLAGS.img_side)*tf.cumsum(replay_actions_ph[:,:-1,:], axis=1) # Actions
)
#new_positions = (
# positions - replay_actions_ph[:,:-1,:] + #Go back one action
# (FLAGS.step_size/FLAGS.img_side)*actions #New actions
# )
is_over_edge = tf.logical_or(tf.greater(positions, 1), tf.less(positions, 0))
is_over_edge = tf.logical_or(is_over_edge[:,:,0], is_over_edge[:,:,1])
over_edge_losses = tf.where(
is_over_edge,
FLAGS.over_edge_penalty*tf.ones(is_over_edge.get_shape()),
tf.zeros(is_over_edge.get_shape())
)
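        #Accumulate the penalty so every step after the probe leaves the image is penalized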
over_edge_losses = tf.cumsum(over_edge_losses, axis=1)
if FLAGS.supervision > 0 or FLAGS.is_advantage_actor_critic:
supervised_losses = []
for i in reversed(range(FLAGS.num_steps-1)):
if i == FLAGS.num_steps-1 - 1: #Extra -1 as idxs start from 0
step_loss = tf.expand_dims(losses, axis=-1)
else:
step_loss = FLAGS.gamma*step_loss
if FLAGS.over_edge_penalty:
step_loss += over_edge_losses[:,i:i+1]
supervised_losses.append(step_loss)
supervised_losses = tf.concat(supervised_losses, axis=-1)
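        #Each column is a per-step supervised target: the final loss discounted back through time plus any over-edge penalties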
if FLAGS.supervision < 1:
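            #Interpolate between the supervised (discounted-loss) targets and bootstrapped Bellman targets from the target critic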
bellman_losses = tf.concat(
[FLAGS.gamma*target_actor_qs[:,1:,0], tf.expand_dims(losses, axis=-1)],
axis=-1
)
if FLAGS.over_edge_penalty:
bellman_losses += over_edge_losses
bellman_losses = supervision_ph * supervised_losses + (1 - supervision_ph) * bellman_losses
else:
bellman_losses = supervised_losses
if FLAGS.is_prioritized_replay:
unweighted_critic_losses = tf.reduce_mean( ( critic_qs[:,:,0] - bellman_losses )**2, axis=-1 )
critic_losses = tf.reduce_mean( priority_weights_ph*unweighted_critic_losses )
else:
critic_losses = tf.reduce_mean( ( critic_qs[:,:,0] - bellman_losses )**2 )
if FLAGS.is_biased_prioritized_replay:
unweighted_critic_losses = tf.reduce_mean( ( critic_qs[:,:,0] - bellman_losses )**2, axis=-1 )
if FLAGS.is_clipped_critic:
actor_qs = alrc(actor_qs)
if FLAGS.is_advantage_actor_critic:
actor_losses = tf.reduce_mean( supervised_losses - actor_qs[:,:,0] )
else:
actor_losses = tf.reduce_mean( actor_qs )
#critic_losses /= FLAGS.num_steps
#actor_losses /= FLAGS.num_steps
#Outputs to provide feedback for the developer
info = {
"actor_losses": actor_losses,
"critic_losses": critic_losses,
"generator_losses": tf.reduce_mean(generator_losses)
}
if FLAGS.is_prioritized_replay or FLAGS.is_biased_prioritized_replay:
info.update( {"priority_weights": unweighted_critic_losses} )
if FLAGS.is_self_competition:
info.update( {"unclipped_losses": unclipped_losses} )
outputs = {
"generation": generation[0,:,:,0],
"truth": replay_full_scans_ph[0,:,:,0],
"input": partial_scans_ph[0,:,:,0]
}
history_op = {
"actions": new_actions,
"observations": new_observations,
"full_scans": full_scans
}
if FLAGS.is_self_competition:
history_op.update( {"labels": labels} )
##Modify actor gradients
#[actor_grads] = tf.gradients(actor_losses, replay_actions_ph)
#actor_losses = overwrite_grads(actions, actor_grads)
start_iter = FLAGS.start_iter
train_iters = FLAGS.train_iters
config = tf.ConfigProto()
config.gpu_options.allow_growth = True #Only use required GPU memory
#config.gpu_options.force_gpu_compatible = True
model_dir = FLAGS.model_dir
log_filepath = model_dir + "log.txt"
save_period = 1; save_period *= 3600
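    #Save a checkpoint once the save period (1 hour, expressed in seconds) has elapsed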
log_file = open(log_filepath, "a")
with tf.Session(config=config) as sess:
        if FLAGS.is_target_actor:
            if FLAGS.update_frequency <= 1:
                update_target_actor_op = target_update_ops(target_actor, actor)
            else:
                update_target_actor_op = []
            initial_update_target_actor_op = target_update_ops(target_actor, actor, decay=0)
        else:
            update_target_actor_op = []
            initial_update_target_actor_op = []
        if FLAGS.is_target_critic:
            if FLAGS.update_frequency <= 1:
                update_target_critic_op = target_update_ops(target_critic, critic)
            else:
                update_target_critic_op = []
            initial_update_target_critic_op = target_update_ops(target_critic, critic, decay=0)
        else:
            update_target_critic_op = []
            initial_update_target_critic_op = []
if FLAGS.is_target_generator and not FLAGS.is_infilled:
if FLAGS.update_frequency <= 1:
update_target_generator_op = target_update_ops(target_generator, generator, l2_norm=FLAGS.L2_norm)
else:
update_target_generator_op = []
initial_update_target_generator_op = target_update_ops(target_generator, generator, decay=0)
else:
update_target_generator_op = []
initial_update_target_generator_op = []
initial_update_target_network_ops = (
initial_update_target_actor_op +
initial_update_target_critic_op +
initial_update_target_generator_op
)
actor_lr = FLAGS.actor_lr
critic_lr = FLAGS.critic_lr
if FLAGS.is_cyclic_generator_learning_rate:
generator_lr = tf.placeholder(tf.float32, name="generator_lr")
else:
generator_lr = FLAGS.generator_lr
#critic_rep = (critic_qs[:,:,0] - bellman_losses)**2
#ps = [critic_qs[0,:,0], target_actor_qs[0,:,0], bellman_losses[0], critic_rep[0]]
#ps = [critic.trainable_variables[0], target_critic.trainable_variables[0]]
ps = []
#p = bellman_losses[0]
#p = generation[0,:,:,0]
train_op_dependencies = [tf.print(p) for p in ps] + tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if not FLAGS.update_frequency:
update_target_network_ops = (
update_target_actor_op +
update_target_critic_op +
update_target_generator_op
)
train_op_dependencies += update_target_network_ops
train_ops = []
with tf.control_dependencies(train_op_dependencies):
actor_train_op = tf.train.AdamOptimizer(learning_rate=actor_lr).minimize(
loss=actor_losses, var_list=actor.trainable_variables)
critic_train_op = tf.train.AdamOptimizer(learning_rate=critic_lr).minimize(
loss=critic_losses, var_list=critic.trainable_variables)
train_ops += [actor_train_op, critic_train_op]
if not FLAGS.is_infilled:
generator_train_op = tf.train.AdamOptimizer(learning_rate=generator_lr).minimize(
loss=generator_losses, var_list=generator.trainable_variables)
train_ops.append(generator_train_op)
else:
generator_train_op = tf.no_op()
feed_dict = {}
sess.run(tf.global_variables_initializer(), feed_dict=feed_dict)
saver = tf.train.Saver(max_to_keep=1)
noteable_saver = tf.train.Saver(max_to_keep=2)
if start_iter:
saver.restore(
sess,
tf.train.latest_checkpoint(model_dir+"model/")
)
else:
if len(initial_update_target_network_ops):
sess.run(initial_update_target_network_ops, feed_dict=feed_dict)
sess.run(train_iterator.initializer, feed_dict={train_data_ph: train_images})
sess.run(val_iterator.initializer, feed_dict={val_data_ph: val_images})
time0 = time.time()
for iter in range(start_iter, train_iters):
if iter < FLAGS.replay_size or not iter % FLAGS.avg_replays:
#Add experiences to the replay
feed_dict = {is_training_ph: np.bool(True)}
if FLAGS.is_noise_decay:
noise_decay = np.float32( (train_iters - iter)/train_iters )
feed_dict.update( {noise_decay_ph: noise_decay} )
history = sess.run(
history_op,
feed_dict=feed_dict)
replay.add(**history)
#Sample experiences from the replay
if FLAGS.is_prioritized_replay:
sampled_actions, sampled_observations, replay_sampled_full_scans, sample_idxs, sampled_priority_weights = replay.get()
elif FLAGS.is_biased_prioritized_replay:
sampled_actions, sampled_observations, replay_sampled_full_scans, sample_idxs = replay.get()
elif FLAGS.is_self_competition:
sampled_actions, sampled_observations, replay_sampled_full_scans, sampled_labels, sampled_past_losses = replay.get()
else:
sampled_actions, sampled_observations, replay_sampled_full_scans = replay.get()
replay_partial_scans = construct_partial_scans(sampled_actions, sampled_observations)
if not FLAGS.is_infilled:
sampled_full_scans = []
partial_scans = []
spiral_scans = []
for sampled_full_scan, partial_scan in zip(replay_sampled_full_scans, replay_partial_scans):
c = np.random.randint(0, 8)
sampled_full_scans.append( flip_rotate(sampled_full_scan, c) )
partial_scans.append( flip_rotate(partial_scan, c) )
if FLAGS.is_relative_to_spirals:
spiral_scan = spiral * sampled_full_scan
spiral_scans.append( flip_rotate(spiral_scan, c) )
sampled_full_scans = np.stack( sampled_full_scans )
partial_scans = np.stack( partial_scans )
else:
sampled_full_scans = replay_sampled_full_scans
partial_scans = replay_partial_scans
feed_dict = {
replay_actions_ph: sampled_actions,
replay_observations_ph: sampled_observations,
replay_full_scans_ph: sampled_full_scans,
partial_scans_ph: partial_scans,
is_training_ph: np.bool(True)
}
if FLAGS.is_prioritized_replay:
feed_dict.update({priority_weights_ph: sampled_priority_weights})
if FLAGS.supervision_iters:
supervision = FLAGS.supervision_start + min(iter, FLAGS.supervision_iters)*(FLAGS.supervision_end-FLAGS.supervision_start) / FLAGS.supervision_iters
feed_dict.update( {supervision_ph: supervision } )
if FLAGS.is_self_competition:
feed_dict.update( {past_losses_ph: sampled_past_losses} )
if FLAGS.is_cyclic_generator_learning_rate:
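                #Cyclic learning rate: a sawtooth modulation under an exponentially decaying envelope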
envelope = 0.75**(iter/(train_iters//5))
cycle_half = train_iters//10
cycle_full = 2*cycle_half
cyclic_sawtooth = (max(iter%cycle_full, cycle_half) - min(iter%cycle_full - cycle_half, 0))/cycle_half
                cyclic_lr = envelope*(0.1 + 0.9*cyclic_sawtooth)
feed_dict.update( {generator_lr: np.float32(cyclic_lr)} )
#Train
if iter in [0, 100, 500] or not iter % 25_000 or (0 <= iter < 10_000 and not iter % 1000) or iter == start_iter:
_, step_info, step_outputs = sess.run([train_ops, info, outputs], feed_dict=feed_dict)
for k in step_outputs:
save_loc = FLAGS.model_dir + k + str(iter)+".tif"
Image.fromarray( (0.5*step_outputs[k]+0.5).astype(np.float32) ).save( save_loc )
else:
_, step_info = sess.run([train_ops, info], feed_dict=feed_dict)
if FLAGS.update_frequency and not iter % FLAGS.update_frequency:
sess.run(initial_update_target_network_ops, feed_dict=feed_dict)
if FLAGS.is_prioritized_replay:
replay.update_priorities(sample_idxs, step_info["priority_weights"])
if FLAGS.is_self_competition:
replay.update_past_losses(sampled_labels, step_info["unclipped_losses"])
output = f"Iter: {iter}"
for k in step_info:
if k not in ["priority_weights", "unclipped_losses"]:
output += f", {k}: {step_info[k]}"
if not iter % FLAGS.report_freq:
print(output)
#if "nan" in output:
# saver.restore(
# sess,
# tf.train.latest_checkpoint(model_dir+"model/")
# )
try:
log_file.write(output)
except:
while True:
print("Issue writing log.")
time.sleep(1)
log_file = open(log_filepath, "a")
try:
log_file.write(output)
break
except:
continue
if iter in [train_iters//2-1, train_iters-1]:
noteable_saver.save(sess, save_path=model_dir+"noteable_ckpt/model", global_step=iter)
time0 = time.time()
start_iter = iter
elif time.time() >= time0 + save_period:
saver.save(sess, save_path=model_dir+"model/model", global_step=iter)
time0 = time.time()
val_losses_list = []
for iter in range(0, FLAGS.val_examples//FLAGS.batch_size):
#Add experiences to the replay
feed_dict = {is_training_ph: np.bool(True)}
sampled_actions, sampled_observations, sampled_full_scans = sess.run(
[val_actions, val_observations, val_full_scans],
feed_dict=feed_dict
)
partial_scans = construct_partial_scans(sampled_actions, sampled_observations)
feed_dict = {
replay_actions_ph: sampled_actions,
replay_observations_ph: sampled_observations,
replay_full_scans_ph: sampled_full_scans,
partial_scans_ph: partial_scans,
is_training_ph: np.bool(False)
}
val_losses = sess.run( unclipped_losses, feed_dict=feed_dict )
val_losses_list.append( val_losses )
val_losses = np.concatenate(tuple(val_losses_list), axis=0)
np.save(model_dir + "val_losses.npy", val_losses)
if __name__ == "__main__":
tf.app.run() |
the-stack_0_4665 | """
Reaction Result
===============
"""
class ReactionResult:
"""
The result of a reaction.
"""
__slots__ = [
'_new_atoms',
'_new_bonds',
'_deleted_atoms',
'_deleted_bonds',
]
def __init__(
self,
new_atoms,
new_bonds,
deleted_atoms,
deleted_bonds,
):
"""
Initialize a :class:`.ReactionResult` instance.
Parameters
----------
new_atoms : :class:`tuple` of :class:`.NewAtom`
The new atoms added by the reaction.
new_bonds : :class:`tuple` of :class:`.Bond`
The bonds added by the reaction.
deleted_atoms : :class:`tuple` of :class:`.Atom`
The atoms deleted by the reaction.
deleted_bonds : :class:`tuple` of :class:`.Bond`
The bonds deleted by the reaction.
"""
self._new_atoms = new_atoms
self._new_bonds = new_bonds
self._deleted_atoms = deleted_atoms
self._deleted_bonds = deleted_bonds
def get_new_atoms(self):
"""
Get the new atoms added by the reaction.
Returns
-------
:class:`tuple` of :class:`.NewAtom`
The new atoms added by the reaction.
"""
return self._new_atoms
def get_new_bonds(self):
"""
Get the new bonds added by the reaction.
Returns
-------
:class:`tuple` of :class:`.Bond`
The new bonds added by the reaction.
"""
return self._new_bonds
def get_deleted_atoms(self):
"""
Get the atoms deleted by the reaction.
Returns
-------
:class:`tuple` of :class:`.Atom`
The atoms deleted by the reaction.
"""
return self._deleted_atoms
def get_deleted_bonds(self):
"""
Get the bonds deleted by the reaction.
Returns
-------
:class:`tuple` of :class:`.Bond`
The bonds deleted by the reaction.
"""
return self._deleted_bonds
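# Illustrative usage sketch (not part of the original module): a hypothetical
# reaction that forms one bond and deletes one atom could package its outcome as
#
#     result = ReactionResult(
#         new_atoms=(),
#         new_bonds=(new_bond,),      # `new_bond` is an assumed Bond instance
#         deleted_atoms=(old_atom,),  # `old_atom` is an assumed Atom instance
#         deleted_bonds=(),
#     )
#     assert result.get_new_bonds() == (new_bond,)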
|
the-stack_0_4666 | import unittest
from time import sleep
from blockchain import Actor, Chain, Config
from blockchain.exceptions import ValidationError
class MyActorTesterOnTestNet(unittest.TestCase):
def setUp(self) -> None:
Config.test_net = True
Config.test_net_wallet_initial_coins = 100
Config.new_block_interval = 1
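        # Test-net settings: each wallet starts with 100 coins and a new block is forged every second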
def test_create_transaction(self):
blockchain = Chain()
actor1 = Actor(secret_key="super_secret1!", blockchain=blockchain)
actor2 = Actor(secret_key="super_secret2!", blockchain=blockchain)
tx = actor1.create_transaction(recipient=actor2.address, amount=10)
blockchain.add_transaction(tx.to_dict())
def test_forge_empty_block(self):
blockchain = Chain()
actor1 = Actor(secret_key="super_secret1!", blockchain=blockchain)
block = actor1.forge_block()
blockchain.add_block(block.to_dict())
def test_forge_block_with_transaction(self):
blockchain = Chain()
forger = Actor(secret_key="forger_secret!", blockchain=blockchain)
actor1 = Actor(secret_key="super_secret1!", blockchain=blockchain)
actor2 = Actor(secret_key="super_secret2!", blockchain=blockchain)
tx = actor1.create_transaction(recipient=actor2.address, amount=10)
blockchain.add_transaction(tx.to_dict())
block = forger.forge_block()
blockchain.add_block(block.to_dict())
def test_transaction_to_self(self):
blockchain = Chain()
actor1 = Actor(secret_key="super_secret1!", blockchain=blockchain)
with self.assertRaises(ValidationError):
tx = actor1.create_transaction(recipient=actor1.address, amount=10)
blockchain.add_transaction(tx.to_dict())
def test_negative_amount_transaction(self):
blockchain = Chain()
actor1 = Actor(secret_key="super_secret1!", blockchain=blockchain)
actor2 = Actor(secret_key="super_secret2!", blockchain=blockchain)
with self.assertRaises(ValidationError):
tx = actor1.create_transaction(recipient=actor2.address, amount=-10)
blockchain.add_transaction(tx.to_dict())
def test_negative_fee_transaction(self):
blockchain = Chain()
actor1 = Actor(secret_key="super_secret1!", blockchain=blockchain)
actor2 = Actor(secret_key="super_secret2!", blockchain=blockchain)
with self.assertRaises(ValidationError):
tx = actor1.create_transaction(recipient=actor2.address, amount=10, fee=-1)
blockchain.add_transaction(tx.to_dict())
def test_zero_fee_transaction(self):
blockchain = Chain()
actor1 = Actor(secret_key="super_secret1!", blockchain=blockchain)
actor2 = Actor(secret_key="super_secret2!", blockchain=blockchain)
with self.assertRaises(ValidationError):
tx = actor1.create_transaction(recipient=actor2.address, amount=10, fee=0)
blockchain.add_transaction(tx.to_dict())
def test_chain_tx_counter(self):
blockchain = Chain()
forger = Actor(secret_key="forger_secret!", blockchain=blockchain)
sender = Actor(secret_key="super_secret1!", blockchain=blockchain)
recipient = Actor(secret_key="super_secret2!", blockchain=blockchain)
tx = sender.create_transaction(recipient=recipient.address, amount=10)
blockchain.add_transaction(tx.to_dict())
tx_counter_before = sender.chain_tx_counter
block = forger.forge_block()
blockchain.add_block(block.to_dict())
sleep(2) # wait for block creation
tx_counter_after = sender.chain_tx_counter
self.assertGreater(tx_counter_after, tx_counter_before)
def test_actor_balance(self):
blockchain = Chain()
forger = Actor(secret_key="forger_secret!", blockchain=blockchain)
actor1 = Actor(secret_key="super_secret1!", blockchain=blockchain)
actor2 = Actor(secret_key="super_secret2!", blockchain=blockchain)
transaction_amount = 10
transaction_fee = 1
before_transaction_actor2_balance = actor2.balance
before_transaction_actor1_balance = actor1.balance
before_transaction_forger_balance = forger.balance
tx = actor1.create_transaction(
recipient=actor2.address, amount=transaction_amount, fee=transaction_fee
)
blockchain.add_transaction(tx.to_dict())
block = forger.forge_block()
blockchain.add_block(block.to_dict())
sleep(
2
) # Wait until the block is added, we set the block adding interval to 1 in the setUp method
after_transaction_actor2_balance = actor2.balance
after_transaction_actor1_balance = actor1.balance
after_transaction_forger_balance = forger.balance
self.assertEqual(
before_transaction_actor1_balance - transaction_amount - transaction_fee,
after_transaction_actor1_balance,
)
self.assertEqual(
before_transaction_actor2_balance + transaction_amount,
after_transaction_actor2_balance,
)
self.assertEqual(
before_transaction_forger_balance + transaction_fee,
after_transaction_forger_balance,
)
|
the-stack_0_4667 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
from warehouse import search
def test_es(monkeypatch):
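    # Stub out the Index class and the registry so we can assert how search.es() wires up doc types and settings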
search_obj = pretend.stub()
index_obj = pretend.stub(
doc_type=pretend.call_recorder(lambda d: None),
search=pretend.call_recorder(lambda: search_obj),
settings=pretend.call_recorder(lambda **kw: None),
)
index_cls = pretend.call_recorder(lambda name, using: index_obj)
monkeypatch.setattr(search, "Index", index_cls)
doc_types = [pretend.stub(), pretend.stub()]
client = pretend.stub()
request = pretend.stub(
registry={
"elasticsearch.client": client,
"elasticsearch.index": "warehouse",
"search.doc_types": doc_types,
},
)
es = search.es(request)
assert es is search_obj
assert index_cls.calls == [pretend.call("warehouse", using=client)]
assert index_obj.doc_type.calls == [pretend.call(d) for d in doc_types]
assert index_obj.settings.calls == [
pretend.call(
number_of_shards=1,
number_of_replicas=0,
refresh_interval="1s",
)
]
assert index_obj.search.calls == [pretend.call()]
|
the-stack_0_4669 | import pickle
import sys
import requests
from getpass import getpass
from bs4 import BeautifulSoup
import itertools
import os
import sqlite3
import hashlib
from pathlib import Path
fullpath = sys.argv[0]
pathspc = os.path.dirname(fullpath)
# ANSI colour codes for terminal output (defined before their first use below)
red = '\033[91m'
green = '\033[92m'
end_c = '\033[0m'
data = dict()
dirpath = sys.argv[1] # Gives the absolutepath of directory
obsdirpathlist = dirpath.split('/')[:-1]
obsdirpath = str()
for i in obsdirpathlist:
obsdirpath = obsdirpath + i + '/' # this is observing dir full path it includes slash at the end and beginning
observing_dir = dirpath.split("/")[-1] # this is observing directory eg.outlab30
try:
print("Reading User information ...", end=" ")
f = open(pathspc+'/spc_user_data', 'rb')
data = pickle.load(f)
print("done")
except IOError:
print(red+"Authentication credentials not found"+end_c)
u = input("Enter Username:")
p = getpass("Enter Password:")
data['username'] = u
data['password'] = p
url = input("Enter Server URL:")
if url[len(url)-1] != '/':
url = url + '/'
data['url'] = url
save = input('Would you like to save the configuration? [y/n]:')
if save == 'y' or save == 'Y':
f = open(pathspc+'/spc_user_data','wb')
pickle.dump(data,f)
print("User credentials updated")
f.close()
base_url = data['url']
url = base_url + 'login/'
def md5sum(filename, blocksize=65536):
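    # Stream the file in fixed-size blocks (64 KiB by default) so large files are never read into memory at once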
hash = hashlib.md5()
with open(filename, "rb") as f:
for block in iter(lambda: f.read(blocksize), b""):
hash.update(block)
return hash.hexdigest()
def listsmd5unequal(ldict,sdict): # dicts of filenames and md5s
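    # returns the filenames whose local (ldict) and server (sdict) MD5 digests differ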
unequalmd5s=[]
for i in ldict:
if ldict[i] != sdict[i] :
unequalmd5s.append(i)
return unequalmd5s
def ifstr1startswithstr2(str1,str2):
if(str1.split('/')[0]==str2):
return True
else:
return False
def falselist(n):
l = []
for i in range(0,n):
l.append("False")
return l
def truelist(n):
l = []
for i in range(0,n):
l.append("True")
return l
insync = str()
empty = []
red = '\033[91m'
green = '\033[92m'
end_c = '\033[0m'
files=dict()
var3 = str()
list777 = []
list77 = []
boollist=[]
for path, subdirs, files in os.walk(dirpath):
for filename in files:
f2 = os.path.join(path, filename)
list777.append(f2)
for i in list777:
serpath = i.replace(obsdirpath,"")
list77.append(serpath)
#this list consists of filepaths same as that of in server
list7 = []
data = dict()
try:
print("Reading User information ...", end=" ")
f = open(pathspc+'/spc_user_data', 'rb')
data = pickle.load(f)
print("done")
except IOError:
print("Authentication credentials not found")
u = input("Enter Username:")
p = getpass("Enter Password:")
data['username'] = u
data['password'] = p
url = input("Enter Server URL:")
if url[len(url)-1] != '/':
url = url + '/'
data['url'] = url
save = input('Would you like to save the configuration? [y/n]:')
if save == 'y' or save == 'Y':
f = open(pathspc+'/spc_user_data','wb')
pickle.dump(data,f)
print("User credentials updated")
f.close()
base_url = data['url']
url = base_url+'login/'
client = requests.session()
try:
print("connecting to server ...", end=" ")
client.get(url)
print("done")
except requests.ConnectionError as e:
print("The following error occured connecting to the server: {}\n Please try again".format(e))
client.close()
sys.exit()
try:
csrf = client.cookies['csrftoken']
except():
print("Error obtaining csrf token")
client.close()
sys.exit()
payload = dict(username=data['username'], password=data['password'], csrfmiddlewaretoken=csrf, next='/upload_file/')
try:
print("Sending request ...")
r = client.post(url, data=payload, headers=dict(Referer=url))
r.raise_for_status()
if r.status_code == 200:
print("Request sent ...")
if r.url == url:
print("User authentication failed. Please try again")
client.close()
sys.exit()
print("Reading files ...")
r1 = client.get(base_url)
soup = BeautifulSoup(r1.text, 'html.parser')
productDivs = soup.findAll(attrs = {"id" : "filepath"})
productDivs2 = soup.findAll('a', attrs = {"id" : "filename"})
productDivs3 = soup.findAll(attrs = {"id" : "md5sum"})
productDivs5 = soup.findAll(attrs = {"id" : "deletefile"})
productDivs6 = soup.findAll('a', attrs = {"id" : "startsync"})
productDivs7 = soup.findAll('a', attrs = {"id" : "stopsync"})
md5list={}
for link,l in zip(productDivs5,productDivs3):
pathinserver = link.string
if(ifstr1startswithstr2(pathinserver,observing_dir)==True):
list7.append(pathinserver)
md5list[pathinserver] = l.string.split()[1] #dict of filenames and md5sums of all files in server
insync = productDivs6[0].string
if insync=="True":
print(red+"Sync is going on in some other device.Please wait for sometime."+end_c)
sys.exit()
elif insync=="False":
try:
var = base_url+productDivs6[0]['href']
client.get(var,allow_redirects=True)
except() as e:
print("Error connecting: {}".format(e))
except() as e:
print("Error connecting: {}".format(e))
def download(listoffiles):
for lit in listoffiles:
for link,li in zip(productDivs2,productDivs):
if (li.string.split()[2]+link.string == lit):
var = base_url[:-1] + link['href']
try:
r2 = client.get(var, allow_redirects=True)
filep = str()
filen = obsdirpath+lit #absolute file path in local
l = filen.split('/')[1:-1]
for i in l:
filep = filep + '/' + i
path = Path(filep)
path.mkdir(parents=True, exist_ok=True)
f1 = open(filen, 'wb')
f1.write(r2.content)
f1.close()
except() as e:
print("Error connecting: {}".format(e))
def upload(listoffiles):
for lit in listoffiles:
l = lit.split('/')
l.pop()
fil = str()
for i in l:
fil = fil+i+"/"
filepath = fil
files = {'document': open(obsdirpath+lit, 'rb')}
try:
r2 = client.post(base_url+'upload_file/', data={'filepath': filepath, 'csrfmiddlewaretoken': r.cookies['csrftoken']}, files=files)
if r2.url == base_url:
print("File upload successful")
else:
print("An error occured")
except() as e:
print("error posting file: {}".format(e))
try:
serverset = set(list7)
localset = set(list77)
localmd5list = {}
inser = list(serverset - localset)
inloc = list(localset - serverset)
inboth = list(localset.intersection(serverset))
for i in inboth:
localmd5list[i] = md5sum(i)
inboth_difmd = listsmd5unequal(localmd5list,md5list)
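    # files present on both sides whose contents (MD5 digests) differ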
print()
if (serverset == localset) and len(inboth_difmd) == 0:
print(green+"The directory is same in both client and server"+end_c)
else:
print("The observed directory have some differences from the files in cloud")
print(green+"Files in cloud not in the local directory :"+end_c)
print(inser)
print()
print(green+"Files in the local directory that are not in the cloud :"+end_c)
print(inloc)
print()
print("Choose one of the below options: ")
print("1. Change the directory of local as that of in cloud")
print("2. Change the files in cloud same as that of in local")
print("3. Merge both the local and files in cloud")
print("4. Don't change anything")
x = input()
if(int(x)==1):
for lit in inloc:
os.remove(obsdirpath+lit)
download(inser+inboth_difmd)
print(green+"Done"+end_c)
elif(int(x)==2):
for lit in inser:
for link in productDivs5:
if(link.string == lit):
var = base_url[:-1]+link['href']
try:
r2 = client.get(var, allow_redirects=True)
print("Deleting ...", end=" ")
print(green+"done"+end_c)
except() as e:
print(red+"Error connecting: {}".format(e)+end_c)
upload(inloc+inboth_difmd)
print(green+"Done"+end_c)
elif(int(x)==3):
download(inser)
upload(inloc)
if len(inboth_difmd) != 0:
print("There are some files in the server different from that of local files with same name.Please let us know what to do by choosing one of the three options:")
print(green+"Files present in both cloud and local directory but with different file content :"+end_c)
print(inboth_difmd)
print()
print("1.Download all the files from server")
print("2.Upload all the local fles to server")
print("3.Chose manually what to do with each file")
inp = input()
if inp == '1':
download(inboth_difmd)
elif inp == '2':
upload(inboth_difmd)
elif inp == '3':
for lit in inboth_difmd:
print("1.Download the file from server")
print("2.Upload the local file to server")
                        inp = input()
if inp == '1':
for link,li in zip(productDivs2,productDivs):
if (li.string.split()[2]+link.string == lit):
var = base_url[:-1] + link['href']
try:
r2 = client.get(var, allow_redirects=True)
filep = str()
filen = obsdirpath+lit #absolute file path in local
l = filen.split('/')[1:-1]
for i in l:
filep = filep + '/' + i
path = Path(filep)
path.mkdir(parents=True, exist_ok=True)
f1 = open(filen, 'wb')
f1.write(r2.content)
f1.close()
except() as e:
print("Error connecting: {}".format(e))
                        elif inp == '2':
                            l = lit.split('/')
                            l.pop()
                            fil = str()
                            for i in l:
                                fil = fil+i+"/"
                            filepath = fil
files = {'document': open(obsdirpath+lit, 'rb')}
try:
r2 = client.post(base_url+'upload_file/', data={'filepath': filepath, 'csrfmiddlewaretoken': r.cookies['csrftoken']}, files=files)
if r2.url == base_url:
print("File upload successful")
else:
print("An error occured")
except() as e:
print("error posting file: {}".format(e))
print("Done")
else:
print("Nothing is changed")
try:
var = base_url+productDivs7[0]['href']
client.get(var,allow_redirects=True)
except() as e:
print("Error connecting: {}".format(e))
except requests.exceptions.HTTPError as e:
print('HTTP Error: {}'.format(e))
except requests.exceptions.RequestException as e:
print('Connection Error: {}'.format(e))
client.close()
sys.exit()
client.close() |
the-stack_0_4671 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyLuigi(PythonPackage):
"""Workflow mgmgt + task scheduling + dependency resolution"""
homepage = "https://github.com/spotify/luigi"
pypi = "luigi/luigi-2.8.3.tar.gz"
version('2.8.3', sha256='8b5c84a3c3f4df07309056d3b98348b93c054f1931b7ee22fc29e7989f645c9e')
depends_on('[email protected]:2.8,3.4:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:4.99', type=('build', 'run'))
depends_on('py-python-daemon@:2.1', type=('build', 'run'))
depends_on('[email protected]', when='@2.8.3:', type=('build', 'run'))
|
the-stack_0_4674 | '''
validate survey simulations using CMX data.
updates
-------
* 5/19/2020: created script and test to compare which wavelength range I should
use for the exposure time correction factor
'''
import os
import h5py
import fitsio
import numpy as np
import astropy.units as u
# -- feasibgs --
from feasibgs import util as UT
from feasibgs import catalogs as Cat
from feasibgs import forwardmodel as FM
# -- desihub --
import desispec.io
# -- plotting --
import matplotlib as mpl
import matplotlib.pyplot as plt
if 'NERSC_HOST' not in os.environ:
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
dir = '/global/cscratch1/sd/chahah/feasibgs/cmx/survey_sims/'
def validate_spectral_pipeline():
''' validate the spectral pipeline by
1. constructing spectra from fiber acceptance fraction scaled smoothed CMX
spectra with CMX sky surface brightness
2. compare noise levels to CMX observations
'''
from scipy.signal import medfilt
import desisim.simexp
import specsim.instrument
from desitarget.cmx import cmx_targetmask
np.random.seed(0)
tileid = 70502
date = 20200225
expid = 52113
ispec = 0
dir_gfa = '/global/cfs/cdirs/desi/users/ameisner/GFA/conditions'
dir_redux = "/global/cfs/cdirs/desi/spectro/redux/daily"
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
# get sky surface brightness by correcting for the throughput on the CMX
# sky data
f_sky = lambda band: os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'sky-%s%i-%s.fits' % (band, ispec, str(expid).zfill(8)))
sky_b = desispec.io.read_sky(f_sky('b'))
sky_r = desispec.io.read_sky(f_sky('r'))
sky_z = desispec.io.read_sky(f_sky('z'))
wave, sky_electrons = bs_coadd(
[sky_b.wave, sky_r.wave, sky_z.wave],
[sky_b.flux, sky_r.flux, sky_z.flux])
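    # combine the per-camera (b, r, z) sky models onto a single wavelength grid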
# exposure time
_frame = desispec.io.read_frame(f_sky('b').replace('sky-', 'frame-'))
exptime = _frame.meta['EXPTIME']
print('exp.time = %.fs' % exptime)
# get which are good fibers from coadd file
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
is_good = (coadd['FIBERSTATUS'] == 0)
is_sky = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SKY')) != 0
good_sky = is_good & is_sky
# get throughput for the cameras
config = desisim.simexp._specsim_config_for_wave(wave, dwave_out=0.8, specsim_config_file='desi')
instrument = specsim.instrument.initialize(config, True)
throughput = np.amax([instrument.cameras[0].throughput, instrument.cameras[1].throughput, instrument.cameras[2].throughput], axis=0)
desi_fiber_area = 1.862089 # fiber area
# calculate sky brightness
sky_bright = np.median(sky_electrons[good_sky,:], axis=0) / throughput / instrument.photons_per_bin / exptime * 1e17
# get fiber acceptance fraction and airmass
gfa = fitsio.read(os.path.join(dir_gfa,
'offline_all_guide_ccds_thru_20200315.fits'))
isexp = (gfa['EXPID'] == expid)
fibloss = gfa['TRANSPARENCY'][isexp] * gfa['FIBER_FRACFLUX'][isexp]
fibloss = np.median(fibloss[~np.isnan(fibloss)])
print('fiber loss = (TRANSP) x (FFRAC) = %f' % fibloss)
airmass = np.median(gfa['AIRMASS'][isexp])
print('airmass = %.2f' % airmass)
# select BGS spectra
coadd_wave = fitsio.read(f_coadd, ext=2)
coadd_flux = fitsio.read(f_coadd, ext=3)
coadd_ivar = fitsio.read(f_coadd, ext=4)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
igals = np.random.choice(np.arange(len(gal_cut))[gal_cut], size=5,
replace=False)
igals = np.arange(len(coadd['FIBER']))[coadd['FIBER'] == 143]
for igal in igals:
# source flux is the smoothed CMX spetra
source_flux = np.clip(np.interp(wave, coadd_wave,
medfilt(coadd_flux[igal,:], 101)), 0, None)
# simulate the exposures using the spectral simulation pipeline
fdesi = FM.fakeDESIspec()
bgs = fdesi.simExposure(
wave,
np.atleast_2d(source_flux * fibloss), # scale by fiber acceptance fraction
exptime=exptime,
airmass=airmass,
Isky=[wave, sky_bright],
dwave_out=0.8,
filename=None)
# barebone specsim pipeline for comparison
from specsim.simulator import Simulator
desi = Simulator(config, num_fibers=1)
desi.observation.exposure_time = exptime * u.s
desi.atmosphere._surface_brightness_dict[desi.atmosphere.condition] = \
np.interp(desi.atmosphere._wavelength, wave, sky_bright) * \
desi.atmosphere.surface_brightness.unit
desi.atmosphere._extinct_emission = False
desi.atmosphere._moon = None
desi.atmosphere.airmass = airmass # high airmass
desi.simulate(source_fluxes=np.atleast_2d(source_flux) * 1e-17 * desi.simulated['source_flux'].unit,
fiber_acceptance_fraction=np.tile(fibloss,
np.atleast_2d(source_flux).shape))
random_state = np.random.RandomState(0)
desi.generate_random_noise(random_state, use_poisson=True)
scale=1e17
waves, fluxes, ivars, ivars_electron = [], [], [], []
for table in desi.camera_output:
_wave = table['wavelength'].astype(float)
_flux = (table['observed_flux']+table['random_noise_electrons']*table['flux_calibration']).T.astype(float)
_flux = _flux * scale
_ivar = table['flux_inverse_variance'].T.astype(float)
_ivar = _ivar / scale**2
waves.append(_wave)
fluxes.append(_flux[0])
ivars.append(_ivar[0])
fig = plt.figure(figsize=(15,10))
sub = fig.add_subplot(211)
sub.plot(coadd_wave, coadd_flux[igal,:] * fibloss, c='C0', lw=1,
label='(coadd flux) x (fib.loss)')
for i_b, band in enumerate(['b', 'r', 'z']):
lbl = None
if band == 'b': lbl = 'spectral sim.'
sub.plot(bgs.wave[band], bgs.flux[band][0], c='C1', lw=1,
label=lbl)
sub.plot(waves[i_b], fluxes[i_b] *fibloss, c='C2', lw=1, ls=':')
sub.plot(wave, source_flux * fibloss, c='k', lw=1, ls='--',
label='source flux')
sub.legend(loc='upper right', frameon=True, fontsize=20)
sub.set_xlim(3600, 9800)
sub.set_ylabel('flux [$10^{-17} erg/s/cm^2/A$]', fontsize=25)
sub.set_ylim(-1., 5.)
sub = fig.add_subplot(212)
sub.plot(coadd_wave, coadd_ivar[igal,:] * fibloss**-2, c='C0', lw=1,
label=r'(coadd ivar) / (fib.loss$)^2$')
for i_b, band in enumerate(['b', 'r', 'z']):
sub.plot(bgs.wave[band], bgs.ivar[band][0], c='C1', lw=1)
sub.plot(waves[i_b], ivars[i_b] * fibloss**-2, c='C2', lw=1, ls=':')
sub.legend(loc='upper right', frameon=True, fontsize=20)
sub.set_xlabel('wavelength [$A$]', fontsize=20)
sub.set_xlim(3600, 9800)
sub.set_ylabel('ivar', fontsize=25)
sub.set_ylim(0., None)
fig.savefig(os.path.join(dir, 'valid.spectral_pipeline.exp%i.%i.png' % (expid, igal)),
bbox_inches='tight')
return None
def validate_spectral_pipeline_GAMA_source():
''' compare the fiber flux scaled source spectra from spectral simulations
pipeline to fiber loss corrected cframes CMX data for overlapping GAMA G12
galaxies.
'''
import glob
from scipy.signal import medfilt
from scipy.interpolate import interp1d
from desitarget.cmx import cmx_targetmask
from pydl.pydlutils.spheregroup import spherematch
np.random.seed(0)
tileid = 70502 #[66014, 70502] #66014 is with low transparency
date = 20200225
expids = [52112]#, 52113, 52114, 52115, 52116] # terrible FWHM
#tileid = 66014 # low transparency
#date = 20200314
#expids = [55432]
dir_gfa = '/global/cfs/cdirs/desi/users/ameisner/GFA/conditions'
dir_redux = "/global/cfs/cdirs/desi/spectro/redux/daily"
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
    # read in GAMA + Legacy catalog
cata = Cat.GamaLegacy()
g12 = cata.Read('g12', dr_gama=3, dr_legacy=7)
g12_ra = g12['legacy-photo']['ra']
g12_dec = g12['legacy-photo']['dec']
Ng12 = len(g12_ra)
# match GAMA galaxies to templates
bgs3 = FM.BGStree()
template_match = bgs3._GamaLegacy(g12)
hasmatch = (template_match != -999)
# ra/dec cut for GAMA so we only keep ones near the tile
cut_gama = ((g12_ra > 174.0) & (g12_ra < 186.0) & (g12_dec > -3.0) & (g12_dec < 2.0) & hasmatch)
g12_ra = g12_ra[cut_gama]
g12_dec = g12_dec[cut_gama]
g12_z = g12['gama-spec']['z'][cut_gama]
g12_rfib = UT.flux2mag(g12['legacy-photo']['fiberflux_r'])[cut_gama]
g12_rmag_gama = g12['gama-photo']['r_model'][cut_gama] # r-band magnitude from GAMA (SDSS) photometry
print('%i galaxies in GAMA G12 + Legacy' % len(g12_ra))
# match coadd objects to G12+legacy catalog based on RA and Dec
for expid in expids:
print('--- %i ---' % expid)
# get fiber acceptance fraction for exposure from GFA
gfa = fitsio.read(os.path.join(dir_gfa,
'offline_all_guide_ccds_thru_20200315.fits'))
isexp = (gfa['EXPID'] == expid)
fwhm = gfa['FWHM_ASEC'][isexp]
print(' (FWHM) = %f' % np.median(fwhm[~np.isnan(fwhm)]))
transp = gfa['TRANSPARENCY'][isexp]
transp = np.median(transp[~np.isnan(transp)])
print(' (TRANSP) = %f' % transp)
fibloss = gfa['TRANSPARENCY'][isexp] * gfa['FIBER_FRACFLUX'][isexp]
fibloss = np.median(fibloss[~np.isnan(fibloss)])
print(' fiber loss = (TRANSP) x (FFRAC) = %f' % fibloss)
# spectrographs available for the exposure
ispecs = np.sort([int(os.path.basename(fframe).split('-')[1].replace('z', ''))
for fframe in glob.glob(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-z*.fits'))])
match_gama, coadd_fluxes = [], []
for ispec in ispecs:
# select BGS galaxies from the coadds
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_wave = fitsio.read(f_coadd, ext=2)
coadd_flux = fitsio.read(f_coadd, ext=3)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
# select ones that are in GAMA by matching RA and Dec
match = spherematch(g12_ra, g12_dec,
coadd['TARGET_RA'][gal_cut], coadd['TARGET_DEC'][gal_cut],
0.000277778)
m_gama = match[0]
m_coadd = match[1]
match_gama.append(m_gama)
coadd_fluxes.append(coadd_flux[gal_cut,:][m_coadd])
match_gama = np.concatenate(match_gama)
coadd_fluxes = np.concatenate(coadd_fluxes, axis=0)
print(' %i matches to G12' % len(match_gama))
# generate spectra for the following overlapping galaxies
gama_samp = np.arange(Ng12)[cut_gama][match_gama]
s_bgs = FM.BGSsourceSpectra(wavemin=1500.0, wavemax=15000)
emline_flux = s_bgs.EmissionLineFlux(g12, index=gama_samp, dr_gama=3, silent=True) # emission lines from GAMA
s_flux, s_wave, magnorm_flag = s_bgs.Spectra(
g12_rfib[match_gama],
g12_z[match_gama],
np.repeat(100.0, len(match_gama)),
seed=1,
templateid=template_match[gama_samp],
emflux=emline_flux,
mag_em=g12_rmag_gama[match_gama]
)
igals = np.random.choice(np.arange(len(match_gama))[magnorm_flag], size=5, replace=False)
fig = plt.figure(figsize=(15,20))
for i, igal in enumerate(igals):
sub = fig.add_subplot(5,1,i+1)
#sub.plot(coadd_wave, medfilt(coadd_fluxes[igal,:], 101), c='k',
# ls=':', lw=0.5, label='smoothed (coadd flux)')
sub.plot(coadd_wave, coadd_fluxes[igal,:] * transp * 0.775 ,
c='C0', lw=0.1)
sub.plot(coadd_wave, medfilt(coadd_fluxes[igal,:], 101) * transp * 0.775 , c='C0',
label='(coadd flux) x (TRANSP) x (0.775)')
sub.plot(coadd_wave, coadd_fluxes[igal,:] * fibloss,
c='C1', lw=0.1)
sub.plot(coadd_wave, medfilt(coadd_fluxes[igal,:], 101) * fibloss, c='C1',
label='(coadd flux) x (TRANSP) x (FIBER FRACFLUX)')
sub.plot(s_wave, s_flux[igal,:] * transp, c='k', ls='--',
label='(sim source flux) x (TRANSP)')
sub.set_xlim(3600, 9800)
if i < 4: sub.set_xticklabels([])
            if i == 1: sub.set_ylabel('incident flux [$10^{-17} erg/s/cm^2/A$]', fontsize=25)
if expid == 55432:
sub.set_ylim(-0.5, 3.)
else:
sub.set_ylim(-0.5, 10.)
#sub.set_ylim(1e-1, None)
#sub.set_yscale('log')
sub.legend(loc='upper right', handletextpad=0.1, fontsize=20)
sub.set_xlabel('wavelength', fontsize=25)
fig.savefig(os.path.join(dir,
'valid.spectral_pipeline_source_flux.exp%i.png' % expid), bbox_inches='tight')
plt.close()
return None
def validate_spectral_pipeline_source():
''' compare the color-matched and fiber flux scaled source spectra from the
spectral simulation to the fiber loss corrected cframes CMX data. This is
because the GAMA comparison was a bust.
'''
import glob
from scipy.signal import medfilt
from scipy.interpolate import interp1d
from scipy.spatial import cKDTree as KDTree
from desitarget.cmx import cmx_targetmask
from pydl.pydlutils.spheregroup import spherematch
np.random.seed(0)
tileid = 66003
date = 20200315
expids = [55654, 55655, 55656]
dir_gfa = '/global/cfs/cdirs/desi/users/ameisner/GFA/conditions'
dir_redux = "/global/cfs/cdirs/desi/spectro/redux/daily"
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
# read VI redshifts, which will be used for constructing the source spectra
fvi = os.path.join('/global/cfs/cdirs/desi/sv/vi/TruthTables/',
'truth_table_BGS_v1.2.csv')
vi_id, ztrue, qa_flag = np.genfromtxt(fvi, delimiter=',', skip_header=1, unpack=True,
usecols=[0, 2, 3])
good_z = (qa_flag >= 2.5)
vi_id = vi_id[good_z].astype(int)
ztrue = ztrue[good_z]
mbgs = FM.BGStree()
for expid in expids:
print('--- %i ---' % expid)
# get fiber acceptance fraction for exposure from GFA
gfa = fitsio.read(os.path.join(dir_gfa,
'offline_all_guide_ccds_thru_20200315.fits'))
isexp = (gfa['EXPID'] == expid)
fwhm = gfa['FWHM_ASEC'][isexp]
print(' (FWHM) = %f' % np.median(fwhm[~np.isnan(fwhm)]))
transp = gfa['TRANSPARENCY'][isexp]
transp = np.median(transp[~np.isnan(transp)])
print(' (TRANSP) = %f' % transp)
fibloss = gfa['TRANSPARENCY'][isexp] * gfa['FIBER_FRACFLUX'][isexp]
fibloss = np.median(fibloss[~np.isnan(fibloss)])
print(' fiber loss = (TRANSP) x (FFRAC) = %f' % fibloss)
# spectrographs available for the exposure
ispecs = np.sort([int(os.path.basename(fframe).split('-')[1].replace('z', ''))
for fframe in glob.glob(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-z*.fits'))])
coadd_fluxes, s_fluxes = [], []
for ispec in ispecs:
# read coadd file
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_wave = fitsio.read(f_coadd, ext=2)
coadd_flux = fitsio.read(f_coadd, ext=3)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
targetid = coadd['TARGETID'][gal_cut]
rmag = UT.flux2mag(coadd['FLUX_R'], method='log')[gal_cut]
gmag = UT.flux2mag(coadd['FLUX_G'], method='log')[gal_cut]
rfib = UT.flux2mag(coadd['FIBERFLUX_R'], method='log')[gal_cut]
_, m_vi, m_coadd = np.intersect1d(vi_id, targetid, return_indices=True)
print(' %i matches to VI' % len(m_vi))
# match to templates
temp_rmag = mbgs.meta['SDSS_UGRIZ'].data[:,2]
temp_gmag = mbgs.meta['SDSS_UGRIZ'].data[:,1]
temp_meta = np.vstack([
mbgs.meta['Z'].data,
temp_rmag,
temp_gmag - temp_rmag]).T
tree = KDTree(temp_meta)
# match CMX galaxies to templates
_, match_temp = tree.query(np.vstack([
ztrue[m_vi], rmag[m_coadd], (gmag - rmag)[m_coadd]]).T)
# in some cases there won't be a match from KDTree.query
# we flag these with -999
has_match = ~(match_temp >= len(mbgs.meta['TEMPLATEID']))
s_bgs = FM.BGSsourceSpectra(wavemin=1500.0, wavemax=15000)
s_flux, s_wave, magnorm_flag = s_bgs.Spectra(
rfib[m_coadd][has_match],
ztrue[m_vi][has_match],
np.repeat(100.0, np.sum(has_match)),
seed=1,
templateid=match_temp[has_match],
emflux=None,
mag_em=None)
coadd_fluxes.append(coadd_flux[gal_cut][m_coadd][has_match])
s_fluxes.append(s_flux)
coadd_fluxes = np.concatenate(coadd_fluxes, axis=0)
s_fluxes = np.concatenate(s_fluxes, axis=0)
igals = np.random.choice(np.arange(s_fluxes.shape[0]), size=5, replace=False)
fig = plt.figure(figsize=(15,20))
for i, igal in enumerate(igals):
sub = fig.add_subplot(5,1,i+1)
sub.plot(coadd_wave, coadd_fluxes[igal,:] * transp * 0.775, c='C0', lw=0.1)
sub.plot(coadd_wave, medfilt(coadd_fluxes[igal,:], 101) * transp * 0.775 , c='C0',
label='(coadd flux) x (TRANSP) x (0.775)')
sub.plot(coadd_wave, coadd_fluxes[igal,:] * fibloss, c='C1', lw=0.1)
sub.plot(coadd_wave, medfilt(coadd_fluxes[igal,:], 101) * fibloss, c='C1',
label='(coadd flux) x (TRANSP) x (FIBER FRACFLUX)')
sub.plot(s_wave, s_fluxes[igal,:] * transp, c='k', ls='--',
label='(sim source flux) x (TRANSP)')
sub.set_xlim(3600, 9800)
if i < 4: sub.set_xticklabels([])
            if i == 1: sub.set_ylabel('incident flux [$10^{-17} erg/s/cm^2/A$]', fontsize=25)
sub.set_ylim(-0.5, 6)
sub.legend(loc='upper right', handletextpad=0.1, fontsize=20)
sub.set_xlabel('wavelength', fontsize=25)
fig.savefig(os.path.join(dir,
'valid.spectral_pipeline_source.exp%i.png' % expid),
bbox_inches='tight')
plt.close()
return None
def validate_cmx_zsuccess_specsim_discrepancy(dchi2=40.):
''' This ended up being a useless test because the smoothed CMX spectra
    that I was using as the source spectra have no features to fit the redshifts!
    currently we know that the spectral simulation pipeline does not fully
reproduce the noise level of CMX spectra even when we use the smoothed out
fiber loss corrected CMX spectra as input. This script is to check whether
this discrepancy significantly impacts the redshift success rates.
So we'll be comparing
- z-success rate of observe CMX exposure with VI truth table
- z-success rate of simulated CMX exposure (smoothed fib.loss corrected
source spectra + CMX sky)
    VI is currently available for tile 66003 and night 20200315.
'''
import glob
from scipy.signal import medfilt
import desisim.simexp
import specsim.instrument
from desitarget.cmx import cmx_targetmask
np.random.seed(0)
tileid = 66003
date = 20200315
expids = [55654, 55655, 55656]
dir_gfa = '/global/cfs/cdirs/desi/users/ameisner/GFA/conditions'
dir_redux = "/global/cfs/cdirs/desi/spectro/redux/daily"
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
fvi = os.path.join('/global/cfs/cdirs/desi/sv/vi/TruthTables/',
'truth_table_BGS_v1.2.csv')
vi_id, ztrue, qa_flag = np.genfromtxt(fvi, delimiter=',', skip_header=1, unpack=True,
usecols=[0, 2, 3])
good_z = (qa_flag >= 2.5)
vi_id = vi_id[good_z].astype(int)
ztrue = ztrue[good_z]
for expid in expids:
print('--- %i ---' % expid)
# get fiber acceptance fraction and airmass
gfa = fitsio.read(os.path.join(dir_gfa,
'offline_all_guide_ccds_thru_20200315.fits'))
isexp = (gfa['EXPID'] == expid)
fibloss = gfa['TRANSPARENCY'][isexp] * gfa['FIBER_FRACFLUX'][isexp]
fibloss = np.median(fibloss[~np.isnan(fibloss)])
print(' fiber loss = (TRANSP) x (FFRAC) = %f' % fibloss)
airmass = np.median(gfa['AIRMASS'][isexp])
print(' airmass = %.2f' % airmass)
ispecs = np.sort([int(os.path.basename(fframe).split('-')[1].replace('z', ''))
for fframe in glob.glob(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-z*.fits'))])
# exposure time
_frame = desispec.io.read_frame(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-b%i-%s.fits' % (ispecs[0], str(expid).zfill(8))))
exptime = _frame.meta['EXPTIME']
print(' exp.time = %.fs' % exptime)
for ispec in ispecs:
print(' petal %i' % ispec)
fexp = os.path.join(dir, 'sim_cmx_spectra.exp%i.petal%i.texp%.fs.fits'
% (expid, ispec, exptime))
# get target id
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_wave = fitsio.read(f_coadd, ext=2)
coadd_flux = fitsio.read(f_coadd, ext=3)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
igals = np.arange(len(gal_cut))[gal_cut]
print(' %i BGS galaxies' % np.sum(gal_cut))
if os.path.isfile(fexp): continue
# get sky surface brightness for petal
f_sky = lambda band: os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'sky-%s%i-%s.fits' % (band, ispec, str(expid).zfill(8)))
sky_b = desispec.io.read_sky(f_sky('b'))
sky_r = desispec.io.read_sky(f_sky('r'))
sky_z = desispec.io.read_sky(f_sky('z'))
wave, sky_electrons = bs_coadd(
[sky_b.wave, sky_r.wave, sky_z.wave],
[sky_b.flux, sky_r.flux, sky_z.flux])
# get which are good fibers from coadd file
is_good = (coadd['FIBERSTATUS'] == 0)
is_sky = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SKY')) != 0
good_sky = is_good & is_sky
# get throughput for the cameras
config = desisim.simexp._specsim_config_for_wave(wave, dwave_out=0.8, specsim_config_file='desi')
instrument = specsim.instrument.initialize(config, True)
throughput = np.amax([instrument.cameras[0].throughput, instrument.cameras[1].throughput, instrument.cameras[2].throughput], axis=0)
desi_fiber_area = 1.862089 # fiber area
# calculate sky brightness
sky_bright = np.median(sky_electrons[good_sky,:], axis=0) / throughput / instrument.photons_per_bin / exptime * 1e17
# source flux is the smoothed CMX spetra
source_flux = np.zeros((len(igals), len(wave)))
for i in range(len(igals)):
source_flux[i,:] = np.clip(np.interp(wave, coadd_wave,
medfilt(coadd_flux[igals[i],:], 101)), 0, None)
# simulate the exposures using the spectral simulation pipeline
fdesi = FM.fakeDESIspec()
bgs = fdesi.simExposure(
wave,
source_flux * fibloss, # scale by fiber acceptance fraction
exptime=exptime,
airmass=airmass,
Isky=[wave, sky_bright],
dwave_out=0.8,
filename=fexp)
frr = run_redrock(fexp, overwrite=False)
for ispec in ispecs:
print(' petal %i' % ispec)
# get target id
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_wave = fitsio.read(f_coadd, ext=2)
coadd_flux = fitsio.read(f_coadd, ext=3)
coadd_ivar = fitsio.read(f_coadd, ext=4)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
fexp = os.path.join(dir, 'sim_cmx_spectra.exp%i.petal%i.texp%.fs.fits'
% (expid, ispec, exptime))
sim = desispec.io.read_spectra(fexp)
# randomly check 3 galaxies
igals = np.random.choice(np.arange(np.sum(gal_cut)), size=3, replace=False)
fig = plt.figure(figsize=(15,15))
for i, igal in enumerate(igals):
sub = fig.add_subplot(3,1,i+1)
sub.plot(coadd_wave, coadd_flux[gal_cut,:][igal,:], c='C0', label='coadd')
for band in ['b', 'r', 'z']:
sub.plot(sim.wave[band], sim.flux[band][igal,:] / fibloss, c='C1',
label='sim / fib.loss')
sub.set_xlim(3600, 9800)
if i < 2: sub.set_xticklabels([])
if i == 1: sub.set_ylabel('flux [$10^{-17} erg/s/cm^2/A$]', fontsize=25)
sub.set_ylim(-1., None)
sub.legend(loc='upper right', handletextpad=0.1, fontsize=20)
sub.set_xlabel('wavelength', fontsize=25)
fig.savefig(os.path.join(dir,
'valid.spectral_pipeline_zsuccess_flux.exp%i.petal%i.png' %
(expid, ispec)), bbox_inches='tight')
plt.close()
fig = plt.figure(figsize=(15,15))
for i, igal in enumerate(igals):
sub = fig.add_subplot(3,1,i+1)
sub.plot(coadd_wave, coadd_ivar[gal_cut,:][igal,:], c='C0', label='coadd')
for band in ['b', 'r', 'z']:
sub.plot(sim.wave[band], sim.ivar[band][igal,:] *
fibloss**2, c='C1', label='sim x (fib.loss$)^2$')
sub.set_xlim(3600, 9800)
if i < 2: sub.set_xticklabels([])
if i == 1: sub.set_ylabel('ivar', fontsize=25)
sub.set_ylim(0., None)
sub.legend(loc='upper right', handletextpad=0.1, fontsize=20)
sub.set_xlabel('wavelength', fontsize=25)
fig.savefig(os.path.join(dir,
'valid.spectral_pipeline_zsuccess_ivar.exp%i.petal%i.png' %
(expid, ispec)), bbox_inches='tight')
plt.close()
# read in single exposure coadd and redrock output
for i, ispec in enumerate(ispecs):
# get target id
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_flux = fitsio.read(f_coadd, ext=3)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
targetid = coadd['TARGETID'][gal_cut]
# read coadd redrock fits
rr_coadd = fitsio.read(f_coadd.replace('coadd-', 'zbest-'))
rr_coadd_z = rr_coadd['Z'][gal_cut]
rr_coadd_zwarn = rr_coadd['ZWARN'][gal_cut]
rr_coadd_dchi2 = rr_coadd['DELTACHI2'][gal_cut]
fexp = os.path.join(dir, 'sim_cmx_spectra.exp%i.petal%i.texp%.fs.fits'
% (expid, ispec, exptime))
frr_sim = run_redrock(fexp, overwrite=False)
rr_sim = fitsio.read(frr_sim)
rr_sim_z = rr_sim['Z']
rr_sim_zwarn = rr_sim['ZWARN']
rr_sim_dchi2 = rr_sim['DELTACHI2']
# match VI to exposure based on target ids
_, m_vi, m_sim = np.intersect1d(vi_id, targetid, return_indices=True)
print('%i matches to VI' % len(m_vi))
print(' ', ztrue[m_vi][:5])
print(' ', rr_coadd_z[m_sim][:5])
print(' ', rr_sim_z[m_sim][:5])
if i == 0:
rmags = []
ztrues = []
rr_coadd_zs = []
rr_coadd_zwarns = []
rr_coadd_dchi2s = []
rr_sim_zs = []
rr_sim_zwarns = []
rr_sim_dchi2s = []
rmags.append(UT.flux2mag(coadd['FLUX_R'][gal_cut][m_sim], method='log'))
ztrues.append(ztrue[m_vi])
rr_coadd_zs.append(rr_coadd_z[m_sim])
rr_coadd_zwarns.append(rr_coadd_zwarn[m_sim])
rr_coadd_dchi2s.append(rr_coadd_dchi2[m_sim])
rr_sim_zs.append(rr_sim_z[m_sim])
rr_sim_zwarns.append(rr_sim_zwarn[m_sim])
rr_sim_dchi2s.append(rr_sim_dchi2[m_sim])
rmags = np.concatenate(rmags)
ztrues = np.concatenate(ztrues)
rr_coadd_zs = np.concatenate(rr_coadd_zs)
rr_coadd_zwarns = np.concatenate(rr_coadd_zwarns)
rr_coadd_dchi2s = np.concatenate(rr_coadd_dchi2s)
rr_sim_zs = np.concatenate(rr_sim_zs)
rr_sim_zwarns = np.concatenate(rr_sim_zwarns)
rr_sim_dchi2s = np.concatenate(rr_sim_dchi2s)
zs_coadd = UT.zsuccess(rr_coadd_zs, ztrues, rr_coadd_zwarns,
deltachi2=rr_coadd_dchi2s, min_deltachi2=dchi2)
zs_sim = UT.zsuccess(rr_sim_zs, ztrues, rr_sim_zwarns,
deltachi2=rr_sim_dchi2s, min_deltachi2=dchi2)
print('coadd z-success %.2f' % (np.sum(zs_coadd)/float(len(zs_coadd))))
print('sim z-success %.2f' % (np.sum(zs_sim)/float(len(zs_sim))))
# compare the two redshift success rates
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.plot([16, 21], [1.0, 1.0], c='k', ls='--')
wmean, rate, err_rate = UT.zsuccess_rate(rmags, zs_coadd, range=[15,22],
nbins=28, bin_min=10)
sub.errorbar(wmean, rate, err_rate, fmt='.C0', label='coadd')
wmean, rate, err_rate = UT.zsuccess_rate(rmags, zs_sim, range=[15,22],
nbins=28, bin_min=10)
sub.errorbar(wmean, rate, err_rate, fmt='.C1', label='specsim')
sub.text(21., 1.05, r'$\Delta \chi^2 = %.f$' % dchi2, fontsize=20)
sub.legend(loc='lower left', ncol=3, handletextpad=0.1, fontsize=15)
sub.set_xlabel(r'Legacy $r$ fiber magnitude', fontsize=20)
sub.set_xlim(16, 20.5)
sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
sub.set_ylim([0.6, 1.1])
sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
fig.savefig(os.path.join(dir,
'valid.spectral_pipeline_zsuccess.exp%i.png' % expid),
bbox_inches='tight')
plt.close()
return None
def validate_cmx_zsuccess(dchi2=40.):
    ''' currently we know that the spectral simulation pipeline does not fully
reproduce the noise level of CMX spectra even when we use the smoothed out
fiber loss corrected CMX spectra as input. This script is to check whether
this discrepancy significantly impacts the redshift success rates.
So we'll be comparing
- z-success rate of observe CMX exposure with VI truth table
- z-success rate of spectral simulations run with CMX sky and transparency
    VI is currently available for tile 66003 and night 20200315.
'''
import glob
from scipy.signal import medfilt
import desisim.simexp
import specsim.instrument
from desitarget.cmx import cmx_targetmask
np.random.seed(0)
tileid = 66003
date = 20200315
expids = [55654, 55655, 55656]
dir_gfa = '/global/cfs/cdirs/desi/users/ameisner/GFA/conditions'
dir_redux = "/global/cfs/cdirs/desi/spectro/redux/daily"
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
# read VI table
fvi = os.path.join('/global/cfs/cdirs/desi/sv/vi/TruthTables/',
'truth_table_BGS_v1.2.csv')
vi_id, ztrue, qa_flag = np.genfromtxt(fvi, delimiter=',', skip_header=1, unpack=True,
usecols=[0, 2, 3])
good_z = (qa_flag >= 2.5)
vi_id = vi_id[good_z].astype(int)
ztrue = ztrue[good_z]
# read GAMA-Legacy source fluxes
wave_s, flux_s, meta_s = source_spectra()
for expid in expids:
print('--- %i ---' % expid)
# get fiber acceptance fraction and airmass
gfa = fitsio.read(os.path.join(dir_gfa,
'offline_all_guide_ccds_thru_20200315.fits'))
isexp = (gfa['EXPID'] == expid)
fwhm = gfa['FWHM_ASEC'][isexp]
print(' (FWHM) = %f' % np.median(fwhm[~np.isnan(fwhm)]))
transp = gfa['TRANSPARENCY'][isexp]
transp = np.median(transp[~np.isnan(transp)])
print(' (TRANSP) = %f' % transp)
fibloss = transp * gfa['FIBER_FRACFLUX'][isexp]
fibloss = np.median(fibloss[~np.isnan(fibloss)])
print(' fiber loss = (TRANSP) x (FFRAC) = %f' % fibloss)
airmass = np.median(gfa['AIRMASS'][isexp])
print(' airmass = %.2f' % airmass)
# get petals
ispecs = np.sort([int(os.path.basename(fframe).split('-')[1].replace('z', ''))
for fframe in glob.glob(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-z*.fits'))])
# exposure time
_frame = desispec.io.read_frame(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-b%i-%s.fits' % (ispecs[0], str(expid).zfill(8))))
exptime = _frame.meta['EXPTIME']
print(' exp.time = %.fs' % exptime)
# simulated exposure
fexp = os.path.join(dir, 'spectralsim_source.cmx_sky.exp%i.fits' % expid)
if not os.path.isfile(fexp):
# get sky brightness for exposure
sky_brights = []
for ispec in ispecs:
print(' petal %i' % ispec)
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
# get sky surface brightness for petal
f_sky = lambda band: os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'sky-%s%i-%s.fits' % (band, ispec, str(expid).zfill(8)))
sky_b = desispec.io.read_sky(f_sky('b'))
sky_r = desispec.io.read_sky(f_sky('r'))
sky_z = desispec.io.read_sky(f_sky('z'))
wave, sky_electrons = bs_coadd(
[sky_b.wave, sky_r.wave, sky_z.wave],
[sky_b.flux, sky_r.flux, sky_z.flux])
# get which are good fibers from coadd file
is_good = (coadd['FIBERSTATUS'] == 0)
is_sky = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SKY')) != 0
good_sky = is_good & is_sky
# get throughput for the cameras
config = desisim.simexp._specsim_config_for_wave(wave, dwave_out=0.8, specsim_config_file='desi')
instrument = specsim.instrument.initialize(config, True)
throughput = np.amax([instrument.cameras[0].throughput, instrument.cameras[1].throughput, instrument.cameras[2].throughput], axis=0)
desi_fiber_area = 1.862089 # fiber area
# calculate sky brightness
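                # convert the median sky counts [electrons per bin] into a surface
                # brightness by undoing the throughput, the photons-per-bin flux
                # conversion and the exposure time; the 1e17 factor puts it in the
                # usual 1e-17 erg/s/cm^2/A convention (assumed unit convention).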
sky_bright = np.median(sky_electrons[good_sky,:], axis=0) / throughput / instrument.photons_per_bin / exptime * 1e17
sky_brights.append(sky_bright)
sky_brights = np.array(sky_brights)
# median sky brightness of the petals
sky_bright = np.median(sky_brights, axis=0)
# simulate the exposures using the spectral simulation pipeline
fdesi = FM.fakeDESIspec()
bgs = fdesi.simExposure(
wave_s,
flux_s * transp, # scale by transparency
exptime=exptime,
airmass=airmass,
Isky=[wave, sky_bright],
dwave_out=0.8,
filename=fexp)
# run redrock
frr_sim = run_redrock(fexp, overwrite=False)
rr_sim = fitsio.read(frr_sim)
rr_sim_z = rr_sim['Z']
rr_sim_zwarn = rr_sim['ZWARN']
rr_sim_dchi2 = rr_sim['DELTACHI2']
# compile single exposure coadd and redrock output
for i, ispec in enumerate(ispecs):
# get target id
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_flux = fitsio.read(f_coadd, ext=3)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
targetid = coadd['TARGETID'][gal_cut]
# read coadd redrock fits
rr_coadd = fitsio.read(f_coadd.replace('coadd-', 'zbest-'))
rr_coadd_z = rr_coadd['Z'][gal_cut]
rr_coadd_zwarn = rr_coadd['ZWARN'][gal_cut]
rr_coadd_dchi2 = rr_coadd['DELTACHI2'][gal_cut]
# match VI to exposure based on target ids
_, m_vi, m_coadd = np.intersect1d(vi_id, targetid, return_indices=True)
if i == 0:
rmags = []
ztrues = []
rr_coadd_zs = []
rr_coadd_zwarns = []
rr_coadd_dchi2s = []
rmags.append(UT.flux2mag(coadd['FLUX_R'][gal_cut][m_coadd], method='log'))
ztrues.append(ztrue[m_vi])
rr_coadd_zs.append(rr_coadd_z[m_coadd])
rr_coadd_zwarns.append(rr_coadd_zwarn[m_coadd])
rr_coadd_dchi2s.append(rr_coadd_dchi2[m_coadd])
print('%i matches to VI' % len(rmags))
rmags = np.concatenate(rmags)
ztrues = np.concatenate(ztrues)
rr_coadd_zs = np.concatenate(rr_coadd_zs)
rr_coadd_zwarns = np.concatenate(rr_coadd_zwarns)
rr_coadd_dchi2s = np.concatenate(rr_coadd_dchi2s)
zs_coadd = UT.zsuccess(rr_coadd_zs, ztrues, rr_coadd_zwarns,
deltachi2=rr_coadd_dchi2s, min_deltachi2=dchi2)
zs_sim = UT.zsuccess(rr_sim_z, meta_s['zred'], rr_sim_zwarn,
deltachi2=rr_sim_dchi2, min_deltachi2=dchi2)
print('coadd z-success %.2f' % (np.sum(zs_coadd)/float(len(zs_coadd))))
print('sim z-success %.2f' % (np.sum(zs_sim)/float(len(zs_sim))))
# compare the two redshift success rates
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.plot([16, 21], [1.0, 1.0], c='k', ls='--')
wmean, rate, err_rate = UT.zsuccess_rate(rmags, zs_coadd, range=[15,22],
nbins=28, bin_min=10)
sub.errorbar(wmean, rate, err_rate, fmt='.C0', label='coadd')
wmean, rate, err_rate = UT.zsuccess_rate(meta_s['r_mag'], zs_sim, range=[15,22],
nbins=28, bin_min=10)
sub.errorbar(wmean, rate, err_rate, fmt='.C1', label='spectral sim')
sub.text(19.5, 1.05, r'$\Delta \chi^2 = %.f$' % dchi2, fontsize=20)
sub.legend(loc='lower left', ncol=3, handletextpad=0.1, fontsize=15)
sub.set_xlabel(r'Legacy $r$ fiber magnitude', fontsize=20)
sub.set_xlim(16, 20.5)
sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
sub.set_ylim([0.6, 1.1])
sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
fig.savefig(os.path.join(dir,
'valid.spectralsim_source.cmx_sky.zsuccess.exp%i.png' % expid),
bbox_inches='tight')
plt.close()
return None
def tnom(dchi2=40., emlines=True):
''' Calculate z-success rate for nominal dark time exposure with different
tnom exposure times. For each tnom, use the z-success rate to determine
r_lim, the r magnitude that gets 95% completeness.
'''
np.random.seed(0)
# nominal exposure times
if dchi2 == 40:
texps = [100 + 20 * i for i in range(11)][::2]
elif dchi2 == 100:
texps = [200 + 10 * i for i in range(11)][::2]
# true redshift and r-magnitude
_, _, meta = source_spectra()
ztrue = meta['zred'] # true redshifts
r_mag = meta['r_mag']
r_fib = meta['r_mag_apflux']
# generate spectra for nominal dark sky exposures and run redrock
frr_noms = []
for texp in texps:
spec_nom = nomdark_spectra(texp)
# run redrock on nominal dark sky exposure spectra
if emlines:
fnom = os.path.join(dir, 'exp_spectra.nominal_dark.%.fs.fits' % texp)
else:
fnom = os.path.join(dir, 'exp_spectra.nominal_dark.noemission.%.fs.fits' % texp)
frr_nom = run_redrock(fnom, overwrite=False)
frr_noms.append(frr_nom)
rmags = np.linspace(17, 20, 31)
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.plot([16, 21], [1., 1.], c='k', ls=':')
# for each tnom, calculate rlim from the z-sucess rates
for i, texp, frr_nom in zip(range(len(texps)), texps, frr_noms):
# read redrock output and calculate z-success
rr_nom = fitsio.read(frr_nom)
zs_nom = UT.zsuccess(rr_nom['Z'], ztrue, rr_nom['ZWARN'],
deltachi2=rr_nom['DELTACHI2'], min_deltachi2=dchi2)
        # ignore redshift failures for bright r < 18.2 galaxies, since this is
# likely an issue with the emission line
zs_nom[r_mag < 18.2] = True
# determine rlim
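        # rlim is the brightest magnitude cut (fainter than r=18) at which the
        # cumulative z-success of all brighter galaxies first drops below 95%;
        # if it never drops below 95%, fall back to the faintest cut probed.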
zs_rmag = []
for _r in rmags:
brighter = (r_mag < _r)
zs_rmag.append(np.sum(zs_nom[brighter]) / np.sum(brighter))
crit = (np.array(zs_rmag) < 0.95) & (rmags > 18)
if np.sum(crit) > 0:
rlim = np.min(rmags[crit])
else:
rlim = np.max(rmags)
print('--- tnom = %.fs ---' % texp)
print(' total z-success = %.2f' % (np.sum(zs_nom)/float(len(zs_nom))))
print(' 95percent complete rlim = %.1f' % rlim)
wmean, rate, err_rate = UT.zsuccess_rate(r_mag, zs_nom, range=[15,22],
nbins=28, bin_min=10)
sub.plot(wmean, rate, label=r'%.fs; $r_{\rm lim}= %.1f$' % (texp, rlim))
sub.text(19., 1.05, r'$\Delta \chi^2 = %.f$' % dchi2, fontsize=20)
sub.legend(loc='lower left', handletextpad=0.1, fontsize=15)
sub.set_xlabel(r'Legacy $r$ magnitude', fontsize=20)
sub.set_xlim([16., 20.5])
sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
sub.set_ylim([0.6, 1.1])
sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
fig.savefig(os.path.join(dir, 'zsuccess.tnom.dchi2_%i.png' % dchi2),
bbox_inches='tight')
plt.close()
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.plot([18, 25], [1., 1.], c='k', ls=':')
# nominal exposure z-success rate as a function of fiber magnitude
for i, texp, frr_nom in zip(range(len(texps)), texps, frr_noms):
# read redrock output and calculate z-success
rr_nom = fitsio.read(frr_nom)
zs_nom = UT.zsuccess(rr_nom['Z'], ztrue, rr_nom['ZWARN'],
deltachi2=rr_nom['DELTACHI2'], min_deltachi2=dchi2)
        # ignore redshift failures for bright r < 18.2 galaxies, since this is
# likely an issue with the emission line
zs_nom[r_mag < 18.2] = True
wmean, rate, err_rate = UT.zsuccess_rate(r_fib, zs_nom, range=[18,23],
nbins=28, bin_min=10)
        sub.errorbar(wmean, rate, err_rate, label=r'%.fs' % texp)
sub.text(21., 1.05, r'$\Delta \chi^2 = %.f$' % dchi2, fontsize=20)
sub.legend(loc='lower left', ncol=3, handletextpad=0.1, fontsize=15)
sub.set_xlabel(r'Legacy $r$ fiber magnitude', fontsize=20)
sub.set_xlim([18., 22.5])
sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
sub.set_ylim([0.6, 1.1])
sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
fig.savefig(os.path.join(dir, 'zsuccess.tnom.r_fib.dchi2_%i.png' % dchi2),
bbox_inches='tight')
return None
def texp_factor_wavelength(emlines=True):
''' Q: Should the exposure time correction factor be determined by sky
surface brightness ratio at 5000A or 6500A?
sky surface brightness ratio = (sky surface brightness) / (nominal dark sky)
We will answer this by constructing a set of nominal dark sky exposure
spectra with 150s exposure time, getting the redshift success rate for
these spectra. Then we'll compare the redshift success rate for
1. exposure spectra constructed with CMX sky brightness and
texp = 150s x (sky ratio at 5000A)
2. exposure spectra constructed with CMX sky brightness and
texp = 150s x (sky ratio at 6500A)
We use CMX sky brightness during bright exposures.
    Whichever redshift success rate is closer to the nominal dark exposure
    z-success rate will determine the exposure factor.
updates
-------
* David Schlegel was surprised that 6500A agreed better. He finds that
5000A agrees better. He suggested I run this test without emission lines
* 06/11/2020: Read noise term in the SNR calculation cannot be ignored when
our nominal exposure time is low. New fsky values calculated for CMX
exposures including read noise.
'''
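    # rough scaling assumed here: in the sky-dominated limit SNR ~ S sqrt(texp/sky),
    # so matching the SNR of the nominal 150s dark exposure requires
    # texp = 150s x fsky, with fsky evaluated at either 5000A or 7000A (compared below)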
np.random.seed(0)
import desisim.simexp
from desimodel.io import load_throughput
wavemin = load_throughput('b').wavemin - 10.0
wavemax = load_throughput('z').wavemax + 10.0
wave = np.arange(round(wavemin, 1), wavemax, 0.8) * u.Angstrom
config = desisim.simexp._specsim_config_for_wave(wave.to('Angstrom').value, dwave_out=0.8, specsim_config_file='desi')
nominal_surface_brightness_dict = config.load_table(
config.atmosphere.sky, 'surface_brightness', as_dict=True)
Isky_nom = [wave, nominal_surface_brightness_dict['dark']]
# generate spectra for nominal dark sky exposure as reference
spec_nom = nomdark_spectra(150, emlines=emlines)
# run redrock on nominal dark sky exposure spectra
frr_nom = run_redrock(os.path.join(dir,
'exp_spectra.nominal_dark%s.150s.fits' % ['.noemission', ''][emlines]),
overwrite=False)
# read in CMX sky data
skies = cmx_skies()
# select CMX exposures when the sky was brighter than dark time. In
# principle we should focus on bright exposures (i.e. 2.5x nominal).
# we also remove exposures from 20200314 which has strange sky fluxes.
#bright = (((skies['sky_ratio_5000'] > 1.) | (skies['sky_ratio_7000'] > 1.))
# & (skies['date'] != 20200314))
#print('%i exposures with sky ratios > 1 and not taken during March 14' % len(expids))
bright = (((skies['fsky_5000'] > 1.5) | (skies['fsky_7000'] > 1.5))
& (skies['date'] != 20200314))
expids = np.unique(skies['expid'][bright])[:5]
print('%i exposures with fsky > 1.5 and not taken during March 14' % len(expids))
#np.random.choice(np.unique(skies['expid'][bright]), size=5, replace=False)
# generate exposure spectra for select CMX sky surface brightnesses with
# exposure times scaled by (1) sky ratio at 5000A (2) sky ratio at 6500A
for expid in expids:
print('--- expid = %i ---' % expid)
is_exp = (skies['expid'] == expid)
# get median sky surface brightnesses for exposure
Isky = bs_coadd(
[skies['wave_b'], skies['wave_r'], skies['wave_z']],
[
np.median(skies['sky_sb_b'][is_exp], axis=0),
np.median(skies['sky_sb_r'][is_exp], axis=0),
np.median(skies['sky_sb_z'][is_exp], axis=0)]
)
fig = plt.figure(figsize=(15,10))
sub = fig.add_subplot(211)
sub.plot(Isky_nom[0], Isky_nom[1], c='k', lw=0.5)
sub.plot(Isky[0], Isky[1], c='C0', lw=0.5)
sub.set_xlabel('wavelength', fontsize=20)
sub.set_xlim(3.6e3, 9.8e3)
sub.set_ylabel('flux', fontsize=20)
sub.set_ylim(0., 10.)
sub = fig.add_subplot(212)
for band in ['b', 'r', 'z']:
sub.plot(spec_nom.wave[band], spec_nom.flux[band][0,:], c='k', lw=1)
# get median sky ratios for the exposure
for i, _w in enumerate([5000, 7000]):
_fexp = np.median(skies['fsky_%i' % _w ][is_exp])
print(' fexp at %iA = %.2f' % (_w, _fexp))
print(' sky ratio = %.2f' % (np.median(skies['sky_ratio_%i' % _w][is_exp])))
# generate exposure spectra for expid CMX sky
_fspec = os.path.join(dir, 'exp_spectra.exp%i%s.fexp_%i.fits' %
(expid, ['.noemission', ''][emlines], _w))
_spec = exp_spectra(
Isky, # sky surface brightness
150. * _fexp, # exposure time
1.1, # same airmass
_fspec,
emlines=emlines
)
# run redrock on the exposure spectra
frr = run_redrock(_fspec, qos='debug')
# plot comparing the exp spectra to the nominal dark spectra
for band in ['b', 'r', 'z']:
lbl = None
if band == 'b':
lbl = ('at %iA' % _w)
sub.plot(_spec.wave[band], _spec.flux[band][0,:], c='C%i' % i,
lw=1, label=lbl)
sub.set_xlabel('wavelength', fontsize=20)
sub.set_xlim(3.6e3, 9.8e3)
sub.set_ylabel('flux', fontsize=20)
sub.set_ylim(0., 10.)
sub.legend(loc='upper right', fontsize=20, ncol=3)
fig.savefig(_fspec.replace('.fexp_%i.fits' % _w, '.png'), bbox_inches='tight')
plt.close()
_, _, meta = source_spectra(emlines=emlines)
ztrue = meta['zred'] # true redshifts
r_mag = meta['r_mag']
dchi2 = 40. # minimum delta chi2
# read redrock outputs and compare which exposure factor does better
    # at reproducing the nominal dark exposure redshift success rate.
rr_nom = fitsio.read(frr_nom)
zs_nom = UT.zsuccess(rr_nom['Z'], ztrue, rr_nom['ZWARN'],
deltachi2=rr_nom['DELTACHI2'], min_deltachi2=dchi2)
print('nominal z-success = %.2f' % (np.sum(zs_nom)/float(len(zs_nom))))
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.plot([16, 21], [1., 1.], c='k', ls=':')
wmean, rate, err_rate = UT.zsuccess_rate(r_mag, zs_nom, range=[15,22],
nbins=28, bin_min=10)
_plt_nom = sub.errorbar(wmean, rate, err_rate, fmt='.k', elinewidth=2, markersize=10)
zs_5000, zs_7000 = [], []
for expid in expids:
print('--- expid = %i ---' % expid)
zss = []
for i, _w in enumerate([5000, 7000]):
rr = fitsio.read(os.path.join(dir,
'zbest.exp_spectra.exp%i%s.fexp_%i.fits' %
(expid, ['.noemission', ''][emlines], _w)))
_zs = UT.zsuccess(rr['Z'], ztrue, rr['ZWARN'],
deltachi2=rr['DELTACHI2'], min_deltachi2=dchi2)
zss.append(_zs)
print(' fexp at %i z-success = %.2f' % (_w, np.sum(_zs)/float(len(_zs))))
wmean, rate, err_rate = UT.zsuccess_rate(r_mag, _zs, range=[15,22],
nbins=28, bin_min=10)
_plt, = sub.plot(wmean, rate, c='C%i' % i)
if expid == expids[0]:
if i == 0: _plts = [_plt_nom]
_plts.append(_plt)
zs_5000.append(zss[0])
zs_7000.append(zss[1])
zs_5000 = np.concatenate(zs_5000)
zs_7000 = np.concatenate(zs_7000)
print('-----------------------')
print('nominal z-success = %.2f' % (np.sum(zs_nom)/float(len(zs_nom))))
print('fexp at 5000A z-success = %.2f ' % (np.sum(zs_5000)/float(len(zs_5000))))
print('fexp at 7000A z-success = %.2f ' % (np.sum(zs_7000)/float(len(zs_7000))))
sub.text(19., 1.05, r'$\Delta \chi^2 = %.f$' % dchi2, fontsize=20)
sub.legend(_plts,
['nominal dark 150s',
r'CMX exp. $f_{\rm sky}[5000A]$',
r'CMX exp. $f_{\rm sky}[7000A]$'],
loc='lower left', handletextpad=0.1, fontsize=15)
sub.set_xlabel(r'Legacy $r$ magnitude', fontsize=20)
sub.set_xlim([16., 20.5])
sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
sub.set_ylim([0.6, 1.1])
sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
fig.savefig(os.path.join(dir,
'zsuccess.exp_spectra%s.fsky.png' % ['.noemission', ''][emlines]),
bbox_inches='tight')
return None
def reproduce_vi_zsuccess():
''' **validating the spectral sims pipeline** I will test the spectral
simulation pipeline by trying to reproduce the redshift success rate of VI
Round 1 Tile 66003. I will construct spectral sims using Tile 66003
exposure sky brightness and exposure times then run redrock on them.
'''
np.random.seed(0)
Isky_nom = _Isky_nominal_dark()
# read in CMX sky data
skies = cmx_skies()
# select VI exposures of Tile 66003
vi_tile = (skies['tileid'] == 66003)
uniq_exps = np.unique(skies['expid'][vi_tile])
print('%i exposures with TILEID=66003' % len(uniq_exps))
print(uniq_exps)
for expid in uniq_exps:
print('\n--- expid = %i ---' % expid)
is_exp = (skies['expid'] == expid)
# get median sky surface brightnesses for exposure
Isky = bs_coadd(
[skies['wave_b'], skies['wave_r'], skies['wave_z']],
[
np.median(skies['sky_sb_b'][is_exp], axis=0),
np.median(skies['sky_sb_r'][is_exp], axis=0),
np.median(skies['sky_sb_z'][is_exp], axis=0)]
)
# median airmass
airmass_exp = np.median(skies['airmass'][is_exp])
print(' median airmass = %.2f' % airmass_exp)
# exposure time of exposure
texp = skies['exptime'][is_exp][0]
print(' exposure time = %.1f' % texp)
assert np.all(skies['exptime'][is_exp] == texp)
# generate exposure spectra for expid CMX sky
_fspec = os.path.join(dir, 'exp_spectra.exp%i%s.texp%.f.fits' %
(expid, '.noemission', texp))
_spec = exp_spectra(
Isky, # sky surface brightness
texp, # exposure time
airmass_exp, # same airmass
_fspec,
emlines=False
)
# run redrock on the exposure spectra
frr = run_redrock(_fspec, qos='debug')
_, _, meta = source_spectra(emlines=False)
ztrue = meta['zred'] # true redshifts
r_mag = meta['r_mag']
dchi2 = 40. # minimum delta chi2
# read redrock outputs for each exposure
zs_exps = []
    for expid in uniq_exps:
        print('--- expid = %i ---' % expid)
        # exposure time used when the spectra for this exposure were simulated above
        texp = skies['exptime'][(skies['expid'] == expid)][0]
        rr = fitsio.read(os.path.join(dir, 'zbest.exp_spectra.exp%i%s.texp%.f.fits' %
            (expid, '.noemission', texp)))
_zs = UT.zsuccess(rr['Z'], ztrue, rr['ZWARN'],
deltachi2=rr['DELTACHI2'], min_deltachi2=dchi2)
zs_exps.append(_zs)
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
fig = plt.figure(figsize=(18,6))
    for i, expid, zs in zip(range(len(uniq_exps)), uniq_exps, zs_exps):
sub = fig.add_subplot(1, 3, i+1)
sub.plot([16, 21], [1., 1.], c='k', ls=':')
# load VI z success rate for single exposure
wmean, rate, err_rate = np.loadtxt(os.path.join(dir_coadd,
'vi_zsuccess.dchi2_40.coadd-66003-20200315-%i.txt' % expid))
sub.errorbar(wmean, rate, err_rate, fmt='.k', label='VI $z$ success')
wmean, rate, err_rate = UT.zsuccess_rate(r_mag, zs, range=[15,22],
nbins=28, bin_min=10)
sub.plot(wmean, rate, c='C%i' % i, label='Spectral Sim.')
if i == 2: sub.legend(loc='lower left', handletextpad=0.1, fontsize=15)
if i == 1: sub.set_xlabel(r'Legacy $r$ magnitude', fontsize=20)
sub.set_xlim([16., 20.5])
if i == 0:
sub.text(19., 1.05, r'$\Delta \chi^2 = %.f$' % dchi2, fontsize=20)
sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
sub.set_ylim([0.6, 1.1])
sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
fig.savefig(os.path.join(dir, 'reproduce_vi_zsuccess.png'), bbox_inches='tight')
return None
def _Isky_nominal_dark():
''' surface brightness of nominal dark sky
'''
import desisim.simexp
from desimodel.io import load_throughput
wavemin = load_throughput('b').wavemin - 10.0
wavemax = load_throughput('z').wavemax + 10.0
wave = np.arange(round(wavemin, 1), wavemax, 0.8) * u.Angstrom
config = desisim.simexp._specsim_config_for_wave(wave.to('Angstrom').value, dwave_out=0.8, specsim_config_file='desi')
nominal_surface_brightness_dict = config.load_table(
config.atmosphere.sky, 'surface_brightness', as_dict=True)
return [wave, nominal_surface_brightness_dict['dark']]
def _SNR_test():
    ''' Q: Why does scaling the exposure time by the sky brightness ratio not
    produce spectra with roughly the same SNR?
The SNR of the spectra is approximately
SNR = S x sqrt(texp/sky)
This means that if the sky is twice as bright but you increase texp by 2,
you would get the same SNR. This, however, does not seem to be the case for
the SNR for the `exp_spectra` output.
    In this script I will generate spectra with uniform (constant) sky brightness
    levels and exposure times scaled by the same factor, to check this directly.
'''
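    # note: if read noise and dark current were negligible, scaling sky and texp
    # by the same factor would leave SNR ~ S sqrt(texp/sky) unchanged; the
    # read-noise term breaks this scaling at short exposure times.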
np.random.seed(0)
import desisim.simexp
from desimodel.io import load_throughput
wavemin = load_throughput('b').wavemin - 10.0
wavemax = load_throughput('z').wavemax + 10.0
wave = np.arange(round(wavemin, 1), wavemax, 0.8) * u.Angstrom
# get throughput for the cameras
import specsim.instrument
from specsim.simulator import Simulator
config = desisim.simexp._specsim_config_for_wave(wave.value, dwave_out=0.8, specsim_config_file='desi')
instrument = specsim.instrument.initialize(config, True)
throughput = np.amax([instrument.cameras[0].throughput, instrument.cameras[1].throughput, instrument.cameras[2].throughput], axis=0)
fig = plt.figure(figsize=(20,15))
sub0 = fig.add_subplot(321)
sub1 = fig.add_subplot(323)
sub2 = fig.add_subplot(325)
sub3 = fig.add_subplot(322)
sub4 = fig.add_subplot(324)
sub5 = fig.add_subplot(326)
for ii, i in enumerate([0, 5, 10]):
# read in source spectra
print('sky = %i' % (i+1))
wave_s, flux_s, _ = source_spectra(emlines=False)
#'''
_fspec = os.path.join(dir, 'exp_spectra.snr_test.sky%i.fits' % (i+1))
Isky = [wave, np.ones(len(wave)) * (i + 1.)]
_spec = exp_spectra(
Isky, # sky surface brightness
150. * (i + 1.), # exposure time
1.1, # same airmass
_fspec,
emlines=False
)
# plot comparing the exp spectra to the nominal dark spectra
for band in ['b', 'r', 'z']:
lbl = None
if band == 'b': lbl = ('sky = %i, texp = %.f' % ((i+1), 150.*(i+1.)))
sub0.plot(_spec.wave[band], _spec.flux[band][0,:], c='C%i' % ii, lw=1, label=lbl)
sub1.plot(_spec.wave[band], _spec.flux[band][1,:], c='C%i' % ii, lw=1, label=lbl)
sub2.plot(_spec.wave[band], _spec.flux[band][2,:], c='C%i' % ii, lw=1, label=lbl)
sub3.plot(_spec.wave[band], _spec.ivar[band][0,:], c='C%i' % ii, lw=1, label=lbl)
sub4.plot(_spec.wave[band], _spec.ivar[band][1,:], c='C%i' % ii, lw=1, label=lbl)
sub5.plot(_spec.wave[band], _spec.ivar[band][2,:], c='C%i' % ii, lw=1, label=lbl)
sub0.plot(wave_s, flux_s[0,:], c='k', lw=1, ls='--')
sub1.plot(wave_s, flux_s[1,:], c='k', lw=1, ls='--')
sub2.plot(wave_s, flux_s[2,:], c='k', lw=1, ls='--')
'''
# barebone specsim pipeline for comparison
desi = Simulator(config, num_fibers=flux_s.shape[0])
desi.observation.exposure_time = 150. * (i + 1.) * u.s
desi.atmosphere._surface_brightness_dict[desi.atmosphere.condition] = \
np.ones(len(desi.atmosphere._wavelength)) * (i + 1.) * \
desi.atmosphere.surface_brightness.unit
desi.atmosphere._extinct_emission = False
desi.atmosphere._moon = None
desi.atmosphere.airmass = 1.1
source_flux = np.array([np.clip(np.interp(wave, wave_s, _flux_s), 0, None) for _flux_s in flux_s])
desi.simulate(source_fluxes=source_flux * 1e-17 * desi.simulated['source_flux'].unit)
random_state = np.random.RandomState(0)
desi.generate_random_noise(random_state, use_poisson=True)
scale=1e17
waves, fluxes, ivars, ivars_electron = [], [], [], []
lbl = ('sky=%i' % (i+1))
for table in desi.camera_output:
print(' source', table['num_source_electrons'][0][:5])
print(' sky', table['num_sky_electrons'][0][:5])
print(' dark', table['num_dark_electrons'][0][:5])
print(' RN', table['read_noise_electrons'][0][:5]**2)
_wave = table['wavelength'].astype(float)
_flux = (table['observed_flux']+table['random_noise_electrons']*table['flux_calibration']).T.astype(float)
_flux = _flux * scale
_ivar = table['flux_inverse_variance'].T.astype(float)
_ivar = _ivar / scale**2
sub0.plot(_wave, _flux[0], c='C%i' % ii, lw=1, label=lbl)
sub1.plot(_wave, _flux[1], c='C%i' % ii, lw=1, label=lbl)
sub2.plot(_wave, _flux[2], c='C%i' % ii, lw=1, label=lbl)
sub3.plot(_wave, _ivar[0], c='C%i' % ii, lw=1, label=lbl)
sub4.plot(_wave, _ivar[1], c='C%i' % ii, lw=1, label=lbl)
sub5.plot(_wave, _ivar[2], c='C%i' % ii, lw=1, label=lbl)
lbl = None
'''
sub2.set_xlabel('wavelength', fontsize=20)
sub0.set_xlim(3.6e3, 9.8e3)
sub1.set_xlim(3.6e3, 9.8e3)
sub2.set_xlim(3.6e3, 9.8e3)
sub3.set_xlim(3.6e3, 9.8e3)
sub4.set_xlim(3.6e3, 9.8e3)
sub5.set_xlim(3.6e3, 9.8e3)
sub1.set_ylabel('flux', fontsize=20)
sub4.set_ylabel('ivar', fontsize=20)
sub0.set_ylim(0., 10.)
sub1.set_ylim(0., 10.)
sub2.set_ylim(0., 10.)
sub0.legend(loc='upper right', fontsize=15)
fig.savefig(os.path.join(dir, 'snr_test.png'), bbox_inches='tight')
plt.close()
return None
def cmx_skies():
''' read in CMX sky data. The sky surface brightnesses are generated
from the flat fielded sky data that's throughput corrected.
'''
fskies = h5py.File('/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/sky_fibers.cmx.v1.hdf5', 'r')
skies = {}
for k in fskies.keys():
skies[k] = fskies[k][...]
return skies
def source_spectra(emlines=True):
''' read GAMA-matched fiber-magnitude scaled BGS source spectra
    These source spectra are created for GAMA objects. Their spectra are
    constructed from continua that are template-matched to the broadband
    colors, with emission lines taken from GAMA data (properly flux calibrated).
    The spectra are then scaled down to the r-band fiber magnitude, so they
    do not require fiber acceptance fractions.
'''
fsource = os.path.join(dir,
'GALeg.g15.sourceSpec%s.1000.seed0.hdf5' % ['.noemission', ''][emlines])
if not os.path.isfile(fsource):
seed = 0
np.random.seed(seed)
# read in GAMA-Legacy catalog with galaxies in both GAMA and Legacy surveys
cata = Cat.GamaLegacy()
gleg = cata.Read('g15', dr_gama=3, dr_legacy=7, silent=True)
# extract meta-data of galaxies
redshift = gleg['gama-spec']['z']
absmag_ugriz = cata.AbsMag(gleg, kcorr=0.1, H0=70, Om0=0.3, galext=False) # ABSMAG k-correct to z=0.1
r_mag_apflux = UT.flux2mag(gleg['legacy-photo']['apflux_r'][:,1]) # aperture flux
r_mag_gama = gleg['gama-photo']['r_petro'] # r-band magnitude from GAMA (SDSS) photometry
ha_gama = gleg['gama-spec']['ha_flux'] # halpha line flux
ngal = len(redshift) # number of galaxies
vdisp = np.repeat(100.0, ngal) # velocity dispersions [km/s]
# match GAMA galaxies to templates
bgs3 = FM.BGStree()
match = bgs3._GamaLegacy(gleg)
hasmatch = (match != -999)
criterion = hasmatch
        # randomly pick a few more than 1000 galaxies from the catalog that have
        # matching templates, because some of the galaxies will have issues where the
        # emission line is brighter than the photometric magnitude.
subsamp = np.random.choice(np.arange(ngal)[criterion], int(1.1 * 1000), replace=False)
# generate noiseless spectra for these galaxies
s_bgs = FM.BGSsourceSpectra(wavemin=1500.0, wavemax=15000)
# emission line fluxes from GAMA data
if emlines:
emline_flux = s_bgs.EmissionLineFlux(gleg, index=subsamp, dr_gama=3, silent=True) # emission lines from GAMA
mag_em = r_mag_gama[subsamp]
else:
emline_flux = None
mag_em = None
flux, wave, magnorm_flag = s_bgs.Spectra(
r_mag_apflux[subsamp],
redshift[subsamp],
vdisp[subsamp],
seed=1,
templateid=match[subsamp],
emflux=emline_flux,
mag_em=mag_em,
silent=True)
# only keep 1000 galaxies
isubsamp = np.random.choice(np.arange(len(subsamp))[magnorm_flag], 1000, replace=False)
subsamp = subsamp[isubsamp]
# save to file
fsub = h5py.File(fsource, 'w')
fsub.create_dataset('zred', data=redshift[subsamp])
fsub.create_dataset('absmag_ugriz', data=absmag_ugriz[:,subsamp])
fsub.create_dataset('r_mag_apflux', data=r_mag_apflux[subsamp])
fsub.create_dataset('r_mag_gama', data=r_mag_gama[subsamp])
for grp in gleg.keys():
group = fsub.create_group(grp)
for key in gleg[grp].keys():
group.create_dataset(key, data=gleg[grp][key][subsamp])
fsub.create_dataset('flux', data=flux[isubsamp, :])
fsub.create_dataset('wave', data=wave)
fsub.close()
# read in source spectra
source = h5py.File(fsource, 'r')
wave_s = source['wave'][...]
flux_s = source['flux'][...]
meta = {}
for k in ['r_mag_apflux', 'r_mag_gama', 'zred', 'absmag_ugriz']:
meta[k] = source[k][...]
meta['r_mag'] = UT.flux2mag(source['legacy-photo']['flux_r'][...], method='log')
source.close()
return wave_s, flux_s, meta
def nomdark_spectra(texp, emlines=True):
    ''' spectra observed during nominal dark sky for texp seconds. This will
    serve as the reference spectra for a number of tests.
'''
if emlines:
fexp = os.path.join(dir, 'exp_spectra.nominal_dark.%.fs.fits' % texp)
else:
fexp = os.path.join(dir, 'exp_spectra.nominal_dark.noemission.%.fs.fits' % texp)
if os.path.isfile(fexp):
bgs = desispec.io.read_spectra(fexp)
else:
import desisim.simexp
from desimodel.io import load_throughput
# read nominal dark sky surface brightness
wavemin = load_throughput('b').wavemin - 10.0
wavemax = load_throughput('z').wavemax + 10.0
wave = np.arange(round(wavemin, 1), wavemax, 0.8) * u.Angstrom
config = desisim.simexp._specsim_config_for_wave(wave.to('Angstrom').value, dwave_out=0.8, specsim_config_file='desi')
nominal_surface_brightness_dict = config.load_table(config.atmosphere.sky, 'surface_brightness', as_dict=True)
Isky = [wave, nominal_surface_brightness_dict['dark']]
# read in source spectra
wave_s, flux_s, _ = source_spectra(emlines=emlines)
# simulate the exposures and save to file
fdesi = FM.fakeDESIspec()
bgs = fdesi.simExposure(
wave_s,
flux_s,
exptime=texp,
airmass=1.1,
Isky=Isky,
filename=fexp)
return bgs
def exp_spectra(Isky, exptime, airmass, fexp, emlines=True, overwrite=False):
''' spectra observed at the specified
- sky surface brightness
- exposure time
- airmass
'''
    if os.path.isfile(fexp) and not overwrite:
bgs = desispec.io.read_spectra(fexp)
else:
import desisim.simexp
from desimodel.io import load_throughput
# read in source spectra
wave_s, flux_s, _ = source_spectra(emlines=emlines)
# simulate the exposures and save to file
fdesi = FM.fakeDESIspec()
bgs = fdesi.simExposure(
wave_s,
flux_s,
exptime=exptime,
airmass=airmass,
Isky=Isky,
filename=fexp)
return bgs
def run_redrock(fspec, qos='regular', overwrite=False):
''' run redrock on given spectra file
'''
frr = os.path.join(os.path.dirname(fspec),
'redrock.%s' % os.path.basename(fspec).replace('.fits', '.h5'))
fzb = os.path.join(os.path.dirname(fspec),
'zbest.%s' % os.path.basename(fspec))
if not os.path.isfile(fzb) or overwrite:
print('running redrock on %s' % os.path.basename(fspec))
script = '\n'.join([
"#!/bin/bash",
"#SBATCH -N 1",
"#SBATCH -C haswell",
"#SBATCH -q %s" % qos,
'#SBATCH -J rr_%s' % os.path.basename(fspec).replace('.fits', ''),
'#SBATCH -o _rr_%s.o' % os.path.basename(fspec).replace('.fits', ''),
"#SBATCH -t 00:10:00",
"",
"export OMP_NUM_THREADS=1",
"export OMP_PLACES=threads",
"export OMP_PROC_BIND=spread",
"",
"",
"conda activate desi",
"",
"srun -n 32 -c 2 --cpu-bind=cores rrdesi_mpi -o %s -z %s %s" % (frr, fzb, fspec),
""])
        # write script.slurm, submit it with sbatch, then remove it
f = open('script.slurm','w')
f.write(script)
f.close()
os.system('sbatch script.slurm')
os.system('rm script.slurm')
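        # NOTE: sbatch submission is asynchronous, so the zbest file returned
        # below may not exist until the batch job actually finishes.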
return fzb
def bs_coadd(waves, sbrights):
''' bullshit hack to combine wavelengths and surface brightnesses of the 3
cameras...
'''
from scipy.interpolate import interp1d
from desimodel.io import load_throughput
# read nominal dark sky surface brightness
wavemin = load_throughput('b').wavemin - 10.0
wavemax = load_throughput('z').wavemax + 10.0
outwave = np.arange(round(wavemin, 1), wavemax, 0.8)
sbrights_interp = []
for wave, sbright in zip(waves, sbrights):
fintrp = interp1d(wave, sbright, fill_value=0., bounds_error=False)
sbrights_interp.append(fintrp(outwave))
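    # stitch the cameras together by taking the maximum across the interpolated
    # arrays; out-of-range wavelengths were filled with 0, so the max simply picks
    # whichever camera covers each wavelength.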
outsbright = np.amax(sbrights_interp, axis=0)
return outwave, outsbright
if __name__=="__main__":
#_SNR_test()
#texp_factor_wavelength()
#texp_factor_wavelength(emlines=False) # without emission lines
#tnom(dchi2=40)
#tnom(dchi2=100)
#validate_spectral_pipeline()
#validate_spectral_pipeline_source()
#validate_spectral_pipeline_GAMA_source()
#validate_cmx_zsuccess_specsim_discrepancy()
#validate_cmx_zsuccess(dchi2=40.)
reproduce_vi_zsuccess()
|
the-stack_0_4675 | from typing import Dict
import numpy as np
import torch
import torch.optim as optim
from allennlp.data.dataset_readers.stanford_sentiment_tree_bank import (
StanfordSentimentTreeBankDatasetReader,
)
from allennlp.data.iterators import BucketIterator
from allennlp.data.vocabulary import Vocabulary
from allennlp.models import Model
from allennlp.modules.seq2vec_encoders import (
Seq2VecEncoder,
PytorchSeq2VecWrapper,
)
from allennlp.modules.text_field_embedders import (
TextFieldEmbedder,
BasicTextFieldEmbedder,
)
from allennlp.modules.token_embedders import Embedding
from allennlp.nn.util import get_text_field_mask
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.training.metrics import FBetaMeasure
from allennlp.training.trainer import Trainer
import torch.nn.functional as F
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.modules.input_variational_dropout import InputVariationalDropout
# EMBEDDING_DIM = 128
# HIDDEN_DIM = 128
@Model.register("lstm_classifier")
class LstmClassifier(Model):
def __init__(
self,
word_embeddings: TextFieldEmbedder,
encoder: Seq2VecEncoder,
vocab: Vocabulary,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: RegularizerApplicator = RegularizerApplicator(),
var_dropout: float = 0.35,
) -> None:
super().__init__(vocab)
# We need the embeddings to convert word IDs to their vector representations
self.word_embeddings = word_embeddings
self.encoder = encoder
self._variational_dropout = InputVariationalDropout(var_dropout)
# After converting a sequence of vectors to a single vector, we feed it into
# a fully-connected linear layer to reduce the dimension to the total number of labels.
self.linear = torch.nn.Linear(
in_features=encoder.get_output_dim(),
out_features=vocab.get_vocab_size("labels"),
)
self._accuracy = CategoricalAccuracy()
self._f1_measure = FBetaMeasure(average="macro")
self.loss_function = torch.nn.CrossEntropyLoss()
# Instances are fed to forward after batching.
# Fields are passed through arguments with the same name.
def forward(
self, tokens: Dict[str, torch.Tensor], label: torch.Tensor = None
) -> torch.Tensor:
mask = get_text_field_mask(tokens)
# Forward pass
embeddings = self.word_embeddings(tokens)
embeddings = self._variational_dropout(embeddings)
encoder_out = self.encoder(embeddings, mask)
logits = self.linear(encoder_out)
probs = F.softmax(logits, dim=-1)
output = {"logits": logits, "probs": probs}
if label is not None:
self._accuracy(logits, label)
self._f1_measure(logits, label)
output["loss"] = self.loss_function(logits, label)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
all_metrics: Dict[str, float] = {}
if not self.training:
all_metrics.update(
{"accuracy": self._accuracy.get_metric(reset=reset)}
)
all_metrics.update(
{"f1": self._f1_measure.get_metric(reset=reset)["fscore"]}
)
return all_metrics
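

# ---------------------------------------------------------------------------
# Minimal training sketch (not part of the original module): it shows how this
# LstmClassifier could be wired up with the AllenNLP 0.9-style classes imported
# above. The SST tree paths, embedding/hidden sizes and trainer settings below
# are illustrative assumptions, not values taken from the original code.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    EMBEDDING_DIM = 128
    HIDDEN_DIM = 128

    reader = StanfordSentimentTreeBankDatasetReader()
    train_dataset = reader.read("data/stanfordSentimentTreebank/trees/train.txt")
    dev_dataset = reader.read("data/stanfordSentimentTreebank/trees/dev.txt")

    # build the vocabulary from both splits so dev tokens are not all OOV
    vocab = Vocabulary.from_instances(train_dataset + dev_dataset,
                                      min_count={"tokens": 3})

    token_embedding = Embedding(num_embeddings=vocab.get_vocab_size("tokens"),
                                embedding_dim=EMBEDDING_DIM)
    word_embeddings = BasicTextFieldEmbedder({"tokens": token_embedding})
    encoder = PytorchSeq2VecWrapper(
        torch.nn.LSTM(EMBEDDING_DIM, HIDDEN_DIM, batch_first=True))

    model = LstmClassifier(word_embeddings, encoder, vocab)

    # bucket by sentence length to keep padding (and wasted compute) small
    iterator = BucketIterator(batch_size=32,
                              sorting_keys=[("tokens", "num_tokens")])
    iterator.index_with(vocab)

    trainer = Trainer(model=model,
                      optimizer=optim.Adam(model.parameters(), lr=1e-3),
                      iterator=iterator,
                      train_dataset=train_dataset,
                      validation_dataset=dev_dataset,
                      patience=10,
                      num_epochs=20)
    trainer.train()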
|
the-stack_0_4676 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019-2021 CERN.
# Copyright (C) 2019 Northwestern University.
#
# Invenio-Cli is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio module to ease the creation and management of applications."""
import os
from pathlib import Path
import click
from ..commands import Commands, ContainersCommands, InstallCommands, \
LocalCommands, RequirementsCommands, UpgradeCommands
from ..errors import InvenioCLIConfigError
from ..helpers.cli_config import CLIConfig
from ..helpers.cookiecutter_wrapper import CookiecutterWrapper
from .assets import assets
from .containers import containers
from .install import install
from .packages import packages
from .services import services
from .utils import calculate_instance_path, pass_cli_config, run_steps
@click.group()
@click.version_option()
@click.pass_context
def invenio_cli(ctx):
"""Initialize CLI context."""
invenio_cli.add_command(assets)
invenio_cli.add_command(containers)
invenio_cli.add_command(install)
invenio_cli.add_command(packages)
invenio_cli.add_command(services)
@invenio_cli.command('check-requirements')
@click.option('--development', '-d', default=False, is_flag=True,
help='Check development requirements.')
def check_requirements(development):
"""Checks the system fulfills the pre-requirements."""
click.secho("Checking pre-requirements...", fg="green")
steps = RequirementsCommands.check(development)
on_fail = "Pre requisites not met."
on_success = "All requisites are fulfilled."
run_steps(steps, on_fail, on_success)
@invenio_cli.command()
def shell():
"""Shell command."""
Commands.shell()
@invenio_cli.command()
@click.option('--debug/--no-debug', '-d/', default=False, is_flag=True,
help='Enable Flask development mode (default: disabled).')
def pyshell(debug):
"""Python shell command."""
Commands.pyshell(debug=debug)
@invenio_cli.command()
@click.argument('flavour', type=click.Choice(['RDM'], case_sensitive=False),
default='RDM', required=False)
@click.option('-t', '--template', required=False,
help='Cookiecutter path or git url to template')
@click.option('-c', '--checkout', required=False,
help='Branch, tag or commit to checkout if --template is a git url') # noqa
def init(flavour, template, checkout):
"""Initializes the application according to the chosen flavour."""
click.secho('Initializing {flavour} application...'.format(
flavour=flavour), fg='green')
template_checkout = (template, checkout)
cookiecutter_wrapper = CookiecutterWrapper(flavour, template_checkout)
try:
click.secho("Running cookiecutter...", fg='green')
project_dir = cookiecutter_wrapper.cookiecutter()
click.secho("Writing invenio-invenio_cli config file...", fg='green')
saved_replay = cookiecutter_wrapper.get_replay()
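        # the replay records the answers given to cookiecutter so they can be
        # persisted alongside the rest of the invenio-cli configuration below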
instance_path = calculate_instance_path(project_dir)
CLIConfig.write(project_dir, flavour, saved_replay, instance_path)
click.secho("Creating logs directory...", fg='green')
os.mkdir(Path(project_dir) / "logs")
except Exception as e:
click.secho(str(e), fg='red')
finally:
cookiecutter_wrapper.remove_config()
@invenio_cli.command()
@click.option('--host', '-h', default='127.0.0.1',
help='The interface to bind to.')
@click.option('--port', '-p', default=5000,
help='The port to bind to.')
@click.option('--debug/--no-debug', '-d/', default=True, is_flag=True,
help='Enable/disable debug mode including auto-reloading '
'(default: enabled).')
@click.option('--services/--no-services', '-s/-n', default=True, is_flag=True,
help='Enable/disable dockerized services (default: enabled).')
@pass_cli_config
def run(cli_config, host, port, debug, services):
"""Starts the local development server.
NOTE: this only makes sense locally so no --local option
"""
commands = LocalCommands(cli_config)
commands.run(host=host, port=str(port), debug=debug, services=services)
@invenio_cli.command()
@pass_cli_config
def destroy(cli_config):
"""Removes all associated resources (containers, images, volumes)."""
commands = Commands(cli_config)
services = ContainersCommands(cli_config)
click.secho(
"Destroying containers, volumes, virtual environment...", fg="green")
steps = commands.destroy() # Destroy virtual environment
steps.extend(services.destroy()) # Destroy services
on_fail = "Failed to destroy instance. You can destroy only services " + \
"using the services command: invenio-cli services destroy"
on_success = "Instance destroyed."
run_steps(steps, on_fail, on_success)
@invenio_cli.command()
@click.option('--script', required=True,
help='The path of custom migration script.'
)
def upgrade(script):
"""Upgrades the current instance to a newer version."""
steps = UpgradeCommands.upgrade(script)
on_fail = "Upgrade failed."
on_success = "Upgrade sucessfull."
run_steps(steps, on_fail, on_success)
|
the-stack_0_4679 | import math
import random
from external.agent import AIAgent as ExtrAIAgent
class Game():
# gap is the gap in pixels between the south Pipe and North Pipe.
def __init__(self,cvsHeight=512,cvsWidth=800,pipeHeight=242,pipeWidth=52, fgHeight=118, birdHeight=38,birdWidth=26):
self.cvsHeight=cvsHeight
self.cvsWidth=cvsWidth
self.pipeHeight=pipeHeight
self.pipeWidth=pipeWidth
self.fgHeight=fgHeight
self.birdHeight=birdHeight
self.birdWidth=birdWidth
self.gap = 85
self.bXStart=40
self.bYStart=150
        # the constant is the south Pipe position, calculated by adding the gap to the north Pipe.
self.constant=self.pipeHeight+self.gap
# the bird X and Y positions.
self.bX = self.bXStart
self.bY = self.bYStart
self.velY=0
        # gravitational acceleration applied to the bird's vertical velocity in takeAction.
self.gravity = -9.8
self.forceY=0
self.deltaForceY=300
# we initiate the players score
self.score = 0
# reward
self.reward=0
#action
self.action=0
self.actionToTake=0
self.prevAction=0
self.manual=False
self.error=False
self.fps=0
self.stateHistory=[]
self.pipe = []
self.pipe.append({'x' : self.cvsWidth,'y' : 0})
self.gameover=False
self.mAgent=ManualAgent()
self.aiAgent=AIAgent()
def moveUp(self):
self.forceY=self.deltaForceY
def reset(self):
self.pipe = []
self.pipe.append({'x' : self.cvsWidth,'y' : 0})
self.velY=0
self.bY = self.bYStart
self.score = 0
self.forceY=0
self.action=0
def getNearestPipe(self):
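        # return the index of the pipe pair whose trailing (right) edge is still
        # ahead of the bird's x position and closest to it, or -1 if there is none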
minDist=self.cvsWidth*20
nearestPole=-1
for i in range(len(self.pipe)):
point=self.pipe[i]
dstFromBird=(point['x']+self.pipeWidth)-self.bX
if dstFromBird>0 and dstFromBird<minDist:
minDist=dstFromBird
nearestPole=i
return nearestPole
def updateGameLogic(self):
for i in range(len(self.pipe)):
point=self.pipe[i]
self.pipe[i]['x']-=1
if self.pipe[i]['x']==self.cvsWidth-188:
self.pipe.append({
'x' : self.cvsWidth,
'y' : math.floor(random.random()*self.pipeHeight)-self.pipeHeight
})
            if ((self.bX+self.birdWidth>=point['x'] and self.bX<=point['x']+self.pipeWidth
                    and (self.bY<=point['y']+self.pipeHeight or self.bY+self.birdHeight>=point['y']+self.pipeHeight+self.gap))
                    or self.bY+self.birdHeight>=self.cvsHeight-self.fgHeight
                    or self.bY<=0):
self.reward=-10
self.gameover=True
break
if(point['x']==10):
self.score+=1
self.reward=10
for i in range(len(self.pipe)):
if(self.pipe[i]['x']<=-188):
del self.pipe[i]
break
def takeAction(self):
nearestPole=self.getNearestPipe()
state={
'bX':self.bX/self.cvsWidth,
'bY':self.bY/self.cvsHeight,
'pX1':self.pipe[nearestPole]['x']/self.cvsWidth,
'pX2':self.pipe[nearestPole]['x']/self.cvsWidth,
'pY1':(self.pipe[nearestPole]['y']+self.pipeHeight)/self.cvsHeight,
'pY2':(self.pipe[nearestPole]['y']+self.pipeHeight+self.gap)/self.cvsHeight,
'velY':self.velY,
'action':self.action
}
if(self.manual):
self.action=self.mAgent.getAction(state,self.reward)
else:
self.action=self.aiAgent.getAction(state,self.reward)
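        # explicit Euler step with a fixed dt of 0.015: the net vertical
        # acceleration is gravity plus the flap force when action == 1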
self.velY=self.velY+(self.gravity+self.deltaForceY*self.action)*(0.015)
self.bY-= self.velY
self.reward=0
self.action=0
def getGameState(self):
state={
'bX':self.bX,
'bY':self.bY,
'pipe':self.pipe,
'gap':self.gap,
'action':self.action,
'score':self.score,
'gameover':self.gameover,
'reward':self.reward
}
return state
class Agent():
def __init__(self):
self.action=0
def getAction(self,state,reward):
return self.action
def setNextAction(self, action):
self.action=action
class ManualAgent(Agent):
def __init__(self):
super().__init__()
def getAction(self,state,reward):
action=self.action
self.action=0
return action
class AIAgent(Agent):
def __init__(self):
super().__init__()
self.externalAgent=ExtrAIAgent()
def getAction(self,state,reward):
self.action=self.externalAgent.takeAction(state,reward)
return self.action
|
the-stack_0_4680 | import random
import time
from subprocess import Popen
# adjust tempo here
tempo = 90
lookup = {
'C': 'c',
'D': 'd',
'E': 'e',
'F': 'f',
'G': 'g',
'A': 'hey',
'B': 'b',
'Bb': 'b flat',
'Eb': 'e flat',
'Ab': 'hey flat',
'Db': 'd flat',
'Gb': 'g flat',
'C#': 'c sharp',
'F#': 'f sharp',
}
pop_sound = '/System/Library/Sounds/Pop.aiff'
tink_sound = '/System/Library/Sounds/Tink.aiff'
notes = list(lookup.keys())
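# one quarter note lasts 60 / tempo seconds (tempo is in beats per minute)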
quarter_note_duration_seconds = 60.0 / tempo
measure_quarter_notes = 4
min_num_measures = 4
max_num_measures = 4
# Process
p = None
def speak(text: str) -> None:
    global p  # keep a handle on the speech process so main() can terminate it on exit
    p = Popen(['say', text])
def metronome_sound(path: str) -> None:
Popen(['afplay', path])
def rand_note() -> str:
return notes[random.randint(0, len(notes) - 1)]
def main():
keys = [rand_note()]
i = 1
while i < 101:
        # draw a new candidate key
n = rand_note()
# skip consecutive identical keys
if keys[i - 1] != n:
keys.append(n)
i += 1
for idx, k in enumerate(keys[:100]):
measures = random.randint(min_num_measures, max_num_measures)
# print new key
print(f'<{k}> ({measures})')
speak(lookup[k])
for m in range(1, measures + 1):
if m == measures - 1:
next_key = keys[idx + 1]
next_key_spoken = lookup[next_key]
speak(f'next key is {next_key_spoken}')
print(f'The next key is {next_key}....')
print(f'Measure {m}')
for q in range(0, measure_quarter_notes):
metronome_sound(pop_sound if q > 0 else tink_sound)
print(f'* {q + 1}')
time.sleep(quarter_note_duration_seconds)
if p:
p.terminate()
if __name__ == '__main__':
main()
|
the-stack_0_4681 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import exp
import sys
import warnings
from typing import Any, Iterable, Optional, Union, overload, TYPE_CHECKING
import numpy
from pyspark import RDD, SparkContext, since
from pyspark.streaming.dstream import DStream
from pyspark.mllib.common import callMLlibFunc, _py2java, _java2py
from pyspark.mllib.linalg import _convert_to_vector
from pyspark.mllib.regression import (
LabeledPoint,
LinearModel,
_regression_train_wrapper,
StreamingLinearAlgorithm,
)
from pyspark.mllib.util import Saveable, Loader, inherit_doc
from pyspark.mllib.linalg import Vector
from pyspark.mllib.regression import LabeledPoint
if TYPE_CHECKING:
from pyspark.mllib._typing import VectorLike
__all__ = [
"LogisticRegressionModel",
"LogisticRegressionWithSGD",
"LogisticRegressionWithLBFGS",
"SVMModel",
"SVMWithSGD",
"NaiveBayesModel",
"NaiveBayes",
"StreamingLogisticRegressionWithSGD",
]
class LinearClassificationModel(LinearModel):
"""
A private abstract class representing a multiclass classification
model. The categories are represented by int values: 0, 1, 2, etc.
"""
def __init__(self, weights: Vector, intercept: float) -> None:
super(LinearClassificationModel, self).__init__(weights, intercept)
self._threshold: Optional[float] = None
@since("1.4.0")
def setThreshold(self, value: float) -> None:
"""
Sets the threshold that separates positive predictions from
negative predictions. An example with prediction score greater
than or equal to this threshold is identified as a positive,
and negative otherwise. It is used for binary classification
only.
"""
self._threshold = value
@property # type: ignore[misc]
@since("1.4.0")
def threshold(self) -> Optional[float]:
"""
Returns the threshold (if any) used for converting raw
prediction scores into 0/1 predictions. It is used for
binary classification only.
"""
return self._threshold
@since("1.4.0")
def clearThreshold(self) -> None:
"""
Clears the threshold so that `predict` will output raw
prediction scores. It is used for binary classification only.
"""
self._threshold = None
@overload
def predict(self, test: "VectorLike") -> Union[int, float]:
...
@overload
def predict(self, test: RDD["VectorLike"]) -> RDD[Union[int, float]]:
...
def predict(
self, test: Union["VectorLike", RDD["VectorLike"]]
) -> Union[RDD[Union[int, float]], Union[int, float]]:
"""
Predict values for a single data point or an RDD of points
using the model trained.
.. versionadded:: 1.4.0
"""
raise NotImplementedError
class LogisticRegressionModel(LinearClassificationModel):
"""
Classification model trained using Multinomial/Binary Logistic
Regression.
.. versionadded:: 0.9.0
Parameters
----------
weights : :py:class:`pyspark.mllib.linalg.Vector`
Weights computed for every feature.
intercept : float
Intercept computed for this model. (Only used in Binary Logistic
Regression. In Multinomial Logistic Regression, the intercepts will
not be a single value, so the intercepts will be part of the
weights.)
numFeatures : int
The dimension of the features.
numClasses : int
The number of possible outcomes for k classes classification problem
in Multinomial Logistic Regression. By default, it is binary
logistic regression so numClasses will be set to 2.
Examples
--------
>>> from pyspark.mllib.linalg import SparseVector
>>> data = [
... LabeledPoint(0.0, [0.0, 1.0]),
... LabeledPoint(1.0, [1.0, 0.0]),
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(data), iterations=10)
>>> lrm.predict([1.0, 0.0])
1
>>> lrm.predict([0.0, 1.0])
0
>>> lrm.predict(sc.parallelize([[1.0, 0.0], [0.0, 1.0]])).collect()
[1, 0]
>>> lrm.clearThreshold()
>>> lrm.predict([0.0, 1.0])
0.279...
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(sparse_data), iterations=10)
>>> lrm.predict(numpy.array([0.0, 1.0]))
1
>>> lrm.predict(numpy.array([1.0, 0.0]))
0
>>> lrm.predict(SparseVector(2, {1: 1.0}))
1
>>> lrm.predict(SparseVector(2, {0: 1.0}))
0
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> lrm.save(sc, path)
>>> sameModel = LogisticRegressionModel.load(sc, path)
>>> sameModel.predict(numpy.array([0.0, 1.0]))
1
>>> sameModel.predict(SparseVector(2, {0: 1.0}))
0
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except BaseException:
... pass
>>> multi_class_data = [
... LabeledPoint(0.0, [0.0, 1.0, 0.0]),
... LabeledPoint(1.0, [1.0, 0.0, 0.0]),
... LabeledPoint(2.0, [0.0, 0.0, 1.0])
... ]
>>> data = sc.parallelize(multi_class_data)
>>> mcm = LogisticRegressionWithLBFGS.train(data, iterations=10, numClasses=3)
>>> mcm.predict([0.0, 0.5, 0.0])
0
>>> mcm.predict([0.8, 0.0, 0.0])
1
>>> mcm.predict([0.0, 0.0, 0.3])
2
"""
def __init__(
self, weights: Vector, intercept: float, numFeatures: int, numClasses: int
) -> None:
super(LogisticRegressionModel, self).__init__(weights, intercept)
self._numFeatures = int(numFeatures)
self._numClasses = int(numClasses)
self._threshold = 0.5
if self._numClasses == 2:
self._dataWithBiasSize = None
self._weightsMatrix = None
else:
self._dataWithBiasSize = self._coeff.size // ( # type: ignore[attr-defined]
self._numClasses - 1
)
self._weightsMatrix = self._coeff.toArray().reshape(
self._numClasses - 1, self._dataWithBiasSize
)
@property # type: ignore[misc]
@since("1.4.0")
def numFeatures(self) -> int:
"""
Dimension of the features.
"""
return self._numFeatures
@property # type: ignore[misc]
@since("1.4.0")
def numClasses(self) -> int:
"""
Number of possible outcomes for k classes classification problem
in Multinomial Logistic Regression.
"""
return self._numClasses
@overload
def predict(self, x: "VectorLike") -> Union[int, float]:
...
@overload
def predict(self, x: RDD["VectorLike"]) -> RDD[Union[int, float]]:
...
def predict(
self, x: Union["VectorLike", RDD["VectorLike"]]
) -> Union[RDD[Union[int, float]], Union[int, float]]:
"""
Predict values for a single data point or an RDD of points
using the model trained.
.. versionadded:: 0.9.0
"""
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
x = _convert_to_vector(x)
if self.numClasses == 2:
margin = self.weights.dot(x) + self._intercept # type: ignore[attr-defined]
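            # numerically stable logistic: keep the exp() argument non-positive
            # to avoid overflow for large |margin|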
if margin > 0:
prob = 1 / (1 + exp(-margin))
else:
exp_margin = exp(margin)
prob = exp_margin / (1 + exp_margin)
if self._threshold is None:
return prob
else:
return 1 if prob > self._threshold else 0
else:
assert self._weightsMatrix is not None
best_class = 0
max_margin = 0.0
if x.size + 1 == self._dataWithBiasSize: # type: ignore[attr-defined]
for i in range(0, self._numClasses - 1):
margin = (
x.dot(self._weightsMatrix[i][0 : x.size]) # type: ignore[attr-defined]
+ self._weightsMatrix[i][x.size] # type: ignore[attr-defined]
)
if margin > max_margin:
max_margin = margin
best_class = i + 1
else:
for i in range(0, self._numClasses - 1):
margin = x.dot(self._weightsMatrix[i]) # type: ignore[attr-defined]
if margin > max_margin:
max_margin = margin
best_class = i + 1
return best_class
@since("1.4.0")
def save(self, sc: SparkContext, path: str) -> None:
"""
Save this model to the given path.
"""
assert sc._jvm is not None
java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel(
_py2java(sc, self._coeff), self.intercept, self.numFeatures, self.numClasses
)
java_model.save(sc._jsc.sc(), path)
@classmethod
@since("1.4.0")
def load(cls, sc: SparkContext, path: str) -> "LogisticRegressionModel":
"""
Load a model from the given path.
"""
assert sc._jvm is not None
java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel.load(
sc._jsc.sc(), path
)
weights = _java2py(sc, java_model.weights())
intercept = java_model.intercept()
numFeatures = java_model.numFeatures()
numClasses = java_model.numClasses()
threshold = java_model.getThreshold().get()
model = LogisticRegressionModel(weights, intercept, numFeatures, numClasses)
model.setThreshold(threshold)
return model
def __repr__(self) -> str:
return (
"pyspark.mllib.LogisticRegressionModel: intercept = {}, "
"numFeatures = {}, numClasses = {}, threshold = {}"
).format(self._intercept, self._numFeatures, self._numClasses, self._threshold)
class LogisticRegressionWithSGD:
"""
Train a classification model for Binary Logistic Regression using Stochastic Gradient Descent.
.. versionadded:: 0.9.0
.. deprecated:: 2.0.0
Use ml.classification.LogisticRegression or LogisticRegressionWithLBFGS.
"""
@classmethod
def train(
cls,
data: RDD[LabeledPoint],
iterations: int = 100,
step: float = 1.0,
miniBatchFraction: float = 1.0,
initialWeights: Optional["VectorLike"] = None,
regParam: float = 0.01,
regType: str = "l2",
intercept: bool = False,
validateData: bool = True,
convergenceTol: float = 0.001,
) -> LogisticRegressionModel:
"""
Train a logistic regression model on the given data.
.. versionadded:: 0.9.0
Parameters
----------
data : :py:class:`pyspark.RDD`
The training data, an RDD of :py:class:`pyspark.mllib.regression.LabeledPoint`.
iterations : int, optional
The number of iterations.
(default: 100)
step : float, optional
The step parameter used in SGD.
(default: 1.0)
miniBatchFraction : float, optional
Fraction of data to be used for each SGD iteration.
(default: 1.0)
initialWeights : :py:class:`pyspark.mllib.linalg.Vector` or convertible, optional
The initial weights.
(default: None)
regParam : float, optional
The regularizer parameter.
(default: 0.01)
regType : str, optional
The type of regularizer used for training our model.
Supported values:
- "l1" for using L1 regularization
- "l2" for using L2 regularization (default)
- None for no regularization
intercept : bool, optional
Boolean parameter which indicates the use or not of the
augmented representation for training data (i.e., whether bias
features are activated or not).
(default: False)
validateData : bool, optional
Boolean parameter which indicates if the algorithm should
validate data before training.
(default: True)
convergenceTol : float, optional
A condition which decides iteration termination.
(default: 0.001)
"""
warnings.warn(
"Deprecated in 2.0.0. Use ml.classification.LogisticRegression or "
"LogisticRegressionWithLBFGS.",
FutureWarning,
)
def train(rdd: RDD[LabeledPoint], i: Vector) -> Iterable[Any]:
return callMLlibFunc(
"trainLogisticRegressionModelWithSGD",
rdd,
int(iterations),
float(step),
float(miniBatchFraction),
i,
float(regParam),
regType,
bool(intercept),
bool(validateData),
float(convergenceTol),
)
return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
class LogisticRegressionWithLBFGS:
"""
Train a classification model for Multinomial/Binary Logistic Regression
using Limited-memory BFGS.
Standard feature scaling and L2 regularization are used by default.
.. versionadded:: 1.2.0
"""
@classmethod
def train(
cls,
data: RDD[LabeledPoint],
iterations: int = 100,
initialWeights: Optional["VectorLike"] = None,
regParam: float = 0.0,
regType: str = "l2",
intercept: bool = False,
corrections: int = 10,
tolerance: float = 1e-6,
validateData: bool = True,
numClasses: int = 2,
) -> LogisticRegressionModel:
"""
Train a logistic regression model on the given data.
.. versionadded:: 1.2.0
Parameters
----------
data : :py:class:`pyspark.RDD`
The training data, an RDD of :py:class:`pyspark.mllib.regression.LabeledPoint`.
iterations : int, optional
The number of iterations.
(default: 100)
initialWeights : :py:class:`pyspark.mllib.linalg.Vector` or convertible, optional
The initial weights.
(default: None)
regParam : float, optional
The regularizer parameter.
            (default: 0.0)
regType : str, optional
The type of regularizer used for training our model.
Supported values:
- "l1" for using L1 regularization
- "l2" for using L2 regularization (default)
- None for no regularization
intercept : bool, optional
Boolean parameter which indicates the use or not of the
augmented representation for training data (i.e., whether bias
features are activated or not).
(default: False)
corrections : int, optional
The number of corrections used in the LBFGS update.
If a known updater is used for binary classification,
it calls the ml implementation and this parameter will
have no effect. (default: 10)
tolerance : float, optional
The convergence tolerance of iterations for L-BFGS.
(default: 1e-6)
validateData : bool, optional
Boolean parameter which indicates if the algorithm should
validate data before training.
(default: True)
numClasses : int, optional
The number of classes (i.e., outcomes) a label can take in
Multinomial Logistic Regression.
(default: 2)
Examples
--------
>>> data = [
... LabeledPoint(0.0, [0.0, 1.0]),
... LabeledPoint(1.0, [1.0, 0.0]),
... ]
>>> lrm = LogisticRegressionWithLBFGS.train(sc.parallelize(data), iterations=10)
>>> lrm.predict([1.0, 0.0])
1
>>> lrm.predict([0.0, 1.0])
0
"""
def train(rdd: RDD[LabeledPoint], i: Vector) -> Iterable[Any]:
return callMLlibFunc(
"trainLogisticRegressionModelWithLBFGS",
rdd,
int(iterations),
i,
float(regParam),
regType,
bool(intercept),
int(corrections),
float(tolerance),
bool(validateData),
int(numClasses),
)
if initialWeights is None:
if numClasses == 2:
initialWeights = [0.0] * len(data.first().features)
else:
if intercept:
initialWeights = [0.0] * (len(data.first().features) + 1) * (numClasses - 1)
else:
initialWeights = [0.0] * len(data.first().features) * (numClasses - 1)
return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
class SVMModel(LinearClassificationModel):
"""
Model for Support Vector Machines (SVMs).
.. versionadded:: 0.9.0
Parameters
----------
weights : :py:class:`pyspark.mllib.linalg.Vector`
Weights computed for every feature.
intercept : float
Intercept computed for this model.
Examples
--------
>>> from pyspark.mllib.linalg import SparseVector
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(1.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>> svm = SVMWithSGD.train(sc.parallelize(data), iterations=10)
>>> svm.predict([1.0])
1
>>> svm.predict(sc.parallelize([[1.0]])).collect()
[1]
>>> svm.clearThreshold()
>>> svm.predict(numpy.array([1.0]))
1.44...
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: -1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>> svm = SVMWithSGD.train(sc.parallelize(sparse_data), iterations=10)
>>> svm.predict(SparseVector(2, {1: 1.0}))
1
>>> svm.predict(SparseVector(2, {0: -1.0}))
0
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> svm.save(sc, path)
>>> sameModel = SVMModel.load(sc, path)
>>> sameModel.predict(SparseVector(2, {1: 1.0}))
1
>>> sameModel.predict(SparseVector(2, {0: -1.0}))
0
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except BaseException:
... pass
"""
def __init__(self, weights: Vector, intercept: float) -> None:
super(SVMModel, self).__init__(weights, intercept)
self._threshold = 0.0
@overload
def predict(self, x: "VectorLike") -> Union[int, float]:
...
@overload
def predict(self, x: RDD["VectorLike"]) -> RDD[Union[int, float]]:
...
def predict(
self, x: Union["VectorLike", RDD["VectorLike"]]
) -> Union[RDD[Union[int, float]], Union[int, float]]:
"""
Predict values for a single data point or an RDD of points
using the model trained.
.. versionadded:: 0.9.0
"""
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
x = _convert_to_vector(x)
margin = self.weights.dot(x) + self.intercept # type: ignore[attr-defined]
if self._threshold is None:
return margin
else:
return 1 if margin > self._threshold else 0
@since("1.4.0")
def save(self, sc: SparkContext, path: str) -> None:
"""
Save this model to the given path.
"""
assert sc._jvm is not None
java_model = sc._jvm.org.apache.spark.mllib.classification.SVMModel(
_py2java(sc, self._coeff), self.intercept
)
java_model.save(sc._jsc.sc(), path)
@classmethod
@since("1.4.0")
def load(cls, sc: SparkContext, path: str) -> "SVMModel":
"""
Load a model from the given path.
"""
assert sc._jvm is not None
java_model = sc._jvm.org.apache.spark.mllib.classification.SVMModel.load(sc._jsc.sc(), path)
weights = _java2py(sc, java_model.weights())
intercept = java_model.intercept()
threshold = java_model.getThreshold().get()
model = SVMModel(weights, intercept)
model.setThreshold(threshold)
return model
class SVMWithSGD:
"""
Train a Support Vector Machine (SVM) using Stochastic Gradient Descent.
.. versionadded:: 0.9.0
"""
@classmethod
def train(
cls,
data: RDD[LabeledPoint],
iterations: int = 100,
step: float = 1.0,
regParam: float = 0.01,
miniBatchFraction: float = 1.0,
initialWeights: Optional["VectorLike"] = None,
regType: str = "l2",
intercept: bool = False,
validateData: bool = True,
convergenceTol: float = 0.001,
) -> SVMModel:
"""
Train a support vector machine on the given data.
.. versionadded:: 0.9.0
Parameters
----------
data : :py:class:`pyspark.RDD`
The training data, an RDD of :py:class:`pyspark.mllib.regression.LabeledPoint`.
iterations : int, optional
The number of iterations.
(default: 100)
step : float, optional
The step parameter used in SGD.
(default: 1.0)
regParam : float, optional
The regularizer parameter.
(default: 0.01)
miniBatchFraction : float, optional
Fraction of data to be used for each SGD iteration.
(default: 1.0)
initialWeights : :py:class:`pyspark.mllib.linalg.Vector` or convertible, optional
The initial weights.
(default: None)
regType : str, optional
The type of regularizer used for training our model.
Allowed values:
- "l1" for using L1 regularization
- "l2" for using L2 regularization (default)
- None for no regularization
intercept : bool, optional
Boolean parameter which indicates the use or not of the
augmented representation for training data (i.e. whether bias
features are activated or not).
(default: False)
validateData : bool, optional
Boolean parameter which indicates if the algorithm should
validate data before training.
(default: True)
convergenceTol : float, optional
A condition which decides iteration termination.
(default: 0.001)
"""
def train(rdd: RDD[LabeledPoint], i: Vector) -> Iterable[Any]:
return callMLlibFunc(
"trainSVMModelWithSGD",
rdd,
int(iterations),
float(step),
float(regParam),
float(miniBatchFraction),
i,
regType,
bool(intercept),
bool(validateData),
float(convergenceTol),
)
return _regression_train_wrapper(train, SVMModel, data, initialWeights)
@inherit_doc
class NaiveBayesModel(Saveable, Loader["NaiveBayesModel"]):
"""
Model for Naive Bayes classifiers.
.. versionadded:: 0.9.0
Parameters
----------
labels : :py:class:`numpy.ndarray`
List of labels.
pi : :py:class:`numpy.ndarray`
Log of class priors, whose dimension is C, number of labels.
theta : :py:class:`numpy.ndarray`
Log of class conditional probabilities, whose dimension is C-by-D,
where D is number of features.
Examples
--------
>>> from pyspark.mllib.linalg import SparseVector
>>> data = [
... LabeledPoint(0.0, [0.0, 0.0]),
... LabeledPoint(0.0, [0.0, 1.0]),
... LabeledPoint(1.0, [1.0, 0.0]),
... ]
>>> model = NaiveBayes.train(sc.parallelize(data))
>>> model.predict(numpy.array([0.0, 1.0]))
0.0
>>> model.predict(numpy.array([1.0, 0.0]))
1.0
>>> model.predict(sc.parallelize([[1.0, 0.0]])).collect()
[1.0]
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {1: 0.0})),
... LabeledPoint(0.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {0: 1.0}))
... ]
>>> model = NaiveBayes.train(sc.parallelize(sparse_data))
>>> model.predict(SparseVector(2, {1: 1.0}))
0.0
>>> model.predict(SparseVector(2, {0: 1.0}))
1.0
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = NaiveBayesModel.load(sc, path)
>>> sameModel.predict(SparseVector(2, {0: 1.0})) == model.predict(SparseVector(2, {0: 1.0}))
True
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
"""
def __init__(self, labels: numpy.ndarray, pi: numpy.ndarray, theta: numpy.ndarray) -> None:
self.labels = labels
self.pi = pi
self.theta = theta
@overload
def predict(self, x: "VectorLike") -> numpy.float64:
...
@overload
def predict(self, x: RDD["VectorLike"]) -> RDD[numpy.float64]:
...
@since("0.9.0")
def predict(
self, x: Union["VectorLike", RDD["VectorLike"]]
) -> Union[numpy.float64, RDD[numpy.float64]]:
"""
Return the most likely class for a data vector
or an RDD of vectors
"""
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
x = _convert_to_vector(x)
return self.labels[
numpy.argmax(self.pi + x.dot(self.theta.transpose())) # type: ignore[attr-defined]
]
def save(self, sc: SparkContext, path: str) -> None:
"""
Save this model to the given path.
"""
assert sc._jvm is not None
java_labels = _py2java(sc, self.labels.tolist())
java_pi = _py2java(sc, self.pi.tolist())
java_theta = _py2java(sc, self.theta.tolist())
java_model = sc._jvm.org.apache.spark.mllib.classification.NaiveBayesModel(
java_labels, java_pi, java_theta
)
java_model.save(sc._jsc.sc(), path)
@classmethod
@since("1.4.0")
def load(cls, sc: SparkContext, path: str) -> "NaiveBayesModel":
"""
Load a model from the given path.
"""
assert sc._jvm is not None
java_model = sc._jvm.org.apache.spark.mllib.classification.NaiveBayesModel.load(
sc._jsc.sc(), path
)
# Can not unpickle array.array from Pickle in Python3 with "bytes"
py_labels = _java2py(sc, java_model.labels(), "latin1")
py_pi = _java2py(sc, java_model.pi(), "latin1")
py_theta = _java2py(sc, java_model.theta(), "latin1")
return NaiveBayesModel(py_labels, py_pi, numpy.array(py_theta))
class NaiveBayes:
"""
Train a Multinomial Naive Bayes model.
.. versionadded:: 0.9.0
"""
@classmethod
def train(cls, data: RDD[LabeledPoint], lambda_: float = 1.0) -> NaiveBayesModel:
"""
Train a Naive Bayes model given an RDD of (label, features)
vectors.
This is the `Multinomial NB <http://tinyurl.com/lsdw6p>`_ which
can handle all kinds of discrete data. For example, by
converting documents into TF-IDF vectors, it can be used for
document classification. By making every vector a 0-1 vector,
it can also be used as `Bernoulli NB <http://tinyurl.com/p7c96j6>`_.
The input feature values must be nonnegative.
.. versionadded:: 0.9.0
Parameters
----------
data : :py:class:`pyspark.RDD`
The training data, an RDD of :py:class:`pyspark.mllib.regression.LabeledPoint`.
lambda\\_ : float, optional
The smoothing parameter.
(default: 1.0)
"""
first = data.first()
if not isinstance(first, LabeledPoint):
raise ValueError("`data` should be an RDD of LabeledPoint")
labels, pi, theta = callMLlibFunc("trainNaiveBayesModel", data, lambda_)
return NaiveBayesModel(labels.toArray(), pi.toArray(), numpy.array(theta))
@inherit_doc
class StreamingLogisticRegressionWithSGD(StreamingLinearAlgorithm):
"""
Train or predict a logistic regression model on streaming data.
Training uses Stochastic Gradient Descent to update the model based on
each new batch of incoming data from a DStream.
Each batch of data is assumed to be an RDD of LabeledPoints.
The number of data points per batch can vary, but the number
of features must be constant. An initial weight
vector must be provided.
.. versionadded:: 1.5.0
Parameters
----------
stepSize : float, optional
Step size for each iteration of gradient descent.
(default: 0.1)
numIterations : int, optional
Number of iterations run for each batch of data.
(default: 50)
miniBatchFraction : float, optional
Fraction of each batch of data to use for updates.
(default: 1.0)
regParam : float, optional
L2 Regularization parameter.
(default: 0.0)
convergenceTol : float, optional
Value used to determine when to terminate iterations.
(default: 0.001)
"""
def __init__(
self,
stepSize: float = 0.1,
numIterations: int = 50,
miniBatchFraction: float = 1.0,
regParam: float = 0.0,
convergenceTol: float = 0.001,
) -> None:
self.stepSize = stepSize
self.numIterations = numIterations
self.regParam = regParam
self.miniBatchFraction = miniBatchFraction
self.convergenceTol = convergenceTol
self._model: Optional[LogisticRegressionModel] = None
super(StreamingLogisticRegressionWithSGD, self).__init__(model=self._model)
@since("1.5.0")
def setInitialWeights(
self, initialWeights: "VectorLike"
) -> "StreamingLogisticRegressionWithSGD":
"""
Set the initial value of weights.
This must be set before running trainOn and predictOn.
"""
initialWeights = _convert_to_vector(initialWeights)
# LogisticRegressionWithSGD does only binary classification.
self._model = LogisticRegressionModel(
initialWeights, 0, initialWeights.size, 2 # type: ignore[attr-defined]
)
return self
@since("1.5.0")
def trainOn(self, dstream: "DStream[LabeledPoint]") -> None:
"""Train the model on the incoming dstream."""
self._validate(dstream)
def update(rdd: RDD[LabeledPoint]) -> None:
# LogisticRegressionWithSGD.train raises an error for an empty RDD.
if not rdd.isEmpty():
self._model = LogisticRegressionWithSGD.train(
rdd,
self.numIterations,
self.stepSize,
self.miniBatchFraction,
self._model.weights, # type: ignore[union-attr]
regParam=self.regParam,
convergenceTol=self.convergenceTol,
)
dstream.foreachRDD(update)
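# Illustrative streaming sketch (not in the original source): assumes a StreamingContext
# `ssc` and DStreams of LabeledPoint batches named `training_stream` / `test_stream`
# (hypothetical names). setInitialWeights must be called before trainOn/predictOn, as
# noted in the docstring above.
#
#   model = StreamingLogisticRegressionWithSGD(stepSize=0.1, numIterations=25)
#   model.setInitialWeights([0.0, 0.0])
#   model.trainOn(training_stream)
#   model.predictOnValues(test_stream.map(lambda lp: (lp.label, lp.features))).pprint()
#   ssc.start(); ssc.awaitTermination()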
def _test() -> None:
import doctest
from pyspark.sql import SparkSession
import pyspark.mllib.classification
globs = pyspark.mllib.classification.__dict__.copy()
spark = (
SparkSession.builder.master("local[4]").appName("mllib.classification tests").getOrCreate()
)
globs["sc"] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
the-stack_0_4682 | # Ref: https://github.com/doliom/basic-petri-net
from place import P
from transition import T
from model import Model
from arc import Arc
def main():
    # places with initial marking: 3 tokens in Wait, 1 in Done, 1 in Free
p1 = P("p1", 3) # Wait
p2 = P("p2", 0) # Inside
p3 = P("p3", 1) # Done
p4 = P("p4", 1) # Free
p5 = P("p5", 0) # Busy
p6 = P("p6", 0) # Docu
#transitions
t1 = T("t1") # Start
t2 = T("t2") # Change
t3 = T("t3") # End
#arcs
arc1 = Arc(name="from p1 to t1", prevP=p1, nextT=t1, n=1)
arc2 = Arc(name="from t1 to p2", nextP=p2, n=1)
arc3 = Arc(name="from p4 to t1", prevP=p4, nextT=t1, n=1)
arc4 = Arc(name="from t1 to p5", nextP=p5, n=1)
arc5 = Arc(name="from t3 to p4", nextP=p4, n=1)
arc6 = Arc(name="from p6 to t3", prevP=p6, nextT=t3, n=1)
arc7 = Arc(name="from p2 to t2", prevP=p2, nextT=t2, n=1)
arc8 = Arc(name="from t2 to p3", nextP=p3, n=1)
arc9 = Arc(name="from p5 to t2", prevP=p5, nextT=t2, n=1)
arc10 = Arc(name="from t2 to p6", nextP=p6, n=1)
t1.inArcs = [arc1, arc3]
t1.outArcs = [arc2, arc4]
t2.inArcs = [arc7, arc9]
t2.outArcs = [arc8, arc10]
t3.inArcs = [arc6]
t3.outArcs = [arc5]
places = [p1, p2, p3, p4, p5, p6]
transitions = [t1, t2, t3]
petriNet = Model(places, transitions)
petriNet.simulate(100)
# t1.inArcs = [arc1]
# t1.outArcs = [arc2, arc3]
# t2.inArcs = [arc4, arc6]
# t2.outArcs = [arc5, arc7]
# t3.inArcs = [arc8]
# t3.outArcs = [arc9]
# t4.inArcs = [arc10]
# t4.outArcs = [arc11]
def printInit(places):
print("Init state")
for p in places:
print("Position: {} -------- Markers: {}".format(p.name, p.tokens))
print("\n")
if __name__ == "__main__":
main() |
the-stack_0_4683 | import asyncio
from concurrent.futures import Executor, ProcessPoolExecutor
from functools import partial
import logging
from multiprocessing import freeze_support
from aiohttp import web
import aiohttp_cors
import black
import click
# This is used internally by tests to shut down the server prematurely
_stop_signal = asyncio.Event()
VERSION_HEADER = "X-Protocol-Version"
LINE_LENGTH_HEADER = "X-Line-Length"
PYTHON_VARIANT_HEADER = "X-Python-Variant"
SKIP_STRING_NORMALIZATION_HEADER = "X-Skip-String-Normalization"
SKIP_NUMERIC_UNDERSCORE_NORMALIZATION_HEADER = "X-Skip-Numeric-Underscore-Normalization"
FAST_OR_SAFE_HEADER = "X-Fast-Or-Safe"
BLACK_HEADERS = [
VERSION_HEADER,
LINE_LENGTH_HEADER,
PYTHON_VARIANT_HEADER,
SKIP_STRING_NORMALIZATION_HEADER,
SKIP_NUMERIC_UNDERSCORE_NORMALIZATION_HEADER,
FAST_OR_SAFE_HEADER,
]
@click.command(context_settings={"help_option_names": ["-h", "--help"]})
@click.option(
"--bind-host", type=str, help="Address to bind the server to.", default="localhost"
)
@click.option("--bind-port", type=int, help="Port to listen on", default=45484)
@click.version_option(version=black.__version__)
def main(bind_host: str, bind_port: int) -> None:
logging.basicConfig(level=logging.INFO)
app = make_app()
ver = black.__version__
black.out(f"blackd version {ver} listening on {bind_host} port {bind_port}")
web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None)
def make_app() -> web.Application:
app = web.Application()
executor = ProcessPoolExecutor()
cors = aiohttp_cors.setup(app)
resource = cors.add(app.router.add_resource("/"))
cors.add(
resource.add_route("POST", partial(handle, executor=executor)),
{
"*": aiohttp_cors.ResourceOptions(
allow_headers=(*BLACK_HEADERS, "Content-Type"), expose_headers="*"
)
},
)
return app
async def handle(request: web.Request, executor: Executor) -> web.Response:
try:
if request.headers.get(VERSION_HEADER, "1") != "1":
return web.Response(
status=501, text="This server only supports protocol version 1"
)
try:
line_length = int(
request.headers.get(LINE_LENGTH_HEADER, black.DEFAULT_LINE_LENGTH)
)
except ValueError:
return web.Response(status=400, text="Invalid line length header value")
py36 = False
pyi = False
if PYTHON_VARIANT_HEADER in request.headers:
value = request.headers[PYTHON_VARIANT_HEADER]
if value == "pyi":
pyi = True
else:
try:
major, *rest = value.split(".")
if int(major) == 3 and len(rest) > 0:
if int(rest[0]) >= 6:
py36 = True
except ValueError:
return web.Response(
status=400, text=f"Invalid value for {PYTHON_VARIANT_HEADER}"
)
skip_string_normalization = bool(
request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False)
)
skip_numeric_underscore_normalization = bool(
request.headers.get(SKIP_NUMERIC_UNDERSCORE_NORMALIZATION_HEADER, False)
)
fast = False
if request.headers.get(FAST_OR_SAFE_HEADER, "safe") == "fast":
fast = True
mode = black.FileMode.from_configuration(
py36=py36,
pyi=pyi,
skip_string_normalization=skip_string_normalization,
skip_numeric_underscore_normalization=skip_numeric_underscore_normalization,
)
req_bytes = await request.content.read()
charset = request.charset if request.charset is not None else "utf8"
req_str = req_bytes.decode(charset)
loop = asyncio.get_event_loop()
formatted_str = await loop.run_in_executor(
executor,
partial(
black.format_file_contents,
req_str,
line_length=line_length,
fast=fast,
mode=mode,
),
)
return web.Response(
content_type=request.content_type, charset=charset, text=formatted_str
)
except black.NothingChanged:
return web.Response(status=204)
except black.InvalidInput as e:
return web.Response(status=400, text=str(e))
except Exception as e:
logging.exception("Exception during handling a request")
return web.Response(status=500, text=str(e))
def patched_main() -> None:
freeze_support()
black.patch_click()
main()
if __name__ == "__main__":
patched_main()
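# Illustrative client sketch (not part of this module): formatting a snippet by POSTing it
# to a running blackd instance. The host/port are the defaults defined above, and
# X-Line-Length is one of the BLACK_HEADERS this server understands; everything else is a
# placeholder.
#
#   import urllib.request
#   req = urllib.request.Request(
#       "http://localhost:45484",
#       data=b"print ( 'hello' )\n",
#       headers={"X-Line-Length": "88"},
#       method="POST",
#   )
#   with urllib.request.urlopen(req) as resp:
#       print(resp.read().decode())   # reformatted source; HTTP 204 means "already formatted"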
|
the-stack_0_4693 | from collections.abc import Iterable
from itertools import repeat
from typing import Callable, TypeVar
T = TypeVar('T')
def count_horizontal_vertical_overlaps(
lines: list[tuple[tuple[int, int], tuple[int, int]]]) -> int:
vents: dict[tuple[int, int], bool] = {}
overlaps = 0
def add(start: int, end: int, mk_coord: Callable[[int], tuple[int, int]]
) -> None:
nonlocal overlaps
for i in range(min(start, end), max(start, end) + 1):
c = mk_coord(i)
if c in vents:
if not vents[c]:
overlaps += 1
vents[c] = True
else:
vents[c] = False
for (x1, y1), (x2, y2) in lines:
if x1 == x2:
add(y1, y2, lambda y: (x1, y))
elif y1 == y2:
add(x1, x2, lambda x: (x, y1))
return overlaps
def mk_range(start: int, end: int) -> Iterable[int]:
if start < end:
return range(start, end + 1)
if start > end:
return range(start, end - 1, -1)
return repeat(start)
def count_all_overlaps(
lines: list[tuple[tuple[int, int], tuple[int, int]]]) -> int:
vents: dict[tuple[int, int], bool] = {}
overlaps = 0
for (x1, y1), (x2, y2) in lines:
for c in zip(mk_range(x1, x2), mk_range(y1, y2)):
if c in vents:
if not vents[c]:
overlaps += 1
vents[c] = True
else:
vents[c] = False
return overlaps
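# Tiny hand-checkable sketch (not part of the original script):
#
#   count_horizontal_vertical_overlaps([((0, 0), (0, 3)), ((0, 2), (0, 5))])  # -> 2
#   count_all_overlaps([((0, 0), (2, 2)), ((2, 0), (0, 2))])                  # -> 1 (cross at (1, 1))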
def pair(it: Iterable[T]) -> tuple[T, T]:
a, b = it
return a, b
with open('input.txt') as f:
lines = [pair(pair(map(int, c.split(',')))
for c in l.rstrip().split(' -> ')) for l in f]
print(count_horizontal_vertical_overlaps(lines))
print(count_all_overlaps(lines))
|
the-stack_0_4694 | from typing import Dict, Optional, Tuple
import numpy as np
from stable_baselines.common import vec_env
from imitation.util import rollout
class Buffer:
"""A FIFO ring buffer for NumPy arrays of a fixed shape and dtype.
Supports random sampling with replacement.
"""
capacity: int
"""The number of data samples that can be stored in this buffer."""
sample_shapes: Dict[str, Tuple[int, ...]]
"""The shapes of each data sample stored in this buffer."""
_arrays: Dict[str, np.ndarray]
"""The underlying NumPy arrays (which actually store the data)."""
_n_data: int
"""The number of samples currently stored in this buffer.
An integer in `range(0, self.capacity + 1)`. This attribute is the return
value of `self.__len__`.
"""
_idx: int
"""The index of the first row that new data should be written to.
An integer in `range(0, self.capacity)`.
"""
def __init__(self, capacity: int,
sample_shapes: Dict[str, Tuple[int, ...]],
dtypes: Dict[str, np.dtype]):
"""Constructs a Buffer.
Args:
capacity: The number of samples that can be stored.
sample_shapes: A dictionary mapping string keys to the shape of
samples associated with that key.
dtypes (`np.dtype`-like): A dictionary mapping string keys to the dtype
of samples associated with that key.
Raises:
KeyError: `sample_shapes` and `dtypes` have different keys.
"""
if sample_shapes.keys() != dtypes.keys():
raise KeyError("sample_shape and dtypes keys don't match")
self.capacity = capacity
self.sample_shapes = {k: tuple(shape) for k, shape in sample_shapes.items()}
self._arrays = {k: np.zeros((capacity,) + shape, dtype=dtypes[k])
for k, shape in self.sample_shapes.items()}
self._n_data = 0
self._idx = 0
@classmethod
def from_data(cls, data: Dict[str, np.ndarray]) -> "Buffer":
"""Constructs and return a Buffer containing only the provided data.
The returned Buffer is at full capacity and ready for sampling.
Args:
data: A dictionary mapping keys to data arrays. The arrays may differ
in their shape, but should agree in the first axis.
Raises:
ValueError: `data` is empty.
ValueError: `data` has items mapping to arrays differing in the
length of their first axis.
"""
capacities = [arr.shape[0] for arr in data.values()]
capacities = np.unique(capacities)
if len(data) == 0:
raise ValueError("No keys in data.")
if len(capacities) > 1:
raise ValueError("Keys map to different length values")
capacity = capacities[0]
sample_shapes = {k: arr.shape[1:] for k, arr in data.items()}
dtypes = {k: arr.dtype for k, arr in data.items()}
buf = cls(capacity, sample_shapes, dtypes)
buf.store(data)
return buf
def store(self, data: Dict[str, np.ndarray]) -> None:
"""Stores new data samples, replacing old samples with FIFO priority.
Args:
data: A dictionary mapping keys `k` to arrays with shape
`(n_samples,) + self.sample_shapes[k]`, where `n_samples` is less
than or equal to `self.capacity`.
Raises:
ValueError: `data` is empty.
ValueError: If `n_samples` is greater than `self.capacity`.
ValueError: data is the wrong shape.
"""
expected_keys = set(self.sample_shapes.keys())
missing_keys = expected_keys.difference(data.keys())
unexpected_keys = set(data.keys()).difference(expected_keys)
if len(missing_keys) > 0:
raise ValueError(f"Missing keys {missing_keys}")
if len(unexpected_keys) > 0:
raise ValueError(f"Unexpected keys {unexpected_keys}")
n_samples = [arr.shape[0] for arr in data.values()]
n_samples = np.unique(n_samples)
if len(n_samples) > 1:
raise ValueError("Keys map to different length values.")
n_samples = n_samples[0]
if n_samples == 0:
raise ValueError("Trying to store empty data.")
if n_samples > self.capacity:
raise ValueError("Not enough capacity to store data.")
for k, arr in data.items():
if arr.shape[1:] != self.sample_shapes[k]:
raise ValueError(f"Wrong data shape for {k}")
new_idx = self._idx + n_samples
if new_idx > self.capacity:
n_remain = self.capacity - self._idx
# Need to loop around the buffer. Break into two "easy" calls.
self._store_easy({k: arr[:n_remain] for k, arr in data.items()})
assert self._idx == 0
self._store_easy({k: arr[n_remain:] for k, arr in data.items()})
else:
self._store_easy(data)
def _store_easy(self, data: Dict[str, np.ndarray]) -> None:
"""Stores new data samples, replacing old samples with FIFO priority.
Requires that `len(data) <= self.capacity - self._idx`. Updates `self._idx`
to be the insertion point of the next call to `_store_easy` call,
looping back to `self._idx = 0` if necessary.
Also updates `self._n_data`.
Args:
data: Same as in `self.store`'s docstring, except with the additional
constraint `len(data) <= self.capacity - self._idx`.
"""
n_samples = [arr.shape[0] for arr in data.values()]
n_samples = np.unique(n_samples)
assert len(n_samples) == 1
n_samples = n_samples[0]
assert n_samples <= self.capacity - self._idx
idx_hi = self._idx + n_samples
for k, arr in data.items():
self._arrays[k][self._idx:idx_hi] = arr
self._idx = idx_hi % self.capacity
self._n_data = min(self._n_data + n_samples, self.capacity)
def sample(self, n_samples: int) -> Dict[str, np.ndarray]:
"""Uniformly sample `n_samples` samples from the buffer with replacement.
Args:
n_samples: The number of samples to randomly sample.
Returns:
samples (np.ndarray): An array with shape
`(n_samples) + self.sample_shape`.
Raises:
ValueError: The buffer is empty.
"""
if len(self) == 0:
raise ValueError("Buffer is empty")
ind = np.random.randint(len(self), size=n_samples)
return {k: buffer[ind] for k, buffer in self._arrays.items()}
def __len__(self) -> int:
"""Returns the number of samples stored in the buffer."""
assert 0 <= self._n_data <= self.capacity
return self._n_data
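# Illustrative sketch (not part of the original module): building a full Buffer from
# existing arrays with from_data() and sampling from it with replacement.
#
#   import numpy as np
#   data = {"obs": np.zeros((4, 3), dtype=np.float32),
#           "acts": np.ones((4, 2), dtype=np.float32)}
#   buf = Buffer.from_data(data)   # capacity == 4, at full capacity and ready for sampling
#   batch = buf.sample(2)          # dict with "obs" of shape (2, 3) and "acts" of shape (2, 2)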
class ReplayBuffer:
"""Buffer for Transitions."""
capacity: int
"""The number of data samples that can be stored in this buffer."""
def __init__(self, capacity: int,
venv: Optional[vec_env.VecEnv] = None, *,
obs_shape: Optional[Tuple[int, ...]] = None,
act_shape: Optional[Tuple[int, ...]] = None,
obs_dtype: Optional[np.dtype] = None,
act_dtype: Optional[np.dtype] = None):
"""Constructs a ReplayBuffer.
Args:
capacity: The number of samples that can be stored.
venv: The environment whose action and observation
spaces can be used to determine the data shapes of the underlying
buffers. Overrides all the following arguments.
obs_shape: The shape of the observation space.
act_shape: The shape of the action space.
obs_dtype: The dtype of the observation space.
act_dtype: The dtype of the action space.
Raises:
ValueError: Couldn't infer the observation and action shapes and dtypes
from the arguments.
"""
params = [obs_shape, act_shape, obs_dtype, act_dtype]
if venv is not None:
if np.any([x is not None for x in params]):
raise ValueError("Specified shape or dtype and environment.")
obs_shape = tuple(venv.observation_space.shape)
act_shape = tuple(venv.action_space.shape)
obs_dtype = venv.observation_space.dtype
act_dtype = venv.action_space.dtype
else:
if np.any([x is None for x in params]):
raise ValueError("Shape or dtype missing and no environment specified.")
self.capacity = capacity
sample_shapes = {
'obs': obs_shape,
'acts': act_shape,
'next_obs': obs_shape,
'rews': (),
'dones': (),
}
dtypes = {
'obs': obs_dtype,
'acts': act_dtype,
'next_obs': obs_dtype,
'rews': np.float32,
'dones': np.bool,
}
self._buffer = Buffer(capacity, sample_shapes=sample_shapes, dtypes=dtypes)
@classmethod
def from_data(cls, transitions: rollout.Transitions) -> "ReplayBuffer":
"""Construct and return a ReplayBuffer containing only the provided data.
The returned ReplayBuffer is at full capacity and ready for sampling.
Args:
transitions: Transitions to store.
Returns:
A new ReplayBuffer.
Raises:
ValueError: obs and next_obs have a different dtype.
"""
if transitions.obs.dtype != transitions.next_obs.dtype:
raise ValueError("obs and next_obs must have the same dtype.")
capacity, *obs_shape = transitions.obs.shape
_, *act_shape = transitions.acts.shape
instance = cls(capacity=capacity, obs_shape=obs_shape, act_shape=act_shape,
obs_dtype=transitions.obs.dtype,
act_dtype=transitions.acts.dtype)
instance.store(transitions)
return instance
def sample(self, n_samples: int) -> rollout.Transitions:
"""Sample obs-act-obs triples.
Args:
n_samples: The number of samples.
Returns:
A Transitions named tuple containing n_samples transitions.
"""
sample = self._buffer.sample(n_samples)
return rollout.Transitions(**sample)
def store(self, transitions: rollout.Transitions) -> None:
"""Store obs-act-obs triples.
Args:
transitions: Transitions to store.
Raises:
ValueError: The arguments didn't have the same length.
"""
lengths = [len(arr) for arr in transitions]
if len(set(lengths)) != 1:
raise ValueError("Arguments must have the same length.")
self._buffer.store(transitions._asdict())
def __len__(self):
return len(self._buffer)
|
the-stack_0_4695 | from plenum.common.constants import TRUSTEE, STEWARD, NODE
from stp_core.common.log import getlogger
from indy_common.constants import OWNER, POOL_UPGRADE, TGB, TRUST_ANCHOR, NYM, \
POOL_CONFIG, SCHEMA, CLAIM_DEF, \
POOL_RESTART, VALIDATOR_INFO
from indy_common.roles import Roles
logger = getlogger()
# TODO: make this class the only point of authorization and checking permissions!
# There are some duplicates of this logic in *_req_handler classes
class Authoriser:
ValidRoles = (TRUSTEE, TGB, STEWARD, TRUST_ANCHOR, None)
AuthMap = {
'{}_role__{}'.format(NYM, TRUSTEE):
{TRUSTEE: []},
'{}_role__{}'.format(NYM, TGB):
{TRUSTEE: []},
'{}_role__{}'.format(NYM, STEWARD):
{TRUSTEE: []},
'{}_role__{}'.format(NYM, TRUST_ANCHOR):
{TRUSTEE: [], STEWARD: []},
'{}_role__'.format(NYM):
{TRUSTEE: [], TGB: [], STEWARD: [], TRUST_ANCHOR: []},
'{}_role_{}_'.format(NYM, TRUSTEE):
{TRUSTEE: []},
'{}_role_{}_'.format(NYM, TGB):
{TRUSTEE: []},
'{}_role_{}_'.format(NYM, STEWARD):
{TRUSTEE: []},
'{}_role_{}_'.format(NYM, TRUST_ANCHOR):
{TRUSTEE: []},
'{}_<any>_<any>_<any>'.format(SCHEMA):
{TRUSTEE: [], STEWARD: [], TRUST_ANCHOR: []},
'{}_<any>_<any>_<any>'.format(CLAIM_DEF):
{TRUSTEE: [OWNER, ], STEWARD: [OWNER, ], TRUST_ANCHOR: [OWNER, ]},
'{}_verkey_<any>_<any>'.format(NYM):
{r: [OWNER] for r in ValidRoles},
'{}_services__[VALIDATOR]'.format(NODE):
{STEWARD: [OWNER, ]},
# INDY-410 - steward allowed to demote/promote its validator
'{}_services_[VALIDATOR]_[]'.format(NODE):
{TRUSTEE: [], STEWARD: [OWNER, ]},
'{}_services_[]_[VALIDATOR]'.format(NODE):
{TRUSTEE: [], STEWARD: [OWNER, ]},
'{}_node_ip_<any>_<any>'.format(NODE):
{STEWARD: [OWNER, ]},
'{}_node_port_<any>_<any>'.format(NODE):
{STEWARD: [OWNER, ]},
'{}_client_ip_<any>_<any>'.format(NODE):
{STEWARD: [OWNER, ]},
'{}_client_port_<any>_<any>'.format(NODE):
{STEWARD: [OWNER, ]},
'{}_blskey_<any>_<any>'.format(NODE):
{STEWARD: [OWNER, ]},
'{}_action__start'.format(POOL_UPGRADE):
{TRUSTEE: [], TGB: []},
'{}_action_start_cancel'.format(POOL_UPGRADE):
{TRUSTEE: [], TGB: []},
'{}_action_<any>_<any>'.format(POOL_RESTART):
{TRUSTEE: []},
'{}_action_<any>_<any>'.format(POOL_CONFIG):
{TRUSTEE: [], TGB: []},
'{}_<any>_<any>_<any>'.format(VALIDATOR_INFO):
{TRUSTEE: [], STEWARD: []},
}
@staticmethod
def isValidRole(role) -> bool:
return role in Authoriser.ValidRoles
@staticmethod
    def getRoleFromName(roleName):
if not roleName:
return
return Roles[roleName].value
@staticmethod
def isValidRoleName(roleName) -> bool:
if not roleName:
return True
try:
Authoriser.getRoleFromName(roleName)
except KeyError:
return False
return True
@staticmethod
def authorised(typ, actorRole, field=None, oldVal=None, newVal=None,
isActorOwnerOfSubject=None) -> (bool, str):
field = field if field is not None else ""
oldVal = '' if oldVal is None else \
str(oldVal).replace('"', '').replace("'", '')
newVal = '' if newVal is None else \
str(newVal).replace('"', '').replace("'", '')
key = '_'.join([typ, field, oldVal, newVal])
if key not in Authoriser.AuthMap:
any_value = '_'.join([typ, field, '<any>', '<any>'])
if any_value not in Authoriser.AuthMap:
any_field = '_'.join([typ, "<any>", '<any>', '<any>'])
if any_field not in Authoriser.AuthMap:
msg = "key '{}' not found in authorized map".format(key)
logger.debug(msg)
return False, msg
else:
key = any_field
else:
key = any_value
roles = Authoriser.AuthMap[key]
if actorRole not in roles:
roles_as_str = [Roles.nameFromValue(role) for role in roles.keys()]
return False, '{} not in allowed roles {}'.\
format(Roles.nameFromValue(actorRole), roles_as_str)
roleDetails = roles[actorRole]
if len(roleDetails) == 0:
return True, ''
else:
r = OWNER in roleDetails and isActorOwnerOfSubject
msg = '' if r else 'Only owner is allowed'
return r, msg
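# Illustrative sketch (not in the original source), using the constants imported above:
#
#   Authoriser.authorised(NYM, TRUSTEE, field="role", oldVal=None, newVal=STEWARD)
#   # -> (True, '')   : a trustee may create a steward NYM
#   Authoriser.authorised(NYM, TRUST_ANCHOR, field="role", oldVal=None, newVal=STEWARD)
#   # -> (False, ...) : a trust anchor is not in the allowed roles for that change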
|
the-stack_0_4696 | # -*- coding: utf-8 -*-
import argparse
import json
import os
import re
import shutil
import subprocess as sp
import sys
from concurrent import futures
from tempfile import mkdtemp, mkstemp
from typing import List, Tuple
import cx_Oracle
from pyinterprod import logger
from pyinterprod.utils import oracle
from .database import Database
from . import contrib
HMM_SUFFIX = ".hmm"
SEQ_SUFFIX = ".fa"
DOM_SUFFIX = ".tab"
OUT_SUFFIX = ".out"
def calc_dir_size(dirpath: str) -> int:
size = 0
for root, dirs, files in os.walk(dirpath):
for f in files:
size += os.path.getsize(os.path.join(root, f))
return size
def create_tables(url: str):
con = cx_Oracle.connect(url)
cur = con.cursor()
for table in ("CLAN_MATCH", "CLAN_MEMBER", "CLAN"):
oracle.drop_table(cur, table, purge=True)
cur.execute(
"""
CREATE TABLE INTERPRO.CLAN
(
CLAN_AC VARCHAR2(25) NOT NULL,
DBCODE CHAR(1) NOT NULL,
NAME VARCHAR2(100) DEFAULT NULL,
DESCRIPTION VARCHAR2(4000) DEFAULT NULL,
CONSTRAINT PK_CLAN
PRIMARY KEY (CLAN_AC),
CONSTRAINT FK_CLAN$DBCODE
FOREIGN KEY (DBCODE)
REFERENCES INTERPRO.CV_DATABASE (DBCODE)
ON DELETE CASCADE
)
"""
)
cur.execute(
"""
CREATE TABLE INTERPRO.CLAN_MEMBER
(
CLAN_AC VARCHAR2(25) NOT NULL,
MEMBER_AC VARCHAR2(25) NOT NULL,
LEN NUMBER NOT NULL,
SCORE FLOAT NOT NULL,
CONSTRAINT PK_CLAN_MEMBER
PRIMARY KEY (CLAN_AC, MEMBER_AC),
CONSTRAINT UQ_CLAN_MEMBER$MEMBER_AC
UNIQUE (MEMBER_AC),
CONSTRAINT FK_CLAN_MEMBER$CLAN_AC
FOREIGN KEY (CLAN_AC)
REFERENCES INTERPRO.CLAN (CLAN_AC)
ON DELETE CASCADE,
CONSTRAINT FK_CLAN_MEMBER$MEMBER_AC
FOREIGN KEY (MEMBER_AC)
REFERENCES INTERPRO.METHOD (METHOD_AC)
ON DELETE CASCADE
)
"""
)
cur.execute(
"""
CREATE TABLE INTERPRO.CLAN_MATCH
(
QUERY_AC VARCHAR2(25) NOT NULL,
TARGET_AC VARCHAR2(25) NOT NULL,
EVALUE FLOAT NOT NULL,
DOMAINS CLOB NOT NULL,
CONSTRAINT PK_CLAN_MATCH
PRIMARY KEY (QUERY_AC, TARGET_AC),
CONSTRAINT FK_CLAN_MATCH
FOREIGN KEY (QUERY_AC)
REFERENCES INTERPRO.CLAN_MEMBER (MEMBER_AC)
ON DELETE CASCADE
)
"""
)
cur.close()
con.close()
def load_sequence(seqfile: str) -> str:
seq = ""
with open(seqfile, "rt") as fh:
next(fh)
for line in fh:
seq += line.rstrip()
return seq
def iter_models(hmmdb: str):
with open(hmmdb, "rt") as fh:
reg_acc = re.compile(r"ACC\s+(\w+)", flags=re.M)
reg_name = re.compile(r"^NAME\s+(PTHR\d+)\.(SF\d+)?", flags=re.M)
hmm = ""
for line in fh:
hmm += line
if line[:2] == "//":
m = reg_acc.search(hmm)
if m:
accession = m.group(1)
else:
# PANTHER: accessions in the NAME field
m = reg_name.search(hmm)
accession, prefix = m.groups()
if prefix is not None:
accession += ':' + prefix
yield accession, hmm
hmm = ""
def iter_sequences(seqfile: str):
with open(seqfile, "rt") as fh:
buffer = ""
accession = identifier = None
for line in fh:
if line[0] == ">":
if buffer and identifier:
yield identifier, accession, buffer
m = re.match(r">(gnl\|CDD\|\d+)\s+(cd\d+),", line)
if m:
identifier, accession = m.groups()
else:
accession = identifier = None
buffer = ""
buffer += line
if buffer and identifier:
yield identifier, accession, buffer
def load_hmmscan_results(outfile: str, tabfile: str) -> List[dict]:
alignments = load_domain_alignments(outfile)
targets = {}
with open(tabfile, "rt") as fh:
i = 0
for line in fh:
if line[0] == "#":
continue
cols = re.split(r"\s+", line.rstrip(), maxsplit=22)
name = cols[0]
                # Pfam accessions carry a trailing version number (e.g. ".21"): keep only the accession
acc = cols[1].split(".")[0]
if acc == "-":
# Panther accessions are under the `target_name` column
acc = name
if acc in targets:
t = targets[acc]
else:
t = targets[acc] = {
"name": name,
"accession": acc,
"tlen": int(cols[2]),
"qlen": int(cols[5]),
# full sequence
"evalue": float(cols[6]),
"evaluestr": cols[6],
"score": float(cols[7]),
"bias": float(cols[8]),
"domains": []
}
t["domains"].append({
# this domain
# conditional E-value
"cevalue": float(cols[11]),
"cevaluestr": cols[11],
# independent E-value
"ievalue": float(cols[12]),
"ievaluestr": cols[12],
"score": float(cols[13]),
"bias": float(cols[14]),
"coordinates": {
# target (as we scan an HMM DB)
"hmm": {
"start": int(cols[15]),
"end": int(cols[16])
},
# query
"ali": {
"start": int(cols[17]),
"end": int(cols[18])
},
"env": {
"start": int(cols[19]),
"end": int(cols[20])
},
},
"sequences": alignments[i]
})
i += 1
return list(targets.values())
def load_domain_alignments(file: str) -> List[Tuple[str, str]]:
"""
Parse the output file of hmmscan and load domain alignments.
Example of alignments:
== domain 1 score: 25.3 bits; conditional E-value: 5.2e-09
Cytochrome_c4 11 llalaalal.alaaaadaeagaaklaea......gaaavkaCaaCHGadGnsaaaaayPrLAgqsaaYlakqLkdfrsg 82
l++l+a+++ ++ a+++ e++a+k++ea + ++C +CHG+d ++a+ P+L ++Y +++++++ ++
Cytochrome_Bsub_c550-consensus 18 LVVLLAVNGgSKDAEEEKEEEAEKSEEAeaeaegEEIFKQKCISCHGKDLEGAVG---PNLEKVGSKYSEEEIAKIIEN 93
34444444442223333333333333336666856777899***********766...***************999887 PP
Cytochrome_c4 83 errknpMaplakaLsdqdiedlaaYfaaq 111
k +M a+ sd++ +++a+++a++
Cytochrome_Bsub_c550-consensus 94 G--KGAM--PAAIVSDDEAKAVAKWLAEK 118
3..3344..46678999999999999986 PP
Since the sequence is too long, the domain is represented with two "blocks".
The "== domain" line might be followed by a consensus structure annotation line (not the case here).
Each block has four lines:
1. consensus of the target profile
2. matches between the query sequence and target profile (**can be empty**)
3. query sequence
4. posterior probability of each aligned residue
:param file: hmmscan output file
:return: a list of alignments, represented by a tuple of two sequences (query, target)
"""
alignments = []
query_seq = target_seq = ""
with open(file, "rt") as fh:
for line in fh:
line = line.strip()
if not line:
continue
if line.startswith(">> "):
# New model
# target_name = line[3:]
if query_seq:
alignments.append((query_seq, target_seq))
query_seq = target_seq = ""
elif line.startswith("== domain"):
# New domain
if query_seq:
alignments.append((query_seq, target_seq))
query_seq = target_seq = ""
line = next(fh).strip()
block = []
while line or len(block) < 4:
block.append(line)
line = next(fh).strip()
del block[:-4]
target_seq += block[0].split()[2]
query_seq += block[2].split()[2]
elif line == "Internal pipeline statistics summary:":
alignments.append((query_seq, target_seq))
query_seq = target_seq = ""
elif query_seq:
# New block of domain
block = []
while line or len(block) < 4:
block.append(line)
line = next(fh).strip()
del block[:-4]
target_seq += block[0].split()[2]
query_seq += block[2].split()[2]
return alignments
def load_compass_results(outfile) -> List[dict]:
# p1 = re.compile(r"length\s*=\s*(\d+)")
p2 = re.compile(r"Evalue\s*=\s*([\d.e\-]+)")
targets = {}
block = 0
query_id = None
query_seq = ""
target_id = None
target_seq = ""
length = None
evalue = None
evalue_str = None
pos_start = None
with open(outfile, "rt") as fh:
for line in fh:
line = line.rstrip()
if line.startswith("Subject="):
"""
Format:
Subject= cd154/cd15468.fa
length=413 filtered_length=413 Neff=1.000
Smith-Waterman score = 254 Evalue = 3.36e-16
(the path after "Subject=" might be truncated)
"""
if target_id:
targets[target_id] = {
"id": target_id,
"evalue": evalue,
"evaluestr": evalue_str,
"length": length,
"start": pos_start,
"end": pos_start + len(query_seq.replace('=', '')) - 1,
"sequences": {
"query": query_seq,
"target": target_seq
}
}
query_id = None
query_seq = None
target_id = None
target_seq = None
line = next(fh)
# length = int(p1.match(line).group(1))
line = next(fh)
evalue_str = p2.search(line).group(1)
try:
evalue = float(evalue_str)
except ValueError:
evalue = 0
block = 1
elif line.startswith("Parameters:"):
# Footer: end of results
break
elif not block:
continue
elif line:
"""
First block:
gnl|CDD|271233 1 PSFIPGPT==TPKGCTRIPSFSLSDTHWCYTHNVILSGCQDHSKSNQYLSLGVIKTNSDG
CONSENSUS_1 1 PSFIPGPT==TPKGCTRIPSFSLSDTHWCYTHNVILSGCQDHSKSNQYLSLGVIKTNSDG
P++IP+ T C+R PSF++S+ + YT+ V ++CQDH + +Y+++GVI+ ++ G
CONSENSUS_2 1 PNLIPADTGLLSGECVRQPSFAISSGIYAYTYLVRKGSCQDHRSLYRYFEVGVIRDDGLG
gnl|CDD|271230 1 PNLIPADTGLLSGECVRQPSFAISSGIYAYTYLVRKGSCQDHRSLYRYFEVGVIRDDGLG
(following blocks do not have the start position between the ID and the sequence)
"""
query = line.split()
next(fh)
next(fh)
next(fh)
target = next(fh).split()
if block == 1:
query_id = query[0]
pos_start = int(query[1])
query_seq = query[2]
target_id = target[0]
target_seq = target[2]
else:
query_seq += query[1]
target_seq += target[1]
block += 1
targets[target_id] = {
"id": target_id,
"evalue": evalue,
"evaluestr": evalue_str,
"length": length,
"start": pos_start,
"end": pos_start + len(query_seq.replace('=', '')) - 1,
"sequences": {
"query": query_seq,
"target": target_seq
}
}
return list(targets.values())
def run_compass(seqfile: str, database: str, outfile: str):
args = ["compass_vs_db", "-i", seqfile, "-d", database, "-o", outfile]
process = sp.run(args=args, stderr=sp.DEVNULL, stdout=sp.DEVNULL)
if process.returncode == 0:
return True
try:
os.remove(outfile)
except FileNotFoundError:
pass
return False
def run_hmmemit(hmmdb: str, seqfile: str):
sp.run(args=["hmmemit", "-c", "-o", seqfile, hmmdb],
stderr=sp.DEVNULL, stdout=sp.DEVNULL, check=True)
def run_hmmscan(hmmdb: str, seqfile: str, domfile: str, outfile: str) -> bool:
args = ["hmmscan", "-o", outfile, "--domtblout", domfile, "--cpu", "1",
hmmdb, seqfile]
process = sp.run(args=args, stderr=sp.DEVNULL, stdout=sp.DEVNULL)
if process.returncode == 0:
return True
for f in (domfile, outfile):
try:
os.remove(f)
except FileNotFoundError:
pass
return False
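# Illustrative pipeline sketch (not part of the original module): how the helpers above fit
# together for a single model. `hmmfile`, `seqfile`, `domfile`, `outfile` and `hmmdb` are
# placeholder paths; update_hmm_clans() below drives the same steps in bulk.
#
#   run_hmmemit(hmmfile, seqfile)                      # consensus sequence of one model
#   if run_hmmscan(hmmdb, seqfile, domfile, outfile):  # scan it against the full HMM database
#       targets = load_hmmscan_results(outfile, domfile)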
def update_cdd_clans(url: str, database: Database, cddmasters: str,
cddid: str, fam2supfam: str, **kwargs):
threads = kwargs.get("threads")
tmpdir = kwargs.get("tmpdir")
if tmpdir:
os.makedirs(tmpdir, exist_ok=True)
logger.info("deleting old clans")
con = cx_Oracle.connect(url)
cur = con.cursor()
cur.execute("DELETE FROM INTERPRO.CLAN WHERE DBCODE = :1",
(database.identifier,))
con.commit()
cur.close()
con.close()
clans = contrib.cdd.get_clans(cddid, fam2supfam)
clans_to_insert = {}
mem2clan = {}
for c in clans:
clans_to_insert[c.accession] = c
for m in c.members:
mem2clan[m["accession"]] = (c.accession, m["score"])
logger.info("parsing representative sequences")
workdir = mkdtemp(dir=tmpdir)
fd, files_list = mkstemp(dir=workdir)
id2acc = {}
seqfiles = {}
with open(fd, "wt") as fh:
for model_id, model_acc, sequence in iter_sequences(cddmasters):
if model_acc not in mem2clan or model_acc in seqfiles:
continue
subdir = os.path.join(workdir, model_acc[:5])
try:
os.mkdir(subdir)
except FileExistsError:
pass
prefix = os.path.join(subdir, model_acc)
seqfile = prefix + SEQ_SUFFIX
with open(seqfile, "wt") as fh2:
fh2.write(sequence)
fh.write(f"{seqfile}\n")
seqfiles[model_acc] = prefix
id2acc[model_id] = model_acc
logger.info("building profile database")
    # use a separate name for the COMPASS profile database so the `database` argument
    # (an InterPro Database record) is not shadowed: `database.identifier` is needed
    # again below when inserting clans
    fd, profile_db = mkstemp(dir=workdir)
    os.close(fd)
    os.remove(profile_db)
    sp.run(["mk_compass_db", "-i", files_list, "-o", profile_db],
           stderr=sp.DEVNULL, stdout=sp.DEVNULL, check=True)
with futures.ThreadPoolExecutor(max_workers=threads) as executor:
logger.info("querying sequences")
fs = {}
for model_acc, prefix in seqfiles.items():
seqfile = prefix + SEQ_SUFFIX
outfile = prefix + OUT_SUFFIX
            f = executor.submit(run_compass, seqfile, profile_db, outfile)
fs[f] = (model_acc, prefix)
con = cx_Oracle.connect(url)
cur = con.cursor()
cur2 = con.cursor()
cur2.setinputsizes(25, 25, cx_Oracle.DB_TYPE_BINARY_DOUBLE,
cx_Oracle.DB_TYPE_CLOB)
clan_sql = "INSERT INTO INTERPRO.CLAN VALUES (:1, :2, :3, :4)"
memb_sql = "INSERT INTO INTERPRO.CLAN_MEMBER VALUES (:1, :2, :3, :4)"
mtch_sql = "INSERT INTO INTERPRO.CLAN_MATCH VALUES (:1, :2, :3, :4)"
completed = errors = progress = 0
for f in futures.as_completed(fs):
model_acc, prefix = fs[f]
completed += 1
if not f.result():
logger.error(f"{model_acc}")
errors += 1
continue
clan_acc, score = mem2clan[model_acc]
sequence = load_sequence(prefix + SEQ_SUFFIX)
try:
clan = clans_to_insert.pop(clan_acc)
except KeyError:
# Clan already inserted
pass
else:
cur.execute(clan_sql, (clan.accession, database.identifier,
clan.name, clan.description))
cur.execute(memb_sql, (clan_acc, model_acc, len(sequence), score))
matches = []
for target in load_compass_results(prefix + OUT_SUFFIX):
target_acc = id2acc[target["id"]]
if target_acc == model_acc:
continue
matches.append((
model_acc,
target_acc,
target["evalue"],
json.dumps([(target["start"], target["end"])])
))
if matches:
cur2.executemany(mtch_sql, matches)
pc = completed * 100 // len(fs)
if pc > progress:
progress = pc
logger.debug(f"{progress:>10}%")
con.commit()
cur.close()
cur2.close()
con.close()
size = calc_dir_size(workdir)
logger.info(f"disk usage: {size / 1024 ** 2:,.0f} MB")
shutil.rmtree(workdir)
if errors:
raise RuntimeError(f"{errors} error(s)")
def update_hmm_clans(url: str, database: Database, hmmdb: str, **kwargs):
clan_source = kwargs.get("source")
threads = kwargs.get("threads")
tmpdir = kwargs.get("tmpdir")
if tmpdir:
os.makedirs(tmpdir, exist_ok=True)
logger.info("deleting old clans")
con = cx_Oracle.connect(url)
cur = con.cursor()
cur.execute("DELETE FROM INTERPRO.CLAN WHERE DBCODE = :1",
(database.identifier,))
con.commit()
cur.close()
con.close()
logger.info("loading new clans")
if database.name.lower() == "panther":
clans = contrib.panther.get_clans(url)
def getsubdir(x): return x[:7]
elif database.name.lower() == "pfam":
clans = contrib.pfam.get_clans(clan_source)
def getsubdir(x): return x[:5]
elif database.name.lower() == "pirsf":
clans = contrib.pirsf.get_clans(clan_source)
def getsubdir(x): return x[:8]
else:
raise NotImplementedError()
clans_to_insert = {}
mem2clan = {}
for c in clans:
clans_to_insert[c.accession] = c
for m in c.members:
mem2clan[m["accession"]] = (c.accession, m["score"])
workdir = mkdtemp(dir=tmpdir)
num_duplicates = 0
with futures.ThreadPoolExecutor(max_workers=threads) as executor:
logger.info("emitting consensus sequences")
fs = {}
models = set()
for model_acc, hmm in iter_models(hmmdb):
if model_acc not in mem2clan:
# Ignore models not belonging to a clan
continue
elif model_acc in models:
num_duplicates += 1
continue
subdir = os.path.join(workdir, getsubdir(model_acc))
try:
os.mkdir(subdir)
except FileExistsError:
pass
prefix = os.path.join(subdir, model_acc)
hmmfile = prefix + HMM_SUFFIX
with open(hmmfile, "wt") as fh:
fh.write(hmm)
seqfile = prefix + SEQ_SUFFIX
f = executor.submit(run_hmmemit, hmmfile, seqfile)
fs[f] = model_acc
models.add(model_acc)
done, not_done = futures.wait(fs)
if not_done:
shutil.rmtree(workdir)
raise RuntimeError(f"{len(not_done)} error(s)")
elif num_duplicates:
shutil.rmtree(workdir)
raise RuntimeError(f"HMM database {hmmdb} contains "
f"{num_duplicates} duplicated models.")
logger.info("searching consensus sequences")
fs = {}
for model_acc in models:
prefix = os.path.join(workdir, getsubdir(model_acc), model_acc)
seqfile = prefix + SEQ_SUFFIX
outfile = prefix + OUT_SUFFIX
domfile = prefix + DOM_SUFFIX
f = executor.submit(run_hmmscan, hmmdb, seqfile, domfile, outfile)
fs[f] = model_acc
con = cx_Oracle.connect(url)
cur = con.cursor()
cur2 = con.cursor()
cur2.setinputsizes(25, 25, cx_Oracle.DB_TYPE_BINARY_DOUBLE,
cx_Oracle.DB_TYPE_CLOB)
clan_sql = "INSERT INTO INTERPRO.CLAN VALUES (:1, :2, :3, :4)"
memb_sql = "INSERT INTO INTERPRO.CLAN_MEMBER VALUES (:1, :2, :3, :4)"
mtch_sql = "INSERT INTO INTERPRO.CLAN_MATCH VALUES (:1, :2, :3, :4)"
completed = errors = progress = 0
for f in futures.as_completed(fs):
model_acc = fs[f]
completed += 1
if not f.result():
logger.error(f"{model_acc}")
errors += 1
continue
prefix = os.path.join(workdir, getsubdir(model_acc), model_acc)
outfile = prefix + OUT_SUFFIX
domfile = prefix + DOM_SUFFIX
clan_acc, score = mem2clan[model_acc]
sequence = load_sequence(prefix + SEQ_SUFFIX)
try:
clan = clans_to_insert.pop(clan_acc)
except KeyError:
# Clan already inserted
pass
else:
cur.execute(clan_sql, (clan.accession, database.identifier,
clan.name, clan.description))
cur.execute(memb_sql, (clan_acc, model_acc, len(sequence), score))
matches = []
for target in load_hmmscan_results(outfile, domfile):
if target["accession"] == model_acc:
continue
domains = []
for dom in target["domains"]:
domains.append((
dom["coordinates"]["ali"]["start"],
dom["coordinates"]["ali"]["end"]
))
matches.append((
model_acc,
target["accession"],
target["evalue"],
json.dumps(domains)
))
if matches:
cur2.executemany(mtch_sql, matches)
pc = completed * 100 // len(fs)
if pc > progress:
progress = pc
logger.debug(f"{progress:>10}%")
con.commit()
cur.close()
cur2.close()
con.close()
size = calc_dir_size(workdir)
logger.info(f"disk usage: {size / 1024 ** 2:,.0f} MB")
shutil.rmtree(workdir)
if errors:
raise RuntimeError(f"{errors} error(s)")
def remove_hmm_duplicates():
prog = "python -m pyinterprod.interpro.clan"
description = ("Simple command line interface to stream an HMM file "
"without repeated models.")
parser = argparse.ArgumentParser(prog=prog, description=description)
parser.add_argument("hmmdb", help="an HMM file")
options = parser.parse_args()
accessions = set()
for acc, hmm in iter_models(options.hmmdb):
if acc in accessions:
continue
accessions.add(acc)
print(hmm, end='')
if __name__ == '__main__':
try:
remove_hmm_duplicates()
except BrokenPipeError as exc:
sys.exit(exc.errno)
|
the-stack_0_4702 | #!/bin/python3
import os
import re
# Complete the happyLadybugs function below.
def happyLadybugs(b):
# find if exists any single letter
if b.count('_') == 0 and len(re.sub(r'((.)\2+)', '', b)) != 0:
return 'NO'
for a in set(b):
if a != '_' and b.count(a) == 1:
return 'NO'
return 'YES'
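# Hand-checked sketch (not part of the original submission):
#
#   happyLadybugs("RBY_YBR")   # -> 'YES' (an empty cell and no colour appears only once)
#   happyLadybugs("AABBC")     # -> 'NO'  (no empty cell and 'C' is not in a group)
#   happyLadybugs("X_Y__X")    # -> 'NO'  ('Y' appears exactly once)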
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
g = int(input())
for g_itr in range(g):
n = int(input())
b = input()
result = happyLadybugs(b)
fptr.write(result + '\n')
fptr.close()
|
the-stack_0_4704 | """Receive signals from a keyboard and use it as a remote control."""
# pylint: disable=import-error
import threading
import logging
import os
import time
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
_LOGGER = logging.getLogger(__name__)
DEVICE_DESCRIPTOR = "device_descriptor"
DEVICE_ID_GROUP = "Device description"
DEVICE_NAME = "device_name"
DOMAIN = "keyboard_remote"
ICON = "mdi:remote"
KEY_CODE = "key_code"
KEY_VALUE = {"key_up": 0, "key_down": 1, "key_hold": 2}
KEYBOARD_REMOTE_COMMAND_RECEIVED = "keyboard_remote_command_received"
KEYBOARD_REMOTE_CONNECTED = "keyboard_remote_connected"
KEYBOARD_REMOTE_DISCONNECTED = "keyboard_remote_disconnected"
TYPE = "type"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Exclusive(DEVICE_DESCRIPTOR, DEVICE_ID_GROUP): cv.string,
vol.Exclusive(DEVICE_NAME, DEVICE_ID_GROUP): cv.string,
vol.Optional(TYPE, default="key_up"): vol.All(
cv.string, vol.Any("key_up", "key_down", "key_hold")
),
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
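# Illustrative configuration sketch (not part of this component): configuration.yaml
# entries matching the schema above; the device path and name are hypothetical
# placeholders.
#
#   keyboard_remote:
#     - device_descriptor: /dev/input/by-id/usb-Example_Keyboard-event-kbd
#       type: key_up
#     - device_name: Example USB Keyboard
#       type: key_down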
def setup(hass, config):
"""Set up the keyboard_remote."""
config = config.get(DOMAIN)
keyboard_remote = KeyboardRemote(hass, config)
def _start_keyboard_remote(_event):
keyboard_remote.run()
def _stop_keyboard_remote(_event):
keyboard_remote.stop()
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_keyboard_remote)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _stop_keyboard_remote)
return True
class KeyboardRemoteThread(threading.Thread):
"""This interfaces with the inputdevice using evdev."""
def __init__(self, hass, device_name, device_descriptor, key_value):
"""Construct a thread listening for events on one device."""
self.hass = hass
self.device_name = device_name
self.device_descriptor = device_descriptor
self.key_value = key_value
if self.device_descriptor:
self.device_id = self.device_descriptor
else:
self.device_id = self.device_name
self.dev = self._get_keyboard_device()
if self.dev is not None:
_LOGGER.debug("Keyboard connected, %s", self.device_id)
else:
_LOGGER.debug(
"Keyboard not connected, %s. " "Check /dev/input/event* permissions",
self.device_id,
)
id_folder = "/dev/input/by-id/"
if os.path.isdir(id_folder):
from evdev import InputDevice, list_devices
device_names = [
InputDevice(file_name).name for file_name in list_devices()
]
_LOGGER.debug(
"Possible device names are: %s. "
"Possible device descriptors are %s: %s",
device_names,
id_folder,
os.listdir(id_folder),
)
threading.Thread.__init__(self)
self.stopped = threading.Event()
self.hass = hass
def _get_keyboard_device(self):
"""Get the keyboard device."""
from evdev import InputDevice, list_devices
if self.device_name:
devices = [InputDevice(file_name) for file_name in list_devices()]
for device in devices:
if self.device_name == device.name:
return device
elif self.device_descriptor:
try:
device = InputDevice(self.device_descriptor)
except OSError:
pass
else:
return device
return None
def run(self):
"""Run the loop of the KeyboardRemote."""
from evdev import categorize, ecodes
if self.dev is not None:
self.dev.grab()
_LOGGER.debug("Interface started for %s", self.dev)
while not self.stopped.isSet():
# Sleeps to ease load on processor
time.sleep(0.05)
if self.dev is None:
self.dev = self._get_keyboard_device()
if self.dev is not None:
self.dev.grab()
self.hass.bus.fire(
KEYBOARD_REMOTE_CONNECTED,
{
DEVICE_DESCRIPTOR: self.device_descriptor,
DEVICE_NAME: self.device_name,
},
)
_LOGGER.debug("Keyboard re-connected, %s", self.device_id)
else:
continue
try:
event = self.dev.read_one()
except IOError: # Keyboard Disconnected
self.dev = None
self.hass.bus.fire(
KEYBOARD_REMOTE_DISCONNECTED,
{
DEVICE_DESCRIPTOR: self.device_descriptor,
DEVICE_NAME: self.device_name,
},
)
_LOGGER.debug("Keyboard disconnected, %s", self.device_id)
continue
if not event:
continue
if event.type is ecodes.EV_KEY and event.value is self.key_value:
_LOGGER.debug(categorize(event))
self.hass.bus.fire(
KEYBOARD_REMOTE_COMMAND_RECEIVED,
{
KEY_CODE: event.code,
DEVICE_DESCRIPTOR: self.device_descriptor,
DEVICE_NAME: self.device_name,
},
)
class KeyboardRemote:
"""Sets up one thread per device."""
def __init__(self, hass, config):
"""Construct a KeyboardRemote interface object."""
self.threads = []
for dev_block in config:
device_descriptor = dev_block.get(DEVICE_DESCRIPTOR)
device_name = dev_block.get(DEVICE_NAME)
key_value = KEY_VALUE.get(dev_block.get(TYPE, "key_up"))
if device_descriptor is not None or device_name is not None:
thread = KeyboardRemoteThread(
hass, device_name, device_descriptor, key_value
)
self.threads.append(thread)
def run(self):
"""Run all event listener threads."""
for thread in self.threads:
thread.start()
def stop(self):
"""Stop all event listener threads."""
for thread in self.threads:
thread.stopped.set()
|
the-stack_0_4705 | #!/usr/bin/env python
__author__ = 'Florian Hase'
#========================================================================
import copy
import numpy as np
import pickle
from DatabaseManager.database import Database
from Utilities.misc import Printer
#========================================================================
class ResultsHandler(Printer):
DB_ATTRIBUTES = {'status': 'string',
'job_id': 'string',
'repetition': 'integer',
'work_dir': 'string',
'exp_identifier': 'string',
'parameters': 'pickle',
'objectives': 'pickle',
'author': 'pickle'}
PROCESSED_JOBS = []
def __init__(self, settings, verbose = True):
Printer.__init__(self, 'RESULTS HANDLER', color = 'yellow')
self.settings = settings
self.verbose = verbose
self._create_database()
def _create_database(self):
db_settings = self.settings['results_database']
self.database = Database(db_settings['path'], self.DB_ATTRIBUTES,
db_settings['database_type'], verbose = self.verbose)
def process_results(self, results_dict):
results_dict['status'] = 'new'
self.database.add(results_dict)
def remove_results(self, identifier):
self._print('removing feedback for %s' % identifier)
condition = {'exp_identifier': identifier}
self.database.remove_all(condition)
def get_new_results(self):
condition = {'status': 'new'}
new_results_list = self.database.fetch_all(condition)
# check, if:
# - for a given experiment
# - and a given job_id
# --> all repetitions are executed
new_results = {}
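        # Group results as {exp_identifier: {job_id: [result, ...]}}.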
for result in new_results_list:
exp_identifier = result['exp_identifier']
job_id = result['job_id']
if exp_identifier in new_results.keys():
if job_id in new_results[exp_identifier]:
new_results[exp_identifier][job_id].append(result)
else:
new_results[exp_identifier][job_id] = [result]
else:
new_results[exp_identifier] = {job_id: [result]}
# get those jobs, for which we have all the results
completed_jobs = []
for exp_identifier in new_results.keys():
# get experiment
for experiment in self.settings['experiments']:
if experiment['name'] == exp_identifier:
break
num_repetitions = experiment['repetitions']
for job_id in new_results[exp_identifier]:
if len(new_results[exp_identifier][job_id]) == num_repetitions:
completed_jobs.append(job_id)
return completed_jobs
# separate the new feedbacks by name and by repetition
# new_results = {}
# condition = {'status': 'new'}
# new_result_list = self.database.fetch_all(condition)
# separate the new feedbacks by name
# new_results = {}
# for result in new_result_list:
# if result['exp_identifier'] in new_results.keys():
# new_results[result['exp_identifier']].append(result)
# else:
# new_results[result['exp_identifier']] = [result]
# return new_results
def analyze_new_results(self, job_id):
# get experiments with the defined job_id
condition = {'job_id': job_id}
results = self.database.fetch_all(condition)
# copy information to the processed dictionary
processed = {}
for att in ['job_id', 'work_dir', 'exp_identifier', 'parameters', 'author']:
processed[att] = copy.deepcopy(results[0][att])
processed['loss'] = {}
# perform operations on results
exp_identifier = results[0]['exp_identifier']
for experiment in self.settings['experiments']:
if experiment['name'] == exp_identifier:
break
for objective in experiment['objectives']:
name = objective['name']
operation = objective['operation']
# get all results
# print('RESULT', results)
values = np.array([result['objectives'][name] for result in results])
if operation == 'average':
value = np.mean(values)
elif operation == 'std_rel':
value = np.std(values) / np.mean(values)
else:
raise NotImplementedError()
processed['loss']['%s_%s' % (name, operation)] = value
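        # Cache the processed summary on the handler, keyed by job_id, and record the job as processed.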
setattr(self, 'info_dict_%s' % job_id, copy.deepcopy(processed))
self.PROCESSED_JOBS.append(job_id)
def set_all_to_used(self, job_id):
condition = {'job_id': job_id, 'status': 'new'}
update = {'status': 'used'}
self.database.update(condition, update)
# def set_all_to_used(self, exp_identifier):
# condition = {'exp_identifier': exp_identifier, 'status': 'new'}
# update = {'status': 'used'}
# self.database.update(condition, update)
#========================================================================
|
the-stack_0_4706 | #!/usr/bin/python
from Solution import Solution
obj = Solution()
#A = [1,1,1,2,2,3]
#A = [0,0,1,1,1,1,2,3,3]
#A = [1, 1, 1, 1]
A = [1]
print(obj.removeDuplicates(A))
|
the-stack_0_4707 | import itertools
import argparse
import datetime
import os
import sys
import re
import time
import numpy as np
def filldict(listKeys, listValues):
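    # Pair keys with values positionally to build a dict (zip stops at the shorter sequence).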
mydict = {}
for key, value in zip(listKeys, listValues):
mydict[key] = value
return mydict
def generate_script_body(param_dict):
script_body='''#!/bin/bash
cd /home/babbatem/projects/skills_kin/ben_dapg
source /home/babbatem/envs/skills_kin/bin/activate
export GYM_ENV={}
echo $GYM_ENV
python my_job_script.py --config {} --output {}
'''
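    # Fill the placeholders with the gym environment name, config path, and output path.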
script_body=script_body.format(param_dict['env'],
param_dict['config'],
param_dict['output'])
return script_body
def get_config_file_dapg():
config= \
"""{
# general inputs
'env' : '%s',
'algorithm' : 'DAPG',
'seed' : %i,
'num_cpu' : 3,
'save_freq' : 25,
'eval_rollouts' : 1,
# Demonstration data and behavior cloning
'demo_file' : '%s',
'bc_batch_size' : 32,
'bc_epochs' : 5,
'bc_learn_rate' : 1e-3,
# RL parameters (all params related to PG, value function, DAPG etc.)
'policy_size' : (32, 32),
'vf_batch_size' : 64,
'vf_epochs' : 2,
'vf_learn_rate' : 1e-3,
'rl_step_size' : 0.05,
'rl_gamma' : 0.995,
'rl_gae' : 0.97,
'rl_num_traj' : 20,
'rl_num_iter' : 10,
'lam_0' : 1e-2,
'lam_1' : 0.95,
'init_log_std' : 1
}
"""
return config
def get_config_file_npg():
config= \
"""{
# general inputs
'env' : '%s',
'algorithm' : 'NPG',
'seed' : %i,
'num_cpu' : 3,
'save_freq' : 25,
'eval_rollouts' : 1,
# RL parameters (all params related to PG, value function, DAPG etc.)
'policy_size' : (32, 32),
'vf_batch_size' : 64,
'vf_epochs' : 2,
'vf_learn_rate' : 1e-3,
'rl_step_size' : 0.05,
'rl_gamma' : 0.995,
'rl_gae' : 0.97,
'rl_num_traj' : 20,
'rl_num_iter' : 10,
'lam_0' : 0,
'lam_1' : 0,
'init_log_std' : 1,
}
"""
return config
def submit(param_dict, job_details):
script_body = generate_script_body(param_dict)
objectname = param_dict['algo'] + '-' \
+ param_dict['env-short'] + '-' \
+ str(param_dict['seed'])
jobfile = "scripts/{}/{}".format(param_dict['name'], objectname)
with open(jobfile, 'w') as f:
f.write(script_body)
cmd="qsub {} {}".format(job_details, jobfile)
os.system(cmd)
return 0
def main(args):
KEYS = ['seed', 'env', 'algo', 'config', 'output', 'name', 'env-short']
SEEDS = np.arange(5)
# TODO: make this mapping correct
full_env_names_dict = {'drawer': 'kuka_gym:KukaDrawer-v0',
'microwave': 'kuka_gym:KukaCabinet-v0',
'dynamic': 'kuka_gym:KukaDynamic-v0'}
full_env_name = full_env_names_dict[args.env]
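    # Grid Engine (qsub) resource requests: GPU queue vs. a 3-slot SMP CPU job.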
if args.gpu:
request = '-l long -l vf=32G -l gpus=1 -q gpus*'
else:
request = '-l long -l vf=32G -pe smp 3'
os.makedirs('experiments' + '/' + args.exp_name, exist_ok=True)
config_root = 'experiments' + '/' + args.exp_name + '/' + args.env + '/configs/'
output_root = 'experiments' + '/' + args.exp_name + '/' + args.env + '/outputs/'
os.makedirs('scripts/%s' % args.exp_name, exist_ok=True)
os.makedirs(config_root, exist_ok=True)
os.makedirs(output_root, exist_ok=True)
k=0
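    # One config file and one job submission per random seed.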
for i in range(len(SEEDS)):
# get the config text
if args.algo == 'dapg':
config = get_config_file_dapg()
elif args.algo == 'npg':
config = get_config_file_npg()
else:
print('Invalid algorithm name [dapg, npg]')
raise ValueError
demo_path = '/home/babbatem/projects/skills_kin/sim/data/kuka_%s_demo.pickle'
demo_path = demo_path % args.env
if args.algo == 'dapg':
config=config % (full_env_name, SEEDS[i], demo_path)
else:
config=config % (full_env_name, SEEDS[i])
config_path = config_root + args.algo + str(SEEDS[i]) + '.txt'
config_writer = open(config_path,'w')
config_writer.write(config)
config_writer.close()
output_path = output_root + args.algo + str(SEEDS[i])
element = [SEEDS[i],
full_env_name,
args.algo,
config_path,
output_path,
args.exp_name,
args.env]
param_dict = filldict(KEYS, element)
        # Honor the --test flag: count jobs without submitting them.
        if not args.test:
            submit(param_dict, request)
        k += 1
print(k)
if __name__ == '__main__':
parser=argparse.ArgumentParser()
parser.add_argument('-t', '--test', action='store_true', help='don\'t submit, just count')
parser.add_argument('-n', '--exp-name', required=True, type=str, help='parent directory for jobs')
parser.add_argument('-g', '--gpu', action='store_true', help='request gpus')
parser.add_argument('-e', '--env', type=str, help='microwave, drawer, or dynamic')
parser.add_argument('-a', '--algo', type=str, help='dapg or npg')
args=parser.parse_args()
main(args)
|
the-stack_0_4708 | # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import six
import webob
import webob.dec
import webob.exc
from nova.api import openstack as openstack_api
from nova.api.openstack import wsgi
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
class APITest(test.NoDBTestCase):
def setUp(self):
super(APITest, self).setUp()
self.wsgi_app = fakes.wsgi_app()
def _wsgi_app(self, inner_app):
# simpler version of the app than fakes.wsgi_app
return openstack_api.FaultWrapper(inner_app)
def test_malformed_json(self):
req = webob.Request.blank('/')
req.method = 'POST'
req.body = '{'
req.headers["content-type"] = "application/json"
res = req.get_response(self.wsgi_app)
self.assertEqual(res.status_int, 400)
def test_malformed_xml(self):
req = webob.Request.blank('/')
req.method = 'POST'
req.body = '<hi im not xml>'
req.headers["content-type"] = "application/xml"
res = req.get_response(self.wsgi_app)
self.assertEqual(res.status_int, 415)
def test_vendor_content_type_json(self):
ctype = 'application/vnd.openstack.compute+json'
req = webob.Request.blank('/')
req.headers['Accept'] = ctype
res = req.get_response(self.wsgi_app)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, ctype)
jsonutils.loads(res.body)
def test_exceptions_are_converted_to_faults_webob_exc(self):
@webob.dec.wsgify
def raise_webob_exc(req):
raise webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
# api.application = raise_webob_exc
api = self._wsgi_app(raise_webob_exc)
resp = webob.Request.blank('/').get_response(api)
self.assertEqual(resp.status_int, 404, resp.body)
def test_exceptions_are_converted_to_faults_api_fault(self):
@webob.dec.wsgify
def raise_api_fault(req):
exc = webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
return wsgi.Fault(exc)
# api.application = raise_api_fault
api = self._wsgi_app(raise_api_fault)
resp = webob.Request.blank('/').get_response(api)
self.assertIn('itemNotFound', resp.body)
self.assertEqual(resp.status_int, 404, resp.body)
def test_exceptions_are_converted_to_faults_exception(self):
@webob.dec.wsgify
def fail(req):
raise Exception("Threw an exception")
# api.application = fail
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertIn('{"computeFault', resp.body)
self.assertEqual(resp.status_int, 500, resp.body)
def _do_test_exception_safety_reflected_in_faults(self, expose):
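        # NovaException subclasses can set safe=True to expose their message in the fault body; check both settings.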
class ExceptionWithSafety(exception.NovaException):
safe = expose
@webob.dec.wsgify
def fail(req):
raise ExceptionWithSafety('some explanation')
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertIn('{"computeFault', resp.body)
expected = ('ExceptionWithSafety: some explanation' if expose else
'The server has either erred or is incapable '
'of performing the requested operation.')
self.assertIn(expected, resp.body)
self.assertEqual(resp.status_int, 500, resp.body)
def test_safe_exceptions_are_described_in_faults(self):
self._do_test_exception_safety_reflected_in_faults(True)
def test_unsafe_exceptions_are_not_described_in_faults(self):
self._do_test_exception_safety_reflected_in_faults(False)
def _do_test_exception_mapping(self, exception_type, msg):
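        # An exception exposing a `code` attribute should surface as a fault with that HTTP status and any custom headers.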
@webob.dec.wsgify
def fail(req):
raise exception_type(msg)
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertIn(msg, resp.body)
self.assertEqual(resp.status_int, exception_type.code, resp.body)
if hasattr(exception_type, 'headers'):
for (key, value) in six.iteritems(exception_type.headers):
self.assertIn(key, resp.headers)
self.assertEqual(resp.headers[key], str(value))
def test_quota_error_mapping(self):
self._do_test_exception_mapping(exception.QuotaError, 'too many used')
def test_non_nova_notfound_exception_mapping(self):
class ExceptionWithCode(Exception):
code = 404
self._do_test_exception_mapping(ExceptionWithCode,
'NotFound')
def test_non_nova_exception_mapping(self):
class ExceptionWithCode(Exception):
code = 417
self._do_test_exception_mapping(ExceptionWithCode,
'Expectation failed')
def test_exception_with_none_code_throws_500(self):
class ExceptionWithNoneCode(Exception):
code = None
@webob.dec.wsgify
def fail(req):
raise ExceptionWithNoneCode()
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertEqual(500, resp.status_int)
class APITestV21(APITest):
def setUp(self):
super(APITestV21, self).setUp()
self.wsgi_app = fakes.wsgi_app_v21()
# TODO(alex_xu): Get rid of the case translate NovaException to
# HTTPException after V2 api code removed. Because V2.1 API required raise
# HTTPException explicitly, so V2.1 API needn't such translation.
|